internal/backport: delete

Now that deployment happens with Go 1.19, it's viable to start using
these packages from the standard library instead of their temporary
backported copies.

Keeping Go 1.18 happy during its last two months of support requires
a tiny complication in the pkgdoc package. An alternative would have
been not to drop internal/backport/go/doc and all of its dependencies
right away, but getting to zero backported packages sooner was hard
to resist...
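
The mechanical part of the change is simply swapping import paths to
the standard library. As a rough illustration (a minimal sketch, not
code from this CL; the real edits are in the diff below), code that
used the backported html/template now imports the standard package
directly:

	package main

	import (
		"html/template" // previously "golang.org/x/website/internal/backport/html/template"
		"log"
		"os"
	)

	func main() {
		// Render a trivial template using the standard library package.
		t := template.Must(template.New("greeting").Parse("Hello, {{.}}!\n"))
		if err := t.Execute(os.Stdout, "Go 1.19"); err != nil {
			log.Fatal(err)
		}
	}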

For golang/go#51800.

Change-Id: Ieb7a137a033d6b6850dfc019c8c0c767756cc30d
Reviewed-on: https://go-review.googlesource.com/c/website/+/456522
Reviewed-by: Bryan Mills <bcmills@google.com>
TryBot-Bypass: Dmitri Shuralyov <dmitshur@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Auto-Submit: Dmitri Shuralyov <dmitshur@golang.org>
Run-TryBot: Dmitri Shuralyov <dmitshur@golang.org>
diff --git a/cmd/golangorg/server.go b/cmd/golangorg/server.go
index 88c8d32..239831a 100644
--- a/cmd/golangorg/server.go
+++ b/cmd/golangorg/server.go
@@ -14,6 +14,8 @@
 	"errors"
 	"flag"
 	"fmt"
+	"go/format"
+	"html/template"
 	"io/fs"
 	"io/ioutil"
 	"log"
@@ -31,8 +33,6 @@
 	"cloud.google.com/go/datastore"
 	"golang.org/x/build/repos"
 	"golang.org/x/website"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/blog"
 	"golang.org/x/website/internal/codewalk"
 	"golang.org/x/website/internal/dl"
diff --git a/cmd/googlegolangorg/main.go b/cmd/googlegolangorg/main.go
index d53d1ee..e143a43 100644
--- a/cmd/googlegolangorg/main.go
+++ b/cmd/googlegolangorg/main.go
@@ -3,11 +3,10 @@
 
 import (
 	"fmt"
+	"html/template"
 	"net/http"
 	"os"
 	"strings"
-
-	"golang.org/x/website/internal/backport/html/template"
 )
 
 var repoMap = map[string]*repoImport{
diff --git a/internal/api/api_test.go b/internal/api/api_test.go
index 7928627..98b1dd4 100644
--- a/internal/api/api_test.go
+++ b/internal/api/api_test.go
@@ -5,11 +5,10 @@
 package api
 
 import (
+	"go/build"
 	"os"
 	"runtime"
 	"testing"
-
-	"golang.org/x/website/internal/backport/go/build"
 )
 
 func TestParseVersionRow(t *testing.T) {
diff --git a/internal/backport/README.md b/internal/backport/README.md
deleted file mode 100644
index 92c80ad..0000000
--- a/internal/backport/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-This directory contains backports of upcoming packages from the
-standard library, beyond the Go version supplied by the latest Google
-App Engine.
-
-The procedure for adding a backport is fairly manual: copy the files
-to a new directory and then global search and replace to modify import
-paths to point to the backport.
-
-As new versions of Go land on Google App Engine, this directory should
-be pruned as much as possible.
diff --git a/internal/backport/diff/diff.go b/internal/backport/diff/diff.go
deleted file mode 100644
index 47b2856..0000000
--- a/internal/backport/diff/diff.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package diff
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// A pair is a pair of values tracked for both the x and y side of a diff.
-// It is typically a pair of line indexes.
-type pair struct{ x, y int }
-
-// Diff returns an anchored diff of the two texts old and new
-// in the “unified diff” format. If old and new are identical,
-// Diff returns a nil slice (no output).
-//
-// Unix diff implementations typically look for a diff with
-// the smallest number of lines inserted and removed,
-// which can in the worst case take time quadratic in the
-// number of lines in the texts. As a result, many implementations
-// either can be made to run for a long time or cut off the search
-// after a predetermined amount of work.
-//
-// In contrast, this implementation looks for a diff with the
-// smallest number of “unique” lines inserted and removed,
-// where unique means a line that appears just once in both old and new.
-// We call this an “anchored diff” because the unique lines anchor
-// the chosen matching regions. An anchored diff is usually clearer
-// than a standard diff, because the algorithm does not try to
-// reuse unrelated blank lines or closing braces.
-// The algorithm also guarantees to run in O(n log n) time
-// instead of the standard O(n²) time.
-//
-// Some systems call this approach a “patience diff,” named for
-// the “patience sorting” algorithm, itself named for a solitaire card game.
-// We avoid that name for two reasons. First, the name has been used
-// for a few different variants of the algorithm, so it is imprecise.
-// Second, the name is frequently interpreted as meaning that you have
-// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
-// when in fact the algorithm is faster than the standard one.
-func Diff(oldName string, old []byte, newName string, new []byte) []byte {
-	if bytes.Equal(old, new) {
-		return nil
-	}
-	x := lines(old)
-	y := lines(new)
-
-	// Print diff header.
-	var out bytes.Buffer
-	fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
-	fmt.Fprintf(&out, "--- %s\n", oldName)
-	fmt.Fprintf(&out, "+++ %s\n", newName)
-
-	// Loop over matches to consider,
-	// expanding each match to include surrounding lines,
-	// and then printing diff chunks.
-	// To avoid setup/teardown cases outside the loop,
-	// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
-	// in the sequence of matches.
-	var (
-		done  pair     // printed up to x[:done.x] and y[:done.y]
-		chunk pair     // start lines of current chunk
-		count pair     // number of lines from each side in current chunk
-		ctext []string // lines for current chunk
-	)
-	for _, m := range tgs(x, y) {
-		if m.x < done.x {
-			// Already handled scanning forward from earlier match.
-			continue
-		}
-
-		// Expand matching lines as far as possible,
-		// establishing that x[start.x:end.x] == y[start.y:end.y].
-		// Note that on the first (or last) iteration we may (or definitely do)
-		// have an empty match: start.x==end.x and start.y==end.y.
-		start := m
-		for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
-			start.x--
-			start.y--
-		}
-		end := m
-		for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
-			end.x++
-			end.y++
-		}
-
-		// Emit the mismatched lines before start into this chunk.
-		// (No effect on first sentinel iteration, when start = {0,0}.)
-		for _, s := range x[done.x:start.x] {
-			ctext = append(ctext, "-"+s)
-			count.x++
-		}
-		for _, s := range y[done.y:start.y] {
-			ctext = append(ctext, "+"+s)
-			count.y++
-		}
-
-		// If we're not at EOF and have too few common lines,
-		// the chunk includes all the common lines and continues.
-		const C = 3 // number of context lines
-		if (end.x < len(x) || end.y < len(y)) &&
-			(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
-			for _, s := range x[start.x:end.x] {
-				ctext = append(ctext, " "+s)
-				count.x++
-				count.y++
-			}
-			done = end
-			continue
-		}
-
-		// End chunk with common lines for context.
-		if len(ctext) > 0 {
-			n := end.x - start.x
-			if n > C {
-				n = C
-			}
-			for _, s := range x[start.x : start.x+n] {
-				ctext = append(ctext, " "+s)
-				count.x++
-				count.y++
-			}
-			done = pair{start.x + n, start.y + n}
-
-			// Format and emit chunk.
-			// Convert line numbers to 1-indexed.
-			// Special case: empty file shows up as 0,0 not 1,0.
-			if count.x > 0 {
-				chunk.x++
-			}
-			if count.y > 0 {
-				chunk.y++
-			}
-			fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
-			for _, s := range ctext {
-				out.WriteString(s)
-			}
-			count.x = 0
-			count.y = 0
-			ctext = ctext[:0]
-		}
-
-		// If we reached EOF, we're done.
-		if end.x >= len(x) && end.y >= len(y) {
-			break
-		}
-
-		// Otherwise start a new chunk.
-		chunk = pair{end.x - C, end.y - C}
-		for _, s := range x[chunk.x:end.x] {
-			ctext = append(ctext, " "+s)
-			count.x++
-			count.y++
-		}
-		done = end
-	}
-
-	return out.Bytes()
-}
-
-// lines returns the lines in the file x, including newlines.
-// If the file does not end in a newline, one is supplied
-// along with a warning about the missing newline.
-func lines(x []byte) []string {
-	l := strings.SplitAfter(string(x), "\n")
-	if l[len(l)-1] == "" {
-		l = l[:len(l)-1]
-	} else {
-		// Treat last line as having a message about the missing newline attached,
-		// using the same text as BSD/GNU diff (including the leading backslash).
-		l[len(l)-1] += "\n\\ No newline at end of file\n"
-	}
-	return l
-}
-
-// tgs returns the pairs of indexes of the longest common subsequence
-// of unique lines in x and y, where a unique line is one that appears
-// once in x and once in y.
-//
-// The longest common subsequence algorithm is as described in
-// Thomas G. Szymanski, “A Special Case of the Maximal Common
-// Subsequence Problem,” Princeton TR #170 (January 1975),
-// available at https://research.swtch.com/tgs170.pdf.
-func tgs(x, y []string) []pair {
-	// Count the number of times each string appears in a and b.
-	// We only care about 0, 1, many, counted as 0, -1, -2
-	// for the x side and 0, -4, -8 for the y side.
-	// Using negative numbers now lets us distinguish positive line numbers later.
-	m := make(map[string]int)
-	for _, s := range x {
-		if c := m[s]; c > -2 {
-			m[s] = c - 1
-		}
-	}
-	for _, s := range y {
-		if c := m[s]; c > -8 {
-			m[s] = c - 4
-		}
-	}
-
-	// Now unique strings can be identified by m[s] = -1+-4.
-	//
-	// Gather the indexes of those strings in x and y, building:
-	//	xi[i] = increasing indexes of unique strings in x.
-	//	yi[i] = increasing indexes of unique strings in y.
-	//	inv[i] = index j such that x[xi[i]] = y[yi[j]].
-	var xi, yi, inv []int
-	for i, s := range y {
-		if m[s] == -1+-4 {
-			m[s] = len(yi)
-			yi = append(yi, i)
-		}
-	}
-	for i, s := range x {
-		if j, ok := m[s]; ok && j >= 0 {
-			xi = append(xi, i)
-			inv = append(inv, j)
-		}
-	}
-
-	// Apply Algorithm A from Szymanski's paper.
-	// In those terms, A = J = inv and B = [0, n).
-	// We add sentinel pairs {0,0}, and {len(x),len(y)}
-	// to the returned sequence, to help the processing loop.
-	J := inv
-	n := len(xi)
-	T := make([]int, n)
-	L := make([]int, n)
-	for i := range T {
-		T[i] = n + 1
-	}
-	for i := 0; i < n; i++ {
-		k := sort.Search(n, func(k int) bool {
-			return T[k] >= J[i]
-		})
-		T[k] = J[i]
-		L[i] = k + 1
-	}
-	k := 0
-	for _, v := range L {
-		if k < v {
-			k = v
-		}
-	}
-	seq := make([]pair, 2+k)
-	seq[1+k] = pair{len(x), len(y)} // sentinel at end
-	lastj := n
-	for i := n - 1; i >= 0; i-- {
-		if L[i] == k && J[i] < lastj {
-			seq[k] = pair{xi[i], yi[J[i]]}
-			k--
-		}
-	}
-	seq[0] = pair{0, 0} // sentinel at start
-	return seq
-}
diff --git a/internal/backport/diff/diff_test.go b/internal/backport/diff/diff_test.go
deleted file mode 100644
index ee15b00..0000000
--- a/internal/backport/diff/diff_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package diff
-
-import (
-	"bytes"
-	"path/filepath"
-	"testing"
-
-	"golang.org/x/tools/txtar"
-)
-
-func clean(text []byte) []byte {
-	text = bytes.ReplaceAll(text, []byte("$\n"), []byte("\n"))
-	text = bytes.TrimSuffix(text, []byte("^D\n"))
-	return text
-}
-
-func Test(t *testing.T) {
-	files, _ := filepath.Glob("testdata/*.txt")
-	if len(files) == 0 {
-		t.Fatalf("no testdata")
-	}
-
-	for _, file := range files {
-		t.Run(filepath.Base(file), func(t *testing.T) {
-			a, err := txtar.ParseFile(file)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if len(a.Files) != 3 || a.Files[2].Name != "diff" {
-				t.Fatalf("%s: want three files, third named \"diff\"", file)
-			}
-			diffs := Diff(a.Files[0].Name, clean(a.Files[0].Data), a.Files[1].Name, clean(a.Files[1].Data))
-			want := clean(a.Files[2].Data)
-			if !bytes.Equal(diffs, want) {
-				t.Fatalf("%s: have:\n%s\nwant:\n%s\n%s", file,
-					diffs, want, Diff("have", diffs, "want", want))
-			}
-		})
-	}
-}
diff --git a/internal/backport/diff/testdata/allnew.txt b/internal/backport/diff/testdata/allnew.txt
deleted file mode 100644
index 8875649..0000000
--- a/internal/backport/diff/testdata/allnew.txt
+++ /dev/null
@@ -1,13 +0,0 @@
--- old --
--- new --
-a
-b
-c
--- diff --
-diff old new
---- old
-+++ new
-@@ -0,0 +1,3 @@
-+a
-+b
-+c
diff --git a/internal/backport/diff/testdata/allold.txt b/internal/backport/diff/testdata/allold.txt
deleted file mode 100644
index bcc9ac0..0000000
--- a/internal/backport/diff/testdata/allold.txt
+++ /dev/null
@@ -1,13 +0,0 @@
--- old --
-a
-b
-c
--- new --
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,3 +0,0 @@
--a
--b
--c
diff --git a/internal/backport/diff/testdata/basic.txt b/internal/backport/diff/testdata/basic.txt
deleted file mode 100644
index d2565b5..0000000
--- a/internal/backport/diff/testdata/basic.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Example from Hunt and McIlroy, “An Algorithm for Differential File Comparison.”
-https://www.cs.dartmouth.edu/~doug/diff.pdf
-
--- old --
-a
-b
-c
-d
-e
-f
-g
--- new --
-w
-a
-b
-x
-y
-z
-e
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,7 +1,7 @@
-+w
- a
- b
--c
--d
-+x
-+y
-+z
- e
--f
--g
diff --git a/internal/backport/diff/testdata/dups.txt b/internal/backport/diff/testdata/dups.txt
deleted file mode 100644
index d10524d..0000000
--- a/internal/backport/diff/testdata/dups.txt
+++ /dev/null
@@ -1,40 +0,0 @@
--- old --
-a
-
-b
-
-c
-
-d
-
-e
-
-f
--- new --
-a
-
-B
-
-C
-
-d
-
-e
-
-f
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,8 +1,8 @@
- a
- $
--b
--
--c
-+B
-+
-+C
- $
- d
- $
diff --git a/internal/backport/diff/testdata/end.txt b/internal/backport/diff/testdata/end.txt
deleted file mode 100644
index 158637c..0000000
--- a/internal/backport/diff/testdata/end.txt
+++ /dev/null
@@ -1,38 +0,0 @@
--- old --
-1
-2
-3
-4
-5
-6
-7
-eight
-nine
-ten
-eleven
--- new --
-1
-2
-3
-4
-5
-6
-7
-8
-9
-10
--- diff --
-diff old new
---- old
-+++ new
-@@ -5,7 +5,6 @@
- 5
- 6
- 7
--eight
--nine
--ten
--eleven
-+8
-+9
-+10
diff --git a/internal/backport/diff/testdata/eof.txt b/internal/backport/diff/testdata/eof.txt
deleted file mode 100644
index 5dc145c..0000000
--- a/internal/backport/diff/testdata/eof.txt
+++ /dev/null
@@ -1,9 +0,0 @@
--- old --
-a
-b
-c^D
--- new --
-a
-b
-c^D
--- diff --
diff --git a/internal/backport/diff/testdata/eof1.txt b/internal/backport/diff/testdata/eof1.txt
deleted file mode 100644
index 1ebf621..0000000
--- a/internal/backport/diff/testdata/eof1.txt
+++ /dev/null
@@ -1,18 +0,0 @@
--- old --
-a
-b
-c
--- new --
-a
-b
-c^D
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,3 +1,3 @@
- a
- b
--c
-+c
-\ No newline at end of file
diff --git a/internal/backport/diff/testdata/eof2.txt b/internal/backport/diff/testdata/eof2.txt
deleted file mode 100644
index 047705e..0000000
--- a/internal/backport/diff/testdata/eof2.txt
+++ /dev/null
@@ -1,18 +0,0 @@
--- old --
-a
-b
-c^D
--- new --
-a
-b
-c
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,3 +1,3 @@
- a
- b
--c
-\ No newline at end of file
-+c
diff --git a/internal/backport/diff/testdata/long.txt b/internal/backport/diff/testdata/long.txt
deleted file mode 100644
index 3fc99f7..0000000
--- a/internal/backport/diff/testdata/long.txt
+++ /dev/null
@@ -1,62 +0,0 @@
--- old --
-1
-2
-3
-4
-5
-6
-7
-8
-9
-10
-11
-12
-13
-14
-14½
-15
-16
-17
-18
-19
-20
--- new --
-1
-2
-3
-4
-5
-6
-8
-9
-10
-11
-12
-13
-14
-17
-18
-19
-20
--- diff --
-diff old new
---- old
-+++ new
-@@ -4,7 +4,6 @@
- 4
- 5
- 6
--7
- 8
- 9
- 10
-@@ -12,9 +11,6 @@
- 12
- 13
- 14
--14½
--15
--16
- 17
- 18
- 19
diff --git a/internal/backport/diff/testdata/same.txt b/internal/backport/diff/testdata/same.txt
deleted file mode 100644
index 86b1100..0000000
--- a/internal/backport/diff/testdata/same.txt
+++ /dev/null
@@ -1,5 +0,0 @@
--- old --
-hello world
--- new --
-hello world
--- diff --
diff --git a/internal/backport/diff/testdata/start.txt b/internal/backport/diff/testdata/start.txt
deleted file mode 100644
index 217b2fd..0000000
--- a/internal/backport/diff/testdata/start.txt
+++ /dev/null
@@ -1,34 +0,0 @@
--- old --
-e
-pi
-4
-5
-6
-7
-8
-9
-10
--- new --
-1
-2
-3
-4
-5
-6
-7
-8
-9
-10
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,5 +1,6 @@
--e
--pi
-+1
-+2
-+3
- 4
- 5
- 6
diff --git a/internal/backport/diff/testdata/triv.txt b/internal/backport/diff/testdata/triv.txt
deleted file mode 100644
index ab5759f..0000000
--- a/internal/backport/diff/testdata/triv.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Another example from Hunt and McIlroy,
-“An Algorithm for Differential File Comparison.”
-https://www.cs.dartmouth.edu/~doug/diff.pdf
-
-Anchored diff gives up on finding anything,
-since there are no unique lines.
-
--- old --
-a
-b
-c
-a
-b
-b
-a
--- new --
-c
-a
-b
-a
-b
-c
--- diff --
-diff old new
---- old
-+++ new
-@@ -1,7 +1,6 @@
--a
--b
--c
--a
--b
--b
--a
-+c
-+a
-+b
-+a
-+b
-+c
diff --git a/internal/backport/fmtsort/sort.go b/internal/backport/fmtsort/sort.go
deleted file mode 100644
index e0585dc..0000000
--- a/internal/backport/fmtsort/sort.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fmtsort provides a general stable ordering mechanism
-// for maps, on behalf of the fmt and text/template packages.
-// It is not guaranteed to be efficient and works only for types
-// that are valid map keys.
-package fmtsort
-
-import (
-	"reflect"
-	"sort"
-)
-
-// Note: Throughout this package we avoid calling reflect.Value.Interface as
-// it is not always legal to do so and it's easier to avoid the issue than to face it.
-
-// SortedMap represents a map's keys and values. The keys and values are
-// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
-type SortedMap struct {
-	Key   []reflect.Value
-	Value []reflect.Value
-}
-
-func (o *SortedMap) Len() int           { return len(o.Key) }
-func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
-func (o *SortedMap) Swap(i, j int) {
-	o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
-	o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
-}
-
-// Sort accepts a map and returns a SortedMap that has the same keys and
-// values but in a stable sorted order according to the keys, modulo issues
-// raised by unorderable key values such as NaNs.
-//
-// The ordering rules are more general than with Go's < operator:
-//
-//   - when applicable, nil compares low
-//   - ints, floats, and strings order by <
-//   - NaN compares less than non-NaN floats
-//   - bool compares false before true
-//   - complex compares real, then imag
-//   - pointers compare by machine address
-//   - channel values compare by machine address
-//   - structs compare each field in turn
-//   - arrays compare each element in turn.
-//     Otherwise identical arrays compare by length.
-//   - interface values compare first by reflect.Type describing the concrete type
-//     and then by concrete value as described in the previous rules.
-func Sort(mapValue reflect.Value) *SortedMap {
-	if mapValue.Type().Kind() != reflect.Map {
-		return nil
-	}
-	// Note: this code is arranged to not panic even in the presence
-	// of a concurrent map update. The runtime is responsible for
-	// yelling loudly if that happens. See issue 33275.
-	n := mapValue.Len()
-	key := make([]reflect.Value, 0, n)
-	value := make([]reflect.Value, 0, n)
-	iter := mapValue.MapRange()
-	for iter.Next() {
-		key = append(key, iter.Key())
-		value = append(value, iter.Value())
-	}
-	sorted := &SortedMap{
-		Key:   key,
-		Value: value,
-	}
-	sort.Stable(sorted)
-	return sorted
-}
-
-// compare compares two values of the same type. It returns -1, 0, 1
-// according to whether a > b (1), a == b (0), or a < b (-1).
-// If the types differ, it returns -1.
-// See the comment on Sort for the comparison rules.
-func compare(aVal, bVal reflect.Value) int {
-	aType, bType := aVal.Type(), bVal.Type()
-	if aType != bType {
-		return -1 // No good answer possible, but don't return 0: they're not equal.
-	}
-	switch aVal.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		a, b := aVal.Int(), bVal.Int()
-		switch {
-		case a < b:
-			return -1
-		case a > b:
-			return 1
-		default:
-			return 0
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		a, b := aVal.Uint(), bVal.Uint()
-		switch {
-		case a < b:
-			return -1
-		case a > b:
-			return 1
-		default:
-			return 0
-		}
-	case reflect.String:
-		a, b := aVal.String(), bVal.String()
-		switch {
-		case a < b:
-			return -1
-		case a > b:
-			return 1
-		default:
-			return 0
-		}
-	case reflect.Float32, reflect.Float64:
-		return floatCompare(aVal.Float(), bVal.Float())
-	case reflect.Complex64, reflect.Complex128:
-		a, b := aVal.Complex(), bVal.Complex()
-		if c := floatCompare(real(a), real(b)); c != 0 {
-			return c
-		}
-		return floatCompare(imag(a), imag(b))
-	case reflect.Bool:
-		a, b := aVal.Bool(), bVal.Bool()
-		switch {
-		case a == b:
-			return 0
-		case a:
-			return 1
-		default:
-			return -1
-		}
-	case reflect.Ptr, reflect.UnsafePointer:
-		a, b := aVal.Pointer(), bVal.Pointer()
-		switch {
-		case a < b:
-			return -1
-		case a > b:
-			return 1
-		default:
-			return 0
-		}
-	case reflect.Chan:
-		if c, ok := nilCompare(aVal, bVal); ok {
-			return c
-		}
-		ap, bp := aVal.Pointer(), bVal.Pointer()
-		switch {
-		case ap < bp:
-			return -1
-		case ap > bp:
-			return 1
-		default:
-			return 0
-		}
-	case reflect.Struct:
-		for i := 0; i < aVal.NumField(); i++ {
-			if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
-				return c
-			}
-		}
-		return 0
-	case reflect.Array:
-		for i := 0; i < aVal.Len(); i++ {
-			if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
-				return c
-			}
-		}
-		return 0
-	case reflect.Interface:
-		if c, ok := nilCompare(aVal, bVal); ok {
-			return c
-		}
-		c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
-		if c != 0 {
-			return c
-		}
-		return compare(aVal.Elem(), bVal.Elem())
-	default:
-		// Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
-		panic("bad type in compare: " + aType.String())
-	}
-}
-
-// nilCompare checks whether either value is nil. If not, the boolean is false.
-// If either value is nil, the boolean is true and the integer is the comparison
-// value. The comparison is defined to be 0 if both are nil, otherwise the one
-// nil value compares low. Both arguments must represent a chan, func,
-// interface, map, pointer, or slice.
-func nilCompare(aVal, bVal reflect.Value) (int, bool) {
-	if aVal.IsNil() {
-		if bVal.IsNil() {
-			return 0, true
-		}
-		return -1, true
-	}
-	if bVal.IsNil() {
-		return 1, true
-	}
-	return 0, false
-}
-
-// floatCompare compares two floating-point values. NaNs compare low.
-func floatCompare(a, b float64) int {
-	switch {
-	case isNaN(a):
-		return -1 // No good answer if b is a NaN so don't bother checking.
-	case isNaN(b):
-		return 1
-	case a < b:
-		return -1
-	case a > b:
-		return 1
-	}
-	return 0
-}
-
-func isNaN(a float64) bool {
-	return a != a
-}
diff --git a/internal/backport/go/ast/ast.go b/internal/backport/go/ast/ast.go
deleted file mode 100644
index 9efdcf7..0000000
--- a/internal/backport/go/ast/ast.go
+++ /dev/null
@@ -1,1067 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ast declares the types used to represent syntax trees for Go
-// packages.
-package ast
-
-import (
-	"strings"
-
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// ----------------------------------------------------------------------------
-// Interfaces
-//
-// There are 3 main classes of nodes: Expressions and type nodes,
-// statement nodes, and declaration nodes. The node names usually
-// match the corresponding Go spec production names to which they
-// correspond. The node fields correspond to the individual parts
-// of the respective productions.
-//
-// All nodes contain position information marking the beginning of
-// the corresponding source text segment; it is accessible via the
-// Pos accessor method. Nodes may contain additional position info
-// for language constructs where comments may be found between parts
-// of the construct (typically any larger, parenthesized subpart).
-// That position information is needed to properly position comments
-// when printing the construct.
-
-// All node types implement the Node interface.
-type Node interface {
-	Pos() token.Pos // position of first character belonging to the node
-	End() token.Pos // position of first character immediately after the node
-}
-
-// All expression nodes implement the Expr interface.
-type Expr interface {
-	Node
-	exprNode()
-}
-
-// All statement nodes implement the Stmt interface.
-type Stmt interface {
-	Node
-	stmtNode()
-}
-
-// All declaration nodes implement the Decl interface.
-type Decl interface {
-	Node
-	declNode()
-}
-
-// ----------------------------------------------------------------------------
-// Comments
-
-// A Comment node represents a single //-style or /*-style comment.
-//
-// The Text field contains the comment text without carriage returns (\r) that
-// may have been present in the source. Because a comment's end position is
-// computed using len(Text), the position reported by End() does not match the
-// true source end position for comments containing carriage returns.
-type Comment struct {
-	Slash token.Pos // position of "/" starting the comment
-	Text  string    // comment text (excluding '\n' for //-style comments)
-}
-
-func (c *Comment) Pos() token.Pos { return c.Slash }
-func (c *Comment) End() token.Pos { return token.Pos(int(c.Slash) + len(c.Text)) }
-
-// A CommentGroup represents a sequence of comments
-// with no other tokens and no empty lines between.
-type CommentGroup struct {
-	List []*Comment // len(List) > 0
-}
-
-func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() }
-func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
-
-func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
-
-func stripTrailingWhitespace(s string) string {
-	i := len(s)
-	for i > 0 && isWhitespace(s[i-1]) {
-		i--
-	}
-	return s[0:i]
-}
-
-// Text returns the text of the comment.
-// Comment markers (//, /*, and */), the first space of a line comment, and
-// leading and trailing empty lines are removed.
-// Comment directives like "//line" and "//go:noinline" are also removed.
-// Multiple empty lines are reduced to one, and trailing space on lines is trimmed.
-// Unless the result is empty, it is newline-terminated.
-func (g *CommentGroup) Text() string {
-	if g == nil {
-		return ""
-	}
-	comments := make([]string, len(g.List))
-	for i, c := range g.List {
-		comments[i] = c.Text
-	}
-
-	lines := make([]string, 0, 10) // most comments are less than 10 lines
-	for _, c := range comments {
-		// Remove comment markers.
-		// The parser has given us exactly the comment text.
-		switch c[1] {
-		case '/':
-			//-style comment (no newline at the end)
-			c = c[2:]
-			if len(c) == 0 {
-				// empty line
-				break
-			}
-			if c[0] == ' ' {
-				// strip first space - required for Example tests
-				c = c[1:]
-				break
-			}
-			if isDirective(c) {
-				// Ignore //go:noinline, //line, and so on.
-				continue
-			}
-		case '*':
-			/*-style comment */
-			c = c[2 : len(c)-2]
-		}
-
-		// Split on newlines.
-		cl := strings.Split(c, "\n")
-
-		// Walk lines, stripping trailing white space and adding to list.
-		for _, l := range cl {
-			lines = append(lines, stripTrailingWhitespace(l))
-		}
-	}
-
-	// Remove leading blank lines; convert runs of
-	// interior blank lines to a single blank line.
-	n := 0
-	for _, line := range lines {
-		if line != "" || n > 0 && lines[n-1] != "" {
-			lines[n] = line
-			n++
-		}
-	}
-	lines = lines[0:n]
-
-	// Add final "" entry to get trailing newline from Join.
-	if n > 0 && lines[n-1] != "" {
-		lines = append(lines, "")
-	}
-
-	return strings.Join(lines, "\n")
-}
-
-// isDirective reports whether c is a comment directive.
-// This code is also in go/printer.
-func isDirective(c string) bool {
-	// "//line " is a line directive.
-	// "//extern " is for gccgo.
-	// "//export " is for cgo.
-	// (The // has been removed.)
-	if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") {
-		return true
-	}
-
-	// "//[a-z0-9]+:[a-z0-9]"
-	// (The // has been removed.)
-	colon := strings.Index(c, ":")
-	if colon <= 0 || colon+1 >= len(c) {
-		return false
-	}
-	for i := 0; i <= colon+1; i++ {
-		if i == colon {
-			continue
-		}
-		b := c[i]
-		if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
-			return false
-		}
-	}
-	return true
-}
-
-// ----------------------------------------------------------------------------
-// Expressions and types
-
-// A Field represents a Field declaration list in a struct type,
-// a method list in an interface type, or a parameter/result declaration
-// in a signature.
-// Field.Names is nil for unnamed parameters (parameter lists which only contain types)
-// and embedded struct fields. In the latter case, the field name is the type name.
-type Field struct {
-	Doc     *CommentGroup // associated documentation; or nil
-	Names   []*Ident      // field/method/(type) parameter names; or nil
-	Type    Expr          // field/method/parameter type; or nil
-	Tag     *BasicLit     // field tag; or nil
-	Comment *CommentGroup // line comments; or nil
-}
-
-func (f *Field) Pos() token.Pos {
-	if len(f.Names) > 0 {
-		return f.Names[0].Pos()
-	}
-	if f.Type != nil {
-		return f.Type.Pos()
-	}
-	return token.NoPos
-}
-
-func (f *Field) End() token.Pos {
-	if f.Tag != nil {
-		return f.Tag.End()
-	}
-	if f.Type != nil {
-		return f.Type.End()
-	}
-	if len(f.Names) > 0 {
-		return f.Names[len(f.Names)-1].End()
-	}
-	return token.NoPos
-}
-
-// A FieldList represents a list of Fields, enclosed by parentheses,
-// curly braces, or square brackets.
-type FieldList struct {
-	Opening token.Pos // position of opening parenthesis/brace/bracket, if any
-	List    []*Field  // field list; or nil
-	Closing token.Pos // position of closing parenthesis/brace/bracket, if any
-}
-
-func (f *FieldList) Pos() token.Pos {
-	if f.Opening.IsValid() {
-		return f.Opening
-	}
-	// the list should not be empty in this case;
-	// be conservative and guard against bad ASTs
-	if len(f.List) > 0 {
-		return f.List[0].Pos()
-	}
-	return token.NoPos
-}
-
-func (f *FieldList) End() token.Pos {
-	if f.Closing.IsValid() {
-		return f.Closing + 1
-	}
-	// the list should not be empty in this case;
-	// be conservative and guard against bad ASTs
-	if n := len(f.List); n > 0 {
-		return f.List[n-1].End()
-	}
-	return token.NoPos
-}
-
-// NumFields returns the number of parameters or struct fields represented by a FieldList.
-func (f *FieldList) NumFields() int {
-	n := 0
-	if f != nil {
-		for _, g := range f.List {
-			m := len(g.Names)
-			if m == 0 {
-				m = 1
-			}
-			n += m
-		}
-	}
-	return n
-}
-
-// An expression is represented by a tree consisting of one
-// or more of the following concrete expression nodes.
-type (
-	// A BadExpr node is a placeholder for an expression containing
-	// syntax errors for which a correct expression node cannot be
-	// created.
-	//
-	BadExpr struct {
-		From, To token.Pos // position range of bad expression
-	}
-
-	// An Ident node represents an identifier.
-	Ident struct {
-		NamePos token.Pos // identifier position
-		Name    string    // identifier name
-		Obj     *Object   // denoted object; or nil
-	}
-
-	// An Ellipsis node stands for the "..." type in a
-	// parameter list or the "..." length in an array type.
-	//
-	Ellipsis struct {
-		Ellipsis token.Pos // position of "..."
-		Elt      Expr      // ellipsis element type (parameter lists only); or nil
-	}
-
-	// A BasicLit node represents a literal of basic type.
-	BasicLit struct {
-		ValuePos token.Pos   // literal position
-		Kind     token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING
-		Value    string      // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
-	}
-
-	// A FuncLit node represents a function literal.
-	FuncLit struct {
-		Type *FuncType  // function type
-		Body *BlockStmt // function body
-	}
-
-	// A CompositeLit node represents a composite literal.
-	CompositeLit struct {
-		Type       Expr      // literal type; or nil
-		Lbrace     token.Pos // position of "{"
-		Elts       []Expr    // list of composite elements; or nil
-		Rbrace     token.Pos // position of "}"
-		Incomplete bool      // true if (source) expressions are missing in the Elts list
-	}
-
-	// A ParenExpr node represents a parenthesized expression.
-	ParenExpr struct {
-		Lparen token.Pos // position of "("
-		X      Expr      // parenthesized expression
-		Rparen token.Pos // position of ")"
-	}
-
-	// A SelectorExpr node represents an expression followed by a selector.
-	SelectorExpr struct {
-		X   Expr   // expression
-		Sel *Ident // field selector
-	}
-
-	// An IndexExpr node represents an expression followed by an index.
-	IndexExpr struct {
-		X      Expr      // expression
-		Lbrack token.Pos // position of "["
-		Index  Expr      // index expression
-		Rbrack token.Pos // position of "]"
-	}
-
-	// An IndexListExpr node represents an expression followed by multiple
-	// indices.
-	IndexListExpr struct {
-		X       Expr      // expression
-		Lbrack  token.Pos // position of "["
-		Indices []Expr    // index expressions
-		Rbrack  token.Pos // position of "]"
-	}
-
-	// A SliceExpr node represents an expression followed by slice indices.
-	SliceExpr struct {
-		X      Expr      // expression
-		Lbrack token.Pos // position of "["
-		Low    Expr      // begin of slice range; or nil
-		High   Expr      // end of slice range; or nil
-		Max    Expr      // maximum capacity of slice; or nil
-		Slice3 bool      // true if 3-index slice (2 colons present)
-		Rbrack token.Pos // position of "]"
-	}
-
-	// A TypeAssertExpr node represents an expression followed by a
-	// type assertion.
-	//
-	TypeAssertExpr struct {
-		X      Expr      // expression
-		Lparen token.Pos // position of "("
-		Type   Expr      // asserted type; nil means type switch X.(type)
-		Rparen token.Pos // position of ")"
-	}
-
-	// A CallExpr node represents an expression followed by an argument list.
-	CallExpr struct {
-		Fun      Expr      // function expression
-		Lparen   token.Pos // position of "("
-		Args     []Expr    // function arguments; or nil
-		Ellipsis token.Pos // position of "..." (token.NoPos if there is no "...")
-		Rparen   token.Pos // position of ")"
-	}
-
-	// A StarExpr node represents an expression of the form "*" Expression.
-	// Semantically it could be a unary "*" expression, or a pointer type.
-	//
-	StarExpr struct {
-		Star token.Pos // position of "*"
-		X    Expr      // operand
-	}
-
-	// A UnaryExpr node represents a unary expression.
-	// Unary "*" expressions are represented via StarExpr nodes.
-	//
-	UnaryExpr struct {
-		OpPos token.Pos   // position of Op
-		Op    token.Token // operator
-		X     Expr        // operand
-	}
-
-	// A BinaryExpr node represents a binary expression.
-	BinaryExpr struct {
-		X     Expr        // left operand
-		OpPos token.Pos   // position of Op
-		Op    token.Token // operator
-		Y     Expr        // right operand
-	}
-
-	// A KeyValueExpr node represents (key : value) pairs
-	// in composite literals.
-	//
-	KeyValueExpr struct {
-		Key   Expr
-		Colon token.Pos // position of ":"
-		Value Expr
-	}
-)
-
-// The direction of a channel type is indicated by a bit
-// mask including one or both of the following constants.
-type ChanDir int
-
-const (
-	SEND ChanDir = 1 << iota
-	RECV
-)
-
-// A type is represented by a tree consisting of one
-// or more of the following type-specific expression
-// nodes.
-type (
-	// An ArrayType node represents an array or slice type.
-	ArrayType struct {
-		Lbrack token.Pos // position of "["
-		Len    Expr      // Ellipsis node for [...]T array types, nil for slice types
-		Elt    Expr      // element type
-	}
-
-	// A StructType node represents a struct type.
-	StructType struct {
-		Struct     token.Pos  // position of "struct" keyword
-		Fields     *FieldList // list of field declarations
-		Incomplete bool       // true if (source) fields are missing in the Fields list
-	}
-
-	// Pointer types are represented via StarExpr nodes.
-
-	// A FuncType node represents a function type.
-	FuncType struct {
-		Func       token.Pos  // position of "func" keyword (token.NoPos if there is no "func")
-		TypeParams *FieldList // type parameters; or nil
-		Params     *FieldList // (incoming) parameters; non-nil
-		Results    *FieldList // (outgoing) results; or nil
-	}
-
-	// An InterfaceType node represents an interface type.
-	InterfaceType struct {
-		Interface  token.Pos  // position of "interface" keyword
-		Methods    *FieldList // list of embedded interfaces, methods, or types
-		Incomplete bool       // true if (source) methods or types are missing in the Methods list
-	}
-
-	// A MapType node represents a map type.
-	MapType struct {
-		Map   token.Pos // position of "map" keyword
-		Key   Expr
-		Value Expr
-	}
-
-	// A ChanType node represents a channel type.
-	ChanType struct {
-		Begin token.Pos // position of "chan" keyword or "<-" (whichever comes first)
-		Arrow token.Pos // position of "<-" (token.NoPos if there is no "<-")
-		Dir   ChanDir   // channel direction
-		Value Expr      // value type
-	}
-)
-
-// Pos and End implementations for expression/type nodes.
-
-func (x *BadExpr) Pos() token.Pos  { return x.From }
-func (x *Ident) Pos() token.Pos    { return x.NamePos }
-func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis }
-func (x *BasicLit) Pos() token.Pos { return x.ValuePos }
-func (x *FuncLit) Pos() token.Pos  { return x.Type.Pos() }
-func (x *CompositeLit) Pos() token.Pos {
-	if x.Type != nil {
-		return x.Type.Pos()
-	}
-	return x.Lbrace
-}
-func (x *ParenExpr) Pos() token.Pos      { return x.Lparen }
-func (x *SelectorExpr) Pos() token.Pos   { return x.X.Pos() }
-func (x *IndexExpr) Pos() token.Pos      { return x.X.Pos() }
-func (x *IndexListExpr) Pos() token.Pos  { return x.X.Pos() }
-func (x *SliceExpr) Pos() token.Pos      { return x.X.Pos() }
-func (x *TypeAssertExpr) Pos() token.Pos { return x.X.Pos() }
-func (x *CallExpr) Pos() token.Pos       { return x.Fun.Pos() }
-func (x *StarExpr) Pos() token.Pos       { return x.Star }
-func (x *UnaryExpr) Pos() token.Pos      { return x.OpPos }
-func (x *BinaryExpr) Pos() token.Pos     { return x.X.Pos() }
-func (x *KeyValueExpr) Pos() token.Pos   { return x.Key.Pos() }
-func (x *ArrayType) Pos() token.Pos      { return x.Lbrack }
-func (x *StructType) Pos() token.Pos     { return x.Struct }
-func (x *FuncType) Pos() token.Pos {
-	if x.Func.IsValid() || x.Params == nil { // see issue 3870
-		return x.Func
-	}
-	return x.Params.Pos() // interface method declarations have no "func" keyword
-}
-func (x *InterfaceType) Pos() token.Pos { return x.Interface }
-func (x *MapType) Pos() token.Pos       { return x.Map }
-func (x *ChanType) Pos() token.Pos      { return x.Begin }
-
-func (x *BadExpr) End() token.Pos { return x.To }
-func (x *Ident) End() token.Pos   { return token.Pos(int(x.NamePos) + len(x.Name)) }
-func (x *Ellipsis) End() token.Pos {
-	if x.Elt != nil {
-		return x.Elt.End()
-	}
-	return x.Ellipsis + 3 // len("...")
-}
-func (x *BasicLit) End() token.Pos       { return token.Pos(int(x.ValuePos) + len(x.Value)) }
-func (x *FuncLit) End() token.Pos        { return x.Body.End() }
-func (x *CompositeLit) End() token.Pos   { return x.Rbrace + 1 }
-func (x *ParenExpr) End() token.Pos      { return x.Rparen + 1 }
-func (x *SelectorExpr) End() token.Pos   { return x.Sel.End() }
-func (x *IndexExpr) End() token.Pos      { return x.Rbrack + 1 }
-func (x *IndexListExpr) End() token.Pos  { return x.Rbrack + 1 }
-func (x *SliceExpr) End() token.Pos      { return x.Rbrack + 1 }
-func (x *TypeAssertExpr) End() token.Pos { return x.Rparen + 1 }
-func (x *CallExpr) End() token.Pos       { return x.Rparen + 1 }
-func (x *StarExpr) End() token.Pos       { return x.X.End() }
-func (x *UnaryExpr) End() token.Pos      { return x.X.End() }
-func (x *BinaryExpr) End() token.Pos     { return x.Y.End() }
-func (x *KeyValueExpr) End() token.Pos   { return x.Value.End() }
-func (x *ArrayType) End() token.Pos      { return x.Elt.End() }
-func (x *StructType) End() token.Pos     { return x.Fields.End() }
-func (x *FuncType) End() token.Pos {
-	if x.Results != nil {
-		return x.Results.End()
-	}
-	return x.Params.End()
-}
-func (x *InterfaceType) End() token.Pos { return x.Methods.End() }
-func (x *MapType) End() token.Pos       { return x.Value.End() }
-func (x *ChanType) End() token.Pos      { return x.Value.End() }
-
-// exprNode() ensures that only expression/type nodes can be
-// assigned to an Expr.
-func (*BadExpr) exprNode()        {}
-func (*Ident) exprNode()          {}
-func (*Ellipsis) exprNode()       {}
-func (*BasicLit) exprNode()       {}
-func (*FuncLit) exprNode()        {}
-func (*CompositeLit) exprNode()   {}
-func (*ParenExpr) exprNode()      {}
-func (*SelectorExpr) exprNode()   {}
-func (*IndexExpr) exprNode()      {}
-func (*IndexListExpr) exprNode()  {}
-func (*SliceExpr) exprNode()      {}
-func (*TypeAssertExpr) exprNode() {}
-func (*CallExpr) exprNode()       {}
-func (*StarExpr) exprNode()       {}
-func (*UnaryExpr) exprNode()      {}
-func (*BinaryExpr) exprNode()     {}
-func (*KeyValueExpr) exprNode()   {}
-
-func (*ArrayType) exprNode()     {}
-func (*StructType) exprNode()    {}
-func (*FuncType) exprNode()      {}
-func (*InterfaceType) exprNode() {}
-func (*MapType) exprNode()       {}
-func (*ChanType) exprNode()      {}
-
-// ----------------------------------------------------------------------------
-// Convenience functions for Idents
-
-// NewIdent creates a new Ident without position.
-// Useful for ASTs generated by code other than the Go parser.
-func NewIdent(name string) *Ident { return &Ident{token.NoPos, name, nil} }
-
-// IsExported reports whether name starts with an upper-case letter.
-func IsExported(name string) bool { return token.IsExported(name) }
-
-// IsExported reports whether id starts with an upper-case letter.
-func (id *Ident) IsExported() bool { return token.IsExported(id.Name) }
-
-func (id *Ident) String() string {
-	if id != nil {
-		return id.Name
-	}
-	return "<nil>"
-}
-
-// ----------------------------------------------------------------------------
-// Statements
-
-// A statement is represented by a tree consisting of one
-// or more of the following concrete statement nodes.
-type (
-	// A BadStmt node is a placeholder for statements containing
-	// syntax errors for which no correct statement nodes can be
-	// created.
-	//
-	BadStmt struct {
-		From, To token.Pos // position range of bad statement
-	}
-
-	// A DeclStmt node represents a declaration in a statement list.
-	DeclStmt struct {
-		Decl Decl // *GenDecl with CONST, TYPE, or VAR token
-	}
-
-	// An EmptyStmt node represents an empty statement.
-	// The "position" of the empty statement is the position
-	// of the immediately following (explicit or implicit) semicolon.
-	//
-	EmptyStmt struct {
-		Semicolon token.Pos // position of following ";"
-		Implicit  bool      // if set, ";" was omitted in the source
-	}
-
-	// A LabeledStmt node represents a labeled statement.
-	LabeledStmt struct {
-		Label *Ident
-		Colon token.Pos // position of ":"
-		Stmt  Stmt
-	}
-
-	// An ExprStmt node represents a (stand-alone) expression
-	// in a statement list.
-	//
-	ExprStmt struct {
-		X Expr // expression
-	}
-
-	// A SendStmt node represents a send statement.
-	SendStmt struct {
-		Chan  Expr
-		Arrow token.Pos // position of "<-"
-		Value Expr
-	}
-
-	// An IncDecStmt node represents an increment or decrement statement.
-	IncDecStmt struct {
-		X      Expr
-		TokPos token.Pos   // position of Tok
-		Tok    token.Token // INC or DEC
-	}
-
-	// An AssignStmt node represents an assignment or
-	// a short variable declaration.
-	//
-	AssignStmt struct {
-		Lhs    []Expr
-		TokPos token.Pos   // position of Tok
-		Tok    token.Token // assignment token, DEFINE
-		Rhs    []Expr
-	}
-
-	// A GoStmt node represents a go statement.
-	GoStmt struct {
-		Go   token.Pos // position of "go" keyword
-		Call *CallExpr
-	}
-
-	// A DeferStmt node represents a defer statement.
-	DeferStmt struct {
-		Defer token.Pos // position of "defer" keyword
-		Call  *CallExpr
-	}
-
-	// A ReturnStmt node represents a return statement.
-	ReturnStmt struct {
-		Return  token.Pos // position of "return" keyword
-		Results []Expr    // result expressions; or nil
-	}
-
-	// A BranchStmt node represents a break, continue, goto,
-	// or fallthrough statement.
-	//
-	BranchStmt struct {
-		TokPos token.Pos   // position of Tok
-		Tok    token.Token // keyword token (BREAK, CONTINUE, GOTO, FALLTHROUGH)
-		Label  *Ident      // label name; or nil
-	}
-
-	// A BlockStmt node represents a braced statement list.
-	BlockStmt struct {
-		Lbrace token.Pos // position of "{"
-		List   []Stmt
-		Rbrace token.Pos // position of "}", if any (may be absent due to syntax error)
-	}
-
-	// An IfStmt node represents an if statement.
-	IfStmt struct {
-		If   token.Pos // position of "if" keyword
-		Init Stmt      // initialization statement; or nil
-		Cond Expr      // condition
-		Body *BlockStmt
-		Else Stmt // else branch; or nil
-	}
-
-	// A CaseClause represents a case of an expression or type switch statement.
-	CaseClause struct {
-		Case  token.Pos // position of "case" or "default" keyword
-		List  []Expr    // list of expressions or types; nil means default case
-		Colon token.Pos // position of ":"
-		Body  []Stmt    // statement list; or nil
-	}
-
-	// A SwitchStmt node represents an expression switch statement.
-	SwitchStmt struct {
-		Switch token.Pos  // position of "switch" keyword
-		Init   Stmt       // initialization statement; or nil
-		Tag    Expr       // tag expression; or nil
-		Body   *BlockStmt // CaseClauses only
-	}
-
-	// A TypeSwitchStmt node represents a type switch statement.
-	TypeSwitchStmt struct {
-		Switch token.Pos  // position of "switch" keyword
-		Init   Stmt       // initialization statement; or nil
-		Assign Stmt       // x := y.(type) or y.(type)
-		Body   *BlockStmt // CaseClauses only
-	}
-
-	// A CommClause node represents a case of a select statement.
-	CommClause struct {
-		Case  token.Pos // position of "case" or "default" keyword
-		Comm  Stmt      // send or receive statement; nil means default case
-		Colon token.Pos // position of ":"
-		Body  []Stmt    // statement list; or nil
-	}
-
-	// A SelectStmt node represents a select statement.
-	SelectStmt struct {
-		Select token.Pos  // position of "select" keyword
-		Body   *BlockStmt // CommClauses only
-	}
-
-	// A ForStmt represents a for statement.
-	ForStmt struct {
-		For  token.Pos // position of "for" keyword
-		Init Stmt      // initialization statement; or nil
-		Cond Expr      // condition; or nil
-		Post Stmt      // post iteration statement; or nil
-		Body *BlockStmt
-	}
-
-	// A RangeStmt represents a for statement with a range clause.
-	RangeStmt struct {
-		For        token.Pos   // position of "for" keyword
-		Key, Value Expr        // Key, Value may be nil
-		TokPos     token.Pos   // position of Tok; invalid if Key == nil
-		Tok        token.Token // ILLEGAL if Key == nil, ASSIGN, DEFINE
-		X          Expr        // value to range over
-		Body       *BlockStmt
-	}
-)
-
-// Pos and End implementations for statement nodes.
-
-func (s *BadStmt) Pos() token.Pos        { return s.From }
-func (s *DeclStmt) Pos() token.Pos       { return s.Decl.Pos() }
-func (s *EmptyStmt) Pos() token.Pos      { return s.Semicolon }
-func (s *LabeledStmt) Pos() token.Pos    { return s.Label.Pos() }
-func (s *ExprStmt) Pos() token.Pos       { return s.X.Pos() }
-func (s *SendStmt) Pos() token.Pos       { return s.Chan.Pos() }
-func (s *IncDecStmt) Pos() token.Pos     { return s.X.Pos() }
-func (s *AssignStmt) Pos() token.Pos     { return s.Lhs[0].Pos() }
-func (s *GoStmt) Pos() token.Pos         { return s.Go }
-func (s *DeferStmt) Pos() token.Pos      { return s.Defer }
-func (s *ReturnStmt) Pos() token.Pos     { return s.Return }
-func (s *BranchStmt) Pos() token.Pos     { return s.TokPos }
-func (s *BlockStmt) Pos() token.Pos      { return s.Lbrace }
-func (s *IfStmt) Pos() token.Pos         { return s.If }
-func (s *CaseClause) Pos() token.Pos     { return s.Case }
-func (s *SwitchStmt) Pos() token.Pos     { return s.Switch }
-func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch }
-func (s *CommClause) Pos() token.Pos     { return s.Case }
-func (s *SelectStmt) Pos() token.Pos     { return s.Select }
-func (s *ForStmt) Pos() token.Pos        { return s.For }
-func (s *RangeStmt) Pos() token.Pos      { return s.For }
-
-func (s *BadStmt) End() token.Pos  { return s.To }
-func (s *DeclStmt) End() token.Pos { return s.Decl.End() }
-func (s *EmptyStmt) End() token.Pos {
-	if s.Implicit {
-		return s.Semicolon
-	}
-	return s.Semicolon + 1 /* len(";") */
-}
-func (s *LabeledStmt) End() token.Pos { return s.Stmt.End() }
-func (s *ExprStmt) End() token.Pos    { return s.X.End() }
-func (s *SendStmt) End() token.Pos    { return s.Value.End() }
-func (s *IncDecStmt) End() token.Pos {
-	return s.TokPos + 2 /* len("++") */
-}
-func (s *AssignStmt) End() token.Pos { return s.Rhs[len(s.Rhs)-1].End() }
-func (s *GoStmt) End() token.Pos     { return s.Call.End() }
-func (s *DeferStmt) End() token.Pos  { return s.Call.End() }
-func (s *ReturnStmt) End() token.Pos {
-	if n := len(s.Results); n > 0 {
-		return s.Results[n-1].End()
-	}
-	return s.Return + 6 // len("return")
-}
-func (s *BranchStmt) End() token.Pos {
-	if s.Label != nil {
-		return s.Label.End()
-	}
-	return token.Pos(int(s.TokPos) + len(s.Tok.String()))
-}
-func (s *BlockStmt) End() token.Pos {
-	if s.Rbrace.IsValid() {
-		return s.Rbrace + 1
-	}
-	if n := len(s.List); n > 0 {
-		return s.List[n-1].End()
-	}
-	return s.Lbrace + 1
-}
-func (s *IfStmt) End() token.Pos {
-	if s.Else != nil {
-		return s.Else.End()
-	}
-	return s.Body.End()
-}
-func (s *CaseClause) End() token.Pos {
-	if n := len(s.Body); n > 0 {
-		return s.Body[n-1].End()
-	}
-	return s.Colon + 1
-}
-func (s *SwitchStmt) End() token.Pos     { return s.Body.End() }
-func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() }
-func (s *CommClause) End() token.Pos {
-	if n := len(s.Body); n > 0 {
-		return s.Body[n-1].End()
-	}
-	return s.Colon + 1
-}
-func (s *SelectStmt) End() token.Pos { return s.Body.End() }
-func (s *ForStmt) End() token.Pos    { return s.Body.End() }
-func (s *RangeStmt) End() token.Pos  { return s.Body.End() }
-
-// stmtNode() ensures that only statement nodes can be
-// assigned to a Stmt.
-func (*BadStmt) stmtNode()        {}
-func (*DeclStmt) stmtNode()       {}
-func (*EmptyStmt) stmtNode()      {}
-func (*LabeledStmt) stmtNode()    {}
-func (*ExprStmt) stmtNode()       {}
-func (*SendStmt) stmtNode()       {}
-func (*IncDecStmt) stmtNode()     {}
-func (*AssignStmt) stmtNode()     {}
-func (*GoStmt) stmtNode()         {}
-func (*DeferStmt) stmtNode()      {}
-func (*ReturnStmt) stmtNode()     {}
-func (*BranchStmt) stmtNode()     {}
-func (*BlockStmt) stmtNode()      {}
-func (*IfStmt) stmtNode()         {}
-func (*CaseClause) stmtNode()     {}
-func (*SwitchStmt) stmtNode()     {}
-func (*TypeSwitchStmt) stmtNode() {}
-func (*CommClause) stmtNode()     {}
-func (*SelectStmt) stmtNode()     {}
-func (*ForStmt) stmtNode()        {}
-func (*RangeStmt) stmtNode()      {}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// A Spec node represents a single (non-parenthesized) import,
-// constant, type, or variable declaration.
-type (
-	// The Spec type stands for any of *ImportSpec, *ValueSpec, and *TypeSpec.
-	Spec interface {
-		Node
-		specNode()
-	}
-
-	// An ImportSpec node represents a single package import.
-	ImportSpec struct {
-		Doc     *CommentGroup // associated documentation; or nil
-		Name    *Ident        // local package name (including "."); or nil
-		Path    *BasicLit     // import path
-		Comment *CommentGroup // line comments; or nil
-		EndPos  token.Pos     // end of spec (overrides Path.Pos if nonzero)
-	}
-
-	// A ValueSpec node represents a constant or variable declaration
-	// (ConstSpec or VarSpec production).
-	//
-	ValueSpec struct {
-		Doc     *CommentGroup // associated documentation; or nil
-		Names   []*Ident      // value names (len(Names) > 0)
-		Type    Expr          // value type; or nil
-		Values  []Expr        // initial values; or nil
-		Comment *CommentGroup // line comments; or nil
-	}
-
-	// A TypeSpec node represents a type declaration (TypeSpec production).
-	TypeSpec struct {
-		Doc        *CommentGroup // associated documentation; or nil
-		Name       *Ident        // type name
-		TypeParams *FieldList    // type parameters; or nil
-		Assign     token.Pos     // position of '=', if any
-		Type       Expr          // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
-		Comment    *CommentGroup // line comments; or nil
-	}
-)
-
-// Pos and End implementations for spec nodes.
-
-func (s *ImportSpec) Pos() token.Pos {
-	if s.Name != nil {
-		return s.Name.Pos()
-	}
-	return s.Path.Pos()
-}
-func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
-func (s *TypeSpec) Pos() token.Pos  { return s.Name.Pos() }
-
-func (s *ImportSpec) End() token.Pos {
-	if s.EndPos != 0 {
-		return s.EndPos
-	}
-	return s.Path.End()
-}
-
-func (s *ValueSpec) End() token.Pos {
-	if n := len(s.Values); n > 0 {
-		return s.Values[n-1].End()
-	}
-	if s.Type != nil {
-		return s.Type.End()
-	}
-	return s.Names[len(s.Names)-1].End()
-}
-func (s *TypeSpec) End() token.Pos { return s.Type.End() }
-
-// specNode() ensures that only spec nodes can be
-// assigned to a Spec.
-func (*ImportSpec) specNode() {}
-func (*ValueSpec) specNode()  {}
-func (*TypeSpec) specNode()   {}
-
-// A declaration is represented by one of the following declaration nodes.
-type (
-	// A BadDecl node is a placeholder for a declaration containing
-	// syntax errors for which a correct declaration node cannot be
-	// created.
-	//
-	BadDecl struct {
-		From, To token.Pos // position range of bad declaration
-	}
-
-	// A GenDecl node (generic declaration node) represents an import,
-	// constant, type or variable declaration. A valid Lparen position
-	// (Lparen.IsValid()) indicates a parenthesized declaration.
-	//
-	// Relationship between Tok value and Specs element type:
-	//
-	//	token.IMPORT  *ImportSpec
-	//	token.CONST   *ValueSpec
-	//	token.TYPE    *TypeSpec
-	//	token.VAR     *ValueSpec
-	//
-	GenDecl struct {
-		Doc    *CommentGroup // associated documentation; or nil
-		TokPos token.Pos     // position of Tok
-		Tok    token.Token   // IMPORT, CONST, TYPE, or VAR
-		Lparen token.Pos     // position of '(', if any
-		Specs  []Spec
-		Rparen token.Pos // position of ')', if any
-	}
-
-	// A FuncDecl node represents a function declaration.
-	FuncDecl struct {
-		Doc  *CommentGroup // associated documentation; or nil
-		Recv *FieldList    // receiver (methods); or nil (functions)
-		Name *Ident        // function/method name
-		Type *FuncType     // function signature: type and value parameters, results, and position of "func" keyword
-		Body *BlockStmt    // function body; or nil for external (non-Go) function
-	}
-)
-
-// Pos and End implementations for declaration nodes.
-
-func (d *BadDecl) Pos() token.Pos  { return d.From }
-func (d *GenDecl) Pos() token.Pos  { return d.TokPos }
-func (d *FuncDecl) Pos() token.Pos { return d.Type.Pos() }
-
-func (d *BadDecl) End() token.Pos { return d.To }
-func (d *GenDecl) End() token.Pos {
-	if d.Rparen.IsValid() {
-		return d.Rparen + 1
-	}
-	return d.Specs[0].End()
-}
-func (d *FuncDecl) End() token.Pos {
-	if d.Body != nil {
-		return d.Body.End()
-	}
-	return d.Type.End()
-}
-
-// declNode() ensures that only declaration nodes can be
-// assigned to a Decl.
-func (*BadDecl) declNode()  {}
-func (*GenDecl) declNode()  {}
-func (*FuncDecl) declNode() {}
-
-// ----------------------------------------------------------------------------
-// Files and packages
-
-// A File node represents a Go source file.
-//
-// The Comments list contains all comments in the source file in order of
-// appearance, including the comments that are pointed to from other nodes
-// via Doc and Comment fields.
-//
-// For correct printing of source code containing comments (using packages
-// go/format and go/printer), special care must be taken to update comments
-// when a File's syntax tree is modified: For printing, comments are interspersed
-// between tokens based on their position. If syntax tree nodes are
-// removed or moved, relevant comments in their vicinity must also be removed
-// (from the File.Comments list) or moved accordingly (by updating their
-// positions). A CommentMap may be used to facilitate some of these operations.
-//
-// Whether and how a comment is associated with a node depends on the
-// interpretation of the syntax tree by the manipulating program: Except for Doc
-// and Comment comments directly associated with nodes, the remaining comments
-// are "free-floating" (see also issues #18593, #20744).
-type File struct {
-	Doc        *CommentGroup   // associated documentation; or nil
-	Package    token.Pos       // position of "package" keyword
-	Name       *Ident          // package name
-	Decls      []Decl          // top-level declarations; or nil
-	Scope      *Scope          // package scope (this file only)
-	Imports    []*ImportSpec   // imports in this file
-	Unresolved []*Ident        // unresolved identifiers in this file
-	Comments   []*CommentGroup // list of all comments in the source file
-}
-
-func (f *File) Pos() token.Pos { return f.Package }
-func (f *File) End() token.Pos {
-	if n := len(f.Decls); n > 0 {
-		return f.Decls[n-1].End()
-	}
-	return f.Name.End()
-}
-
-// A Package node represents a set of source files
-// collectively building a Go package.
-type Package struct {
-	Name    string             // package name
-	Scope   *Scope             // package scope across all files
-	Imports map[string]*Object // map of package id -> package object
-	Files   map[string]*File   // Go source files by filename
-}
-
-func (p *Package) Pos() token.Pos { return token.NoPos }
-func (p *Package) End() token.Pos { return token.NoPos }
diff --git a/internal/backport/go/ast/ast_test.go b/internal/backport/go/ast/ast_test.go
deleted file mode 100644
index 66ae884..0000000
--- a/internal/backport/go/ast/ast_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import (
-	"testing"
-)
-
-var comments = []struct {
-	list []string
-	text string
-}{
-	{[]string{"//"}, ""},
-	{[]string{"//   "}, ""},
-	{[]string{"//", "//", "//   "}, ""},
-	{[]string{"// foo   "}, "foo\n"},
-	{[]string{"//", "//", "// foo"}, "foo\n"},
-	{[]string{"// foo  bar  "}, "foo  bar\n"},
-	{[]string{"// foo", "// bar"}, "foo\nbar\n"},
-	{[]string{"// foo", "//", "//", "//", "// bar"}, "foo\n\nbar\n"},
-	{[]string{"// foo", "/* bar */"}, "foo\n bar\n"},
-	{[]string{"//", "//", "//", "// foo", "//", "//", "//"}, "foo\n"},
-
-	{[]string{"/**/"}, ""},
-	{[]string{"/*   */"}, ""},
-	{[]string{"/**/", "/**/", "/*   */"}, ""},
-	{[]string{"/* Foo   */"}, " Foo\n"},
-	{[]string{"/* Foo  Bar  */"}, " Foo  Bar\n"},
-	{[]string{"/* Foo*/", "/* Bar*/"}, " Foo\n Bar\n"},
-	{[]string{"/* Foo*/", "/**/", "/**/", "/**/", "// Bar"}, " Foo\n\nBar\n"},
-	{[]string{"/* Foo*/", "/*\n*/", "//", "/*\n*/", "// Bar"}, " Foo\n\nBar\n"},
-	{[]string{"/* Foo*/", "// Bar"}, " Foo\nBar\n"},
-	{[]string{"/* Foo\n Bar*/"}, " Foo\n Bar\n"},
-
-	{[]string{"// foo", "//go:noinline", "// bar", "//:baz"}, "foo\nbar\n:baz\n"},
-	{[]string{"// foo", "//lint123:ignore", "// bar"}, "foo\nbar\n"},
-}
-
-func TestCommentText(t *testing.T) {
-	for i, c := range comments {
-		list := make([]*Comment, len(c.list))
-		for i, s := range c.list {
-			list[i] = &Comment{Text: s}
-		}
-
-		text := (&CommentGroup{list}).Text()
-		if text != c.text {
-			t.Errorf("case %d: got %q; expected %q", i, text, c.text)
-		}
-	}
-}
-
-var isDirectiveTests = []struct {
-	in string
-	ok bool
-}{
-	{"abc", false},
-	{"go:inline", true},
-	{"Go:inline", false},
-	{"go:Inline", false},
-	{":inline", false},
-	{"lint:ignore", true},
-	{"lint:1234", true},
-	{"1234:lint", true},
-	{"go: inline", false},
-	{"go:", false},
-	{"go:*", false},
-	{"go:x*", true},
-	{"export foo", true},
-	{"extern foo", true},
-	{"expert foo", false},
-}
-
-func TestIsDirective(t *testing.T) {
-	for _, tt := range isDirectiveTests {
-		if ok := isDirective(tt.in); ok != tt.ok {
-			t.Errorf("isDirective(%q) = %v, want %v", tt.in, ok, tt.ok)
-		}
-	}
-}
diff --git a/internal/backport/go/ast/commentmap.go b/internal/backport/go/ast/commentmap.go
deleted file mode 100644
index 5e63605..0000000
--- a/internal/backport/go/ast/commentmap.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/token"
-	"sort"
-)
-
-type byPos []*CommentGroup
-
-func (a byPos) Len() int           { return len(a) }
-func (a byPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() }
-func (a byPos) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-// sortComments sorts the list of comment groups in source order.
-func sortComments(list []*CommentGroup) {
-	// TODO(gri): Does it make sense to check for sorted-ness
-	//            first (because we know that sorted-ness is
-	//            very likely)?
-	if orderedList := byPos(list); !sort.IsSorted(orderedList) {
-		sort.Sort(orderedList)
-	}
-}
-
-// A CommentMap maps an AST node to a list of comment groups
-// associated with it. See NewCommentMap for a description of
-// the association.
-type CommentMap map[Node][]*CommentGroup
-
-func (cmap CommentMap) addComment(n Node, c *CommentGroup) {
-	list := cmap[n]
-	if len(list) == 0 {
-		list = []*CommentGroup{c}
-	} else {
-		list = append(list, c)
-	}
-	cmap[n] = list
-}
-
-type byInterval []Node
-
-func (a byInterval) Len() int { return len(a) }
-func (a byInterval) Less(i, j int) bool {
-	pi, pj := a[i].Pos(), a[j].Pos()
-	return pi < pj || pi == pj && a[i].End() > a[j].End()
-}
-func (a byInterval) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// nodeList returns the list of nodes of the AST n in source order.
-func nodeList(n Node) []Node {
-	var list []Node
-	Inspect(n, func(n Node) bool {
-		// don't collect comments
-		switch n.(type) {
-		case nil, *CommentGroup, *Comment:
-			return false
-		}
-		list = append(list, n)
-		return true
-	})
-	// Note: The current implementation assumes that Inspect traverses the
-	//       AST in depth-first and thus _source_ order. If AST traversal
-	//       does not follow source order, the sorting call below will be
-	//       required.
-	// sort.Sort(byInterval(list))
-	return list
-}
-
-// A commentListReader helps iterating through a list of comment groups.
-type commentListReader struct {
-	fset     *token.FileSet
-	list     []*CommentGroup
-	index    int
-	comment  *CommentGroup  // comment group at current index
-	pos, end token.Position // source interval of comment group at current index
-}
-
-func (r *commentListReader) eol() bool {
-	return r.index >= len(r.list)
-}
-
-func (r *commentListReader) next() {
-	if !r.eol() {
-		r.comment = r.list[r.index]
-		r.pos = r.fset.Position(r.comment.Pos())
-		r.end = r.fset.Position(r.comment.End())
-		r.index++
-	}
-}
-
-// A nodeStack keeps track of nested nodes.
-// A node lower on the stack lexically contains the nodes higher on the stack.
-type nodeStack []Node
-
-// push pops all nodes that appear lexically before n
-// and then pushes n on the stack.
-func (s *nodeStack) push(n Node) {
-	s.pop(n.Pos())
-	*s = append((*s), n)
-}
-
-// pop pops all nodes that appear lexically before pos
-// (i.e., whose lexical extent has ended before or at pos).
-// It returns the last node popped.
-func (s *nodeStack) pop(pos token.Pos) (top Node) {
-	i := len(*s)
-	for i > 0 && (*s)[i-1].End() <= pos {
-		top = (*s)[i-1]
-		i--
-	}
-	*s = (*s)[0:i]
-	return top
-}
-
-// NewCommentMap creates a new comment map by associating comment groups
-// of the comments list with the nodes of the AST specified by node.
-//
-// A comment group g is associated with a node n if:
-//
-//   - g starts on the same line as n ends
-//   - g starts on the line immediately following n, and there is
-//     at least one empty line after g and before the next node
-//   - g starts before n and is not associated to the node before n
-//     via the previous rules
-//
-// NewCommentMap tries to associate a comment group to the "largest"
-// node possible: For instance, if the comment is a line comment
-// trailing an assignment, the comment is associated with the entire
-// assignment rather than just the last operand in the assignment.
-func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
-	if len(comments) == 0 {
-		return nil // no comments to map
-	}
-
-	cmap := make(CommentMap)
-
-	// set up comment reader r
-	tmp := make([]*CommentGroup, len(comments))
-	copy(tmp, comments) // don't change incoming comments
-	sortComments(tmp)
-	r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
-	r.next()
-
-	// create node list in lexical order
-	nodes := nodeList(node)
-	nodes = append(nodes, nil) // append sentinel
-
-	// set up iteration variables
-	var (
-		p     Node           // previous node
-		pend  token.Position // end of p
-		pg    Node           // previous node group (enclosing nodes of "importance")
-		pgend token.Position // end of pg
-		stack nodeStack      // stack of node groups
-	)
-
-	for _, q := range nodes {
-		var qpos token.Position
-		if q != nil {
-			qpos = fset.Position(q.Pos()) // current node position
-		} else {
-			// set fake sentinel position to infinity so that
-			// all comments get processed before the sentinel
-			const infinity = 1 << 30
-			qpos.Offset = infinity
-			qpos.Line = infinity
-		}
-
-		// process comments before current node
-		for r.end.Offset <= qpos.Offset {
-			// determine recent node group
-			if top := stack.pop(r.comment.Pos()); top != nil {
-				pg = top
-				pgend = fset.Position(pg.End())
-			}
-			// Try to associate a comment first with a node group
-			// (i.e., a node of "importance" such as a declaration);
-			// if that fails, try to associate it with the most recent
-			// node.
-			// TODO(gri) try to simplify the logic below
-			var assoc Node
-			switch {
-			case pg != nil &&
-				(pgend.Line == r.pos.Line ||
-					pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
-				// 1) comment starts on same line as previous node group ends, or
-				// 2) comment starts on the line immediately after the
-				//    previous node group and there is an empty line before
-				//    the current node
-				// => associate comment with previous node group
-				assoc = pg
-			case p != nil &&
-				(pend.Line == r.pos.Line ||
-					pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
-					q == nil):
-				// same rules apply as above for p rather than pg,
-				// but also associate with p if we are at the end (q == nil)
-				assoc = p
-			default:
-				// otherwise, associate comment with current node
-				if q == nil {
-					// we can only reach here if there was no p
-					// which would imply that there were no nodes
-					panic("internal error: no comments should be associated with sentinel")
-				}
-				assoc = q
-			}
-			cmap.addComment(assoc, r.comment)
-			if r.eol() {
-				return cmap
-			}
-			r.next()
-		}
-
-		// update previous node
-		p = q
-		pend = fset.Position(p.End())
-
-		// update previous node group if we see an "important" node
-		switch q.(type) {
-		case *File, *Field, Decl, Spec, Stmt:
-			stack.push(q)
-		}
-	}
-
-	return cmap
-}
-
-// Update replaces an old node in the comment map with the new node
-// and returns the new node. Comments that were associated with the
-// old node are associated with the new node.
-func (cmap CommentMap) Update(old, new Node) Node {
-	if list := cmap[old]; len(list) > 0 {
-		delete(cmap, old)
-		cmap[new] = append(cmap[new], list...)
-	}
-	return new
-}
-
-// Filter returns a new comment map consisting of only those
-// entries of cmap for which a corresponding node exists in
-// the AST specified by node.
-func (cmap CommentMap) Filter(node Node) CommentMap {
-	umap := make(CommentMap)
-	Inspect(node, func(n Node) bool {
-		if g := cmap[n]; len(g) > 0 {
-			umap[n] = g
-		}
-		return true
-	})
-	return umap
-}
-
-// Comments returns the list of comment groups in the comment map.
-// The result is sorted in source order.
-func (cmap CommentMap) Comments() []*CommentGroup {
-	list := make([]*CommentGroup, 0, len(cmap))
-	for _, e := range cmap {
-		list = append(list, e...)
-	}
-	sortComments(list)
-	return list
-}
-
-func summary(list []*CommentGroup) string {
-	const maxLen = 40
-	var buf bytes.Buffer
-
-	// collect comments text
-loop:
-	for _, group := range list {
-		// Note: CommentGroup.Text() does too much work for what we
-		//       need and would only replace this innermost loop.
-		//       Just do it explicitly.
-		for _, comment := range group.List {
-			if buf.Len() >= maxLen {
-				break loop
-			}
-			buf.WriteString(comment.Text)
-		}
-	}
-
-	// truncate if too long
-	if buf.Len() > maxLen {
-		buf.Truncate(maxLen - 3)
-		buf.WriteString("...")
-	}
-
-	// replace any invisibles with blanks
-	bytes := buf.Bytes()
-	for i, b := range bytes {
-		switch b {
-		case '\t', '\n', '\r':
-			bytes[i] = ' '
-		}
-	}
-
-	return string(bytes)
-}
-
-func (cmap CommentMap) String() string {
-	// print map entries in sorted order
-	var nodes []Node
-	for node := range cmap {
-		nodes = append(nodes, node)
-	}
-	sort.Sort(byInterval(nodes))
-
-	var buf bytes.Buffer
-	fmt.Fprintln(&buf, "CommentMap {")
-	for _, node := range nodes {
-		comment := cmap[node]
-		// print name of identifiers; print node type for other nodes
-		var s string
-		if ident, ok := node.(*Ident); ok {
-			s = ident.Name
-		} else {
-			s = fmt.Sprintf("%T", node)
-		}
-		fmt.Fprintf(&buf, "\t%p  %20s:  %s\n", node, s, summary(comment))
-	}
-	fmt.Fprintln(&buf, "}")
-	return buf.String()
-}
diff --git a/internal/backport/go/ast/commentmap_test.go b/internal/backport/go/ast/commentmap_test.go
deleted file mode 100644
index 67c2daf..0000000
--- a/internal/backport/go/ast/commentmap_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// To avoid a cyclic dependency with go/parser, this file is in a separate package.
-
-package ast_test
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"testing"
-
-	. "golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-const src = `
-// the very first comment
-
-// package p
-package p /* the name is p */
-
-// imports
-import (
-	"bytes"     // bytes
-	"fmt"       // fmt
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-)
-
-// T
-type T struct {
-	a, b, c int // associated with a, b, c
-	// associated with x, y
-	x, y float64    // float values
-	z    complex128 // complex value
-}
-// also associated with T
-
-// x
-var x = 0 // x = 0
-// also associated with x
-
-// f1
-func f1() {
-	/* associated with s1 */
-	s1()
-	// also associated with s1
-
-	// associated with s2
-
-	// also associated with s2
-	s2() // line comment for s2
-}
-// associated with f1
-// also associated with f1
-
-// associated with f2
-
-// f2
-func f2() {
-}
-
-func f3() {
-	i := 1 /* 1 */ + 2 // addition
-	_ = i
-}
-
-// the very last comment
-`
-
-// res maps a key of the form "line number: node type"
-// to the associated comments' text.
-var res = map[string]string{
-	" 5: *ast.File":       "the very first comment\npackage p\n",
-	" 5: *ast.Ident":      " the name is p\n",
-	" 8: *ast.GenDecl":    "imports\n",
-	" 9: *ast.ImportSpec": "bytes\n",
-	"10: *ast.ImportSpec": "fmt\n",
-	"16: *ast.GenDecl":    "T\nalso associated with T\n",
-	"17: *ast.Field":      "associated with a, b, c\n",
-	"19: *ast.Field":      "associated with x, y\nfloat values\n",
-	"20: *ast.Field":      "complex value\n",
-	"25: *ast.GenDecl":    "x\nx = 0\nalso associated with x\n",
-	"29: *ast.FuncDecl":   "f1\nassociated with f1\nalso associated with f1\n",
-	"31: *ast.ExprStmt":   " associated with s1\nalso associated with s1\n",
-	"37: *ast.ExprStmt":   "associated with s2\nalso associated with s2\nline comment for s2\n",
-	"45: *ast.FuncDecl":   "associated with f2\nf2\n",
-	"49: *ast.AssignStmt": "addition\n",
-	"49: *ast.BasicLit":   " 1\n",
-	"50: *ast.Ident":      "the very last comment\n",
-}
-
-func ctext(list []*CommentGroup) string {
-	var buf bytes.Buffer
-	for _, g := range list {
-		buf.WriteString(g.Text())
-	}
-	return buf.String()
-}
-
-func TestCommentMap(t *testing.T) {
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cmap := NewCommentMap(fset, f, f.Comments)
-
-	// verify correct association of comments
-	for n, list := range cmap {
-		key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
-		got := ctext(list)
-		want := res[key]
-		if got != want {
-			t.Errorf("%s: got %q; want %q", key, got, want)
-		}
-	}
-
-	// verify that no comments got lost
-	if n := len(cmap.Comments()); n != len(f.Comments) {
-		t.Errorf("got %d comment groups in map; want %d", n, len(f.Comments))
-	}
-
-	// support code to update test:
-	// set genMap to true to generate res map
-	const genMap = false
-	if genMap {
-		out := make([]string, 0, len(cmap))
-		for n, list := range cmap {
-			out = append(out, fmt.Sprintf("\t\"%2d: %T\":\t%q,", fset.Position(n.Pos()).Line, n, ctext(list)))
-		}
-		sort.Strings(out)
-		for _, s := range out {
-			fmt.Println(s)
-		}
-	}
-}
-
-func TestFilter(t *testing.T) {
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cmap := NewCommentMap(fset, f, f.Comments)
-
-	// delete variable declaration
-	for i, decl := range f.Decls {
-		if gen, ok := decl.(*GenDecl); ok && gen.Tok == token.VAR {
-			copy(f.Decls[i:], f.Decls[i+1:])
-			f.Decls = f.Decls[:len(f.Decls)-1]
-			break
-		}
-	}
-
-	// check if comments are filtered correctly
-	cc := cmap.Filter(f)
-	for n, list := range cc {
-		key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
-		got := ctext(list)
-		want := res[key]
-		if key == "25: *ast.GenDecl" || got != want {
-			t.Errorf("%s: got %q; want %q", key, got, want)
-		}
-	}
-}
diff --git a/internal/backport/go/ast/example_test.go b/internal/backport/go/ast/example_test.go
deleted file mode 100644
index ae92618..0000000
--- a/internal/backport/go/ast/example_test.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast_test
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// This example demonstrates how to inspect the AST of a Go program.
-func ExampleInspect() {
-	// src is the input for which we want to inspect the AST.
-	src := `
-package p
-const c = 1.0
-var X = f(3.14)*2 + c
-`
-
-	// Create the AST by parsing src.
-	fset := token.NewFileSet() // positions are relative to fset
-	f, err := parser.ParseFile(fset, "src.go", src, 0)
-	if err != nil {
-		panic(err)
-	}
-
-	// Inspect the AST and print all identifiers and literals.
-	ast.Inspect(f, func(n ast.Node) bool {
-		var s string
-		switch x := n.(type) {
-		case *ast.BasicLit:
-			s = x.Value
-		case *ast.Ident:
-			s = x.Name
-		}
-		if s != "" {
-			fmt.Printf("%s:\t%s\n", fset.Position(n.Pos()), s)
-		}
-		return true
-	})
-
-	// Output:
-	// src.go:2:9:	p
-	// src.go:3:7:	c
-	// src.go:3:11:	1.0
-	// src.go:4:5:	X
-	// src.go:4:9:	f
-	// src.go:4:11:	3.14
-	// src.go:4:17:	2
-	// src.go:4:21:	c
-}
-
-// This example shows what an AST looks like when printed for debugging.
-func ExamplePrint() {
-	// src is the input for which we want to print the AST.
-	src := `
-package main
-func main() {
-	println("Hello, World!")
-}
-`
-
-	// Create the AST by parsing src.
-	fset := token.NewFileSet() // positions are relative to fset
-	f, err := parser.ParseFile(fset, "", src, 0)
-	if err != nil {
-		panic(err)
-	}
-
-	// Print the AST.
-	ast.Print(fset, f)
-
-	// Output:
-	//      0  *ast.File {
-	//      1  .  Package: 2:1
-	//      2  .  Name: *ast.Ident {
-	//      3  .  .  NamePos: 2:9
-	//      4  .  .  Name: "main"
-	//      5  .  }
-	//      6  .  Decls: []ast.Decl (len = 1) {
-	//      7  .  .  0: *ast.FuncDecl {
-	//      8  .  .  .  Name: *ast.Ident {
-	//      9  .  .  .  .  NamePos: 3:6
-	//     10  .  .  .  .  Name: "main"
-	//     11  .  .  .  .  Obj: *ast.Object {
-	//     12  .  .  .  .  .  Kind: func
-	//     13  .  .  .  .  .  Name: "main"
-	//     14  .  .  .  .  .  Decl: *(obj @ 7)
-	//     15  .  .  .  .  }
-	//     16  .  .  .  }
-	//     17  .  .  .  Type: *ast.FuncType {
-	//     18  .  .  .  .  Func: 3:1
-	//     19  .  .  .  .  Params: *ast.FieldList {
-	//     20  .  .  .  .  .  Opening: 3:10
-	//     21  .  .  .  .  .  Closing: 3:11
-	//     22  .  .  .  .  }
-	//     23  .  .  .  }
-	//     24  .  .  .  Body: *ast.BlockStmt {
-	//     25  .  .  .  .  Lbrace: 3:13
-	//     26  .  .  .  .  List: []ast.Stmt (len = 1) {
-	//     27  .  .  .  .  .  0: *ast.ExprStmt {
-	//     28  .  .  .  .  .  .  X: *ast.CallExpr {
-	//     29  .  .  .  .  .  .  .  Fun: *ast.Ident {
-	//     30  .  .  .  .  .  .  .  .  NamePos: 4:2
-	//     31  .  .  .  .  .  .  .  .  Name: "println"
-	//     32  .  .  .  .  .  .  .  }
-	//     33  .  .  .  .  .  .  .  Lparen: 4:9
-	//     34  .  .  .  .  .  .  .  Args: []ast.Expr (len = 1) {
-	//     35  .  .  .  .  .  .  .  .  0: *ast.BasicLit {
-	//     36  .  .  .  .  .  .  .  .  .  ValuePos: 4:10
-	//     37  .  .  .  .  .  .  .  .  .  Kind: STRING
-	//     38  .  .  .  .  .  .  .  .  .  Value: "\"Hello, World!\""
-	//     39  .  .  .  .  .  .  .  .  }
-	//     40  .  .  .  .  .  .  .  }
-	//     41  .  .  .  .  .  .  .  Ellipsis: -
-	//     42  .  .  .  .  .  .  .  Rparen: 4:25
-	//     43  .  .  .  .  .  .  }
-	//     44  .  .  .  .  .  }
-	//     45  .  .  .  .  }
-	//     46  .  .  .  .  Rbrace: 5:1
-	//     47  .  .  .  }
-	//     48  .  .  }
-	//     49  .  }
-	//     50  .  Scope: *ast.Scope {
-	//     51  .  .  Objects: map[string]*ast.Object (len = 1) {
-	//     52  .  .  .  "main": *(obj @ 11)
-	//     53  .  .  }
-	//     54  .  }
-	//     55  .  Unresolved: []*ast.Ident (len = 1) {
-	//     56  .  .  0: *(obj @ 29)
-	//     57  .  }
-	//     58  }
-}
-
-// This example illustrates how to remove a variable declaration
-// in a Go program while maintaining correct comment association
-// using an ast.CommentMap.
-func ExampleCommentMap() {
-	// src is the input for which we create the AST that we
-	// are going to manipulate.
-	src := `
-// This is the package comment.
-package main
-
-// This comment is associated with the hello constant.
-const hello = "Hello, World!" // line comment 1
-
-// This comment is associated with the foo variable.
-var foo = hello // line comment 2
-
-// This comment is associated with the main function.
-func main() {
-	fmt.Println(hello) // line comment 3
-}
-`
-
-	// Create the AST by parsing src.
-	fset := token.NewFileSet() // positions are relative to fset
-	f, err := parser.ParseFile(fset, "src.go", src, parser.ParseComments)
-	if err != nil {
-		panic(err)
-	}
-
-	// Create an ast.CommentMap from the ast.File's comments.
-	// This helps keeping the association between comments
-	// and AST nodes.
-	cmap := ast.NewCommentMap(fset, f, f.Comments)
-
-	// Remove the first variable declaration from the list of declarations.
-	for i, decl := range f.Decls {
-		if gen, ok := decl.(*ast.GenDecl); ok && gen.Tok == token.VAR {
-			copy(f.Decls[i:], f.Decls[i+1:])
-			f.Decls = f.Decls[:len(f.Decls)-1]
-			break
-		}
-	}
-
-	// Use the comment map to filter comments that don't belong anymore
-	// (the comments associated with the variable declaration), and create
-	// the new comments list.
-	f.Comments = cmap.Filter(f).Comments()
-
-	// Print the modified AST.
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, f); err != nil {
-		panic(err)
-	}
-	fmt.Printf("%s", buf.Bytes())
-
-	// Output:
-	// // This is the package comment.
-	// package main
-	//
-	// // This comment is associated with the hello constant.
-	// const hello = "Hello, World!" // line comment 1
-	//
-	// // This comment is associated with the main function.
-	// func main() {
-	// 	fmt.Println(hello) // line comment 3
-	// }
-}
diff --git a/internal/backport/go/ast/filter.go b/internal/backport/go/ast/filter.go
deleted file mode 100644
index 8c2ed9d..0000000
--- a/internal/backport/go/ast/filter.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import (
-	"golang.org/x/website/internal/backport/go/token"
-	"sort"
-)
-
-// ----------------------------------------------------------------------------
-// Export filtering
-
-// exportFilter is a special filter function to extract exported nodes.
-func exportFilter(name string) bool {
-	return IsExported(name)
-}
-
-// FileExports trims the AST for a Go source file in place such that
-// only exported nodes remain: all top-level identifiers which are not exported
-// and their associated information (such as type, initial value, or function
-// body) are removed. Non-exported fields and methods of exported types are
-// stripped. The File.Comments list is not changed.
-//
-// FileExports reports whether there are exported declarations.
-func FileExports(src *File) bool {
-	return filterFile(src, exportFilter, true)
-}
-
-// PackageExports trims the AST for a Go package in place such that
-// only exported nodes remain. The pkg.Files list is not changed, so that
-// file names and top-level package comments don't get lost.
-//
-// PackageExports reports whether there are exported declarations;
-// it returns false otherwise.
-func PackageExports(pkg *Package) bool {
-	return filterPackage(pkg, exportFilter, true)
-}
-
-// ----------------------------------------------------------------------------
-// General filtering
-
-type Filter func(string) bool
-
-func filterIdentList(list []*Ident, f Filter) []*Ident {
-	j := 0
-	for _, x := range list {
-		if f(x.Name) {
-			list[j] = x
-			j++
-		}
-	}
-	return list[0:j]
-}
-
-// fieldName assumes that x is the type of an anonymous field and
-// returns the corresponding field name. If x is not an acceptable
-// anonymous field, the result is nil.
-func fieldName(x Expr) *Ident {
-	switch t := x.(type) {
-	case *Ident:
-		return t
-	case *SelectorExpr:
-		if _, ok := t.X.(*Ident); ok {
-			return t.Sel
-		}
-	case *StarExpr:
-		return fieldName(t.X)
-	}
-	return nil
-}
-
-func filterFieldList(fields *FieldList, filter Filter, export bool) (removedFields bool) {
-	if fields == nil {
-		return false
-	}
-	list := fields.List
-	j := 0
-	for _, f := range list {
-		keepField := false
-		if len(f.Names) == 0 {
-			// anonymous field
-			name := fieldName(f.Type)
-			keepField = name != nil && filter(name.Name)
-		} else {
-			n := len(f.Names)
-			f.Names = filterIdentList(f.Names, filter)
-			if len(f.Names) < n {
-				removedFields = true
-			}
-			keepField = len(f.Names) > 0
-		}
-		if keepField {
-			if export {
-				filterType(f.Type, filter, export)
-			}
-			list[j] = f
-			j++
-		}
-	}
-	if j < len(list) {
-		removedFields = true
-	}
-	fields.List = list[0:j]
-	return
-}
-
-func filterCompositeLit(lit *CompositeLit, filter Filter, export bool) {
-	n := len(lit.Elts)
-	lit.Elts = filterExprList(lit.Elts, filter, export)
-	if len(lit.Elts) < n {
-		lit.Incomplete = true
-	}
-}
-
-func filterExprList(list []Expr, filter Filter, export bool) []Expr {
-	j := 0
-	for _, exp := range list {
-		switch x := exp.(type) {
-		case *CompositeLit:
-			filterCompositeLit(x, filter, export)
-		case *KeyValueExpr:
-			if x, ok := x.Key.(*Ident); ok && !filter(x.Name) {
-				continue
-			}
-			if x, ok := x.Value.(*CompositeLit); ok {
-				filterCompositeLit(x, filter, export)
-			}
-		}
-		list[j] = exp
-		j++
-	}
-	return list[0:j]
-}
-
-func filterParamList(fields *FieldList, filter Filter, export bool) bool {
-	if fields == nil {
-		return false
-	}
-	var b bool
-	for _, f := range fields.List {
-		if filterType(f.Type, filter, export) {
-			b = true
-		}
-	}
-	return b
-}
-
-func filterType(typ Expr, f Filter, export bool) bool {
-	switch t := typ.(type) {
-	case *Ident:
-		return f(t.Name)
-	case *ParenExpr:
-		return filterType(t.X, f, export)
-	case *ArrayType:
-		return filterType(t.Elt, f, export)
-	case *StructType:
-		if filterFieldList(t.Fields, f, export) {
-			t.Incomplete = true
-		}
-		return len(t.Fields.List) > 0
-	case *FuncType:
-		b1 := filterParamList(t.Params, f, export)
-		b2 := filterParamList(t.Results, f, export)
-		return b1 || b2
-	case *InterfaceType:
-		if filterFieldList(t.Methods, f, export) {
-			t.Incomplete = true
-		}
-		return len(t.Methods.List) > 0
-	case *MapType:
-		b1 := filterType(t.Key, f, export)
-		b2 := filterType(t.Value, f, export)
-		return b1 || b2
-	case *ChanType:
-		return filterType(t.Value, f, export)
-	}
-	return false
-}
-
-func filterSpec(spec Spec, f Filter, export bool) bool {
-	switch s := spec.(type) {
-	case *ValueSpec:
-		s.Names = filterIdentList(s.Names, f)
-		s.Values = filterExprList(s.Values, f, export)
-		if len(s.Names) > 0 {
-			if export {
-				filterType(s.Type, f, export)
-			}
-			return true
-		}
-	case *TypeSpec:
-		if f(s.Name.Name) {
-			if export {
-				filterType(s.Type, f, export)
-			}
-			return true
-		}
-		if !export {
-			// For general filtering (not just exports),
-			// filter type even if name is not filtered
-			// out.
-			// If the type contains filtered elements,
-			// keep the declaration.
-			return filterType(s.Type, f, export)
-		}
-	}
-	return false
-}
-
-func filterSpecList(list []Spec, f Filter, export bool) []Spec {
-	j := 0
-	for _, s := range list {
-		if filterSpec(s, f, export) {
-			list[j] = s
-			j++
-		}
-	}
-	return list[0:j]
-}
-
-// FilterDecl trims the AST for a Go declaration in place by removing
-// all names (including struct field and interface method names, but
-// not from parameter lists) that don't pass through the filter f.
-//
-// FilterDecl reports whether there are any declared names left after
-// filtering.
-func FilterDecl(decl Decl, f Filter) bool {
-	return filterDecl(decl, f, false)
-}
-
-func filterDecl(decl Decl, f Filter, export bool) bool {
-	switch d := decl.(type) {
-	case *GenDecl:
-		d.Specs = filterSpecList(d.Specs, f, export)
-		return len(d.Specs) > 0
-	case *FuncDecl:
-		return f(d.Name.Name)
-	}
-	return false
-}
-
-// FilterFile trims the AST for a Go file in place by removing all
-// names from top-level declarations (including struct field and
-// interface method names, but not from parameter lists) that don't
-// pass through the filter f. If the declaration is empty afterwards,
-// the declaration is removed from the AST. Import declarations are
-// always removed. The File.Comments list is not changed.
-//
-// FilterFile reports whether there are any top-level declarations
-// left after filtering.
-func FilterFile(src *File, f Filter) bool {
-	return filterFile(src, f, false)
-}
-
-func filterFile(src *File, f Filter, export bool) bool {
-	j := 0
-	for _, d := range src.Decls {
-		if filterDecl(d, f, export) {
-			src.Decls[j] = d
-			j++
-		}
-	}
-	src.Decls = src.Decls[0:j]
-	return j > 0
-}
-
-// FilterPackage trims the AST for a Go package in place by removing
-// all names from top-level declarations (including struct field and
-// interface method names, but not from parameter lists) that don't
-// pass through the filter f. If the declaration is empty afterwards,
-// the declaration is removed from the AST. The pkg.Files list is not
-// changed, so that file names and top-level package comments don't get
-// lost.
-//
-// FilterPackage reports whether there are any top-level declarations
-// left after filtering.
-func FilterPackage(pkg *Package, f Filter) bool {
-	return filterPackage(pkg, f, false)
-}
-
-func filterPackage(pkg *Package, f Filter, export bool) bool {
-	hasDecls := false
-	for _, src := range pkg.Files {
-		if filterFile(src, f, export) {
-			hasDecls = true
-		}
-	}
-	return hasDecls
-}
-
-// ----------------------------------------------------------------------------
-// Merging of package files
-
-// The MergeMode flags control the behavior of MergePackageFiles.
-type MergeMode uint
-
-const (
-	// If set, duplicate function declarations are excluded.
-	FilterFuncDuplicates MergeMode = 1 << iota
-	// If set, comments that are not associated with a specific
-	// AST node (as Doc or Comment) are excluded.
-	FilterUnassociatedComments
-	// If set, duplicate import declarations are excluded.
-	FilterImportDuplicates
-)
-
-// nameOf returns the function (foo) or method name (foo.bar) for
-// the given function declaration. If the AST is incorrect for the
-// receiver, it assumes a function instead.
-func nameOf(f *FuncDecl) string {
-	if r := f.Recv; r != nil && len(r.List) == 1 {
-		// looks like a correct receiver declaration
-		t := r.List[0].Type
-		// dereference pointer receiver types
-		if p, _ := t.(*StarExpr); p != nil {
-			t = p.X
-		}
-		// the receiver type must be a type name
-		if p, _ := t.(*Ident); p != nil {
-			return p.Name + "." + f.Name.Name
-		}
-		// otherwise assume a function instead
-	}
-	return f.Name.Name
-}
-
-// separator is an empty //-style comment that is interspersed between
-// different comment groups when they are concatenated into a single group
-var separator = &Comment{token.NoPos, "//"}
-
-// MergePackageFiles creates a file AST by merging the ASTs of the
-// files belonging to a package. The mode flags control merging behavior.
-func MergePackageFiles(pkg *Package, mode MergeMode) *File {
-	// Count the number of package docs, comments and declarations across
-	// all package files. Also, compute sorted list of filenames, so that
-	// subsequent iterations can always iterate in the same order.
-	ndocs := 0
-	ncomments := 0
-	ndecls := 0
-	filenames := make([]string, len(pkg.Files))
-	i := 0
-	for filename, f := range pkg.Files {
-		filenames[i] = filename
-		i++
-		if f.Doc != nil {
-			ndocs += len(f.Doc.List) + 1 // +1 for separator
-		}
-		ncomments += len(f.Comments)
-		ndecls += len(f.Decls)
-	}
-	sort.Strings(filenames)
-
-	// Collect package comments from all package files into a single
-	// CommentGroup - the collected package documentation. In general
-	// there should be only one file with a package comment; but it's
-	// better to collect extra comments than drop them on the floor.
-	var doc *CommentGroup
-	var pos token.Pos
-	if ndocs > 0 {
-		list := make([]*Comment, ndocs-1) // -1: no separator before first group
-		i := 0
-		for _, filename := range filenames {
-			f := pkg.Files[filename]
-			if f.Doc != nil {
-				if i > 0 {
-					// not the first group - add separator
-					list[i] = separator
-					i++
-				}
-				for _, c := range f.Doc.List {
-					list[i] = c
-					i++
-				}
-				if f.Package > pos {
-					// Keep the maximum package clause position as
-					// position for the package clause of the merged
-					// files.
-					pos = f.Package
-				}
-			}
-		}
-		doc = &CommentGroup{list}
-	}
-
-	// Collect declarations from all package files.
-	var decls []Decl
-	if ndecls > 0 {
-		decls = make([]Decl, ndecls)
-		funcs := make(map[string]int) // map of func name -> decls index
-		i := 0                        // current index
-		n := 0                        // number of filtered entries
-		for _, filename := range filenames {
-			f := pkg.Files[filename]
-			for _, d := range f.Decls {
-				if mode&FilterFuncDuplicates != 0 {
-					// A language entity may be declared multiple
-					// times in different package files; only at
-					// build time declarations must be unique.
-					// For now, exclude multiple declarations of
-					// functions - keep the one with documentation.
-					//
-					// TODO(gri): Expand this filtering to other
-					//            entities (const, type, vars) if
-					//            multiple declarations are common.
-					if f, isFun := d.(*FuncDecl); isFun {
-						name := nameOf(f)
-						if j, exists := funcs[name]; exists {
-							// function declared already
-							if decls[j] != nil && decls[j].(*FuncDecl).Doc == nil {
-								// existing declaration has no documentation;
-								// ignore the existing declaration
-								decls[j] = nil
-							} else {
-								// ignore the new declaration
-								d = nil
-							}
-							n++ // filtered an entry
-						} else {
-							funcs[name] = i
-						}
-					}
-				}
-				decls[i] = d
-				i++
-			}
-		}
-
-		// Eliminate nil entries from the decls list if entries were
-		// filtered. We do this using a 2nd pass in order to not disturb
-		// the original declaration order in the source (otherwise, this
-		// would also invalidate the monotonically increasing position
-		// info within a single file).
-		if n > 0 {
-			i = 0
-			for _, d := range decls {
-				if d != nil {
-					decls[i] = d
-					i++
-				}
-			}
-			decls = decls[0:i]
-		}
-	}
-
-	// Collect import specs from all package files.
-	var imports []*ImportSpec
-	if mode&FilterImportDuplicates != 0 {
-		seen := make(map[string]bool)
-		for _, filename := range filenames {
-			f := pkg.Files[filename]
-			for _, imp := range f.Imports {
-				if path := imp.Path.Value; !seen[path] {
-					// TODO: consider handling cases where:
-					// - 2 imports exist with the same import path but
-					//   have different local names (one should probably
-					//   keep both of them)
-					// - 2 imports exist but only one has a comment
-					// - 2 imports exist and they both have (possibly
-					//   different) comments
-					imports = append(imports, imp)
-					seen[path] = true
-				}
-			}
-		}
-	} else {
-		// Iterate over filenames for deterministic order.
-		for _, filename := range filenames {
-			f := pkg.Files[filename]
-			imports = append(imports, f.Imports...)
-		}
-	}
-
-	// Collect comments from all package files.
-	var comments []*CommentGroup
-	if mode&FilterUnassociatedComments == 0 {
-		comments = make([]*CommentGroup, ncomments)
-		i := 0
-		for _, filename := range filenames {
-			f := pkg.Files[filename]
-			i += copy(comments[i:], f.Comments)
-		}
-	}
-
-	// TODO(gri) need to compute unresolved identifiers!
-	return &File{doc, pos, NewIdent(pkg.Name), decls, pkg.Scope, imports, nil, comments}
-}
diff --git a/internal/backport/go/ast/filter_test.go b/internal/backport/go/ast/filter_test.go
deleted file mode 100644
index fa5da6a..0000000
--- a/internal/backport/go/ast/filter_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// To avoid a cyclic dependency with go/parser, this file is in a separate package.
-
-package ast_test
-
-import (
-	"bytes"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-	"testing"
-)
-
-const input = `package p
-
-type t1 struct{}
-type t2 struct{}
-
-func f1() {}
-func f1() {}
-func f2() {}
-
-func (*t1) f1() {}
-func (t1) f1() {}
-func (t1) f2() {}
-
-func (t2) f1() {}
-func (t2) f2() {}
-func (x *t2) f2() {}
-`
-
-// Calling ast.MergePackageFiles with ast.FilterFuncDuplicates
-// keeps a duplicate entry with attached documentation in favor
-// of one without, and it favors duplicate entries appearing
-// later in the source over ones appearing earlier. This is why
-// (*t2).f2 is kept and t2.f2 is eliminated in this test case.
-const golden = `package p
-
-type t1 struct{}
-type t2 struct{}
-
-func f1() {}
-func f2() {}
-
-func (t1) f1() {}
-func (t1) f2() {}
-
-func (t2) f1() {}
-
-func (x *t2) f2() {}
-`
-
-func TestFilterDuplicates(t *testing.T) {
-	// parse input
-	fset := token.NewFileSet()
-	file, err := parser.ParseFile(fset, "", input, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// create package
-	files := map[string]*ast.File{"": file}
-	pkg, err := ast.NewPackage(fset, files, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// filter
-	merged := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates)
-
-	// pretty-print
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, merged); err != nil {
-		t.Fatal(err)
-	}
-	output := buf.String()
-
-	if output != golden {
-		t.Errorf("incorrect output:\n%s", output)
-	}
-}
diff --git a/internal/backport/go/ast/import.go b/internal/backport/go/ast/import.go
deleted file mode 100644
index 7c70ef6..0000000
--- a/internal/backport/go/ast/import.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import (
-	"golang.org/x/website/internal/backport/go/token"
-	"sort"
-	"strconv"
-)
-
-// SortImports sorts runs of consecutive import lines in import blocks in f.
-// It also removes duplicate imports when it is possible to do so without data loss.
-func SortImports(fset *token.FileSet, f *File) {
-	for _, d := range f.Decls {
-		d, ok := d.(*GenDecl)
-		if !ok || d.Tok != token.IMPORT {
-			// Not an import declaration, so we're done.
-			// Imports are always first.
-			break
-		}
-
-		if !d.Lparen.IsValid() {
-			// Not a block: sorted by default.
-			continue
-		}
-
-		// Identify and sort runs of specs on successive lines.
-		i := 0
-		specs := d.Specs[:0]
-		for j, s := range d.Specs {
-			if j > i && lineAt(fset, s.Pos()) > 1+lineAt(fset, d.Specs[j-1].End()) {
-				// j begins a new run. End this one.
-				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
-				i = j
-			}
-		}
-		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
-		d.Specs = specs
-
-		// Deduping can leave a blank line before the rparen; clean that up.
-		if len(d.Specs) > 0 {
-			lastSpec := d.Specs[len(d.Specs)-1]
-			lastLine := lineAt(fset, lastSpec.Pos())
-			rParenLine := lineAt(fset, d.Rparen)
-			for rParenLine > lastLine+1 {
-				rParenLine--
-				fset.File(d.Rparen).MergeLine(rParenLine)
-			}
-		}
-	}
-}
-
-func lineAt(fset *token.FileSet, pos token.Pos) int {
-	return fset.PositionFor(pos, false).Line
-}
-
-func importPath(s Spec) string {
-	t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
-	if err == nil {
-		return t
-	}
-	return ""
-}
-
-func importName(s Spec) string {
-	n := s.(*ImportSpec).Name
-	if n == nil {
-		return ""
-	}
-	return n.Name
-}
-
-func importComment(s Spec) string {
-	c := s.(*ImportSpec).Comment
-	if c == nil {
-		return ""
-	}
-	return c.Text()
-}
-
-// collapse indicates whether prev may be removed, leaving only next.
-func collapse(prev, next Spec) bool {
-	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
-		return false
-	}
-	return prev.(*ImportSpec).Comment == nil
-}
-
-type posSpan struct {
-	Start token.Pos
-	End   token.Pos
-}
-
-type cgPos struct {
-	left bool // true if comment is to the left of the spec, false otherwise.
-	cg   *CommentGroup
-}
-
-func sortSpecs(fset *token.FileSet, f *File, specs []Spec) []Spec {
-	// Can't short-circuit here even if specs are already sorted,
-	// since they might yet need deduplication.
-	// A lone import, however, may be safely ignored.
-	if len(specs) <= 1 {
-		return specs
-	}
-
-	// Record positions for specs.
-	pos := make([]posSpan, len(specs))
-	for i, s := range specs {
-		pos[i] = posSpan{s.Pos(), s.End()}
-	}
-
-	// Identify comments in this range.
-	begSpecs := pos[0].Start
-	endSpecs := pos[len(pos)-1].End
-	beg := fset.File(begSpecs).LineStart(lineAt(fset, begSpecs))
-	endLine := lineAt(fset, endSpecs)
-	endFile := fset.File(endSpecs)
-	var end token.Pos
-	if endLine == endFile.LineCount() {
-		end = endSpecs
-	} else {
-		end = endFile.LineStart(endLine + 1) // beginning of next line
-	}
-	first := len(f.Comments)
-	last := -1
-	for i, g := range f.Comments {
-		if g.End() >= end {
-			break
-		}
-		// g.End() < end
-		if beg <= g.Pos() {
-			// comment is within the range [beg, end[ of import declarations
-			if i < first {
-				first = i
-			}
-			if i > last {
-				last = i
-			}
-		}
-	}
-
-	var comments []*CommentGroup
-	if last >= 0 {
-		comments = f.Comments[first : last+1]
-	}
-
-	// Assign each comment to the import spec on the same line.
-	importComments := map[*ImportSpec][]cgPos{}
-	specIndex := 0
-	for _, g := range comments {
-		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
-			specIndex++
-		}
-		var left bool
-		// A block comment can appear before the first import spec.
-		if specIndex == 0 && pos[specIndex].Start > g.Pos() {
-			left = true
-		} else if specIndex+1 < len(specs) && // Or it can appear on the left of an import spec.
-			lineAt(fset, pos[specIndex].Start)+1 == lineAt(fset, g.Pos()) {
-			specIndex++
-			left = true
-		}
-		s := specs[specIndex].(*ImportSpec)
-		importComments[s] = append(importComments[s], cgPos{left: left, cg: g})
-	}
-
-	// Sort the import specs by import path.
-	// Remove duplicates, when possible without data loss.
-	// Reassign the import paths to have the same position sequence.
-	// Reassign each comment to the spec on the same line.
-	// Sort the comments by new position.
-	sort.Slice(specs, func(i, j int) bool {
-		ipath := importPath(specs[i])
-		jpath := importPath(specs[j])
-		if ipath != jpath {
-			return ipath < jpath
-		}
-		iname := importName(specs[i])
-		jname := importName(specs[j])
-		if iname != jname {
-			return iname < jname
-		}
-		return importComment(specs[i]) < importComment(specs[j])
-	})
-
-	// Dedup. Thanks to our sorting, we can just consider
-	// adjacent pairs of imports.
-	deduped := specs[:0]
-	for i, s := range specs {
-		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
-			deduped = append(deduped, s)
-		} else {
-			p := s.Pos()
-			fset.File(p).MergeLine(lineAt(fset, p))
-		}
-	}
-	specs = deduped
-
-	// Fix up comment positions
-	for i, s := range specs {
-		s := s.(*ImportSpec)
-		if s.Name != nil {
-			s.Name.NamePos = pos[i].Start
-		}
-		s.Path.ValuePos = pos[i].Start
-		s.EndPos = pos[i].End
-		for _, g := range importComments[s] {
-			for _, c := range g.cg.List {
-				if g.left {
-					c.Slash = pos[i].Start - 1
-				} else {
-					// An import spec can have both block comment and a line comment
-					// to its right. In that case, both of them will have the same pos.
-					// But while formatting the AST, the line comment gets moved to
-					// after the block comment.
-					c.Slash = pos[i].End
-				}
-			}
-		}
-	}
-
-	sort.Slice(comments, func(i, j int) bool {
-		return comments[i].Pos() < comments[j].Pos()
-	})
-
-	return specs
-}
diff --git a/internal/backport/go/ast/issues_test.go b/internal/backport/go/ast/issues_test.go
deleted file mode 100644
index 3fb3205..0000000
--- a/internal/backport/go/ast/issues_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast_test
-
-import (
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-	"testing"
-)
-
-func TestIssue33649(t *testing.T) {
-	for _, src := range []string{
-		`package p; func _()`,
-		`package p; func _() {`,
-		`package p; func _() { _ = 0`,
-		`package p; func _() { _ = 0 }`,
-	} {
-		fset := token.NewFileSet()
-		f, _ := parser.ParseFile(fset, "", src, parser.AllErrors)
-		if f == nil {
-			panic("invalid test setup: parser didn't return an AST")
-		}
-
-		// find corresponding token.File
-		var tf *token.File
-		fset.Iterate(func(f *token.File) bool {
-			tf = f
-			return true
-		})
-		tfEnd := tf.Base() + tf.Size()
-
-		fd := f.Decls[len(f.Decls)-1].(*ast.FuncDecl)
-		fdEnd := int(fd.End())
-
-		if fdEnd != tfEnd {
-			t.Errorf("%q: got fdEnd = %d; want %d (base = %d, size = %d)", src, fdEnd, tfEnd, tf.Base(), tf.Size())
-		}
-	}
-}
diff --git a/internal/backport/go/ast/print.go b/internal/backport/go/ast/print.go
deleted file mode 100644
index 16ce365..0000000
--- a/internal/backport/go/ast/print.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains printing support for ASTs.
-
-package ast
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// A FieldFilter may be provided to Fprint to control the output.
-type FieldFilter func(name string, value reflect.Value) bool
-
-// NotNilFilter returns true for field values that are not nil;
-// it returns false otherwise.
-func NotNilFilter(_ string, v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return !v.IsNil()
-	}
-	return true
-}
-
-// Fprint prints the (sub-)tree starting at AST node x to w.
-// If fset != nil, position information is interpreted relative
-// to that file set. Otherwise positions are printed as integer
-// values (file set specific offsets).
-//
-// A non-nil FieldFilter f may be provided to control the output:
-// struct fields for which f(fieldname, fieldvalue) is true are
-// printed; all others are filtered from the output. Unexported
-// struct fields are never printed.
-func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) error {
-	return fprint(w, fset, x, f)
-}
-
-func fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (err error) {
-	// setup printer
-	p := printer{
-		output: w,
-		fset:   fset,
-		filter: f,
-		ptrmap: make(map[interface{}]int),
-		last:   '\n', // force printing of line number on first line
-	}
-
-	// install error handler
-	defer func() {
-		if e := recover(); e != nil {
-			err = e.(localError).err // re-panics if it's not a localError
-		}
-	}()
-
-	// print x
-	if x == nil {
-		p.printf("nil\n")
-		return
-	}
-	p.print(reflect.ValueOf(x))
-	p.printf("\n")
-
-	return
-}
-
-// Print prints x to standard output, skipping nil fields.
-// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
-func Print(fset *token.FileSet, x interface{}) error {
-	return Fprint(os.Stdout, fset, x, NotNilFilter)
-}
-
-type printer struct {
-	output io.Writer
-	fset   *token.FileSet
-	filter FieldFilter
-	ptrmap map[interface{}]int // *T -> line number
-	indent int                 // current indentation level
-	last   byte                // the last byte processed by Write
-	line   int                 // current line number
-}
-
-var indent = []byte(".  ")
-
-func (p *printer) Write(data []byte) (n int, err error) {
-	var m int
-	for i, b := range data {
-		// invariant: data[0:n] has been written
-		if b == '\n' {
-			m, err = p.output.Write(data[n : i+1])
-			n += m
-			if err != nil {
-				return
-			}
-			p.line++
-		} else if p.last == '\n' {
-			_, err = fmt.Fprintf(p.output, "%6d  ", p.line)
-			if err != nil {
-				return
-			}
-			for j := p.indent; j > 0; j-- {
-				_, err = p.output.Write(indent)
-				if err != nil {
-					return
-				}
-			}
-		}
-		p.last = b
-	}
-	if len(data) > n {
-		m, err = p.output.Write(data[n:])
-		n += m
-	}
-	return
-}
-
-// localError wraps locally caught errors so we can distinguish
-// them from genuine panics which we don't want to return as errors.
-type localError struct {
-	err error
-}
-
-// printf is a convenience wrapper that takes care of print errors.
-func (p *printer) printf(format string, args ...interface{}) {
-	if _, err := fmt.Fprintf(p, format, args...); err != nil {
-		panic(localError{err})
-	}
-}
-
-// Implementation note: Print is written for AST nodes but could be
-// used to print arbitrary data structures; such a version should
-// probably be in a different package.
-//
-// Note: This code detects (some) cycles created via pointers but
-// not cycles that are created via slices or maps containing the
-// same slice or map. Code for general data structures probably
-// should catch those as well.
-
-func (p *printer) print(x reflect.Value) {
-	if !NotNilFilter("", x) {
-		p.printf("nil")
-		return
-	}
-
-	switch x.Kind() {
-	case reflect.Interface:
-		p.print(x.Elem())
-
-	case reflect.Map:
-		p.printf("%s (len = %d) {", x.Type(), x.Len())
-		if x.Len() > 0 {
-			p.indent++
-			p.printf("\n")
-			for _, key := range x.MapKeys() {
-				p.print(key)
-				p.printf(": ")
-				p.print(x.MapIndex(key))
-				p.printf("\n")
-			}
-			p.indent--
-		}
-		p.printf("}")
-
-	case reflect.Ptr:
-		p.printf("*")
-		// type-checked ASTs may contain cycles - use ptrmap
-		// to keep track of objects that have been printed
-		// already and print the respective line number instead
-		ptr := x.Interface()
-		if line, exists := p.ptrmap[ptr]; exists {
-			p.printf("(obj @ %d)", line)
-		} else {
-			p.ptrmap[ptr] = p.line
-			p.print(x.Elem())
-		}
-
-	case reflect.Array:
-		p.printf("%s {", x.Type())
-		if x.Len() > 0 {
-			p.indent++
-			p.printf("\n")
-			for i, n := 0, x.Len(); i < n; i++ {
-				p.printf("%d: ", i)
-				p.print(x.Index(i))
-				p.printf("\n")
-			}
-			p.indent--
-		}
-		p.printf("}")
-
-	case reflect.Slice:
-		if s, ok := x.Interface().([]byte); ok {
-			p.printf("%#q", s)
-			return
-		}
-		p.printf("%s (len = %d) {", x.Type(), x.Len())
-		if x.Len() > 0 {
-			p.indent++
-			p.printf("\n")
-			for i, n := 0, x.Len(); i < n; i++ {
-				p.printf("%d: ", i)
-				p.print(x.Index(i))
-				p.printf("\n")
-			}
-			p.indent--
-		}
-		p.printf("}")
-
-	case reflect.Struct:
-		t := x.Type()
-		p.printf("%s {", t)
-		p.indent++
-		first := true
-		for i, n := 0, t.NumField(); i < n; i++ {
-			// exclude non-exported fields because their
-			// values cannot be accessed via reflection
-			if name := t.Field(i).Name; IsExported(name) {
-				value := x.Field(i)
-				if p.filter == nil || p.filter(name, value) {
-					if first {
-						p.printf("\n")
-						first = false
-					}
-					p.printf("%s: ", name)
-					p.print(value)
-					p.printf("\n")
-				}
-			}
-		}
-		p.indent--
-		p.printf("}")
-
-	default:
-		v := x.Interface()
-		switch v := v.(type) {
-		case string:
-			// print strings in quotes
-			p.printf("%q", v)
-			return
-		case token.Pos:
-			// position values can be printed nicely if we have a file set
-			if p.fset != nil {
-				p.printf("%s", p.fset.Position(v))
-				return
-			}
-		}
-		// default
-		p.printf("%v", v)
-	}
-}
diff --git a/internal/backport/go/ast/print_test.go b/internal/backport/go/ast/print_test.go
deleted file mode 100644
index 210f164..0000000
--- a/internal/backport/go/ast/print_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-)
-
-var tests = []struct {
-	x interface{} // x is printed as s
-	s string
-}{
-	// basic types
-	{nil, "0  nil"},
-	{true, "0  true"},
-	{42, "0  42"},
-	{3.14, "0  3.14"},
-	{1 + 2.718i, "0  (1+2.718i)"},
-	{"foobar", "0  \"foobar\""},
-
-	// maps
-	{map[Expr]string{}, `0  map[ast.Expr]string (len = 0) {}`},
-	{map[string]int{"a": 1},
-		`0  map[string]int (len = 1) {
-		1  .  "a": 1
-		2  }`},
-
-	// pointers
-	{new(int), "0  *0"},
-
-	// arrays
-	{[0]int{}, `0  [0]int {}`},
-	{[3]int{1, 2, 3},
-		`0  [3]int {
-		1  .  0: 1
-		2  .  1: 2
-		3  .  2: 3
-		4  }`},
-	{[...]int{42},
-		`0  [1]int {
-		1  .  0: 42
-		2  }`},
-
-	// slices
-	{[]int{}, `0  []int (len = 0) {}`},
-	{[]int{1, 2, 3},
-		`0  []int (len = 3) {
-		1  .  0: 1
-		2  .  1: 2
-		3  .  2: 3
-		4  }`},
-
-	// structs
-	{struct{}{}, `0  struct {} {}`},
-	{struct{ x int }{007}, `0  struct { x int } {}`},
-	{struct{ X, y int }{42, 991},
-		`0  struct { X int; y int } {
-		1  .  X: 42
-		2  }`},
-	{struct{ X, Y int }{42, 991},
-		`0  struct { X int; Y int } {
-		1  .  X: 42
-		2  .  Y: 991
-		3  }`},
-}
-
-// Split s into lines, trim whitespace from all lines, and return
-// the concatenated non-empty lines.
-func trim(s string) string {
-	lines := strings.Split(s, "\n")
-	i := 0
-	for _, line := range lines {
-		line = strings.TrimSpace(line)
-		if line != "" {
-			lines[i] = line
-			i++
-		}
-	}
-	return strings.Join(lines[0:i], "\n")
-}
-
-func TestPrint(t *testing.T) {
-	var buf bytes.Buffer
-	for _, test := range tests {
-		buf.Reset()
-		if err := Fprint(&buf, nil, test.x, nil); err != nil {
-			t.Errorf("Fprint failed: %s", err)
-		}
-		if s, ts := trim(buf.String()), trim(test.s); s != ts {
-			t.Errorf("got:\n%s\nexpected:\n%s\n", s, ts)
-		}
-	}
-}
diff --git a/internal/backport/go/ast/resolve.go b/internal/backport/go/ast/resolve.go
deleted file mode 100644
index 4d3d3fd..0000000
--- a/internal/backport/go/ast/resolve.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements NewPackage.
-
-package ast
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-	"strconv"
-)
-
-type pkgBuilder struct {
-	fset   *token.FileSet
-	errors scanner.ErrorList
-}
-
-func (p *pkgBuilder) error(pos token.Pos, msg string) {
-	p.errors.Add(p.fset.Position(pos), msg)
-}
-
-func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
-	p.error(pos, fmt.Sprintf(format, args...))
-}
-
-func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
-	alt := scope.Insert(obj)
-	if alt == nil && altScope != nil {
-		// see if there is a conflicting declaration in altScope
-		alt = altScope.Lookup(obj.Name)
-	}
-	if alt != nil {
-		prevDecl := ""
-		if pos := alt.Pos(); pos.IsValid() {
-			prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.fset.Position(pos))
-		}
-		p.error(obj.Pos(), fmt.Sprintf("%s redeclared in this block%s", obj.Name, prevDecl))
-	}
-}
-
-func resolve(scope *Scope, ident *Ident) bool {
-	for ; scope != nil; scope = scope.Outer {
-		if obj := scope.Lookup(ident.Name); obj != nil {
-			ident.Obj = obj
-			return true
-		}
-	}
-	return false
-}
-
-// An Importer resolves import paths to package Objects.
-// The imports map records the packages already imported,
-// indexed by package id (canonical import path).
-// An Importer must determine the canonical import path and
-// check the map to see if it is already present in the imports map.
-// If so, the Importer can return the map entry. Otherwise, the
-// Importer should load the package data for the given path into
-// a new *Object (pkg), record pkg in the imports map, and then
-// return pkg.
-type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
-
-// NewPackage creates a new Package node from a set of File nodes. It resolves
-// unresolved identifiers across files and updates each file's Unresolved list
-// accordingly. If a non-nil importer and universe scope are provided, they are
-// used to resolve identifiers not declared in any of the package files. Any
-// remaining unresolved identifiers are reported as undeclared. If the files
-// belong to different packages, one package name is selected and files with
-// different package names are reported and then ignored.
-// The result is a package node and a scanner.ErrorList if there were errors.
-func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
-	var p pkgBuilder
-	p.fset = fset
-
-	// complete package scope
-	pkgName := ""
-	pkgScope := NewScope(universe)
-	for _, file := range files {
-		// package names must match
-		switch name := file.Name.Name; {
-		case pkgName == "":
-			pkgName = name
-		case name != pkgName:
-			p.errorf(file.Package, "package %s; expected %s", name, pkgName)
-			continue // ignore this file
-		}
-
-		// collect top-level file objects in package scope
-		for _, obj := range file.Scope.Objects {
-			p.declare(pkgScope, nil, obj)
-		}
-	}
-
-	// package global mapping of imported package ids to package objects
-	imports := make(map[string]*Object)
-
-	// complete file scopes with imports and resolve identifiers
-	for _, file := range files {
-		// ignore file if it belongs to a different package
-		// (error has already been reported)
-		if file.Name.Name != pkgName {
-			continue
-		}
-
-		// build file scope by processing all imports
-		importErrors := false
-		fileScope := NewScope(pkgScope)
-		for _, spec := range file.Imports {
-			if importer == nil {
-				importErrors = true
-				continue
-			}
-			path, _ := strconv.Unquote(spec.Path.Value)
-			pkg, err := importer(imports, path)
-			if err != nil {
-				p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
-				importErrors = true
-				continue
-			}
-			// TODO(gri) If a local package name != "." is provided,
-			// global identifier resolution could proceed even if the
-			// import failed. Consider adjusting the logic here a bit.
-
-			// local name overrides imported package name
-			name := pkg.Name
-			if spec.Name != nil {
-				name = spec.Name.Name
-			}
-
-			// add import to file scope
-			if name == "." {
-				// merge imported scope with file scope
-				for _, obj := range pkg.Data.(*Scope).Objects {
-					p.declare(fileScope, pkgScope, obj)
-				}
-			} else if name != "_" {
-				// declare imported package object in file scope
-				// (do not re-use pkg in the file scope but create
-				// a new object instead; the Decl field is different
-				// for different files)
-				obj := NewObj(Pkg, name)
-				obj.Decl = spec
-				obj.Data = pkg.Data
-				p.declare(fileScope, pkgScope, obj)
-			}
-		}
-
-		// resolve identifiers
-		if importErrors {
-			// don't use the universe scope without correct imports
-			// (objects in the universe may be shadowed by imports;
-			// with missing imports, identifiers might get resolved
-			// incorrectly to universe objects)
-			pkgScope.Outer = nil
-		}
-		i := 0
-		for _, ident := range file.Unresolved {
-			if !resolve(fileScope, ident) {
-				p.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
-				file.Unresolved[i] = ident
-				i++
-			}
-
-		}
-		file.Unresolved = file.Unresolved[0:i]
-		pkgScope.Outer = universe // reset universe scope
-	}
-
-	p.errors.Sort()
-	return &Package{pkgName, pkgScope, imports, files}, p.errors.Err()
-}
diff --git a/internal/backport/go/ast/scope.go b/internal/backport/go/ast/scope.go
deleted file mode 100644
index 165c935..0000000
--- a/internal/backport/go/ast/scope.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements scopes and the objects they contain.
-
-package ast
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// A Scope maintains the set of named language entities declared
-// in the scope and a link to the immediately surrounding (outer)
-// scope.
-type Scope struct {
-	Outer   *Scope
-	Objects map[string]*Object
-}
-
-// NewScope creates a new scope nested in the outer scope.
-func NewScope(outer *Scope) *Scope {
-	const n = 4 // initial scope capacity
-	return &Scope{outer, make(map[string]*Object, n)}
-}
-
-// Lookup returns the object with the given name if it is
-// found in scope s, otherwise it returns nil. Outer scopes
-// are ignored.
-func (s *Scope) Lookup(name string) *Object {
-	return s.Objects[name]
-}
-
-// Insert attempts to insert a named object obj into the scope s.
-// If the scope already contains an object alt with the same name,
-// Insert leaves the scope unchanged and returns alt. Otherwise
-// it inserts obj and returns nil.
-func (s *Scope) Insert(obj *Object) (alt *Object) {
-	if alt = s.Objects[obj.Name]; alt == nil {
-		s.Objects[obj.Name] = obj
-	}
-	return
-}
-
-// Debugging support
-func (s *Scope) String() string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "scope %p {", s)
-	if s != nil && len(s.Objects) > 0 {
-		fmt.Fprintln(&buf)
-		for _, obj := range s.Objects {
-			fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
-		}
-	}
-	fmt.Fprintf(&buf, "}\n")
-	return buf.String()
-}
-
-// ----------------------------------------------------------------------------
-// Objects
-
-// An Object describes a named language entity such as a package,
-// constant, type, variable, function (incl. methods), or label.
-//
-// The Data fields contains object-specific data:
-//
-//	Kind    Data type         Data value
-//	Pkg     *Scope            package scope
-//	Con     int               iota for the respective declaration
-type Object struct {
-	Kind ObjKind
-	Name string      // declared name
-	Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
-	Data interface{} // object-specific data; or nil
-	Type interface{} // placeholder for type information; may be nil
-}
-
-// NewObj creates a new object of a given kind and name.
-func NewObj(kind ObjKind, name string) *Object {
-	return &Object{Kind: kind, Name: name}
-}
-
-// Pos computes the source position of the declaration of an object name.
-// The result may be an invalid position if it cannot be computed
-// (obj.Decl may be nil or not correct).
-func (obj *Object) Pos() token.Pos {
-	name := obj.Name
-	switch d := obj.Decl.(type) {
-	case *Field:
-		for _, n := range d.Names {
-			if n.Name == name {
-				return n.Pos()
-			}
-		}
-	case *ImportSpec:
-		if d.Name != nil && d.Name.Name == name {
-			return d.Name.Pos()
-		}
-		return d.Path.Pos()
-	case *ValueSpec:
-		for _, n := range d.Names {
-			if n.Name == name {
-				return n.Pos()
-			}
-		}
-	case *TypeSpec:
-		if d.Name.Name == name {
-			return d.Name.Pos()
-		}
-	case *FuncDecl:
-		if d.Name.Name == name {
-			return d.Name.Pos()
-		}
-	case *LabeledStmt:
-		if d.Label.Name == name {
-			return d.Label.Pos()
-		}
-	case *AssignStmt:
-		for _, x := range d.Lhs {
-			if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
-				return ident.Pos()
-			}
-		}
-	case *Scope:
-		// predeclared object - nothing to do for now
-	}
-	return token.NoPos
-}
-
-// ObjKind describes what an object represents.
-type ObjKind int
-
-// The list of possible Object kinds.
-const (
-	Bad ObjKind = iota // for error handling
-	Pkg                // package
-	Con                // constant
-	Typ                // type
-	Var                // variable
-	Fun                // function or method
-	Lbl                // label
-)
-
-var objKindStrings = [...]string{
-	Bad: "bad",
-	Pkg: "package",
-	Con: "const",
-	Typ: "type",
-	Var: "var",
-	Fun: "func",
-	Lbl: "label",
-}
-
-func (kind ObjKind) String() string { return objKindStrings[kind] }
diff --git a/internal/backport/go/ast/walk.go b/internal/backport/go/ast/walk.go
deleted file mode 100644
index a293c99..0000000
--- a/internal/backport/go/ast/walk.go
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ast
-
-import "fmt"
-
-// A Visitor's Visit method is invoked for each node encountered by Walk.
-// If the result visitor w is not nil, Walk visits each of the children
-// of node with the visitor w, followed by a call of w.Visit(nil).
-type Visitor interface {
-	Visit(node Node) (w Visitor)
-}
-
-// Helper functions for common node lists. They may be empty.
-
-func walkIdentList(v Visitor, list []*Ident) {
-	for _, x := range list {
-		Walk(v, x)
-	}
-}
-
-func walkExprList(v Visitor, list []Expr) {
-	for _, x := range list {
-		Walk(v, x)
-	}
-}
-
-func walkStmtList(v Visitor, list []Stmt) {
-	for _, x := range list {
-		Walk(v, x)
-	}
-}
-
-func walkDeclList(v Visitor, list []Decl) {
-	for _, x := range list {
-		Walk(v, x)
-	}
-}
-
-// TODO(gri): Investigate if providing a closure to Walk leads to
-// simpler use (and may help eliminate Inspect in turn).
-
-// Walk traverses an AST in depth-first order: It starts by calling
-// v.Visit(node); node must not be nil. If the visitor w returned by
-// v.Visit(node) is not nil, Walk is invoked recursively with visitor
-// w for each of the non-nil children of node, followed by a call of
-// w.Visit(nil).
-func Walk(v Visitor, node Node) {
-	if v = v.Visit(node); v == nil {
-		return
-	}
-
-	// walk children
-	// (the order of the cases matches the order
-	// of the corresponding node types in ast.go)
-	switch n := node.(type) {
-	// Comments and fields
-	case *Comment:
-		// nothing to do
-
-	case *CommentGroup:
-		for _, c := range n.List {
-			Walk(v, c)
-		}
-
-	case *Field:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		walkIdentList(v, n.Names)
-		if n.Type != nil {
-			Walk(v, n.Type)
-		}
-		if n.Tag != nil {
-			Walk(v, n.Tag)
-		}
-		if n.Comment != nil {
-			Walk(v, n.Comment)
-		}
-
-	case *FieldList:
-		for _, f := range n.List {
-			Walk(v, f)
-		}
-
-	// Expressions
-	case *BadExpr, *Ident, *BasicLit:
-		// nothing to do
-
-	case *Ellipsis:
-		if n.Elt != nil {
-			Walk(v, n.Elt)
-		}
-
-	case *FuncLit:
-		Walk(v, n.Type)
-		Walk(v, n.Body)
-
-	case *CompositeLit:
-		if n.Type != nil {
-			Walk(v, n.Type)
-		}
-		walkExprList(v, n.Elts)
-
-	case *ParenExpr:
-		Walk(v, n.X)
-
-	case *SelectorExpr:
-		Walk(v, n.X)
-		Walk(v, n.Sel)
-
-	case *IndexExpr:
-		Walk(v, n.X)
-		Walk(v, n.Index)
-
-	case *IndexListExpr:
-		Walk(v, n.X)
-		for _, index := range n.Indices {
-			Walk(v, index)
-		}
-
-	case *SliceExpr:
-		Walk(v, n.X)
-		if n.Low != nil {
-			Walk(v, n.Low)
-		}
-		if n.High != nil {
-			Walk(v, n.High)
-		}
-		if n.Max != nil {
-			Walk(v, n.Max)
-		}
-
-	case *TypeAssertExpr:
-		Walk(v, n.X)
-		if n.Type != nil {
-			Walk(v, n.Type)
-		}
-
-	case *CallExpr:
-		Walk(v, n.Fun)
-		walkExprList(v, n.Args)
-
-	case *StarExpr:
-		Walk(v, n.X)
-
-	case *UnaryExpr:
-		Walk(v, n.X)
-
-	case *BinaryExpr:
-		Walk(v, n.X)
-		Walk(v, n.Y)
-
-	case *KeyValueExpr:
-		Walk(v, n.Key)
-		Walk(v, n.Value)
-
-	// Types
-	case *ArrayType:
-		if n.Len != nil {
-			Walk(v, n.Len)
-		}
-		Walk(v, n.Elt)
-
-	case *StructType:
-		Walk(v, n.Fields)
-
-	case *FuncType:
-		if n.TypeParams != nil {
-			Walk(v, n.TypeParams)
-		}
-		if n.Params != nil {
-			Walk(v, n.Params)
-		}
-		if n.Results != nil {
-			Walk(v, n.Results)
-		}
-
-	case *InterfaceType:
-		Walk(v, n.Methods)
-
-	case *MapType:
-		Walk(v, n.Key)
-		Walk(v, n.Value)
-
-	case *ChanType:
-		Walk(v, n.Value)
-
-	// Statements
-	case *BadStmt:
-		// nothing to do
-
-	case *DeclStmt:
-		Walk(v, n.Decl)
-
-	case *EmptyStmt:
-		// nothing to do
-
-	case *LabeledStmt:
-		Walk(v, n.Label)
-		Walk(v, n.Stmt)
-
-	case *ExprStmt:
-		Walk(v, n.X)
-
-	case *SendStmt:
-		Walk(v, n.Chan)
-		Walk(v, n.Value)
-
-	case *IncDecStmt:
-		Walk(v, n.X)
-
-	case *AssignStmt:
-		walkExprList(v, n.Lhs)
-		walkExprList(v, n.Rhs)
-
-	case *GoStmt:
-		Walk(v, n.Call)
-
-	case *DeferStmt:
-		Walk(v, n.Call)
-
-	case *ReturnStmt:
-		walkExprList(v, n.Results)
-
-	case *BranchStmt:
-		if n.Label != nil {
-			Walk(v, n.Label)
-		}
-
-	case *BlockStmt:
-		walkStmtList(v, n.List)
-
-	case *IfStmt:
-		if n.Init != nil {
-			Walk(v, n.Init)
-		}
-		Walk(v, n.Cond)
-		Walk(v, n.Body)
-		if n.Else != nil {
-			Walk(v, n.Else)
-		}
-
-	case *CaseClause:
-		walkExprList(v, n.List)
-		walkStmtList(v, n.Body)
-
-	case *SwitchStmt:
-		if n.Init != nil {
-			Walk(v, n.Init)
-		}
-		if n.Tag != nil {
-			Walk(v, n.Tag)
-		}
-		Walk(v, n.Body)
-
-	case *TypeSwitchStmt:
-		if n.Init != nil {
-			Walk(v, n.Init)
-		}
-		Walk(v, n.Assign)
-		Walk(v, n.Body)
-
-	case *CommClause:
-		if n.Comm != nil {
-			Walk(v, n.Comm)
-		}
-		walkStmtList(v, n.Body)
-
-	case *SelectStmt:
-		Walk(v, n.Body)
-
-	case *ForStmt:
-		if n.Init != nil {
-			Walk(v, n.Init)
-		}
-		if n.Cond != nil {
-			Walk(v, n.Cond)
-		}
-		if n.Post != nil {
-			Walk(v, n.Post)
-		}
-		Walk(v, n.Body)
-
-	case *RangeStmt:
-		if n.Key != nil {
-			Walk(v, n.Key)
-		}
-		if n.Value != nil {
-			Walk(v, n.Value)
-		}
-		Walk(v, n.X)
-		Walk(v, n.Body)
-
-	// Declarations
-	case *ImportSpec:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		if n.Name != nil {
-			Walk(v, n.Name)
-		}
-		Walk(v, n.Path)
-		if n.Comment != nil {
-			Walk(v, n.Comment)
-		}
-
-	case *ValueSpec:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		walkIdentList(v, n.Names)
-		if n.Type != nil {
-			Walk(v, n.Type)
-		}
-		walkExprList(v, n.Values)
-		if n.Comment != nil {
-			Walk(v, n.Comment)
-		}
-
-	case *TypeSpec:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		Walk(v, n.Name)
-		if n.TypeParams != nil {
-			Walk(v, n.TypeParams)
-		}
-		Walk(v, n.Type)
-		if n.Comment != nil {
-			Walk(v, n.Comment)
-		}
-
-	case *BadDecl:
-		// nothing to do
-
-	case *GenDecl:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		for _, s := range n.Specs {
-			Walk(v, s)
-		}
-
-	case *FuncDecl:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		if n.Recv != nil {
-			Walk(v, n.Recv)
-		}
-		Walk(v, n.Name)
-		Walk(v, n.Type)
-		if n.Body != nil {
-			Walk(v, n.Body)
-		}
-
-	// Files and packages
-	case *File:
-		if n.Doc != nil {
-			Walk(v, n.Doc)
-		}
-		Walk(v, n.Name)
-		walkDeclList(v, n.Decls)
-		// don't walk n.Comments - they have been
-		// visited already through the individual
-		// nodes
-
-	case *Package:
-		for _, f := range n.Files {
-			Walk(v, f)
-		}
-
-	default:
-		panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
-	}
-
-	v.Visit(nil)
-}
-
-type inspector func(Node) bool
-
-func (f inspector) Visit(node Node) Visitor {
-	if f(node) {
-		return f
-	}
-	return nil
-}
-
-// Inspect traverses an AST in depth-first order: It starts by calling
-// f(node); node must not be nil. If f returns true, Inspect invokes f
-// recursively for each of the non-nil children of node, followed by a
-// call of f(nil).
-func Inspect(node Node, f func(Node) bool) {
-	Walk(inspector(f), node)
-}
diff --git a/internal/backport/go/build/build.go b/internal/backport/go/build/build.go
deleted file mode 100644
index 01aab3c..0000000
--- a/internal/backport/go/build/build.go
+++ /dev/null
@@ -1,1970 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"go/build"
-	"go/build/constraint"
-	"golang.org/x/website/internal/backport/go/doc"
-	"io"
-	"io/fs"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	pathpkg "path"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// A Context specifies the supporting context for a build.
-type Context struct {
-	GOARCH string // target architecture
-	GOOS   string // target operating system
-	GOROOT string // Go root
-	GOPATH string // Go path
-
-	// Dir is the caller's working directory, or the empty string to use
-	// the current directory of the running process. In module mode, this is used
-	// to locate the main module.
-	//
-	// If Dir is non-empty, directories passed to Import and ImportDir must
-	// be absolute.
-	Dir string
-
-	CgoEnabled  bool   // whether cgo files are included
-	UseAllFiles bool   // use files regardless of +build lines, file names
-	Compiler    string // compiler to assume when computing target paths
-
-	// The build, tool, and release tags specify build constraints
-	// that should be considered satisfied when processing +build lines.
-	// Clients creating a new context may customize BuildTags, which
-	// defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags.
-	// ToolTags defaults to build tags appropriate to the current Go toolchain configuration.
-	// ReleaseTags defaults to the list of Go releases the current release is compatible with.
-	// BuildTags is not set for the Default build Context.
-	// In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints
-	// consider the values of GOARCH and GOOS as satisfied tags.
-	// The last element in ReleaseTags is assumed to be the current release.
-	BuildTags   []string
-	ToolTags    []string
-	ReleaseTags []string
-
-	// The install suffix specifies a suffix to use in the name of the installation
-	// directory. By default it is empty, but custom builds that need to keep
-	// their outputs separate can set InstallSuffix to do so. For example, when
-	// using the race detector, the go command uses InstallSuffix = "race", so
-	// that on a Linux/386 system, packages are written to a directory named
-	// "linux_386_race" instead of the usual "linux_386".
-	InstallSuffix string
-
-	// By default, Import uses the operating system's file system calls
-	// to read directories and files. To read from other sources,
-	// callers can set the following functions. They all have default
-	// behaviors that use the local file system, so clients need only set
-	// the functions whose behaviors they wish to change.
-
-	// JoinPath joins the sequence of path fragments into a single path.
-	// If JoinPath is nil, Import uses filepath.Join.
-	JoinPath func(elem ...string) string
-
-	// SplitPathList splits the path list into a slice of individual paths.
-	// If SplitPathList is nil, Import uses filepath.SplitList.
-	SplitPathList func(list string) []string
-
-	// IsAbsPath reports whether path is an absolute path.
-	// If IsAbsPath is nil, Import uses filepath.IsAbs.
-	IsAbsPath func(path string) bool
-
-	// IsDir reports whether the path names a directory.
-	// If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
-	IsDir func(path string) bool
-
-	// HasSubdir reports whether dir is lexically a subdirectory of
-	// root, perhaps multiple levels below. It does not try to check
-	// whether dir exists.
-	// If so, HasSubdir sets rel to a slash-separated path that
-	// can be joined to root to produce a path equivalent to dir.
-	// If HasSubdir is nil, Import uses an implementation built on
-	// filepath.EvalSymlinks.
-	HasSubdir func(root, dir string) (rel string, ok bool)
-
-	// ReadDir returns a slice of fs.FileInfo, sorted by Name,
-	// describing the content of the named directory.
-	// If ReadDir is nil, Import uses ioutil.ReadDir.
-	ReadDir func(dir string) ([]fs.FileInfo, error)
-
-	// OpenFile opens a file (not a directory) for reading.
-	// If OpenFile is nil, Import uses os.Open.
-	OpenFile func(path string) (io.ReadCloser, error)
-}
-
-// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
-func (ctxt *Context) joinPath(elem ...string) string {
-	if f := ctxt.JoinPath; f != nil {
-		return f(elem...)
-	}
-	return filepath.Join(elem...)
-}
-
-// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
-func (ctxt *Context) splitPathList(s string) []string {
-	if f := ctxt.SplitPathList; f != nil {
-		return f(s)
-	}
-	return filepath.SplitList(s)
-}
-
-// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
-func (ctxt *Context) isAbsPath(path string) bool {
-	if f := ctxt.IsAbsPath; f != nil {
-		return f(path)
-	}
-	return filepath.IsAbs(path)
-}
-
-// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
-func (ctxt *Context) isDir(path string) bool {
-	if f := ctxt.IsDir; f != nil {
-		return f(path)
-	}
-	fi, err := os.Stat(path)
-	return err == nil && fi.IsDir()
-}
-
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
-// the local file system to answer the question.
-func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
-	if f := ctxt.HasSubdir; f != nil {
-		return f(root, dir)
-	}
-
-	// Try using paths we received.
-	if rel, ok = hasSubdir(root, dir); ok {
-		return
-	}
-
-	// Try expanding symlinks and comparing
-	// expanded against unexpanded and
-	// expanded against expanded.
-	rootSym, _ := filepath.EvalSymlinks(root)
-	dirSym, _ := filepath.EvalSymlinks(dir)
-
-	if rel, ok = hasSubdir(rootSym, dir); ok {
-		return
-	}
-	if rel, ok = hasSubdir(root, dirSym); ok {
-		return
-	}
-	return hasSubdir(rootSym, dirSym)
-}
-
-// hasSubdir reports if dir is within root by performing lexical analysis only.
-func hasSubdir(root, dir string) (rel string, ok bool) {
-	const sep = string(filepath.Separator)
-	root = filepath.Clean(root)
-	if !strings.HasSuffix(root, sep) {
-		root += sep
-	}
-	dir = filepath.Clean(dir)
-	if !strings.HasPrefix(dir, root) {
-		return "", false
-	}
-	return filepath.ToSlash(dir[len(root):]), true
-}
-
-// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
-func (ctxt *Context) readDir(path string) ([]fs.FileInfo, error) {
-	if f := ctxt.ReadDir; f != nil {
-		return f(path)
-	}
-	// TODO: use os.ReadDir
-	return ioutil.ReadDir(path)
-}
-
-// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
-func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
-	if fn := ctxt.OpenFile; fn != nil {
-		return fn(path)
-	}
-
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, err // nil interface
-	}
-	return f, nil
-}
-
-// isFile determines whether path is a file by trying to open it.
-// It reuses openFile instead of adding another function to the
-// list in Context.
-func (ctxt *Context) isFile(path string) bool {
-	f, err := ctxt.openFile(path)
-	if err != nil {
-		return false
-	}
-	f.Close()
-	return true
-}
-
-// gopath returns the list of Go path directories.
-func (ctxt *Context) gopath() []string {
-	var all []string
-	for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
-		if p == "" || p == ctxt.GOROOT {
-			// Empty paths are uninteresting.
-			// If the path is the GOROOT, ignore it.
-			// People sometimes set GOPATH=$GOROOT.
-			// Do not get confused by this common mistake.
-			continue
-		}
-		if strings.HasPrefix(p, "~") {
-			// Path segments starting with ~ on Unix are almost always
-			// users who have incorrectly quoted ~ while setting GOPATH,
-			// preventing it from expanding to $HOME.
-			// The situation is made more confusing by the fact that
-			// bash allows quoted ~ in $PATH (most shells do not).
-			// Do not get confused by this, and do not try to use the path.
-			// It does not exist, and printing errors about it confuses
-			// those users even more, because they think "sure ~ exists!".
-			// The go command diagnoses this situation and prints a
-			// useful error.
-			// On Windows, ~ is used in short names, such as c:\progra~1
-			// for c:\program files.
-			continue
-		}
-		all = append(all, p)
-	}
-	return all
-}
-
-// SrcDirs returns a list of package source root directories.
-// It draws from the current Go root and Go path but omits directories
-// that do not exist.
-func (ctxt *Context) SrcDirs() []string {
-	var all []string
-	if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
-		dir := ctxt.joinPath(ctxt.GOROOT, "src")
-		if ctxt.isDir(dir) {
-			all = append(all, dir)
-		}
-	}
-	for _, p := range ctxt.gopath() {
-		dir := ctxt.joinPath(p, "src")
-		if ctxt.isDir(dir) {
-			all = append(all, dir)
-		}
-	}
-	return all
-}
-
-// Default is the default Context for builds.
-// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
-// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
-var Default Context = defaultContext()
-
-func defaultContext() Context {
-	d := build.Default
-	return Context{
-		GOARCH:        d.GOARCH,
-		GOOS:          d.GOOS,
-		GOROOT:        d.GOROOT,
-		GOPATH:        d.GOPATH,
-		Dir:           d.Dir,
-		CgoEnabled:    d.CgoEnabled,
-		UseAllFiles:   d.UseAllFiles,
-		Compiler:      d.Compiler,
-		BuildTags:     d.BuildTags,
-		ReleaseTags:   d.ReleaseTags,
-		InstallSuffix: d.InstallSuffix,
-		JoinPath:      d.JoinPath,
-		SplitPathList: d.SplitPathList,
-		IsAbsPath:     d.IsAbsPath,
-		IsDir:         d.IsDir,
-		HasSubdir:     d.HasSubdir,
-		ReadDir:       d.ReadDir,
-		OpenFile:      d.OpenFile,
-	}
-}
-
-func defaultGOPATH() string {
-	env := "HOME"
-	if runtime.GOOS == "windows" {
-		env = "USERPROFILE"
-	} else if runtime.GOOS == "plan9" {
-		env = "home"
-	}
-	if home := os.Getenv(env); home != "" {
-		def := filepath.Join(home, "go")
-		if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
-			// Don't set the default GOPATH to GOROOT,
-			// as that will trigger warnings from the go tool.
-			return ""
-		}
-		return def
-	}
-	return ""
-}
-
-var defaultReleaseTags = build.Default.ReleaseTags
-
-func envOr(name, def string) string {
-	s := os.Getenv(name)
-	if s == "" {
-		return def
-	}
-	return s
-}
-
-// An ImportMode controls the behavior of the Import method.
-type ImportMode uint
-
-const (
-	// If FindOnly is set, Import stops after locating the directory
-	// that should contain the sources for a package. It does not
-	// read any files in the directory.
-	FindOnly ImportMode = 1 << iota
-
-	// If AllowBinary is set, Import can be satisfied by a compiled
-	// package object without corresponding sources.
-	//
-	// Deprecated:
-	// The supported way to create a compiled-only package is to
-	// write source code containing a //go:binary-only-package comment at
-	// the top of the file. Such a package will be recognized
-	// regardless of this flag setting (because it has source code)
-	// and will have BinaryOnly set to true in the returned Package.
-	AllowBinary
-
-	// If ImportComment is set, parse import comments on package statements.
-	// Import returns an error if it finds a comment it cannot understand
-	// or finds conflicting comments in multiple source files.
-	// See golang.org/s/go14customimport for more information.
-	ImportComment
-
-	// By default, Import searches vendor directories
-	// that apply in the given source directory before searching
-	// the GOROOT and GOPATH roots.
-	// If an Import finds and returns a package using a vendor
-	// directory, the resulting ImportPath is the complete path
-	// to the package, including the path elements leading up
-	// to and including "vendor".
-	// For example, if Import("y", "x/subdir", 0) finds
-	// "x/vendor/y", the returned package's ImportPath is "x/vendor/y",
-	// not plain "y".
-	// See golang.org/s/go15vendor for more information.
-	//
-	// Setting IgnoreVendor ignores vendor directories.
-	//
-	// In contrast to the package's ImportPath,
-	// the returned package's Imports, TestImports, and XTestImports
-	// are always the exact import paths from the source files:
-	// Import makes no attempt to resolve or check those paths.
-	IgnoreVendor
-)
-
-// A Package describes the Go package found in a directory.
-type Package struct {
-	Dir           string   // directory containing package sources
-	Name          string   // package name
-	ImportComment string   // path in import comment on package statement
-	Doc           string   // documentation synopsis
-	ImportPath    string   // import path of package ("" if unknown)
-	Root          string   // root of Go tree where this package lives
-	SrcRoot       string   // package source root directory ("" if unknown)
-	PkgRoot       string   // package install root directory ("" if unknown)
-	PkgTargetRoot string   // architecture dependent install root directory ("" if unknown)
-	BinDir        string   // command install directory ("" if unknown)
-	Goroot        bool     // package found in Go root
-	PkgObj        string   // installed .a file
-	AllTags       []string // tags that can influence file selection in this directory
-	ConflictDir   string   // this directory shadows Dir in $GOPATH
-	BinaryOnly    bool     // cannot be rebuilt from source (has //go:binary-only-package comment)
-
-	// Source files
-	GoFiles           []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
-	CgoFiles          []string // .go source files that import "C"
-	IgnoredGoFiles    []string // .go source files ignored for this build (including ignored _test.go files)
-	InvalidGoFiles    []string // .go source files with detected problems (parse error, wrong package name, and so on)
-	IgnoredOtherFiles []string // non-.go source files ignored for this build
-	CFiles            []string // .c source files
-	CXXFiles          []string // .cc, .cpp and .cxx source files
-	MFiles            []string // .m (Objective-C) source files
-	HFiles            []string // .h, .hh, .hpp and .hxx source files
-	FFiles            []string // .f, .F, .for and .f90 Fortran source files
-	SFiles            []string // .s source files
-	SwigFiles         []string // .swig files
-	SwigCXXFiles      []string // .swigcxx files
-	SysoFiles         []string // .syso system object files to add to archive
-
-	// Cgo directives
-	CgoCFLAGS    []string // Cgo CFLAGS directives
-	CgoCPPFLAGS  []string // Cgo CPPFLAGS directives
-	CgoCXXFLAGS  []string // Cgo CXXFLAGS directives
-	CgoFFLAGS    []string // Cgo FFLAGS directives
-	CgoLDFLAGS   []string // Cgo LDFLAGS directives
-	CgoPkgConfig []string // Cgo pkg-config directives
-
-	// Test information
-	TestGoFiles  []string // _test.go files in package
-	XTestGoFiles []string // _test.go files outside package
-
-	// Dependency information
-	Imports        []string                    // import paths from GoFiles, CgoFiles
-	ImportPos      map[string][]token.Position // line information for Imports
-	TestImports    []string                    // import paths from TestGoFiles
-	TestImportPos  map[string][]token.Position // line information for TestImports
-	XTestImports   []string                    // import paths from XTestGoFiles
-	XTestImportPos map[string][]token.Position // line information for XTestImports
-
-	// //go:embed patterns found in Go source files
-	// For example, if a source file says
-	//	//go:embed a* b.c
-	// then the list will contain those two strings as separate entries.
-	// (See package embed for more details about //go:embed.)
-	EmbedPatterns        []string                    // patterns from GoFiles, CgoFiles
-	EmbedPatternPos      map[string][]token.Position // line information for EmbedPatterns
-	TestEmbedPatterns    []string                    // patterns from TestGoFiles
-	TestEmbedPatternPos  map[string][]token.Position // line information for TestEmbedPatterns
-	XTestEmbedPatterns   []string                    // patterns from XTestGoFiles
-	XTestEmbedPatternPos map[string][]token.Position // line information for XTestEmbedPatternPos
-}
-
-// IsCommand reports whether the package is considered a
-// command to be installed (not just a library).
-// Packages named "main" are treated as commands.
-func (p *Package) IsCommand() bool {
-	return p.Name == "main"
-}
-
-// ImportDir is like Import but processes the Go package found in
-// the named directory.
-func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
-	return ctxt.Import(".", dir, mode)
-}
-
-// NoGoError is the error used by Import to describe a directory
-// containing no buildable Go source files. (It may still contain
-// test files, files hidden by build tags, and so on.)
-type NoGoError struct {
-	Dir string
-}
-
-func (e *NoGoError) Error() string {
-	return "no buildable Go source files in " + e.Dir
-}
-
-// MultiplePackageError describes a directory containing
-// multiple buildable Go source files for multiple packages.
-type MultiplePackageError struct {
-	Dir      string   // directory containing files
-	Packages []string // package names found
-	Files    []string // corresponding files: Files[i] declares package Packages[i]
-}
-
-func (e *MultiplePackageError) Error() string {
-	// Error string limited to two entries for compatibility.
-	return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
-}
-
-func nameExt(name string) string {
-	i := strings.LastIndex(name, ".")
-	if i < 0 {
-		return ""
-	}
-	return name[i:]
-}
-
-// Import returns details about the Go package named by the import path,
-// interpreting local import paths relative to the srcDir directory.
-// If the path is a local import path naming a package that can be imported
-// using a standard import path, the returned package will set p.ImportPath
-// to that path.
-//
-// In the directory containing the package, .go, .c, .h, and .s files are
-// considered part of the package except for:
-//
-//   - .go files in package documentation
-//   - files starting with _ or . (likely editor temporary files)
-//   - files with build constraints not satisfied by the context
-//
-// If an error occurs, Import returns a non-nil error and a non-nil
-// *Package containing partial information.
-func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
-	p := &Package{
-		ImportPath: path,
-	}
-	if path == "" {
-		return p, fmt.Errorf("import %q: invalid import path", path)
-	}
-
-	var pkgtargetroot string
-	var pkga string
-	var pkgerr error
-	suffix := ""
-	if ctxt.InstallSuffix != "" {
-		suffix = "_" + ctxt.InstallSuffix
-	}
-	switch ctxt.Compiler {
-	case "gccgo":
-		pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
-	case "gc":
-		pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
-	default:
-		// Save error for end of function.
-		pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
-	}
-	setPkga := func() {
-		switch ctxt.Compiler {
-		case "gccgo":
-			dir, elem := pathpkg.Split(p.ImportPath)
-			pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
-		case "gc":
-			pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
-		}
-	}
-	setPkga()
-
-	binaryOnly := false
-	if IsLocalImport(path) {
-		pkga = "" // local imports have no installed path
-		if srcDir == "" {
-			return p, fmt.Errorf("import %q: import relative to unknown directory", path)
-		}
-		if !ctxt.isAbsPath(path) {
-			p.Dir = ctxt.joinPath(srcDir, path)
-		}
-		// p.Dir directory may or may not exist. Gather partial information first, check if it exists later.
-		// Determine canonical import path, if any.
-		// Exclude results where the import path would include /testdata/.
-		inTestdata := func(sub string) bool {
-			return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata"
-		}
-		if ctxt.GOROOT != "" {
-			root := ctxt.joinPath(ctxt.GOROOT, "src")
-			if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) {
-				p.Goroot = true
-				p.ImportPath = sub
-				p.Root = ctxt.GOROOT
-				setPkga() // p.ImportPath changed
-				goto Found
-			}
-		}
-		all := ctxt.gopath()
-		for i, root := range all {
-			rootsrc := ctxt.joinPath(root, "src")
-			if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) {
-				// We found a potential import path for dir,
-				// but check that using it wouldn't find something
-				// else first.
-				if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
-					if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
-						p.ConflictDir = dir
-						goto Found
-					}
-				}
-				for _, earlyRoot := range all[:i] {
-					if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
-						p.ConflictDir = dir
-						goto Found
-					}
-				}
-
-				// sub would not name some other directory instead of this one.
-				// Record it.
-				p.ImportPath = sub
-				p.Root = root
-				setPkga() // p.ImportPath changed
-				goto Found
-			}
-		}
-		// It's okay that we didn't find a root containing dir.
-		// Keep going with the information we have.
-	} else {
-		if strings.HasPrefix(path, "/") {
-			return p, fmt.Errorf("import %q: cannot import absolute path", path)
-		}
-
-		if err := ctxt.importGo(p, path, srcDir, mode); err == nil {
-			goto Found
-		} else if err != errNoModules {
-			return p, err
-		}
-
-		gopath := ctxt.gopath() // needed twice below; avoid computing many times
-
-		// tried records the location of unsuccessful package lookups
-		var tried struct {
-			vendor []string
-			goroot string
-			gopath []string
-		}
-
-		// Vendor directories get first chance to satisfy import.
-		if mode&IgnoreVendor == 0 && srcDir != "" {
-			searchVendor := func(root string, isGoroot bool) bool {
-				sub, ok := ctxt.hasSubdir(root, srcDir)
-				if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") {
-					return false
-				}
-				for {
-					vendor := ctxt.joinPath(root, sub, "vendor")
-					if ctxt.isDir(vendor) {
-						dir := ctxt.joinPath(vendor, path)
-						if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) {
-							p.Dir = dir
-							p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/")
-							p.Goroot = isGoroot
-							p.Root = root
-							setPkga() // p.ImportPath changed
-							return true
-						}
-						tried.vendor = append(tried.vendor, dir)
-					}
-					i := strings.LastIndex(sub, "/")
-					if i < 0 {
-						break
-					}
-					sub = sub[:i]
-				}
-				return false
-			}
-			if ctxt.Compiler != "gccgo" && searchVendor(ctxt.GOROOT, true) {
-				goto Found
-			}
-			for _, root := range gopath {
-				if searchVendor(root, false) {
-					goto Found
-				}
-			}
-		}
-
-		// Determine directory from import path.
-		if ctxt.GOROOT != "" {
-			// If the package path starts with "vendor/", only search GOROOT before
-			// GOPATH if the importer is also within GOROOT. That way, if the user has
-			// vendored in a package that is subsequently included in the standard
-			// distribution, they'll continue to pick up their own vendored copy.
-			gorootFirst := srcDir == "" || !strings.HasPrefix(path, "vendor/")
-			if !gorootFirst {
-				_, gorootFirst = ctxt.hasSubdir(ctxt.GOROOT, srcDir)
-			}
-			if gorootFirst {
-				dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
-				if ctxt.Compiler != "gccgo" {
-					isDir := ctxt.isDir(dir)
-					binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
-					if isDir || binaryOnly {
-						p.Dir = dir
-						p.Goroot = true
-						p.Root = ctxt.GOROOT
-						goto Found
-					}
-				}
-				tried.goroot = dir
-			}
-		}
-		for _, root := range gopath {
-			dir := ctxt.joinPath(root, "src", path)
-			isDir := ctxt.isDir(dir)
-			binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
-			if isDir || binaryOnly {
-				p.Dir = dir
-				p.Root = root
-				goto Found
-			}
-			tried.gopath = append(tried.gopath, dir)
-		}
-
-		// If we tried GOPATH first due to a "vendor/" prefix, fall back to GOPATH.
-		// That way, the user can still get useful results from 'go list' for
-		// standard-vendored paths passed on the command line.
-		if ctxt.GOROOT != "" && tried.goroot == "" {
-			dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
-			if ctxt.Compiler != "gccgo" {
-				isDir := ctxt.isDir(dir)
-				binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
-				if isDir || binaryOnly {
-					p.Dir = dir
-					p.Goroot = true
-					p.Root = ctxt.GOROOT
-					goto Found
-				}
-			}
-			tried.goroot = dir
-		}
-
-		// package was not found
-		var paths []string
-		format := "\t%s (vendor tree)"
-		for _, dir := range tried.vendor {
-			paths = append(paths, fmt.Sprintf(format, dir))
-			format = "\t%s"
-		}
-		if tried.goroot != "" {
-			paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
-		} else {
-			paths = append(paths, "\t($GOROOT not set)")
-		}
-		format = "\t%s (from $GOPATH)"
-		for _, dir := range tried.gopath {
-			paths = append(paths, fmt.Sprintf(format, dir))
-			format = "\t%s"
-		}
-		if len(tried.gopath) == 0 {
-			paths = append(paths, "\t($GOPATH not set. For more details see: 'go help gopath')")
-		}
-		return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
-	}
-
-Found:
-	if p.Root != "" {
-		p.SrcRoot = ctxt.joinPath(p.Root, "src")
-		p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
-		p.BinDir = ctxt.joinPath(p.Root, "bin")
-		if pkga != "" {
-			p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
-			p.PkgObj = ctxt.joinPath(p.Root, pkga)
-		}
-	}
-
-	// If it's a local import path, by the time we get here, we still haven't checked
-	// that p.Dir directory exists. This is the right time to do that check.
-	// We can't do it earlier, because we want to gather partial information for the
-	// non-nil *Package returned when an error occurs.
-	// We need to do this before we return early on FindOnly flag.
-	if IsLocalImport(path) && !ctxt.isDir(p.Dir) {
-		if ctxt.Compiler == "gccgo" && p.Goroot {
-			// gccgo has no sources for GOROOT packages.
-			return p, nil
-		}
-
-		// package was not found
-		return p, fmt.Errorf("cannot find package %q in:\n\t%s", p.ImportPath, p.Dir)
-	}
-
-	if mode&FindOnly != 0 {
-		return p, pkgerr
-	}
-	if binaryOnly && (mode&AllowBinary) != 0 {
-		return p, pkgerr
-	}
-
-	if ctxt.Compiler == "gccgo" && p.Goroot {
-		// gccgo has no sources for GOROOT packages.
-		return p, nil
-	}
-
-	dirs, err := ctxt.readDir(p.Dir)
-	if err != nil {
-		return p, err
-	}
-
-	var badGoError error
-	badFiles := make(map[string]bool)
-	badFile := func(name string, err error) {
-		if badGoError == nil {
-			badGoError = err
-		}
-		if !badFiles[name] {
-			p.InvalidGoFiles = append(p.InvalidGoFiles, name)
-			badFiles[name] = true
-		}
-	}
-
-	var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
-	var firstFile, firstCommentFile string
-	embedPos := make(map[string][]token.Position)
-	testEmbedPos := make(map[string][]token.Position)
-	xTestEmbedPos := make(map[string][]token.Position)
-	importPos := make(map[string][]token.Position)
-	testImportPos := make(map[string][]token.Position)
-	xTestImportPos := make(map[string][]token.Position)
-	allTags := make(map[string]bool)
-	fset := token.NewFileSet()
-	for _, d := range dirs {
-		if d.IsDir() {
-			continue
-		}
-		if d.Mode()&fs.ModeSymlink != 0 {
-			if ctxt.isDir(ctxt.joinPath(p.Dir, d.Name())) {
-				// Symlinks to directories are not source files.
-				continue
-			}
-		}
-
-		name := d.Name()
-		ext := nameExt(name)
-
-		info, err := ctxt.matchFile(p.Dir, name, allTags, &p.BinaryOnly, fset)
-		if err != nil {
-			badFile(name, err)
-			continue
-		}
-		if info == nil {
-			if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") {
-				// not due to build constraints - don't report
-			} else if ext == ".go" {
-				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
-			} else if fileListForExt(p, ext) != nil {
-				p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
-			}
-			continue
-		}
-		data, filename := info.header, info.name
-
-		// Going to save the file. For non-Go files, can stop here.
-		switch ext {
-		case ".go":
-			// keep going
-		case ".S", ".sx":
-			// special case for cgo, handled at end
-			Sfiles = append(Sfiles, name)
-			continue
-		default:
-			if list := fileListForExt(p, ext); list != nil {
-				*list = append(*list, name)
-			}
-			continue
-		}
-
-		if info.parseErr != nil {
-			badFile(name, info.parseErr)
-			// Fall through: we might still have a partial AST in info.parsed,
-			// and we want to list files with parse errors anyway.
-		}
-
-		var pkg string
-		if info.parsed != nil {
-			pkg = info.parsed.Name.Name
-			if pkg == "documentation" {
-				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
-				continue
-			}
-		}
-
-		isTest := strings.HasSuffix(name, "_test.go")
-		isXTest := false
-		if isTest && strings.HasSuffix(pkg, "_test") && p.Name != pkg {
-			isXTest = true
-			pkg = pkg[:len(pkg)-len("_test")]
-		}
-
-		if p.Name == "" {
-			p.Name = pkg
-			firstFile = name
-		} else if pkg != p.Name {
-			// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
-			// order. Instead of resolving p.Name arbitrarily, we should clear out the
-			// existing name and mark the existing files as also invalid.
-			badFile(name, &MultiplePackageError{
-				Dir:      p.Dir,
-				Packages: []string{p.Name, pkg},
-				Files:    []string{firstFile, name},
-			})
-		}
-		// Grab the first package comment as docs, provided it is not from a test file.
-		if info.parsed != nil && info.parsed.Doc != nil && p.Doc == "" && !isTest && !isXTest {
-			p.Doc = doc.Synopsis(info.parsed.Doc.Text())
-		}
-
-		if mode&ImportComment != 0 {
-			qcom, line := findImportComment(data)
-			if line != 0 {
-				com, err := strconv.Unquote(qcom)
-				if err != nil {
-					badFile(name, fmt.Errorf("%s:%d: cannot parse import comment", filename, line))
-				} else if p.ImportComment == "" {
-					p.ImportComment = com
-					firstCommentFile = name
-				} else if p.ImportComment != com {
-					badFile(name, fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir))
-				}
-			}
-		}
-
-		// Record imports and information about cgo.
-		isCgo := false
-		for _, imp := range info.imports {
-			if imp.path == "C" {
-				if isTest {
-					badFile(name, fmt.Errorf("use of cgo in test %s not supported", filename))
-					continue
-				}
-				isCgo = true
-				if imp.doc != nil {
-					if err := ctxt.saveCgo(filename, p, imp.doc); err != nil {
-						badFile(name, err)
-					}
-				}
-			}
-		}
-
-		var fileList *[]string
-		var importMap, embedMap map[string][]token.Position
-		switch {
-		case isCgo:
-			allTags["cgo"] = true
-			if ctxt.CgoEnabled {
-				fileList = &p.CgoFiles
-				importMap = importPos
-				embedMap = embedPos
-			} else {
-				// Ignore imports and embeds from cgo files if cgo is disabled.
-				fileList = &p.IgnoredGoFiles
-			}
-		case isXTest:
-			fileList = &p.XTestGoFiles
-			importMap = xTestImportPos
-			embedMap = xTestEmbedPos
-		case isTest:
-			fileList = &p.TestGoFiles
-			importMap = testImportPos
-			embedMap = testEmbedPos
-		default:
-			fileList = &p.GoFiles
-			importMap = importPos
-			embedMap = embedPos
-		}
-		*fileList = append(*fileList, name)
-		if importMap != nil {
-			for _, imp := range info.imports {
-				importMap[imp.path] = append(importMap[imp.path], fset.Position(imp.pos))
-			}
-		}
-		if embedMap != nil {
-			for _, emb := range info.embeds {
-				embedMap[emb.pattern] = append(embedMap[emb.pattern], emb.pos)
-			}
-		}
-	}
-
-	for tag := range allTags {
-		p.AllTags = append(p.AllTags, tag)
-	}
-	sort.Strings(p.AllTags)
-
-	p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
-	p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
-	p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)
-
-	p.Imports, p.ImportPos = cleanDecls(importPos)
-	p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
-	p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)
-
-	// add the .S/.sx files only if we are using cgo
-	// (which means gcc will compile them).
-	// The standard assemblers expect .s files.
-	if len(p.CgoFiles) > 0 {
-		p.SFiles = append(p.SFiles, Sfiles...)
-		sort.Strings(p.SFiles)
-	} else {
-		p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
-		sort.Strings(p.IgnoredOtherFiles)
-	}
-
-	if badGoError != nil {
-		return p, badGoError
-	}
-	if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
-		return p, &NoGoError{p.Dir}
-	}
-	return p, pkgerr
-}
-
-func fileListForExt(p *Package, ext string) *[]string {
-	switch ext {
-	case ".c":
-		return &p.CFiles
-	case ".cc", ".cpp", ".cxx":
-		return &p.CXXFiles
-	case ".m":
-		return &p.MFiles
-	case ".h", ".hh", ".hpp", ".hxx":
-		return &p.HFiles
-	case ".f", ".F", ".for", ".f90":
-		return &p.FFiles
-	case ".s", ".S", ".sx":
-		return &p.SFiles
-	case ".swig":
-		return &p.SwigFiles
-	case ".swigcxx":
-		return &p.SwigCXXFiles
-	case ".syso":
-		return &p.SysoFiles
-	}
-	return nil
-}
-
-func uniq(list []string) []string {
-	if list == nil {
-		return nil
-	}
-	out := make([]string, len(list))
-	copy(out, list)
-	sort.Strings(out)
-	uniq := out[:0]
-	for _, x := range out {
-		if len(uniq) == 0 || uniq[len(uniq)-1] != x {
-			uniq = append(uniq, x)
-		}
-	}
-	return uniq
-}
-
-var errNoModules = errors.New("not using modules")
-
-// importGo checks whether it can use the go command to find the directory for path.
-// If using the go command is not appropriate, importGo returns errNoModules.
-// Otherwise, importGo tries using the go command and reports whether that succeeded.
-// Using the go command lets build.Import and build.Context.Import find code
-// in Go modules. In the long term we want tools to use go/packages (currently golang.org/x/tools/go/packages),
-// which will also use the go command.
-// Invoking the go command here is not very efficient in that it computes information
-// about the requested package and all dependencies and then only reports about the requested package.
-// Then we reinvoke it for every dependency. But this is still better than not working at all.
-// See golang.org/issue/26504.
-func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode) error {
-	// To invoke the go command,
-	// we must not being doing special things like AllowBinary or IgnoreVendor,
-	// and all the file system callbacks must be nil (we're meant to use the local file system).
-	if mode&AllowBinary != 0 || mode&IgnoreVendor != 0 ||
-		ctxt.JoinPath != nil || ctxt.SplitPathList != nil || ctxt.IsAbsPath != nil || ctxt.IsDir != nil || ctxt.HasSubdir != nil || ctxt.ReadDir != nil || ctxt.OpenFile != nil || !equal(ctxt.ReleaseTags, defaultReleaseTags) {
-		return errNoModules
-	}
-
-	// Predict whether module aware mode is enabled by checking the value of
-	// GO111MODULE and looking for a go.mod file in the source directory or
-	// one of its parents. Running 'go env GOMOD' in the source directory would
-	// give a canonical answer, but we'd prefer not to execute another command.
-	go111Module := os.Getenv("GO111MODULE")
-	switch go111Module {
-	case "off":
-		return errNoModules
-	default: // "", "on", "auto", anything else
-		// Maybe use modules.
-	}
-
-	if srcDir != "" {
-		var absSrcDir string
-		if filepath.IsAbs(srcDir) {
-			absSrcDir = srcDir
-		} else if ctxt.Dir != "" {
-			return fmt.Errorf("go/build: Dir is non-empty, so relative srcDir is not allowed: %v", srcDir)
-		} else {
-			// Find the absolute source directory. hasSubdir does not handle
-			// relative paths (and can't because the callbacks don't support this).
-			var err error
-			absSrcDir, err = filepath.Abs(srcDir)
-			if err != nil {
-				return errNoModules
-			}
-		}
-
-		// If the source directory is in GOROOT, then the in-process code works fine
-		// and we should keep using it. Moreover, the 'go list' approach below doesn't
-		// take standard-library vendoring into account and will fail.
-		if _, ok := ctxt.hasSubdir(filepath.Join(ctxt.GOROOT, "src"), absSrcDir); ok {
-			return errNoModules
-		}
-	}
-
-	// For efficiency, if path is a standard library package, let the usual lookup code handle it.
-	if ctxt.GOROOT != "" {
-		dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
-		if ctxt.isDir(dir) {
-			return errNoModules
-		}
-	}
-
-	// If GO111MODULE=auto, look to see if there is a go.mod.
-	// Since go1.13, it doesn't matter if we're inside GOPATH.
-	if go111Module == "auto" {
-		var (
-			parent string
-			err    error
-		)
-		if ctxt.Dir == "" {
-			parent, err = os.Getwd()
-			if err != nil {
-				// A nonexistent working directory can't be in a module.
-				return errNoModules
-			}
-		} else {
-			parent, err = filepath.Abs(ctxt.Dir)
-			if err != nil {
-				// If the caller passed a bogus Dir explicitly, that's materially
-				// different from not having modules enabled.
-				return err
-			}
-		}
-		for {
-			if f, err := ctxt.openFile(ctxt.joinPath(parent, "go.mod")); err == nil {
-				buf := make([]byte, 100)
-				_, err := f.Read(buf)
-				f.Close()
-				if err == nil || err == io.EOF {
-					// go.mod exists and is readable (is a file, not a directory).
-					break
-				}
-			}
-			d := filepath.Dir(parent)
-			if len(d) >= len(parent) {
-				return errNoModules // reached top of file system, no go.mod
-			}
-			parent = d
-		}
-	}
-
-	cmd := exec.Command("go", "list", "-e", "-compiler="+ctxt.Compiler, "-tags="+strings.Join(ctxt.BuildTags, ","), "-installsuffix="+ctxt.InstallSuffix, "-f={{.Dir}}\n{{.ImportPath}}\n{{.Root}}\n{{.Goroot}}\n{{if .Error}}{{.Error}}{{end}}\n", "--", path)
-
-	if ctxt.Dir != "" {
-		cmd.Dir = ctxt.Dir
-	}
-
-	var stdout, stderr strings.Builder
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-
-	cgo := "0"
-	if ctxt.CgoEnabled {
-		cgo = "1"
-	}
-	cmd.Env = append(os.Environ(),
-		"GOOS="+ctxt.GOOS,
-		"GOARCH="+ctxt.GOARCH,
-		"GOROOT="+ctxt.GOROOT,
-		"GOPATH="+ctxt.GOPATH,
-		"CGO_ENABLED="+cgo,
-	)
-
-	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("go/build: go list %s: %v\n%s\n", path, err, stderr.String())
-	}
-
-	f := strings.SplitN(stdout.String(), "\n", 5)
-	if len(f) != 5 {
-		return fmt.Errorf("go/build: importGo %s: unexpected output:\n%s\n", path, stdout.String())
-	}
-	dir := f[0]
-	errStr := strings.TrimSpace(f[4])
-	if errStr != "" && dir == "" {
-		// If 'go list' could not locate the package (dir is empty),
-		// return the same error that 'go list' reported.
-		return errors.New(errStr)
-	}
-
-	// If 'go list' did locate the package, ignore the error.
-	// It was probably related to loading source files, and we'll
-	// encounter it ourselves shortly if the FindOnly flag isn't set.
-	p.Dir = dir
-	p.ImportPath = f[1]
-	p.Root = f[2]
-	p.Goroot = f[3] == "true"
-	return nil
-}
-
-func equal(x, y []string) bool {
-	if len(x) != len(y) {
-		return false
-	}
-	for i, xi := range x {
-		if xi != y[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// hasGoFiles reports whether dir contains any files with names ending in .go.
-// For a vendor check we must exclude directories that contain no .go files.
-// Otherwise it is not possible to vendor just a/b/c and still import the
-// non-vendored a/b. See golang.org/issue/13832.
-func hasGoFiles(ctxt *Context, dir string) bool {
-	ents, _ := ctxt.readDir(dir)
-	for _, ent := range ents {
-		if !ent.IsDir() && strings.HasSuffix(ent.Name(), ".go") {
-			return true
-		}
-	}
-	return false
-}
-
-func findImportComment(data []byte) (s string, line int) {
-	// expect keyword package
-	word, data := parseWord(data)
-	if string(word) != "package" {
-		return "", 0
-	}
-
-	// expect package name
-	_, data = parseWord(data)
-
-	// now ready for import comment, a // or /* */ comment
-	// beginning and ending on the current line.
-	for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
-		data = data[1:]
-	}
-
-	var comment []byte
-	switch {
-	case bytes.HasPrefix(data, slashSlash):
-		comment, _, _ = bytesCut(data[2:], newline)
-	case bytes.HasPrefix(data, slashStar):
-		var ok bool
-		comment, _, ok = bytesCut(data[2:], starSlash)
-		if !ok {
-			// malformed comment
-			return "", 0
-		}
-		if bytes.Contains(comment, newline) {
-			return "", 0
-		}
-	}
-	comment = bytes.TrimSpace(comment)
-
-	// split comment into `import`, `"pkg"`
-	word, arg := parseWord(comment)
-	if string(word) != "import" {
-		return "", 0
-	}
-
-	line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
-	return strings.TrimSpace(string(arg)), line
-}
-
-var (
-	slashSlash = []byte("//")
-	slashStar  = []byte("/*")
-	starSlash  = []byte("*/")
-	newline    = []byte("\n")
-)
-
-// skipSpaceOrComment returns data with any leading spaces or comments removed.
-func skipSpaceOrComment(data []byte) []byte {
-	for len(data) > 0 {
-		switch data[0] {
-		case ' ', '\t', '\r', '\n':
-			data = data[1:]
-			continue
-		case '/':
-			if bytes.HasPrefix(data, slashSlash) {
-				i := bytes.Index(data, newline)
-				if i < 0 {
-					return nil
-				}
-				data = data[i+1:]
-				continue
-			}
-			if bytes.HasPrefix(data, slashStar) {
-				data = data[2:]
-				i := bytes.Index(data, starSlash)
-				if i < 0 {
-					return nil
-				}
-				data = data[i+2:]
-				continue
-			}
-		}
-		break
-	}
-	return data
-}
-
-// parseWord skips any leading spaces or comments in data
-// and then parses the beginning of data as an identifier or keyword,
-// returning that word and what remains after the word.
-func parseWord(data []byte) (word, rest []byte) {
-	data = skipSpaceOrComment(data)
-
-	// Parse past leading word characters.
-	rest = data
-	for {
-		r, size := utf8.DecodeRune(rest)
-		if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
-			rest = rest[size:]
-			continue
-		}
-		break
-	}
-
-	word = data[:len(data)-len(rest)]
-	if len(word) == 0 {
-		return nil, nil
-	}
-
-	return word, rest
-}
-
-// MatchFile reports whether the file with the given name in the given directory
-// matches the context and would be included in a Package created by ImportDir
-// of that directory.
-//
-// MatchFile considers the name of the file and may use ctxt.OpenFile to
-// read some or all of the file's content.
-func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) {
-	info, err := ctxt.matchFile(dir, name, nil, nil, nil)
-	return info != nil, err
-}
-
-var dummyPkg Package
-
-// fileInfo records information learned about a file included in a build.
-type fileInfo struct {
-	name     string // full name including dir
-	header   []byte
-	fset     *token.FileSet
-	parsed   *ast.File
-	parseErr error
-	imports  []fileImport
-	embeds   []fileEmbed
-	embedErr error
-}
-
-type fileImport struct {
-	path string
-	pos  token.Pos
-	doc  *ast.CommentGroup
-}
-
-type fileEmbed struct {
-	pattern string
-	pos     token.Position
-}
-
-// matchFile determines whether the file with the given name in the given directory
-// should be included in the package being constructed.
-// If the file should be included, matchFile returns a non-nil *fileInfo (and a nil error).
-// Non-nil errors are reserved for unexpected problems.
-//
-// If name denotes a Go program, matchFile reads until the end of the
-// imports and returns that section of the file in the fileInfo's header field,
-// even though it only considers text until the first non-comment
-// when looking for +build lines.
-//
-// If allTags is non-nil, matchFile records any encountered build tag
-// by setting allTags[tag] = true.
-func (ctxt *Context) matchFile(dir, name string, allTags map[string]bool, binaryOnly *bool, fset *token.FileSet) (*fileInfo, error) {
-	if strings.HasPrefix(name, "_") ||
-		strings.HasPrefix(name, ".") {
-		return nil, nil
-	}
-
-	i := strings.LastIndex(name, ".")
-	if i < 0 {
-		i = len(name)
-	}
-	ext := name[i:]
-
-	if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
-		return nil, nil
-	}
-
-	if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil {
-		// skip
-		return nil, nil
-	}
-
-	info := &fileInfo{name: ctxt.joinPath(dir, name), fset: fset}
-	if ext == ".syso" {
-		// binary, no reading
-		return info, nil
-	}
-
-	f, err := ctxt.openFile(info.name)
-	if err != nil {
-		return nil, err
-	}
-
-	if strings.HasSuffix(name, ".go") {
-		err = readGoInfo(f, info)
-		if strings.HasSuffix(name, "_test.go") {
-			binaryOnly = nil // ignore //go:binary-only-package comments in _test.go files
-		}
-	} else {
-		binaryOnly = nil // ignore //go:binary-only-package comments in non-Go sources
-		info.header, err = readComments(f)
-	}
-	f.Close()
-	if err != nil {
-		return nil, fmt.Errorf("read %s: %v", info.name, err)
-	}
-
-	// Look for +build comments to accept or reject the file.
-	ok, sawBinaryOnly, err := ctxt.shouldBuild(info.header, allTags)
-	if err != nil {
-		return nil, fmt.Errorf("%s: %v", name, err)
-	}
-	if !ok && !ctxt.UseAllFiles {
-		return nil, nil
-	}
-
-	if binaryOnly != nil && sawBinaryOnly {
-		*binaryOnly = true
-	}
-
-	return info, nil
-}
-
-func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) {
-	all := make([]string, 0, len(m))
-	for path := range m {
-		all = append(all, path)
-	}
-	sort.Strings(all)
-	return all, m
-}
-
-// Import is shorthand for Default.Import.
-func Import(path, srcDir string, mode ImportMode) (*Package, error) {
-	return Default.Import(path, srcDir, mode)
-}
-
-// ImportDir is shorthand for Default.ImportDir.
-func ImportDir(dir string, mode ImportMode) (*Package, error) {
-	return Default.ImportDir(dir, mode)
-}
-
-var (
-	bPlusBuild = []byte("+build")
-
-	goBuildComment = []byte("//go:build")
-
-	errGoBuildWithoutBuild = errors.New("//go:build comment without // +build comment")
-	errMultipleGoBuild     = errors.New("multiple //go:build comments")
-)
-
-func isGoBuildComment(line []byte) bool {
-	if !bytes.HasPrefix(line, goBuildComment) {
-		return false
-	}
-	line = bytes.TrimSpace(line)
-	rest := line[len(goBuildComment):]
-	return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest)
-}
-
-// Special comment denoting a binary-only package.
-// See https://golang.org/design/2775-binary-only-packages
-// for more about the design of binary-only packages.
-var binaryOnlyComment = []byte("//go:binary-only-package")
-
-// shouldBuild reports whether it is okay to use this file.
-// The rule is that in the file's leading run of // comments
-// and blank lines, which must be followed by a blank line
-// (to avoid including a Go package clause doc comment),
-// lines beginning with '// +build' are taken as build directives.
-//
-// The file is accepted only if each such line lists something
-// matching the file. For example:
-//
-//	// +build windows linux
-//
-// marks the file as applicable only on Windows and Linux.
-//
-// For each build tag it consults, shouldBuild sets allTags[tag] = true.
-//
-// shouldBuild reports whether the file should be built
-// and whether a //go:binary-only-package comment was found.
-func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool) (shouldBuild, binaryOnly bool, err error) {
-	// Identify leading run of // comments and blank lines,
-	// which must be followed by a blank line.
-	// Also identify any //go:build comments.
-	content, goBuild, sawBinaryOnly, err := parseFileHeader(content)
-	if err != nil {
-		return false, false, err
-	}
-
-	// If //go:build line is present, it controls.
-	// Otherwise fall back to +build processing.
-	switch {
-	case goBuild != nil:
-		x, err := constraint.Parse(string(goBuild))
-		if err != nil {
-			return false, false, fmt.Errorf("parsing //go:build line: %v", err)
-		}
-		shouldBuild = ctxt.eval(x, allTags)
-
-	default:
-		shouldBuild = true
-		p := content
-		for len(p) > 0 {
-			line := p
-			if i := bytes.IndexByte(line, '\n'); i >= 0 {
-				line, p = line[:i], p[i+1:]
-			} else {
-				p = p[len(p):]
-			}
-			line = bytes.TrimSpace(line)
-			if !bytes.HasPrefix(line, slashSlash) || !bytes.Contains(line, bPlusBuild) {
-				continue
-			}
-			text := string(line)
-			if !constraint.IsPlusBuild(text) {
-				continue
-			}
-			if x, err := constraint.Parse(text); err == nil {
-				if !ctxt.eval(x, allTags) {
-					shouldBuild = false
-				}
-			}
-		}
-	}
-
-	return shouldBuild, sawBinaryOnly, nil
-}
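For illustration only: a minimal standalone sketch of the //go:build evaluation that shouldBuild performs, written against the standard library's go/build/constraint package (the expression and tag set below are hypothetical):

	package main

	import (
		"fmt"
		"go/build/constraint"
	)

	func main() {
		// Parse a //go:build line, as shouldBuild does via constraint.Parse.
		expr, err := constraint.Parse("//go:build linux && (amd64 || arm64)")
		if err != nil {
			panic(err)
		}
		// Evaluate it against a hypothetical tag set, the way ctxt.eval
		// evaluates against Context.matchTag.
		tags := map[string]bool{"linux": true, "arm64": true}
		fmt.Println(expr.Eval(func(tag string) bool { return tags[tag] })) // true
	}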
-
-func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
-	end := 0
-	p := content
-	ended := false       // found non-blank, non-// line, so stopped accepting // +build lines
-	inSlashStar := false // in /* */ comment
-
-Lines:
-	for len(p) > 0 {
-		line := p
-		if i := bytes.IndexByte(line, '\n'); i >= 0 {
-			line, p = line[:i], p[i+1:]
-		} else {
-			p = p[len(p):]
-		}
-		line = bytes.TrimSpace(line)
-		if len(line) == 0 && !ended { // Blank line
-			// Remember position of most recent blank line.
-			// When we find the first non-blank, non-// line,
-			// this "end" position marks the latest file position
-			// where a // +build line can appear.
-			// (It must appear _before_ a blank line before the non-blank, non-// line.
-			// Yes, that's confusing, which is part of why we moved to //go:build lines.)
-			// Note that ended==false here means that inSlashStar==false,
-			// since seeing a /* would have set ended==true.
-			end = len(content) - len(p)
-			continue Lines
-		}
-		if !bytes.HasPrefix(line, slashSlash) { // Not comment line
-			ended = true
-		}
-
-		if !inSlashStar && isGoBuildComment(line) {
-			if goBuild != nil {
-				return nil, nil, false, errMultipleGoBuild
-			}
-			goBuild = line
-		}
-		if !inSlashStar && bytes.Equal(line, binaryOnlyComment) {
-			sawBinaryOnly = true
-		}
-
-	Comments:
-		for len(line) > 0 {
-			if inSlashStar {
-				if i := bytes.Index(line, starSlash); i >= 0 {
-					inSlashStar = false
-					line = bytes.TrimSpace(line[i+len(starSlash):])
-					continue Comments
-				}
-				continue Lines
-			}
-			if bytes.HasPrefix(line, slashSlash) {
-				continue Lines
-			}
-			if bytes.HasPrefix(line, slashStar) {
-				inSlashStar = true
-				line = bytes.TrimSpace(line[len(slashStar):])
-				continue Comments
-			}
-			// Found non-comment text.
-			break Lines
-		}
-	}
-
-	return content[:end], goBuild, sawBinaryOnly, nil
-}
-
-// saveCgo saves the information from the #cgo lines in the import "C" comment.
-// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
-// that affect the way cgo's C code is built.
-func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
-	text := cg.Text()
-	for _, line := range strings.Split(text, "\n") {
-		orig := line
-
-		// Line is
-		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
-		//
-		line = strings.TrimSpace(line)
-		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
-			continue
-		}
-
-		// Split at colon.
-		line, argstr, ok := stringsCut(strings.TrimSpace(line[4:]), ":")
-		if !ok {
-			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
-		}
-
-		// Parse GOOS/GOARCH stuff.
-		f := strings.Fields(line)
-		if len(f) < 1 {
-			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
-		}
-
-		cond, verb := f[:len(f)-1], f[len(f)-1]
-		if len(cond) > 0 {
-			ok := false
-			for _, c := range cond {
-				if ctxt.matchAuto(c, nil) {
-					ok = true
-					break
-				}
-			}
-			if !ok {
-				continue
-			}
-		}
-
-		args, err := splitQuoted(argstr)
-		if err != nil {
-			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
-		}
-		for i, arg := range args {
-			if arg, ok = expandSrcDir(arg, di.Dir); !ok {
-				return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
-			}
-			args[i] = arg
-		}
-
-		switch verb {
-		case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS":
-			// Change relative paths to absolute.
-			ctxt.makePathsAbsolute(args, di.Dir)
-		}
-
-		switch verb {
-		case "CFLAGS":
-			di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
-		case "CPPFLAGS":
-			di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
-		case "CXXFLAGS":
-			di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
-		case "FFLAGS":
-			di.CgoFFLAGS = append(di.CgoFFLAGS, args...)
-		case "LDFLAGS":
-			di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
-		case "pkg-config":
-			di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
-		default:
-			return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
-		}
-	}
-	return nil
-}
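For illustration, a hypothetical cgo source file showing the kind of #cgo directives that saveCgo parses; the flags and include path are made-up examples, not taken from this repository:

	package demo

	/*
	#cgo CFLAGS: -I${SRCDIR}/include
	#cgo linux LDFLAGS: -lm
	#include <math.h>
	*/
	import "C"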
-
-// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
-// the result is safe for the shell.
-func expandSrcDir(str string, srcdir string) (string, bool) {
-	// "\" delimited paths cause safeCgoName to fail
-	// so convert native paths with a different delimiter
-	// to "/" before starting (eg: on windows).
-	srcdir = filepath.ToSlash(srcdir)
-
-	chunks := strings.Split(str, "${SRCDIR}")
-	if len(chunks) < 2 {
-		return str, safeCgoName(str)
-	}
-	ok := true
-	for _, chunk := range chunks {
-		ok = ok && (chunk == "" || safeCgoName(chunk))
-	}
-	ok = ok && (srcdir == "" || safeCgoName(srcdir))
-	res := strings.Join(chunks, srcdir)
-	return res, ok && res != ""
-}
-
-// makePathsAbsolute looks for compiler options that take paths and
-// makes them absolute. We do this because through the 1.8 release we
-// ran the compiler in the package directory, so any relative -I or -L
-// options would be relative to that directory. In 1.9 we changed to
-// running the compiler in the build directory, to get consistent
-// build results (issue #19964). To keep builds working, we change any
-// relative -I or -L options to be absolute.
-//
-// Using filepath.IsAbs and filepath.Join here means the results will be
-// different on different systems, but that's OK: -I and -L options are
-// inherently system-dependent.
-func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) {
-	nextPath := false
-	for i, arg := range args {
-		if nextPath {
-			if !filepath.IsAbs(arg) {
-				args[i] = filepath.Join(srcDir, arg)
-			}
-			nextPath = false
-		} else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") {
-			if len(arg) == 2 {
-				nextPath = true
-			} else {
-				if !filepath.IsAbs(arg[2:]) {
-					args[i] = arg[:2] + filepath.Join(srcDir, arg[2:])
-				}
-			}
-		}
-	}
-}
-
-// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
-// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
-// See golang.org/issue/6038.
-// The @ is for OS X. See golang.org/issue/13720.
-// The % is for Jenkins. See golang.org/issue/16959.
-// The ! is because module paths may use them. See golang.org/issue/26716.
-// The ~ and ^ are for sr.ht. See golang.org/issue/32260.
-const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^"
-
-func safeCgoName(s string) bool {
-	if s == "" {
-		return false
-	}
-	for i := 0; i < len(s); i++ {
-		if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 {
-			return false
-		}
-	}
-	return true
-}
-
-// splitQuoted splits the string s around each instance of one or more consecutive
-// white space characters while taking into account quotes and escaping, and
-// returns an array of substrings of s or an empty list if s contains only white space.
-// Single quotes and double quotes are recognized to prevent splitting within the
-// quoted region, and are removed from the resulting substrings. If a quote in s
-// isn't closed, err will be set and r will have the unclosed argument as the
-// last element. The backslash is used for escaping.
-//
-// For example, the following string:
-//
-//	a b:"c d" 'e''f'  "g\""
-//
-// Would be parsed as:
-//
-//	[]string{"a", "b:c d", "ef", `g"`}
-func splitQuoted(s string) (r []string, err error) {
-	var args []string
-	arg := make([]rune, len(s))
-	escaped := false
-	quoted := false
-	quote := '\x00'
-	i := 0
-	for _, rune := range s {
-		switch {
-		case escaped:
-			escaped = false
-		case rune == '\\':
-			escaped = true
-			continue
-		case quote != '\x00':
-			if rune == quote {
-				quote = '\x00'
-				continue
-			}
-		case rune == '"' || rune == '\'':
-			quoted = true
-			quote = rune
-			continue
-		case unicode.IsSpace(rune):
-			if quoted || i > 0 {
-				quoted = false
-				args = append(args, string(arg[:i]))
-				i = 0
-			}
-			continue
-		}
-		arg[i] = rune
-		i++
-	}
-	if quoted || i > 0 {
-		args = append(args, string(arg[:i]))
-	}
-	if quote != 0 {
-		err = errors.New("unclosed quote")
-	} else if escaped {
-		err = errors.New("unfinished escaping")
-	}
-	return args, err
-}
-
-// matchAuto interprets text as either a +build or //go:build expression (whichever works),
-// reporting whether the expression matches the build context.
-//
-// matchAuto is only used for testing of tag evaluation
-// and in #cgo lines, which accept either syntax.
-func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool {
-	if strings.ContainsAny(text, "&|()") {
-		text = "//go:build " + text
-	} else {
-		text = "// +build " + text
-	}
-	x, err := constraint.Parse(text)
-	if err != nil {
-		return false
-	}
-	return ctxt.eval(x, allTags)
-}
-
-func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
-	return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) })
-}
-
-// matchTag reports whether the name is one of:
-//
-//	cgo (if cgo is enabled)
-//	$GOOS
-//	$GOARCH
-//	ctxt.Compiler
-//	linux (if GOOS = android)
-//	solaris (if GOOS = illumos)
-//	tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
-//
-// It records all consulted tags in allTags.
-func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
-	if allTags != nil {
-		allTags[name] = true
-	}
-
-	// special tags
-	if ctxt.CgoEnabled && name == "cgo" {
-		return true
-	}
-	if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
-		return true
-	}
-	if ctxt.GOOS == "android" && name == "linux" {
-		return true
-	}
-	if ctxt.GOOS == "illumos" && name == "solaris" {
-		return true
-	}
-	if ctxt.GOOS == "ios" && name == "darwin" {
-		return true
-	}
-
-	// other tags
-	for _, tag := range ctxt.BuildTags {
-		if tag == name {
-			return true
-		}
-	}
-	for _, tag := range ctxt.ToolTags {
-		if tag == name {
-			return true
-		}
-	}
-	for _, tag := range ctxt.ReleaseTags {
-		if tag == name {
-			return true
-		}
-	}
-
-	return false
-}
-
-// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
-// suffix which does not match the current system.
-// The recognized name formats are:
-//
-//	name_$(GOOS).*
-//	name_$(GOARCH).*
-//	name_$(GOOS)_$(GOARCH).*
-//	name_$(GOOS)_test.*
-//	name_$(GOARCH)_test.*
-//	name_$(GOOS)_$(GOARCH)_test.*
-//
-// Exceptions:
-// if GOOS=android, then files with GOOS=linux are also matched.
-// if GOOS=illumos, then files with GOOS=solaris are also matched.
-// if GOOS=ios, then files with GOOS=darwin are also matched.
-func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
-	name, _, _ = stringsCut(name, ".")
-
-	// Before Go 1.4, a file called "linux.go" would be equivalent to having a
-	// build tag "linux" in that file. For Go 1.4 and beyond, we require this
-	// auto-tagging to apply only to files with a non-empty prefix, so
-	// "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
-	// systems, such as android, to arrive without breaking existing code with
-	// innocuous source code in "android.go". The easiest fix: cut everything
-	// in the name before the initial _.
-	i := strings.Index(name, "_")
-	if i < 0 {
-		return true
-	}
-	name = name[i:] // ignore everything before first _
-
-	l := strings.Split(name, "_")
-	if n := len(l); n > 0 && l[n-1] == "test" {
-		l = l[:n-1]
-	}
-	n := len(l)
-	if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
-		return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags)
-	}
-	if n >= 1 && (knownOS[l[n-1]] || knownArch[l[n-1]]) {
-		return ctxt.matchTag(l[n-1], allTags)
-	}
-	return true
-}
-
-var knownOS = make(map[string]bool)
-var knownArch = make(map[string]bool)
-
-func init() {
-	for _, v := range strings.Fields(goosList) {
-		knownOS[v] = true
-	}
-	for _, v := range strings.Fields(goarchList) {
-		knownArch[v] = true
-	}
-}
-
-// ToolDir is the directory containing build tools.
-var ToolDir = getToolDir()
-
-// IsLocalImport reports whether the import path is
-// a local import path, like ".", "..", "./foo", or "../foo".
-func IsLocalImport(path string) bool {
-	return path == "." || path == ".." ||
-		strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
-}
-
-// ArchChar returns "?" and an error.
-// In earlier versions of Go, the returned string was used to derive
-// the compiler and linker tool names, the default object file suffix,
-// and the default linker output name. As of Go 1.5, those strings
-// no longer vary by architecture; they are compile, link, .o, and a.out, respectively.
-func ArchChar(goarch string) (string, error) {
-	return "?", errors.New("architecture letter no longer used")
-}
-
-func bytesCut(s, sep []byte) (before, after []byte, found bool) {
-	if i := bytes.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, nil, false
-}
-
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
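The bytesCut and stringsCut helpers above are local copies of bytes.Cut and strings.Cut, which the standard library provides as of Go 1.18; that is part of why this backport can now be deleted. A quick sketch of the standard library equivalent:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		before, after, found := strings.Cut("CFLAGS: -I/usr/include", ": ")
		fmt.Printf("%q %q %v\n", before, after, found) // "CFLAGS" "-I/usr/include" true
	}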
diff --git a/internal/backport/go/build/doc.go b/internal/backport/go/build/doc.go
deleted file mode 100644
index 262f670..0000000
--- a/internal/backport/go/build/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package build gathers information about Go packages.
-//
-// # Go Path
-//
-// The Go path is a list of directory trees containing Go source code.
-// It is consulted to resolve imports that cannot be found in the standard
-// Go tree. The default path is the value of the GOPATH environment
-// variable, interpreted as a path list appropriate to the operating system
-// (on Unix, the variable is a colon-separated string;
-// on Windows, a semicolon-separated string;
-// on Plan 9, a list).
-//
-// Each directory listed in the Go path must have a prescribed structure:
-//
-// The src/ directory holds source code. The path below 'src' determines
-// the import path or executable name.
-//
-// The pkg/ directory holds installed package objects.
-// As in the Go tree, each target operating system and
-// architecture pair has its own subdirectory of pkg
-// (pkg/GOOS_GOARCH).
-//
-// If DIR is a directory listed in the Go path, a package with
-// source in DIR/src/foo/bar can be imported as "foo/bar" and
-// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
-// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
-//
-// The bin/ directory holds compiled commands.
-// Each command is named for its source directory, but only
-// using the final element, not the entire path. That is, the
-// command with source in DIR/src/foo/quux is installed into
-// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
-// so that you can add DIR/bin to your PATH to get at the
-// installed commands.
-//
-// Here's an example directory layout:
-//
-//	GOPATH=/home/user/gocode
-//
-//	/home/user/gocode/
-//	    src/
-//	        foo/
-//	            bar/               (go code in package bar)
-//	                x.go
-//	            quux/              (go code in package main)
-//	                y.go
-//	    bin/
-//	        quux                   (installed command)
-//	    pkg/
-//	        linux_amd64/
-//	            foo/
-//	                bar.a          (installed package object)
-//
-// # Build Constraints
-//
-// A build constraint, also known as a build tag, is a line comment that begins
-//
-//	//go:build
-//
-// that lists the conditions under which a file should be included in the
-// package. Build constraints may also be part of a file's name
-// (for example, source_windows.go will only be included if the target
-// operating system is windows).
-//
-// See 'go help buildconstraint'
-// (https://golang.org/cmd/go/#hdr-Build_constraints) for details.
-//
-// # Binary-Only Packages
-//
-// In Go 1.12 and earlier, it was possible to distribute packages in binary
-// form without including the source code used for compiling the package.
-// The package was distributed with a source file not excluded by build
-// constraints and containing a "//go:binary-only-package" comment. Like a
-// build constraint, this comment appeared at the top of a file, preceded
-// only by blank lines and other line comments and with a blank line
-// following the comment, to separate it from the package documentation.
-// Unlike build constraints, this comment is only recognized in non-test
-// Go source files.
-//
-// The minimal source code for a binary-only package was therefore:
-//
-//	//go:binary-only-package
-//
-//	package mypkg
-//
-// The source code could include additional Go code. That code was never
-// compiled but would be processed by tools like godoc and might be useful
-// as end-user documentation.
-//
-// "go build" and other commands no longer support binary-only-packages.
-// Import and ImportDir will still set the BinaryOnly flag in packages
-// containing these comments for use in tools and error messages.
-package build
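A minimal sketch of using the standard library go/build package, which this backported copy duplicated, to locate a package (the import path here is just an example):

	package main

	import (
		"fmt"
		"go/build"
	)

	func main() {
		// FindOnly stops after locating the directory, without parsing sources.
		pkg, err := build.Import("fmt", "", build.FindOnly)
		if err != nil {
			panic(err)
		}
		fmt.Println(pkg.ImportPath, pkg.Dir, pkg.Goroot)
	}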
diff --git a/internal/backport/go/build/gc.go b/internal/backport/go/build/gc.go
deleted file mode 100644
index e16e186..0000000
--- a/internal/backport/go/build/gc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-package build
-
-import (
-	"path/filepath"
-	"runtime"
-)
-
-// getToolDir returns the default value of ToolDir.
-func getToolDir() string {
-	return filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
-}
diff --git a/internal/backport/go/build/gccgo.go b/internal/backport/go/build/gccgo.go
deleted file mode 100644
index c8ec704..0000000
--- a/internal/backport/go/build/gccgo.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo
-// +build gccgo
-
-package build
-
-import "runtime"
-
-// getToolDir returns the default value of ToolDir.
-func getToolDir() string {
-	return envOr("GCCGOTOOLDIR", runtime.GCCGOTOOLDIR)
-}
diff --git a/internal/backport/go/build/read.go b/internal/backport/go/build/read.go
deleted file mode 100644
index e8fe9a0..0000000
--- a/internal/backport/go/build/read.go
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
-	"bufio"
-	"bytes"
-	"errors"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-	"io"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-type importReader struct {
-	b    *bufio.Reader
-	buf  []byte
-	peek byte
-	err  error
-	eof  bool
-	nerr int
-	pos  token.Position
-}
-
-var bom = []byte{0xef, 0xbb, 0xbf}
-
-func newImportReader(name string, r io.Reader) *importReader {
-	b := bufio.NewReader(r)
-	// Remove leading UTF-8 BOM.
-	// Per https://golang.org/ref/spec#Source_code_representation:
-	// a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
-	// if it is the first Unicode code point in the source text.
-	if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
-		b.Discard(3)
-	}
-	return &importReader{
-		b: b,
-		pos: token.Position{
-			Filename: name,
-			Line:     1,
-			Column:   1,
-		},
-	}
-}
-
-func isIdent(c byte) bool {
-	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
-}
-
-var (
-	errSyntax = errors.New("syntax error")
-	errNUL    = errors.New("unexpected NUL in input")
-)
-
-// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
-func (r *importReader) syntaxError() {
-	if r.err == nil {
-		r.err = errSyntax
-	}
-}
-
-// readByte reads the next byte from the input, saves it in buf, and returns it.
-// If an error occurs, readByte records the error in r.err and returns 0.
-func (r *importReader) readByte() byte {
-	c, err := r.b.ReadByte()
-	if err == nil {
-		r.buf = append(r.buf, c)
-		if c == 0 {
-			err = errNUL
-		}
-	}
-	if err != nil {
-		if err == io.EOF {
-			r.eof = true
-		} else if r.err == nil {
-			r.err = err
-		}
-		c = 0
-	}
-	return c
-}
-
-// readByteNoBuf is like readByte but doesn't buffer the byte.
-// It exhausts r.buf before reading from r.b.
-func (r *importReader) readByteNoBuf() byte {
-	var c byte
-	var err error
-	if len(r.buf) > 0 {
-		c = r.buf[0]
-		r.buf = r.buf[1:]
-	} else {
-		c, err = r.b.ReadByte()
-		if err == nil && c == 0 {
-			err = errNUL
-		}
-	}
-
-	if err != nil {
-		if err == io.EOF {
-			r.eof = true
-		} else if r.err == nil {
-			r.err = err
-		}
-		return 0
-	}
-	r.pos.Offset++
-	if c == '\n' {
-		r.pos.Line++
-		r.pos.Column = 1
-	} else {
-		r.pos.Column++
-	}
-	return c
-}
-
-// peekByte returns the next byte from the input reader but does not advance beyond it.
-// If skipSpace is set, peekByte skips leading spaces and comments.
-func (r *importReader) peekByte(skipSpace bool) byte {
-	if r.err != nil {
-		if r.nerr++; r.nerr > 10000 {
-			panic("go/build: import reader looping")
-		}
-		return 0
-	}
-
-	// Use r.peek as first input byte.
-	// Don't just return r.peek here: it might have been left by peekByte(false)
-	// and this might be peekByte(true).
-	c := r.peek
-	if c == 0 {
-		c = r.readByte()
-	}
-	for r.err == nil && !r.eof {
-		if skipSpace {
-			// For the purposes of this reader, semicolons are never necessary to
-			// understand the input and are treated as spaces.
-			switch c {
-			case ' ', '\f', '\t', '\r', '\n', ';':
-				c = r.readByte()
-				continue
-
-			case '/':
-				c = r.readByte()
-				if c == '/' {
-					for c != '\n' && r.err == nil && !r.eof {
-						c = r.readByte()
-					}
-				} else if c == '*' {
-					var c1 byte
-					for (c != '*' || c1 != '/') && r.err == nil {
-						if r.eof {
-							r.syntaxError()
-						}
-						c, c1 = c1, r.readByte()
-					}
-				} else {
-					r.syntaxError()
-				}
-				c = r.readByte()
-				continue
-			}
-		}
-		break
-	}
-	r.peek = c
-	return r.peek
-}
-
-// nextByte is like peekByte but advances beyond the returned byte.
-func (r *importReader) nextByte(skipSpace bool) byte {
-	c := r.peekByte(skipSpace)
-	r.peek = 0
-	return c
-}
-
-var goEmbed = []byte("go:embed")
-
-// findEmbed advances the input reader to the next //go:embed comment.
-// It reports whether it found a comment.
-// (Otherwise it found an error or EOF.)
-func (r *importReader) findEmbed(first bool) bool {
-	// The import block scan stopped after a non-space character,
-	// so the reader is not at the start of a line on the first call.
-	// After that, each //go:embed extraction leaves the reader
-	// at the end of a line.
-	startLine := !first
-	var c byte
-	for r.err == nil && !r.eof {
-		c = r.readByteNoBuf()
-	Reswitch:
-		switch c {
-		default:
-			startLine = false
-
-		case '\n':
-			startLine = true
-
-		case ' ', '\t':
-			// leave startLine alone
-
-		case '"':
-			startLine = false
-			for r.err == nil {
-				if r.eof {
-					r.syntaxError()
-				}
-				c = r.readByteNoBuf()
-				if c == '\\' {
-					r.readByteNoBuf()
-					if r.err != nil {
-						r.syntaxError()
-						return false
-					}
-					continue
-				}
-				if c == '"' {
-					c = r.readByteNoBuf()
-					goto Reswitch
-				}
-			}
-			goto Reswitch
-
-		case '`':
-			startLine = false
-			for r.err == nil {
-				if r.eof {
-					r.syntaxError()
-				}
-				c = r.readByteNoBuf()
-				if c == '`' {
-					c = r.readByteNoBuf()
-					goto Reswitch
-				}
-			}
-
-		case '\'':
-			startLine = false
-			for r.err == nil {
-				if r.eof {
-					r.syntaxError()
-				}
-				c = r.readByteNoBuf()
-				if c == '\\' {
-					r.readByteNoBuf()
-					if r.err != nil {
-						r.syntaxError()
-						return false
-					}
-					continue
-				}
-				if c == '\'' {
-					c = r.readByteNoBuf()
-					goto Reswitch
-				}
-			}
-
-		case '/':
-			c = r.readByteNoBuf()
-			switch c {
-			default:
-				startLine = false
-				goto Reswitch
-
-			case '*':
-				var c1 byte
-				for (c != '*' || c1 != '/') && r.err == nil {
-					if r.eof {
-						r.syntaxError()
-					}
-					c, c1 = c1, r.readByteNoBuf()
-				}
-				startLine = false
-
-			case '/':
-				if startLine {
-					// Try to read this as a //go:embed comment.
-					for i := range goEmbed {
-						c = r.readByteNoBuf()
-						if c != goEmbed[i] {
-							goto SkipSlashSlash
-						}
-					}
-					c = r.readByteNoBuf()
-					if c == ' ' || c == '\t' {
-						// Found one!
-						return true
-					}
-				}
-			SkipSlashSlash:
-				for c != '\n' && r.err == nil && !r.eof {
-					c = r.readByteNoBuf()
-				}
-				startLine = true
-			}
-		}
-	}
-	return false
-}
-
-// readKeyword reads the given keyword from the input.
-// If the keyword is not present, readKeyword records a syntax error.
-func (r *importReader) readKeyword(kw string) {
-	r.peekByte(true)
-	for i := 0; i < len(kw); i++ {
-		if r.nextByte(false) != kw[i] {
-			r.syntaxError()
-			return
-		}
-	}
-	if isIdent(r.peekByte(false)) {
-		r.syntaxError()
-	}
-}
-
-// readIdent reads an identifier from the input.
-// If an identifier is not present, readIdent records a syntax error.
-func (r *importReader) readIdent() {
-	c := r.peekByte(true)
-	if !isIdent(c) {
-		r.syntaxError()
-		return
-	}
-	for isIdent(r.peekByte(false)) {
-		r.peek = 0
-	}
-}
-
-// readString reads a quoted string literal from the input.
-// If an identifier is not present, readString records a syntax error.
-func (r *importReader) readString() {
-	switch r.nextByte(true) {
-	case '`':
-		for r.err == nil {
-			if r.nextByte(false) == '`' {
-				break
-			}
-			if r.eof {
-				r.syntaxError()
-			}
-		}
-	case '"':
-		for r.err == nil {
-			c := r.nextByte(false)
-			if c == '"' {
-				break
-			}
-			if r.eof || c == '\n' {
-				r.syntaxError()
-			}
-			if c == '\\' {
-				r.nextByte(false)
-			}
-		}
-	default:
-		r.syntaxError()
-	}
-}
-
-// readImport reads an import clause - optional identifier followed by quoted string -
-// from the input.
-func (r *importReader) readImport() {
-	c := r.peekByte(true)
-	if c == '.' {
-		r.peek = 0
-	} else if isIdent(c) {
-		r.readIdent()
-	}
-	r.readString()
-}
-
-// readComments is like io.ReadAll, except that it only reads the leading
-// block of comments in the file.
-func readComments(f io.Reader) ([]byte, error) {
-	r := newImportReader("", f)
-	r.peekByte(true)
-	if r.err == nil && !r.eof {
-		// Didn't reach EOF, so must have found a non-space byte. Remove it.
-		r.buf = r.buf[:len(r.buf)-1]
-	}
-	return r.buf, r.err
-}
-
-// readGoInfo expects a Go file as input and reads the file up to and including the import section.
-// It records what it learned in *info.
-// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
-// info.imports, info.embeds, and info.embedErr.
-//
-// It only returns an error if there are problems reading the file,
-// not for syntax errors in the file itself.
-func readGoInfo(f io.Reader, info *fileInfo) error {
-	r := newImportReader(info.name, f)
-
-	r.readKeyword("package")
-	r.readIdent()
-	for r.peekByte(true) == 'i' {
-		r.readKeyword("import")
-		if r.peekByte(true) == '(' {
-			r.nextByte(false)
-			for r.peekByte(true) != ')' && r.err == nil {
-				r.readImport()
-			}
-			r.nextByte(false)
-		} else {
-			r.readImport()
-		}
-	}
-
-	info.header = r.buf
-
-	// If we stopped successfully before EOF, we read a byte that told us we were done.
-	// Return all but that last byte, which would cause a syntax error if we let it through.
-	if r.err == nil && !r.eof {
-		info.header = r.buf[:len(r.buf)-1]
-	}
-
-	// If we stopped for a syntax error, consume the whole file so that
-	// we are sure we don't change the errors that go/parser returns.
-	if r.err == errSyntax {
-		r.err = nil
-		for r.err == nil && !r.eof {
-			r.readByte()
-		}
-		info.header = r.buf
-	}
-	if r.err != nil {
-		return r.err
-	}
-
-	if info.fset == nil {
-		return nil
-	}
-
-	// Parse file header & record imports.
-	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
-	if info.parseErr != nil {
-		return nil
-	}
-
-	hasEmbed := false
-	for _, decl := range info.parsed.Decls {
-		d, ok := decl.(*ast.GenDecl)
-		if !ok {
-			continue
-		}
-		for _, dspec := range d.Specs {
-			spec, ok := dspec.(*ast.ImportSpec)
-			if !ok {
-				continue
-			}
-			quoted := spec.Path.Value
-			path, err := strconv.Unquote(quoted)
-			if err != nil {
-				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
-			}
-			if path == "embed" {
-				hasEmbed = true
-			}
-
-			doc := spec.Doc
-			if doc == nil && len(d.Specs) == 1 {
-				doc = d.Doc
-			}
-			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
-		}
-	}
-
-	// If the file imports "embed",
-	// we have to look for //go:embed comments
-	// in the remainder of the file.
-	// The compiler will enforce the mapping of comments to
-	// declared variables. We just need to know the patterns.
-	// If there were //go:embed comments earlier in the file
-	// (near the package statement or imports), the compiler
-	// will reject them. They can be (and have already been) ignored.
-	if hasEmbed {
-		var line []byte
-		for first := true; r.findEmbed(first); first = false {
-			line = line[:0]
-			pos := r.pos
-			for {
-				c := r.readByteNoBuf()
-				if c == '\n' || r.err != nil || r.eof {
-					break
-				}
-				line = append(line, c)
-			}
-			// Add args if line is well-formed.
-			// Ignore badly-formed lines - the compiler will report them when it finds them,
-			// and we can pretend they are not there to help go list succeed with what it knows.
-			embs, err := parseGoEmbed(string(line), pos)
-			if err == nil {
-				info.embeds = append(info.embeds, embs...)
-			}
-		}
-	}
-
-	return nil
-}
-
-// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
-// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
-// This is based on a similar function in cmd/compile/internal/gc/noder.go;
-// this version calculates position information as well.
-func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) {
-	trimBytes := func(n int) {
-		pos.Offset += n
-		pos.Column += utf8.RuneCountInString(args[:n])
-		args = args[n:]
-	}
-	trimSpace := func() {
-		trim := strings.TrimLeftFunc(args, unicode.IsSpace)
-		trimBytes(len(args) - len(trim))
-	}
-
-	var list []fileEmbed
-	for trimSpace(); args != ""; trimSpace() {
-		var path string
-		pathPos := pos
-	Switch:
-		switch args[0] {
-		default:
-			i := len(args)
-			for j, c := range args {
-				if unicode.IsSpace(c) {
-					i = j
-					break
-				}
-			}
-			path = args[:i]
-			trimBytes(i)
-
-		case '`':
-			var ok bool
-			path, _, ok = stringsCut(args[1:], "`")
-			if !ok {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-			trimBytes(1 + len(path) + 1)
-
-		case '"':
-			i := 1
-			for ; i < len(args); i++ {
-				if args[i] == '\\' {
-					i++
-					continue
-				}
-				if args[i] == '"' {
-					q, err := strconv.Unquote(args[:i+1])
-					if err != nil {
-						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
-					}
-					path = q
-					trimBytes(i + 1)
-					break Switch
-				}
-			}
-			if i >= len(args) {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-		}
-
-		if args != "" {
-			r, _ := utf8.DecodeRuneInString(args)
-			if !unicode.IsSpace(r) {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-		}
-		list = append(list, fileEmbed{path, pathPos})
-	}
-	return list, nil
-}
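For illustration, a hypothetical file using the //go:embed directives that readGoInfo and parseGoEmbed scan for; the patterns are made up and would need matching files to compile:

	package demo

	import "embed"

	// parseGoEmbed would record the patterns "static/*" and
	// "templates/index.html" along with their positions.
	//
	//go:embed static/* "templates/index.html"
	var assets embed.FS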
diff --git a/internal/backport/go/build/read_test.go b/internal/backport/go/build/read_test.go
deleted file mode 100644
index f15b153..0000000
--- a/internal/backport/go/build/read_test.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"testing"
-
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-const quote = "`"
-
-type readTest struct {
-	// Test input contains ℙ where readGoInfo should stop.
-	in  string
-	err string
-}
-
-var readGoInfoTests = []readTest{
-	{
-		`package p`,
-		"",
-	},
-	{
-		`package p; import "x"`,
-		"",
-	},
-	{
-		`package p; import . "x"`,
-		"",
-	},
-	{
-		`package p; import "x";ℙvar x = 1`,
-		"",
-	},
-	{
-		`package p
-
-		// comment
-
-		import "x"
-		import _ "x"
-		import a "x"
-
-		/* comment */
-
-		import (
-			"x" /* comment */
-			_ "x"
-			a "x" // comment
-			` + quote + `x` + quote + `
-			_ /*comment*/ ` + quote + `x` + quote + `
-			a ` + quote + `x` + quote + `
-		)
-		import (
-		)
-		import ()
-		import()import()import()
-		import();import();import()
-
-		ℙvar x = 1
-		`,
-		"",
-	},
-	{
-		"\ufeff𝔻" + `package p; import "x";ℙvar x = 1`,
-		"",
-	},
-}
-
-var readCommentsTests = []readTest{
-	{
-		`ℙpackage p`,
-		"",
-	},
-	{
-		`ℙpackage p; import "x"`,
-		"",
-	},
-	{
-		`ℙpackage p; import . "x"`,
-		"",
-	},
-	{
-		"\ufeff𝔻" + `ℙpackage p; import . "x"`,
-		"",
-	},
-	{
-		`// foo
-
-		/* bar */
-
-		/* quux */ // baz
-
-		/*/ zot */
-
-		// asdf
-		ℙHello, world`,
-		"",
-	},
-	{
-		"\ufeff𝔻" + `// foo
-
-		/* bar */
-
-		/* quux */ // baz
-
-		/*/ zot */
-
-		// asdf
-		ℙHello, world`,
-		"",
-	},
-}
-
-func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) {
-	for i, tt := range tests {
-		beforeP, afterP, _ := stringsCut1(tt.in, "ℙ")
-		in := beforeP + afterP
-		testOut := beforeP
-
-		if beforeD, afterD, ok := stringsCut1(beforeP, "𝔻"); ok {
-			in = beforeD + afterD + afterP
-			testOut = afterD
-		}
-
-		r := strings.NewReader(in)
-		buf, err := read(r)
-		if err != nil {
-			if tt.err == "" {
-				t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf))
-			} else if !strings.Contains(err.Error(), tt.err) {
-				t.Errorf("#%d: err=%q, expected %q", i, err, tt.err)
-			}
-			continue
-		}
-		if tt.err != "" {
-			t.Errorf("#%d: success, expected %q", i, tt.err)
-			continue
-		}
-
-		out := string(buf)
-		if out != testOut {
-			t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut)
-		}
-	}
-}
-
-func TestReadGoInfo(t *testing.T) {
-	testRead(t, readGoInfoTests, func(r io.Reader) ([]byte, error) {
-		var info fileInfo
-		err := readGoInfo(r, &info)
-		return info.header, err
-	})
-}
-
-func TestReadComments(t *testing.T) {
-	testRead(t, readCommentsTests, readComments)
-}
-
-var readFailuresTests = []readTest{
-	{
-		`package`,
-		"syntax error",
-	},
-	{
-		"package p\n\x00\nimport `math`\n",
-		"unexpected NUL in input",
-	},
-	{
-		`package p; import`,
-		"syntax error",
-	},
-	{
-		`package p; import "`,
-		"syntax error",
-	},
-	{
-		"package p; import ` \n\n",
-		"syntax error",
-	},
-	{
-		`package p; import "x`,
-		"syntax error",
-	},
-	{
-		`package p; import _`,
-		"syntax error",
-	},
-	{
-		`package p; import _ "`,
-		"syntax error",
-	},
-	{
-		`package p; import _ "x`,
-		"syntax error",
-	},
-	{
-		`package p; import .`,
-		"syntax error",
-	},
-	{
-		`package p; import . "`,
-		"syntax error",
-	},
-	{
-		`package p; import . "x`,
-		"syntax error",
-	},
-	{
-		`package p; import (`,
-		"syntax error",
-	},
-	{
-		`package p; import ("`,
-		"syntax error",
-	},
-	{
-		`package p; import ("x`,
-		"syntax error",
-	},
-	{
-		`package p; import ("x"`,
-		"syntax error",
-	},
-}
-
-func TestReadFailuresIgnored(t *testing.T) {
-	// Syntax errors should not be reported (false arg to readImports).
-	// Instead, entire file should be the output and no error.
-	// Convert tests not to return syntax errors.
-	tests := make([]readTest, len(readFailuresTests))
-	copy(tests, readFailuresTests)
-	for i := range tests {
-		tt := &tests[i]
-		if !strings.Contains(tt.err, "NUL") {
-			tt.err = ""
-		}
-	}
-	testRead(t, tests, func(r io.Reader) ([]byte, error) {
-		var info fileInfo
-		err := readGoInfo(r, &info)
-		return info.header, err
-	})
-}
-
-var readEmbedTests = []struct {
-	in, out string
-}{
-	{
-		"package p\n",
-		"",
-	},
-	{
-		"package p\nimport \"embed\"\nvar i int\n//go:embed x y z\nvar files embed.FS",
-		`test:4:12:x
-		 test:4:14:y
-		 test:4:16:z`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar i int\n//go:embed x \"\\x79\" `z`\nvar files embed.FS",
-		`test:4:12:x
-		 test:4:14:y
-		 test:4:21:z`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar i int\n//go:embed x y\n//go:embed z\nvar files embed.FS",
-		`test:4:12:x
-		 test:4:14:y
-		 test:5:12:z`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar i int\n\t //go:embed x y\n\t //go:embed z\n\t var files embed.FS",
-		`test:4:14:x
-		 test:4:16:y
-		 test:5:14:z`,
-	},
-	{
-		"package p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS",
-		`test:3:12:x
-		 test:3:14:y
-		 test:3:16:z`,
-	},
-	{
-		"\ufeffpackage p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS",
-		`test:3:12:x
-		 test:3:14:y
-		 test:3:16:z`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar s = \"/*\"\n//go:embed x\nvar files embed.FS",
-		`test:4:12:x`,
-	},
-	{
-		`package p
-		 import "embed"
-		 var s = "\"\\\\"
-		 //go:embed x
-		 var files embed.FS`,
-		`test:4:15:x`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar s = `/*`\n//go:embed x\nvar files embed.FS",
-		`test:4:12:x`,
-	},
-	{
-		"package p\nimport \"embed\"\nvar s = z/ *y\n//go:embed pointer\nvar pointer embed.FS",
-		"test:4:12:pointer",
-	},
-	{
-		"package p\n//go:embed x y z\n", // no import, no scan
-		"",
-	},
-	{
-		"package p\n//go:embed x y z\nvar files embed.FS", // no import, no scan
-		"",
-	},
-	{
-		"\ufeffpackage p\n//go:embed x y z\nvar files embed.FS", // no import, no scan
-		"",
-	},
-}
-
-func TestReadEmbed(t *testing.T) {
-	fset := token.NewFileSet()
-	for i, tt := range readEmbedTests {
-		info := fileInfo{
-			name: "test",
-			fset: fset,
-		}
-		err := readGoInfo(strings.NewReader(tt.in), &info)
-		if err != nil {
-			t.Errorf("#%d: %v", i, err)
-			continue
-		}
-		b := &strings.Builder{}
-		sep := ""
-		for _, emb := range info.embeds {
-			fmt.Fprintf(b, "%s%v:%s", sep, emb.pos, emb.pattern)
-			sep = "\n"
-		}
-		got := b.String()
-		want := strings.Join(strings.Fields(tt.out), "\n")
-		if got != want {
-			t.Errorf("#%d: embeds:\n%s\nwant:\n%s", i, got, want)
-		}
-	}
-}
-
-func stringsCut1(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
diff --git a/internal/backport/go/build/syslist.go b/internal/backport/go/build/syslist.go
deleted file mode 100644
index 0f6e336..0000000
--- a/internal/backport/go/build/syslist.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-// List of past, present, and future known GOOS and GOARCH values.
-// Do not remove from this list, as these are used for go/build filename matching.
-
-const goosList = "aix android darwin dragonfly freebsd hurd illumos ios js linux nacl netbsd openbsd plan9 solaris windows zos "
-const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be loong64 mips mipsle mips64 mips64le mips64p32 mips64p32le ppc ppc64 ppc64le riscv riscv64 s390 s390x sparc sparc64 wasm "
diff --git a/internal/backport/go/build/syslist_test.go b/internal/backport/go/build/syslist_test.go
deleted file mode 100644
index 2b7b4c7..0000000
--- a/internal/backport/go/build/syslist_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
-	"runtime"
-	"testing"
-)
-
-var (
-	thisOS    = runtime.GOOS
-	thisArch  = runtime.GOARCH
-	otherOS   = anotherOS()
-	otherArch = anotherArch()
-)
-
-func anotherOS() string {
-	if thisOS != "darwin" && thisOS != "ios" {
-		return "darwin"
-	}
-	return "linux"
-}
-
-func anotherArch() string {
-	if thisArch != "amd64" {
-		return "amd64"
-	}
-	return "386"
-}
-
-type GoodFileTest struct {
-	name   string
-	result bool
-}
-
-var tests = []GoodFileTest{
-	{"file.go", true},
-	{"file.c", true},
-	{"file_foo.go", true},
-	{"file_" + thisArch + ".go", true},
-	{"file_" + otherArch + ".go", false},
-	{"file_" + thisOS + ".go", true},
-	{"file_" + otherOS + ".go", false},
-	{"file_" + thisOS + "_" + thisArch + ".go", true},
-	{"file_" + otherOS + "_" + thisArch + ".go", false},
-	{"file_" + thisOS + "_" + otherArch + ".go", false},
-	{"file_" + otherOS + "_" + otherArch + ".go", false},
-	{"file_foo_" + thisArch + ".go", true},
-	{"file_foo_" + otherArch + ".go", false},
-	{"file_" + thisOS + ".c", true},
-	{"file_" + otherOS + ".c", false},
-}
-
-func TestGoodOSArch(t *testing.T) {
-	for _, test := range tests {
-		if Default.goodOSArchFile(test.name, make(map[string]bool)) != test.result {
-			t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
-		}
-	}
-}
diff --git a/internal/backport/go/doc/Makefile b/internal/backport/go/doc/Makefile
deleted file mode 100644
index ca4948f..0000000
--- a/internal/backport/go/doc/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# Script to test heading detection heuristic
-headscan: headscan.go
-	go build headscan.go
diff --git a/internal/backport/go/doc/comment.go b/internal/backport/go/doc/comment.go
deleted file mode 100644
index 6cf5926..0000000
--- a/internal/backport/go/doc/comment.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"io"
-
-	"golang.org/x/website/internal/backport/go/doc/comment"
-)
-
-// ToHTML converts comment text to formatted HTML.
-//
-// Deprecated: ToHTML cannot identify documentation links
-// in the doc comment, because they depend on knowing what
-// package the text came from, which is not included in this API.
-//
-// Given the *[doc.Package] p where text was found,
-// ToHTML(w, text, nil) can be replaced by:
-//
-//	w.Write(p.HTML(text))
-//
-// which is in turn shorthand for:
-//
-//	w.Write(p.Printer().HTML(p.Parser().Parse(text)))
-//
-// If words may be non-nil, the longer replacement is:
-//
-//	parser := p.Parser()
-//	parser.Words = words
-//	w.Write(p.Printer().HTML(parser.Parse(d)))
-func ToHTML(w io.Writer, text string, words map[string]string) {
-	p := new(Package).Parser()
-	p.Words = words
-	d := p.Parse(text)
-	pr := new(comment.Printer)
-	w.Write(pr.HTML(d))
-}
-
-// ToText converts comment text to formatted text.
-//
-// Deprecated: ToText cannot identify documentation links
-// in the doc comment, because they depend on knowing what
-// package the text came from, which is not included in this API.
-//
-// Given the *[doc.Package] p where text was found,
-// ToText(w, text, "", "\t", 80) can be replaced by:
-//
-//	w.Write(p.Text(text))
-//
-// In the general case, ToText(w, text, prefix, codePrefix, width)
-// can be replaced by:
-//
-//	d := p.Parser().Parse(text)
-//	pr := p.Printer()
-//	pr.TextPrefix = prefix
-//	pr.TextCodePrefix = codePrefix
-//	pr.TextWidth = width
-//	w.Write(pr.Text(d))
-//
-// See the documentation for [Package.Text] and [comment.Printer.Text]
-// for more details.
-func ToText(w io.Writer, text string, prefix, codePrefix string, width int) {
-	d := new(Package).Parser().Parse(text)
-	pr := &comment.Printer{
-		TextPrefix:     prefix,
-		TextCodePrefix: codePrefix,
-		TextWidth:      width,
-	}
-	w.Write(pr.Text(d))
-}
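A small sketch of the replacement API that the deprecation notices above point to, using the go/doc/comment package that is available in the standard library as of Go 1.19:

	package main

	import (
		"go/doc/comment"
		"os"
	)

	func main() {
		var p comment.Parser
		d := p.Parse("This is a doc comment.\n\nIt has two paragraphs.\n")

		var pr comment.Printer
		os.Stdout.Write(pr.HTML(d)) // or pr.Text(d), pr.Markdown(d)
	}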
diff --git a/internal/backport/go/doc/comment/doc.go b/internal/backport/go/doc/comment/doc.go
deleted file mode 100644
index 45a476a..0000000
--- a/internal/backport/go/doc/comment/doc.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package comment implements parsing and reformatting of Go doc comments
-(documentation comments), which are comments that immediately precede
-a top-level declaration of a package, const, func, type, or var.
-
-Go doc comment syntax is a simplified subset of Markdown that supports
-links, headings, paragraphs, lists (without nesting), and preformatted text blocks.
-The details of the syntax are documented at https://go.dev/doc/comment.
-
-To parse the text associated with a doc comment (after removing comment markers),
-use a [Parser]:
-
-	var p comment.Parser
-	doc := p.Parse(text)
-
-The result is a [*Doc].
-To reformat it as a doc comment, HTML, Markdown, or plain text,
-use a [Printer]:
-
-	var pr comment.Printer
-	os.Stdout.Write(pr.Text(doc))
-
-The [Parser] and [Printer] types are structs whose fields can be
-modified to customize the operations.
-For details, see the documentation for those types.
-
-Use cases that need additional control over reformatting can
-implement their own logic by inspecting the parsed syntax itself.
-See the documentation for [Doc], [Block], [Text] for an overview
-and links to additional types.
-*/
-package comment
diff --git a/internal/backport/go/doc/comment/html.go b/internal/backport/go/doc/comment/html.go
deleted file mode 100644
index bc076f6..0000000
--- a/internal/backport/go/doc/comment/html.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-)
-
-// An htmlPrinter holds the state needed for printing a Doc as HTML.
-type htmlPrinter struct {
-	*Printer
-	tight bool
-}
-
-// HTML returns an HTML formatting of the Doc.
-// See the [Printer] documentation for ways to customize the HTML output.
-func (p *Printer) HTML(d *Doc) []byte {
-	hp := &htmlPrinter{Printer: p}
-	var out bytes.Buffer
-	for _, x := range d.Content {
-		hp.block(&out, x)
-	}
-	return out.Bytes()
-}
-
-// block prints the block x to out.
-func (p *htmlPrinter) block(out *bytes.Buffer, x Block) {
-	switch x := x.(type) {
-	default:
-		fmt.Fprintf(out, "?%T", x)
-
-	case *Paragraph:
-		if !p.tight {
-			out.WriteString("<p>")
-		}
-		p.text(out, x.Text)
-		out.WriteString("\n")
-
-	case *Heading:
-		out.WriteString("<h")
-		h := strconv.Itoa(p.headingLevel())
-		out.WriteString(h)
-		if id := p.headingID(x); id != "" {
-			out.WriteString(` id="`)
-			p.escape(out, id)
-			out.WriteString(`"`)
-		}
-		out.WriteString(">")
-		p.text(out, x.Text)
-		out.WriteString("</h")
-		out.WriteString(h)
-		out.WriteString(">\n")
-
-	case *Code:
-		out.WriteString("<pre>")
-		p.escape(out, x.Text)
-		out.WriteString("</pre>\n")
-
-	case *List:
-		kind := "ol>\n"
-		if x.Items[0].Number == "" {
-			kind = "ul>\n"
-		}
-		out.WriteString("<")
-		out.WriteString(kind)
-		next := "1"
-		for _, item := range x.Items {
-			out.WriteString("<li")
-			if n := item.Number; n != "" {
-				if n != next {
-					out.WriteString(` value="`)
-					out.WriteString(n)
-					out.WriteString(`"`)
-					next = n
-				}
-				next = inc(next)
-			}
-			out.WriteString(">")
-			p.tight = !x.BlankBetween()
-			for _, blk := range item.Content {
-				p.block(out, blk)
-			}
-			p.tight = false
-		}
-		out.WriteString("</")
-		out.WriteString(kind)
-	}
-}
-
-// inc increments the decimal string s.
-// For example, inc("1199") == "1200".
-func inc(s string) string {
-	b := []byte(s)
-	for i := len(b) - 1; i >= 0; i-- {
-		if b[i] < '9' {
-			b[i]++
-			return string(b)
-		}
-		b[i] = '0'
-	}
-	return "1" + string(b)
-}
-
-// text prints the text sequence x to out.
-func (p *htmlPrinter) text(out *bytes.Buffer, x []Text) {
-	for _, t := range x {
-		switch t := t.(type) {
-		case Plain:
-			p.escape(out, string(t))
-		case Italic:
-			out.WriteString("<i>")
-			p.escape(out, string(t))
-			out.WriteString("</i>")
-		case *Link:
-			out.WriteString(`<a href="`)
-			p.escape(out, t.URL)
-			out.WriteString(`">`)
-			p.text(out, t.Text)
-			out.WriteString("</a>")
-		case *DocLink:
-			url := p.docLinkURL(t)
-			if url != "" {
-				out.WriteString(`<a href="`)
-				p.escape(out, url)
-				out.WriteString(`">`)
-			}
-			p.text(out, t.Text)
-			if url != "" {
-				out.WriteString("</a>")
-			}
-		}
-	}
-}
-
-// escape prints s to out as plain text,
-// escaping < & " ' and > to avoid being misinterpreted
-// in larger HTML constructs.
-func (p *htmlPrinter) escape(out *bytes.Buffer, s string) {
-	start := 0
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '<':
-			out.WriteString(s[start:i])
-			out.WriteString("&lt;")
-			start = i + 1
-		case '&':
-			out.WriteString(s[start:i])
-			out.WriteString("&amp;")
-			start = i + 1
-		case '"':
-			out.WriteString(s[start:i])
-			out.WriteString("&quot;")
-			start = i + 1
-		case '\'':
-			out.WriteString(s[start:i])
-			out.WriteString("&apos;")
-			start = i + 1
-		case '>':
-			out.WriteString(s[start:i])
-			out.WriteString("&gt;")
-			start = i + 1
-		}
-	}
-	out.WriteString(s[start:])
-}
diff --git a/internal/backport/go/doc/comment/markdown.go b/internal/backport/go/doc/comment/markdown.go
deleted file mode 100644
index 26937a4..0000000
--- a/internal/backport/go/doc/comment/markdown.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-// An mdPrinter holds the state needed for printing a Doc as Markdown.
-type mdPrinter struct {
-	*Printer
-	headingPrefix string
-	raw           bytes.Buffer
-}
-
-// Markdown returns a Markdown formatting of the Doc.
-// See the [Printer] documentation for ways to customize the Markdown output.
-func (p *Printer) Markdown(d *Doc) []byte {
-	mp := &mdPrinter{
-		Printer:       p,
-		headingPrefix: strings.Repeat("#", p.headingLevel()) + " ",
-	}
-
-	var out bytes.Buffer
-	for i, x := range d.Content {
-		if i > 0 {
-			out.WriteByte('\n')
-		}
-		mp.block(&out, x)
-	}
-	return out.Bytes()
-}
-
-// block prints the block x to out.
-func (p *mdPrinter) block(out *bytes.Buffer, x Block) {
-	switch x := x.(type) {
-	default:
-		fmt.Fprintf(out, "?%T", x)
-
-	case *Paragraph:
-		p.text(out, x.Text)
-		out.WriteString("\n")
-
-	case *Heading:
-		out.WriteString(p.headingPrefix)
-		p.text(out, x.Text)
-		if id := p.headingID(x); id != "" {
-			out.WriteString(" {#")
-			out.WriteString(id)
-			out.WriteString("}")
-		}
-		out.WriteString("\n")
-
-	case *Code:
-		md := x.Text
-		for md != "" {
-			var line string
-			line, md, _ = stringsCut(md, "\n")
-			if line != "" {
-				out.WriteString("\t")
-				out.WriteString(line)
-			}
-			out.WriteString("\n")
-		}
-
-	case *List:
-		loose := x.BlankBetween()
-		for i, item := range x.Items {
-			if i > 0 && loose {
-				out.WriteString("\n")
-			}
-			if n := item.Number; n != "" {
-				out.WriteString(" ")
-				out.WriteString(n)
-				out.WriteString(". ")
-			} else {
-				out.WriteString("  - ") // SP SP - SP
-			}
-			for i, blk := range item.Content {
-				const fourSpace = "    "
-				if i > 0 {
-					out.WriteString("\n" + fourSpace)
-				}
-				p.text(out, blk.(*Paragraph).Text)
-				out.WriteString("\n")
-			}
-		}
-	}
-}
-
-// text prints the text sequence x to out.
-func (p *mdPrinter) text(out *bytes.Buffer, x []Text) {
-	p.raw.Reset()
-	p.rawText(&p.raw, x)
-	line := bytes.TrimSpace(p.raw.Bytes())
-	if len(line) == 0 {
-		return
-	}
-	switch line[0] {
-	case '+', '-', '*', '#':
-		// Escape what would be the start of an unordered list or heading.
-		out.WriteByte('\\')
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		i := 1
-		for i < len(line) && '0' <= line[i] && line[i] <= '9' {
-			i++
-		}
-		if i < len(line) && (line[i] == '.' || line[i] == ')') {
-			// Escape what would be the start of an ordered list.
-			out.Write(line[:i])
-			out.WriteByte('\\')
-			line = line[i:]
-		}
-	}
-	out.Write(line)
-}
-
-// rawText prints the text sequence x to out,
-// without worrying about escaping characters
-// that have special meaning at the start of a Markdown line.
-func (p *mdPrinter) rawText(out *bytes.Buffer, x []Text) {
-	for _, t := range x {
-		switch t := t.(type) {
-		case Plain:
-			p.escape(out, string(t))
-		case Italic:
-			out.WriteString("*")
-			p.escape(out, string(t))
-			out.WriteString("*")
-		case *Link:
-			out.WriteString("[")
-			p.rawText(out, t.Text)
-			out.WriteString("](")
-			out.WriteString(t.URL)
-			out.WriteString(")")
-		case *DocLink:
-			url := p.docLinkURL(t)
-			if url != "" {
-				out.WriteString("[")
-			}
-			p.rawText(out, t.Text)
-			if url != "" {
-				out.WriteString("](")
-				url = strings.ReplaceAll(url, "(", "%28")
-				url = strings.ReplaceAll(url, ")", "%29")
-				out.WriteString(url)
-				out.WriteString(")")
-			}
-		}
-	}
-}
-
-// escape prints s to out as plain text,
-// escaping special characters to avoid being misinterpreted
-// as Markdown markup sequences.
-func (p *mdPrinter) escape(out *bytes.Buffer, s string) {
-	start := 0
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '\n':
-			// Turn all \n into spaces, for a few reasons:
-			//   - Avoid introducing paragraph breaks accidentally.
-			//   - Avoid the need to reindent after the newline.
-			//   - Avoid problems with Markdown renderers treating
-			//     every mid-paragraph newline as a <br>.
-			out.WriteString(s[start:i])
-			out.WriteByte(' ')
-			start = i + 1
-			continue
-		case '`', '_', '*', '[', '<', '\\':
-			// Not all of these need to be escaped all the time,
-			// but is valid and easy to do so.
-			// We assume the Markdown is being passed to a
-			// Markdown renderer, not edited by a person,
-			// so it's fine to have escapes that are not strictly
-			// necessary in some cases.
-			out.WriteString(s[start:i])
-			out.WriteByte('\\')
-			out.WriteByte(s[i])
-			start = i + 1
-		}
-	}
-	out.WriteString(s[start:])
-}
diff --git a/internal/backport/go/doc/comment/mkstd.sh b/internal/backport/go/doc/comment/mkstd.sh
deleted file mode 100755
index c9dee8c..0000000
--- a/internal/backport/go/doc/comment/mkstd.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-# Copyright 2022 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# This could be a good use for embed but go/doc/comment
-# is built into the bootstrap go command, so it can't use embed.
-# Also not using embed lets us emit a string array directly
-# and avoid init-time work.
-
-(
-echo "// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by 'go generate' DO NOT EDIT.
-//go:generate ./mkstd.sh
-
-package comment
-
-var stdPkgs = []string{"
-go list std | grep -v / | sort | sed 's/.*/"&",/'
-echo "}"
-) | gofmt >std.go.tmp && mv std.go.tmp std.go
diff --git a/internal/backport/go/doc/comment/old_test.go b/internal/backport/go/doc/comment/old_test.go
deleted file mode 100644
index 944f94d..0000000
--- a/internal/backport/go/doc/comment/old_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// These tests are carried forward from the old go/doc implementation.
-
-package comment
-
-import "testing"
-
-var oldHeadingTests = []struct {
-	line string
-	ok   bool
-}{
-	{"Section", true},
-	{"A typical usage", true},
-	{"ΔΛΞ is Greek", true},
-	{"Foo 42", true},
-	{"", false},
-	{"section", false},
-	{"A typical usage:", false},
-	{"This code:", false},
-	{"δ is Greek", false},
-	{"Foo §", false},
-	{"Fermat's Last Sentence", true},
-	{"Fermat's", true},
-	{"'sX", false},
-	{"Ted 'Too' Bar", false},
-	{"Use n+m", false},
-	{"Scanning:", false},
-	{"N:M", false},
-}
-
-func TestIsOldHeading(t *testing.T) {
-	for _, tt := range oldHeadingTests {
-		if isOldHeading(tt.line, []string{"Text.", "", tt.line, "", "Text."}, 2) != tt.ok {
-			t.Errorf("isOldHeading(%q) = %v, want %v", tt.line, !tt.ok, tt.ok)
-		}
-	}
-}
-
-var autoURLTests = []struct {
-	in, out string
-}{
-	{"", ""},
-	{"http://[::1]:8080/foo.txt", "http://[::1]:8080/foo.txt"},
-	{"https://www.google.com) after", "https://www.google.com"},
-	{"https://www.google.com:30/x/y/z:b::c. After", "https://www.google.com:30/x/y/z:b::c"},
-	{"http://www.google.com/path/:;!-/?query=%34b#093124", "http://www.google.com/path/:;!-/?query=%34b#093124"},
-	{"http://www.google.com/path/:;!-/?query=%34bar#093124", "http://www.google.com/path/:;!-/?query=%34bar#093124"},
-	{"http://www.google.com/index.html! After", "http://www.google.com/index.html"},
-	{"http://www.google.com/", "http://www.google.com/"},
-	{"https://www.google.com/", "https://www.google.com/"},
-	{"http://www.google.com/path.", "http://www.google.com/path"},
-	{"http://en.wikipedia.org/wiki/Camellia_(cipher)", "http://en.wikipedia.org/wiki/Camellia_(cipher)"},
-	{"http://www.google.com/)", "http://www.google.com/"},
-	{"http://gmail.com)", "http://gmail.com"},
-	{"http://gmail.com))", "http://gmail.com"},
-	{"http://gmail.com ((http://gmail.com)) ()", "http://gmail.com"},
-	{"http://example.com/ quux!", "http://example.com/"},
-	{"http://example.com/%2f/ /world.", "http://example.com/%2f/"},
-	{"http: ipsum //host/path", ""},
-	{"javascript://is/not/linked", ""},
-	{"http://foo", "http://foo"},
-	{"https://www.example.com/person/][Person Name]]", "https://www.example.com/person/"},
-	{"http://golang.org/)", "http://golang.org/"},
-	{"http://golang.org/hello())", "http://golang.org/hello()"},
-	{"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", "http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD"},
-	{"https://foo.bar/bal/x(])", "https://foo.bar/bal/x"}, // inner ] causes (]) to be cut off from URL
-	{"http://bar(])", "http://bar"},                       // same
-}
-
-func TestAutoURL(t *testing.T) {
-	for _, tt := range autoURLTests {
-		url, ok := autoURL(tt.in)
-		if url != tt.out || ok != (tt.out != "") {
-			t.Errorf("autoURL(%q) = %q, %v, want %q, %v", tt.in, url, ok, tt.out, tt.out != "")
-		}
-	}
-}
diff --git a/internal/backport/go/doc/comment/parse.go b/internal/backport/go/doc/comment/parse.go
deleted file mode 100644
index 5448ab3..0000000
--- a/internal/backport/go/doc/comment/parse.go
+++ /dev/null
@@ -1,1271 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"sort"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// A Doc is a parsed Go doc comment.
-type Doc struct {
-	// Content is the sequence of content blocks in the comment.
-	Content []Block
-
-	// Links is the link definitions in the comment.
-	Links []*LinkDef
-}
-
-// A LinkDef is a single link definition.
-type LinkDef struct {
-	Text string // the link text
-	URL  string // the link URL
-	Used bool   // whether the comment uses the definition
-}
-
-// A Block is block-level content in a doc comment,
-// one of [*Code], [*Heading], [*List], or [*Paragraph].
-type Block interface {
-	block()
-}
-
-// A Heading is a doc comment heading.
-type Heading struct {
-	Text []Text // the heading text
-}
-
-func (*Heading) block() {}
-
-// A List is a numbered or bullet list.
-// Lists are always non-empty: len(Items) > 0.
-// In a numbered list, every Items[i].Number is a non-empty string.
-// In a bullet list, every Items[i].Number is an empty string.
-type List struct {
-	// Items is the list items.
-	Items []*ListItem
-
-	// ForceBlankBefore indicates that the list must be
-	// preceded by a blank line when reformatting the comment,
-	// overriding the usual conditions. See the BlankBefore method.
-	//
-	// The comment parser sets ForceBlankBefore for any list
-	// that is preceded by a blank line, to make sure
-	// the blank line is preserved when printing.
-	ForceBlankBefore bool
-
-	// ForceBlankBetween indicates that list items must be
-	// separated by blank lines when reformatting the comment,
-	// overriding the usual conditions. See the BlankBetween method.
-	//
-	// The comment parser sets ForceBlankBetween for any list
-	// that has a blank line between any two of its items, to make sure
-	// the blank lines are preserved when printing.
-	ForceBlankBetween bool
-}
-
-func (*List) block() {}
-
-// BlankBefore reports whether a reformatting of the comment
-// should include a blank line before the list.
-// The default rule is the same as for [BlankBetween]:
-// if the list item content contains any blank lines
-// (meaning at least one item has multiple paragraphs)
-// then the list itself must be preceded by a blank line.
-// A preceding blank line can be forced by setting [List].ForceBlankBefore.
-func (l *List) BlankBefore() bool {
-	return l.ForceBlankBefore || l.BlankBetween()
-}
-
-// BlankBetween reports whether a reformatting of the comment
-// should include a blank line between each pair of list items.
-// The default rule is that if the list item content contains any blank lines
-// (meaning at least one item has multiple paragraphs)
-// then list items must themselves be separated by blank lines.
-// Blank line separators can be forced by setting [List].ForceBlankBetween.
-func (l *List) BlankBetween() bool {
-	if l.ForceBlankBetween {
-		return true
-	}
-	for _, item := range l.Items {
-		if len(item.Content) != 1 {
-			// Unreachable for parsed comments today,
-			// since the only way to get multiple item.Content
-			// is multiple paragraphs, which must have been
-			// separated by a blank line.
-			return true
-		}
-	}
-	return false
-}
-
-// A ListItem is a single item in a numbered or bullet list.
-type ListItem struct {
-	// Number is a decimal string in a numbered list
-	// or an empty string in a bullet list.
-	Number string // "1", "2", ...; "" for bullet list
-
-	// Content is the list content.
-	// Currently, restrictions in the parser and printer
-	// require every element of Content to be a *Paragraph.
-	Content []Block // Content of this item.
-}
-
-// A Paragraph is a paragraph of text.
-type Paragraph struct {
-	Text []Text
-}
-
-func (*Paragraph) block() {}
-
-// A Code is a preformatted code block.
-type Code struct {
-	// Text is the preformatted text, ending with a newline character.
-	// It may be multiple lines, each of which ends with a newline character.
-	// It is never empty, nor does it start or end with a blank line.
-	Text string
-}
-
-func (*Code) block() {}
-
-// A Text is text-level content in a doc comment,
-// one of [Plain], [Italic], [*Link], or [*DocLink].
-type Text interface {
-	text()
-}
-
-// A Plain is a string rendered as plain text (not italicized).
-type Plain string
-
-func (Plain) text() {}
-
-// An Italic is a string rendered as italicized text.
-type Italic string
-
-func (Italic) text() {}
-
-// A Link is a link to a specific URL.
-type Link struct {
-	Auto bool   // is this an automatic (implicit) link of a literal URL?
-	Text []Text // text of link
-	URL  string // target URL of link
-}
-
-func (*Link) text() {}
-
-// A DocLink is a link to documentation for a Go package or symbol.
-type DocLink struct {
-	Text []Text // text of link
-
-	// ImportPath, Recv, and Name identify the Go package or symbol
-	// that is the link target. The potential combinations of
-	// non-empty fields are:
-	//  - ImportPath: a link to another package
-	//  - ImportPath, Name: a link to a const, func, type, or var in another package
-	//  - ImportPath, Recv, Name: a link to a method in another package
-	//  - Name: a link to a const, func, type, or var in this package
-	//  - Recv, Name: a link to a method in this package
-	ImportPath string // import path
-	Recv       string // receiver type, without any pointer star, for methods
-	Name       string // const, func, type, var, or method name
-}
-
-func (*DocLink) text() {}
-
-// A Parser is a doc comment parser.
-// The fields in the struct can be filled in before calling Parse
-// in order to customize the details of the parsing process.
-type Parser struct {
-	// Words is a map of Go identifier words that
-	// should be italicized and potentially linked.
-	// If Words[w] is the empty string, then the word w
-	// is only italicized. Otherwise it is linked, using
-	// Words[w] as the link target.
-	// Words corresponds to the [go/doc.ToHTML] words parameter.
-	Words map[string]string
-
-	// LookupPackage resolves a package name to an import path.
-	//
-	// If LookupPackage(name) returns ok == true, then [name]
-	// (or [name.Sym] or [name.Sym.Method])
-	// is considered a documentation link to importPath's package docs.
-	// It is valid to return "", true, in which case name is considered
-	// to refer to the current package.
-	//
-	// If LookupPackage(name) returns ok == false,
-	// then [name] (or [name.Sym] or [name.Sym.Method])
-	// will not be considered a documentation link,
-	// except in the case where name is the full (but single-element) import path
-	// of a package in the standard library, such as in [math] or [io.Reader].
-	// LookupPackage is still called for such names,
-	// in order to permit references to imports of other packages
-	// with the same package names.
-	//
-	// Setting LookupPackage to nil is equivalent to setting it to
-	// a function that always returns "", false.
-	LookupPackage func(name string) (importPath string, ok bool)
-
-	// LookupSym reports whether a symbol name or method name
-	// exists in the current package.
-	//
-	// If LookupSym("", "Name") returns true, then [Name]
-	// is considered a documentation link for a const, func, type, or var.
-	//
-	// Similarly, if LookupSym("Recv", "Name") returns true,
-	// then [Recv.Name] is considered a documentation link for
-	// type Recv's method Name.
-	//
-	// Setting LookupSym to nil is equivalent to setting it to a function
-	// that always returns false.
-	LookupSym func(recv, name string) (ok bool)
-}
-
-// parseDoc is parsing state for a single doc comment.
-type parseDoc struct {
-	*Parser
-	*Doc
-	links     map[string]*LinkDef
-	lines     []string
-	lookupSym func(recv, name string) bool
-}
-
-// lookupPkg is called to look up the pkg in [pkg], [pkg.Name], and [pkg.Name.Recv].
-// If pkg has a slash, it is assumed to be the full import path and is returned with ok = true.
-//
-// Otherwise, pkg is probably a simple package name like "rand" (not "crypto/rand" or "math/rand").
-// d.LookupPackage provides a way for the caller to allow resolving such names with reference
-// to the imports in the surrounding package.
-//
-// There is one collision between these two cases: single-element standard library names
-// like "math" are full import paths but don't contain slashes. We let d.LookupPackage have
-// the first chance to resolve it, in case there's a different package imported as math,
-// and otherwise we refer to a built-in list of single-element standard library package names.
-func (d *parseDoc) lookupPkg(pkg string) (importPath string, ok bool) {
-	if strings.Contains(pkg, "/") { // assume a full import path
-		if validImportPath(pkg) {
-			return pkg, true
-		}
-		return "", false
-	}
-	if d.LookupPackage != nil {
-		// Give LookupPackage a chance.
-		if path, ok := d.LookupPackage(pkg); ok {
-			return path, true
-		}
-	}
-	return DefaultLookupPackage(pkg)
-}
-
-func isStdPkg(path string) bool {
-	// TODO(rsc): Use sort.Find once we don't have to worry about
-	// copying this code into older Go environments.
-	i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path })
-	return i < len(stdPkgs) && stdPkgs[i] == path
-}
-
-// DefaultLookupPackage is the default package lookup
-// function, used when [Parser].LookupPackage is nil.
-// It recognizes names of the packages from the standard
-// library with single-element import paths, such as math,
-// which would otherwise be impossible to name.
-//
-// Note that the go/doc package provides a more sophisticated
-// lookup based on the imports used in the current package.
-func DefaultLookupPackage(name string) (importPath string, ok bool) {
-	if isStdPkg(name) {
-		return name, true
-	}
-	return "", false
-}
-
-// Parse parses the doc comment text and returns the *Doc form.
-// Comment markers (/* // and */) in the text must have already been removed.
-func (p *Parser) Parse(text string) *Doc {
-	lines := unindent(strings.Split(text, "\n"))
-	d := &parseDoc{
-		Parser:    p,
-		Doc:       new(Doc),
-		links:     make(map[string]*LinkDef),
-		lines:     lines,
-		lookupSym: func(recv, name string) bool { return false },
-	}
-	if p.LookupSym != nil {
-		d.lookupSym = p.LookupSym
-	}
-
-	// First pass: break into block structure and collect known links.
-	// The text is all recorded as Plain for now.
-	var prev span
-	for _, s := range parseSpans(lines) {
-		var b Block
-		switch s.kind {
-		default:
-			panic("go/doc/comment: internal error: unknown span kind")
-		case spanList:
-			b = d.list(lines[s.start:s.end], prev.end < s.start)
-		case spanCode:
-			b = d.code(lines[s.start:s.end])
-		case spanOldHeading:
-			b = d.oldHeading(lines[s.start])
-		case spanHeading:
-			b = d.heading(lines[s.start])
-		case spanPara:
-			b = d.paragraph(lines[s.start:s.end])
-		}
-		if b != nil {
-			d.Content = append(d.Content, b)
-		}
-		prev = s
-	}
-
-	// Second pass: interpret all the Plain text now that we know the links.
-	for _, b := range d.Content {
-		switch b := b.(type) {
-		case *Paragraph:
-			b.Text = d.parseLinkedText(string(b.Text[0].(Plain)))
-		case *List:
-			for _, i := range b.Items {
-				for _, c := range i.Content {
-					p := c.(*Paragraph)
-					p.Text = d.parseLinkedText(string(p.Text[0].(Plain)))
-				}
-			}
-		}
-	}
-
-	return d.Doc
-}
-
-// A span represents a single span of comment lines (lines[start:end])
-// of an identified kind (code, heading, paragraph, and so on).
-type span struct {
-	start int
-	end   int
-	kind  spanKind
-}
-
-// A spanKind describes the kind of span.
-type spanKind int
-
-const (
-	_ spanKind = iota
-	spanCode
-	spanHeading
-	spanList
-	spanOldHeading
-	spanPara
-)
-
-func parseSpans(lines []string) []span {
-	var spans []span
-
-	// The loop may process a line twice: once as unindented
-	// and again forced indented. So the maximum expected
-	// number of iterations is 2*len(lines). The repeating logic
-	// can be subtle, though, and to protect against introduction
-	// of infinite loops in future changes, we watch to see that
-	// we are not looping too much. A panic is better than a
-	// quiet infinite loop.
-	watchdog := 2 * len(lines)
-
-	i := 0
-	forceIndent := 0
-Spans:
-	for {
-		// Skip blank lines.
-		for i < len(lines) && lines[i] == "" {
-			i++
-		}
-		if i >= len(lines) {
-			break
-		}
-		if watchdog--; watchdog < 0 {
-			panic("go/doc/comment: internal error: not making progress")
-		}
-
-		var kind spanKind
-		start := i
-		end := i
-		if i < forceIndent || indented(lines[i]) {
-			// Indented (or force indented).
-			// Ends before next unindented. (Blank lines are OK.)
-			// If this is an unindented list that we are heuristically treating as indented,
-			// then accept unindented list item lines up to the first blank lines.
-			// The heuristic is disabled at blank lines to contain its effect
-			// to non-gofmt'ed sections of the comment.
-			unindentedListOK := isList(lines[i]) && i < forceIndent
-			i++
-			for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) {
-				if lines[i] == "" {
-					unindentedListOK = false
-				}
-				i++
-			}
-
-			// Drop trailing blank lines.
-			end = i
-			for end > start && lines[end-1] == "" {
-				end--
-			}
-
-			// If indented lines are followed (without a blank line)
-			// by an unindented line ending in a brace,
-			// take that one line too. This fixes the common mistake
-			// of pasting in something like
-			//
-			// func main() {
-			//	fmt.Println("hello, world")
-			// }
-			//
-			// and forgetting to indent it.
-			// The heuristic will never trigger on a gofmt'ed comment,
-			// because any gofmt'ed code block or list would be
-			// followed by a blank line or end of comment.
-			if end < len(lines) && strings.HasPrefix(lines[end], "}") {
-				end++
-			}
-
-			if isList(lines[start]) {
-				kind = spanList
-			} else {
-				kind = spanCode
-			}
-		} else {
-			// Unindented. Ends at next blank or indented line.
-			i++
-			for i < len(lines) && lines[i] != "" && !indented(lines[i]) {
-				i++
-			}
-			end = i
-
-			// If unindented lines are followed (without a blank line)
-			// by an indented line that would start a code block,
-			// check whether the final unindented lines
-			// should be left for the indented section.
-			// This can happen for the common mistakes of
-			// unindented code or unindented lists.
-			// The heuristic will never trigger on a gofmt'ed comment,
-			// because any gofmt'ed code block would have a blank line
-			// preceding it after the unindented lines.
-			if i < len(lines) && lines[i] != "" && !isList(lines[i]) {
-				switch {
-				case isList(lines[i-1]):
-					// If the final unindented line looks like a list item,
-					// this may be the first indented line wrap of
-					// a mistakenly unindented list.
-					// Leave all the unindented list items.
-					forceIndent = end
-					end--
-					for end > start && isList(lines[end-1]) {
-						end--
-					}
-
-				case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`):
-					// If the final unindented line ended in { or \
-					// it is probably the start of a misindented code block.
-					// Give the user a single line fix.
-					// Often that's enough; if not, the user can fix the others themselves.
-					forceIndent = end
-					end--
-				}
-
-				if start == end && forceIndent > start {
-					i = start
-					continue Spans
-				}
-			}
-
-			// Span is either paragraph or heading.
-			if end-start == 1 && isHeading(lines[start]) {
-				kind = spanHeading
-			} else if end-start == 1 && isOldHeading(lines[start], lines, start) {
-				kind = spanOldHeading
-			} else {
-				kind = spanPara
-			}
-		}
-
-		spans = append(spans, span{start, end, kind})
-		i = end
-	}
-
-	return spans
-}
-
-// indented reports whether line is indented
-// (starts with a leading space or tab).
-func indented(line string) bool {
-	return line != "" && (line[0] == ' ' || line[0] == '\t')
-}
-
-// unindent removes any common space/tab prefix
-// from each line in lines, returning a copy of lines in which
-// those prefixes have been trimmed from each line.
-// It also replaces any lines containing only spaces with blank lines (empty strings).
-func unindent(lines []string) []string {
-	// Trim leading and trailing blank lines.
-	for len(lines) > 0 && isBlank(lines[0]) {
-		lines = lines[1:]
-	}
-	for len(lines) > 0 && isBlank(lines[len(lines)-1]) {
-		lines = lines[:len(lines)-1]
-	}
-	if len(lines) == 0 {
-		return nil
-	}
-
-	// Compute and remove common indentation.
-	prefix := leadingSpace(lines[0])
-	for _, line := range lines[1:] {
-		if !isBlank(line) {
-			prefix = commonPrefix(prefix, leadingSpace(line))
-		}
-	}
-
-	out := make([]string, len(lines))
-	for i, line := range lines {
-		line = strings.TrimPrefix(line, prefix)
-		if strings.TrimSpace(line) == "" {
-			line = ""
-		}
-		out[i] = line
-	}
-	for len(out) > 0 && out[0] == "" {
-		out = out[1:]
-	}
-	for len(out) > 0 && out[len(out)-1] == "" {
-		out = out[:len(out)-1]
-	}
-	return out
-}
-
-// isBlank reports whether s is a blank line.
-func isBlank(s string) bool {
-	return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
-}
-
-// commonPrefix returns the longest common prefix of a and b.
-func commonPrefix(a, b string) string {
-	i := 0
-	for i < len(a) && i < len(b) && a[i] == b[i] {
-		i++
-	}
-	return a[0:i]
-}
-
-// leadingSpace returns the longest prefix of s consisting of spaces and tabs.
-func leadingSpace(s string) string {
-	i := 0
-	for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
-		i++
-	}
-	return s[:i]
-}
-
-// isOldHeading reports whether line is an old-style section heading.
-// line is all[off].
-func isOldHeading(line string, all []string, off int) bool {
-	if off <= 0 || all[off-1] != "" || off+2 >= len(all) || all[off+1] != "" || leadingSpace(all[off+2]) != "" {
-		return false
-	}
-
-	line = strings.TrimSpace(line)
-
-	// a heading must start with an uppercase letter
-	r, _ := utf8.DecodeRuneInString(line)
-	if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
-		return false
-	}
-
-	// it must end in a letter or digit:
-	r, _ = utf8.DecodeLastRuneInString(line)
-	if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
-		return false
-	}
-
-	// exclude lines with illegal characters. we allow "(),"
-	if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") {
-		return false
-	}
-
-	// allow "'" for possessive "'s" only
-	for b := line; ; {
-		var ok bool
-		if _, b, ok = stringsCut(b, "'"); !ok {
-			break
-		}
-		if b != "s" && !strings.HasPrefix(b, "s ") {
-			return false // ' not followed by s and then end-of-word
-		}
-	}
-
-	// allow "." when followed by non-space
-	for b := line; ; {
-		var ok bool
-		if _, b, ok = stringsCut(b, "."); !ok {
-			break
-		}
-		if b == "" || strings.HasPrefix(b, " ") {
-			return false // not followed by non-space
-		}
-	}
-
-	return true
-}
-
-// oldHeading returns the *Heading for the given old-style section heading line.
-func (d *parseDoc) oldHeading(line string) Block {
-	return &Heading{Text: []Text{Plain(strings.TrimSpace(line))}}
-}
-
-// isHeading reports whether line is a new-style section heading.
-func isHeading(line string) bool {
-	return len(line) >= 2 &&
-		line[0] == '#' &&
-		(line[1] == ' ' || line[1] == '\t') &&
-		strings.TrimSpace(line) != "#"
-}
-
-// heading returns the *Heading for the given new-style section heading line.
-func (d *parseDoc) heading(line string) Block {
-	return &Heading{Text: []Text{Plain(strings.TrimSpace(line[1:]))}}
-}
-
-// code returns a code block built from the lines.
-func (d *parseDoc) code(lines []string) *Code {
-	body := unindent(lines)
-	body = append(body, "") // to get final \n from Join
-	return &Code{Text: strings.Join(body, "\n")}
-}
-
-// paragraph returns a paragraph block built from the lines.
-// If the lines are link definitions, paragraph adds them to d and returns nil.
-func (d *parseDoc) paragraph(lines []string) Block {
-	// Is this a block of known links? Handle.
-	var defs []*LinkDef
-	for _, line := range lines {
-		def, ok := parseLink(line)
-		if !ok {
-			goto NoDefs
-		}
-		defs = append(defs, def)
-	}
-	for _, def := range defs {
-		d.Links = append(d.Links, def)
-		if d.links[def.Text] == nil {
-			d.links[def.Text] = def
-		}
-	}
-	return nil
-NoDefs:
-
-	return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}}
-}
-
-// parseLink parses a single link definition line:
-//
-//	[text]: url
-//
-// It returns the link definition and whether the line was well formed.
-func parseLink(line string) (*LinkDef, bool) {
-	if line == "" || line[0] != '[' {
-		return nil, false
-	}
-	i := strings.Index(line, "]:")
-	if i < 0 || i+3 >= len(line) || (line[i+2] != ' ' && line[i+2] != '\t') {
-		return nil, false
-	}
-
-	text := line[1:i]
-	url := strings.TrimSpace(line[i+3:])
-	j := strings.Index(url, "://")
-	if j < 0 || !isScheme(url[:j]) {
-		return nil, false
-	}
-
-	// Line has right form and has valid scheme://.
-	// That's good enough for us - we are not as picky
-	// about the characters beyond the :// as we are
-	// when extracting inline URLs from text.
-	return &LinkDef{Text: text, URL: url}, true
-}
-
-// list returns a list built from the indented lines,
-// using forceBlankBefore as the value of the List's ForceBlankBefore field.
-func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List {
-	num, _, _ := listMarker(lines[0])
-	var (
-		list *List = &List{ForceBlankBefore: forceBlankBefore}
-		item *ListItem
-		text []string
-	)
-	flush := func() {
-		if item != nil {
-			if para := d.paragraph(text); para != nil {
-				item.Content = append(item.Content, para)
-			}
-		}
-		text = nil
-	}
-
-	for _, line := range lines {
-		if n, after, ok := listMarker(line); ok && (n != "") == (num != "") {
-			// start new list item
-			flush()
-
-			item = &ListItem{Number: n}
-			list.Items = append(list.Items, item)
-			line = after
-		}
-		line = strings.TrimSpace(line)
-		if line == "" {
-			list.ForceBlankBetween = true
-			flush()
-			continue
-		}
-		text = append(text, strings.TrimSpace(line))
-	}
-	flush()
-	return list
-}
-
-// listMarker parses the line as beginning with a list marker.
-// If it can do that, it returns the numeric marker ("" for a bullet list),
-// the rest of the line, and ok == true.
-// Otherwise, it returns "", "", false.
-func listMarker(line string) (num, rest string, ok bool) {
-	line = strings.TrimSpace(line)
-	if line == "" {
-		return "", "", false
-	}
-
-	// Can we find a marker?
-	if r, n := utf8.DecodeRuneInString(line); r == '•' || r == '*' || r == '+' || r == '-' {
-		num, rest = "", line[n:]
-	} else if '0' <= line[0] && line[0] <= '9' {
-		n := 1
-		for n < len(line) && '0' <= line[n] && line[n] <= '9' {
-			n++
-		}
-		if n >= len(line) || (line[n] != '.' && line[n] != ')') {
-			return "", "", false
-		}
-		num, rest = line[:n], line[n+1:]
-	} else {
-		return "", "", false
-	}
-
-	if !indented(rest) || strings.TrimSpace(rest) == "" {
-		return "", "", false
-	}
-
-	return num, rest, true
-}
-
-// isList reports whether the line is the first line of a list,
-// meaning starts with a list marker after any indentation.
-// (The caller is responsible for checking the line is indented, as appropriate.)
-func isList(line string) bool {
-	_, _, ok := listMarker(line)
-	return ok
-}
-
-// parseLinkedText parses text that is allowed to contain explicit links,
-// such as [math.Sin] or [Go home page], into a slice of Text items.
-//
-// A “pkg” is only assumed to be a full import path if it starts with
-// a domain name (a path element with a dot) or is one of the packages
-// from the standard library (“[os]”, “[encoding/json]”, and so on).
-// To avoid problems with maps, generics, and array types, doc links
-// must be both preceded and followed by punctuation, spaces, tabs,
-// or the start or end of a line. An example problem would be treating
-// map[ast.Expr]TypeAndValue as containing a link.
-func (d *parseDoc) parseLinkedText(text string) []Text {
-	var out []Text
-	wrote := 0
-	flush := func(i int) {
-		if wrote < i {
-			out = d.parseText(out, text[wrote:i], true)
-			wrote = i
-		}
-	}
-
-	start := -1
-	var buf []byte
-	for i := 0; i < len(text); i++ {
-		c := text[i]
-		if c == '\n' || c == '\t' {
-			c = ' '
-		}
-		switch c {
-		case '[':
-			start = i
-		case ']':
-			if start >= 0 {
-				if def, ok := d.links[string(buf)]; ok {
-					def.Used = true
-					flush(start)
-					out = append(out, &Link{
-						Text: d.parseText(nil, text[start+1:i], false),
-						URL:  def.URL,
-					})
-					wrote = i + 1
-				} else if link, ok := d.docLink(text[start+1:i], text[:start], text[i+1:]); ok {
-					flush(start)
-					link.Text = d.parseText(nil, text[start+1:i], false)
-					out = append(out, link)
-					wrote = i + 1
-				}
-			}
-			start = -1
-			buf = buf[:0]
-		}
-		if start >= 0 && i != start {
-			buf = append(buf, c)
-		}
-	}
-
-	flush(len(text))
-	return out
-}
-
-// docLink parses text, which was found inside [ ] brackets,
-// as a doc link if possible, returning the DocLink and ok == true
-// or else nil, false.
-// The before and after strings are the text before the [ and after the ]
-// on the same line. Doc links must be preceded and followed by
-// punctuation, spaces, tabs, or the start or end of a line.
-func (d *parseDoc) docLink(text, before, after string) (link *DocLink, ok bool) {
-	if before != "" {
-		r, _ := utf8.DecodeLastRuneInString(before)
-		if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' {
-			return nil, false
-		}
-	}
-	if after != "" {
-		r, _ := utf8.DecodeRuneInString(after)
-		if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' {
-			return nil, false
-		}
-	}
-	if strings.HasPrefix(text, "*") {
-		text = text[1:]
-	}
-	pkg, name, ok := splitDocName(text)
-	var recv string
-	if ok {
-		pkg, recv, _ = splitDocName(pkg)
-	}
-	if pkg != "" {
-		if pkg, ok = d.lookupPkg(pkg); !ok {
-			return nil, false
-		}
-	} else {
-		if ok = d.lookupSym(recv, name); !ok {
-			return nil, false
-		}
-	}
-	link = &DocLink{
-		ImportPath: pkg,
-		Recv:       recv,
-		Name:       name,
-	}
-	return link, true
-}
-
-// If text is of the form before.Name, where Name is a capitalized Go identifier,
-// then splitDocName returns before, name, true.
-// Otherwise it returns text, "", false.
-func splitDocName(text string) (before, name string, foundDot bool) {
-	i := strings.LastIndex(text, ".")
-	name = text[i+1:]
-	if !isName(name) {
-		return text, "", false
-	}
-	if i >= 0 {
-		before = text[:i]
-	}
-	return before, name, true
-}
-
-// parseText parses s as text and returns the result of appending
-// those parsed Text elements to out.
-// parseText does not handle explicit links like [math.Sin] or [Go home page]:
-// those are handled by parseLinkedText.
-// If autoLink is true, then parseText recognizes URLs and words from d.Words
-// and converts those to links as appropriate.
-func (d *parseDoc) parseText(out []Text, s string, autoLink bool) []Text {
-	var w strings.Builder
-	wrote := 0
-	writeUntil := func(i int) {
-		w.WriteString(s[wrote:i])
-		wrote = i
-	}
-	flush := func(i int) {
-		writeUntil(i)
-		if w.Len() > 0 {
-			out = append(out, Plain(w.String()))
-			w.Reset()
-		}
-	}
-	for i := 0; i < len(s); {
-		t := s[i:]
-		if autoLink {
-			if url, ok := autoURL(t); ok {
-				flush(i)
-				// Note: The old comment parser would look up the URL in words
-				// and replace the target with words[URL] if it was non-empty.
-				// That would allow creating links that display as one URL but
-				// when clicked go to a different URL. Not sure what the point
-				// of that is, so we're not doing that lookup here.
-				out = append(out, &Link{Auto: true, Text: []Text{Plain(url)}, URL: url})
-				i += len(url)
-				wrote = i
-				continue
-			}
-			if id, ok := ident(t); ok {
-				url, italics := d.Words[id]
-				if !italics {
-					i += len(id)
-					continue
-				}
-				flush(i)
-				if url == "" {
-					out = append(out, Italic(id))
-				} else {
-					out = append(out, &Link{Auto: true, Text: []Text{Italic(id)}, URL: url})
-				}
-				i += len(id)
-				wrote = i
-				continue
-			}
-		}
-		switch {
-		case strings.HasPrefix(t, "``"):
-			if len(t) >= 3 && t[2] == '`' {
-				// Do not convert `` inside ```, in case people are mistakenly writing Markdown.
-				i += 3
-				for i < len(t) && t[i] == '`' {
-					i++
-				}
-				break
-			}
-			writeUntil(i)
-			w.WriteRune('“')
-			i += 2
-			wrote = i
-		case strings.HasPrefix(t, "''"):
-			writeUntil(i)
-			w.WriteRune('”')
-			i += 2
-			wrote = i
-		default:
-			i++
-		}
-	}
-	flush(len(s))
-	return out
-}
-
-// autoURL checks whether s begins with a URL that should be hyperlinked.
-// If so, it returns the URL, which is a prefix of s, and ok == true.
-// Otherwise it returns "", false.
-// The caller should skip over the first len(url) bytes of s
-// before further processing.
-func autoURL(s string) (url string, ok bool) {
-	// Find the ://. Fast path to pick off non-URL,
-	// since we call this at every position in the string.
-	// The shortest possible URL is ftp://x, 7 bytes.
-	var i int
-	switch {
-	case len(s) < 7:
-		return "", false
-	case s[3] == ':':
-		i = 3
-	case s[4] == ':':
-		i = 4
-	case s[5] == ':':
-		i = 5
-	case s[6] == ':':
-		i = 6
-	default:
-		return "", false
-	}
-	if i+3 > len(s) || s[i:i+3] != "://" {
-		return "", false
-	}
-
-	// Check valid scheme.
-	if !isScheme(s[:i]) {
-		return "", false
-	}
-
-	// Scan host part. Must have at least one byte,
-	// and must start and end in non-punctuation.
-	i += 3
-	if i >= len(s) || !isHost(s[i]) || isPunct(s[i]) {
-		return "", false
-	}
-	i++
-	end := i
-	for i < len(s) && isHost(s[i]) {
-		if !isPunct(s[i]) {
-			end = i + 1
-		}
-		i++
-	}
-	i = end
-
-	// At this point we are definitely returning a URL (scheme://host).
-	// We just have to find the longest path we can add to it.
-	// Heuristics abound.
-	// We allow parens, braces, and brackets,
-	// but only if they match (#5043, #22285).
-	// We allow .,:;?! in the path but not at the end,
-	// to avoid end-of-sentence punctuation (#18139, #16565).
-	stk := []byte{}
-	end = i
-Path:
-	for ; i < len(s); i++ {
-		if isPunct(s[i]) {
-			continue
-		}
-		if !isPath(s[i]) {
-			break
-		}
-		switch s[i] {
-		case '(':
-			stk = append(stk, ')')
-		case '{':
-			stk = append(stk, '}')
-		case '[':
-			stk = append(stk, ']')
-		case ')', '}', ']':
-			if len(stk) == 0 || stk[len(stk)-1] != s[i] {
-				break Path
-			}
-			stk = stk[:len(stk)-1]
-		}
-		if len(stk) == 0 {
-			end = i + 1
-		}
-	}
-
-	return s[:end], true
-}
-
-// isScheme reports whether s is a recognized URL scheme.
-// Note that if strings of new length (beyond 3-7)
-// are added here, the fast path at the top of autoURL will need updating.
-func isScheme(s string) bool {
-	switch s {
-	case "file",
-		"ftp",
-		"gopher",
-		"http",
-		"https",
-		"mailto",
-		"nntp":
-		return true
-	}
-	return false
-}
-
-// isHost reports whether c is a byte that can appear in a URL host,
-// like www.example.com or user@[::1]:8080
-func isHost(c byte) bool {
-	// mask is a 128-bit bitmap with 1s for allowed bytes,
-	// so that the byte c can be tested with a shift and an and.
-	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
-	// and this function will return false.
-	const mask = 0 |
-		(1<<26-1)<<'A' |
-		(1<<26-1)<<'a' |
-		(1<<10-1)<<'0' |
-		1<<'_' |
-		1<<'@' |
-		1<<'-' |
-		1<<'.' |
-		1<<'[' |
-		1<<']' |
-		1<<':'
-
-	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
-		(uint64(1)<<(c-64))&(mask>>64)) != 0
-}
-
-// isPunct reports whether c is a punctuation byte that can appear
-// inside a path but not at the end.
-func isPunct(c byte) bool {
-	// mask is a 128-bit bitmap with 1s for allowed bytes,
-	// so that the byte c can be tested with a shift and an and.
-	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
-	// and this function will return false.
-	const mask = 0 |
-		1<<'.' |
-		1<<',' |
-		1<<':' |
-		1<<';' |
-		1<<'?' |
-		1<<'!'
-
-	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
-		(uint64(1)<<(c-64))&(mask>>64)) != 0
-}
-
-// isPath reports whether c is a (non-punctuation) path byte.
-func isPath(c byte) bool {
-	// mask is a 128-bit bitmap with 1s for allowed bytes,
-	// so that the byte c can be tested with a shift and an and.
-	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
-	// and this function will return false.
-	const mask = 0 |
-		(1<<26-1)<<'A' |
-		(1<<26-1)<<'a' |
-		(1<<10-1)<<'0' |
-		1<<'$' |
-		1<<'\'' |
-		1<<'(' |
-		1<<')' |
-		1<<'*' |
-		1<<'+' |
-		1<<'&' |
-		1<<'#' |
-		1<<'=' |
-		1<<'@' |
-		1<<'~' |
-		1<<'_' |
-		1<<'/' |
-		1<<'-' |
-		1<<'[' |
-		1<<']' |
-		1<<'{' |
-		1<<'}' |
-		1<<'%'
-
-	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
-		(uint64(1)<<(c-64))&(mask>>64)) != 0
-}
-
-// isName reports whether s is a capitalized Go identifier (like Name).
-func isName(s string) bool {
-	t, ok := ident(s)
-	if !ok || t != s {
-		return false
-	}
-	r, _ := utf8.DecodeRuneInString(s)
-	return unicode.IsUpper(r)
-}
-
-// ident checks whether s begins with a Go identifier.
-// If so, it returns the identifier, which is a prefix of s, and ok == true.
-// Otherwise it returns "", false.
-// The caller should skip over the first len(id) bytes of s
-// before further processing.
-func ident(s string) (id string, ok bool) {
-	// Scan [\pL_][\pL_0-9]*
-	n := 0
-	for n < len(s) {
-		if c := s[n]; c < utf8.RuneSelf {
-			if isIdentASCII(c) && (n > 0 || c < '0' || c > '9') {
-				n++
-				continue
-			}
-			break
-		}
-		r, nr := utf8.DecodeRuneInString(s[n:])
-		if unicode.IsLetter(r) {
-			n += nr
-			continue
-		}
-		break
-	}
-	return s[:n], n > 0
-}
-
-// isIdentASCII reports whether c is an ASCII identifier byte.
-func isIdentASCII(c byte) bool {
-	// mask is a 128-bit bitmap with 1s for allowed bytes,
-	// so that the byte c can be tested with a shift and an and.
-	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
-	// and this function will return false.
-	const mask = 0 |
-		(1<<26-1)<<'A' |
-		(1<<26-1)<<'a' |
-		(1<<10-1)<<'0' |
-		1<<'_'
-
-	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
-		(uint64(1)<<(c-64))&(mask>>64)) != 0
-}
-
-// validImportPath reports whether path is a valid import path.
-// It is a lightly edited copy of golang.org/x/mod/module.CheckImportPath.
-func validImportPath(path string) bool {
-	if !utf8.ValidString(path) {
-		return false
-	}
-	if path == "" {
-		return false
-	}
-	if path[0] == '-' {
-		return false
-	}
-	if strings.Contains(path, "//") {
-		return false
-	}
-	if path[len(path)-1] == '/' {
-		return false
-	}
-	elemStart := 0
-	for i, r := range path {
-		if r == '/' {
-			if !validImportPathElem(path[elemStart:i]) {
-				return false
-			}
-			elemStart = i + 1
-		}
-	}
-	return validImportPathElem(path[elemStart:])
-}
-
-func validImportPathElem(elem string) bool {
-	if elem == "" || elem[0] == '.' || elem[len(elem)-1] == '.' {
-		return false
-	}
-	for i := 0; i < len(elem); i++ {
-		if !importPathOK(elem[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-func importPathOK(c byte) bool {
-	// mask is a 128-bit bitmap with 1s for allowed bytes,
-	// so that the byte c can be tested with a shift and an and.
-	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
-	// and this function will return false.
-	const mask = 0 |
-		(1<<26-1)<<'A' |
-		(1<<26-1)<<'a' |
-		(1<<10-1)<<'0' |
-		1<<'-' |
-		1<<'.' |
-		1<<'~' |
-		1<<'_' |
-		1<<'+'
-
-	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
-		(uint64(1)<<(c-64))&(mask>>64)) != 0
-}
-
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
diff --git a/internal/backport/go/doc/comment/parse_test.go b/internal/backport/go/doc/comment/parse_test.go
deleted file mode 100644
index bce733e..0000000
--- a/internal/backport/go/doc/comment/parse_test.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import "testing"
-
-// See https://golang.org/issue/52353
-func Test52353(t *testing.T) {
-	ident("𫕐ﯯ")
-}
diff --git a/internal/backport/go/doc/comment/print.go b/internal/backport/go/doc/comment/print.go
deleted file mode 100644
index 4a57b89..0000000
--- a/internal/backport/go/doc/comment/print.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-// A Printer is a doc comment printer.
-// The fields in the struct can be filled in before calling
-// any of the printing methods
-// in order to customize the details of the printing process.
-type Printer struct {
-	// HeadingLevel is the nesting level used for
-	// HTML and Markdown headings.
-	// If HeadingLevel is zero, it defaults to level 3,
-	// meaning to use <h3> and ###.
-	HeadingLevel int
-
-	// HeadingID is a function that computes the heading ID
-	// (anchor tag) to use for the heading h when generating
-	// HTML and Markdown. If HeadingID returns an empty string,
-	// then the heading ID is omitted.
-	// If HeadingID is nil, h.DefaultID is used.
-	HeadingID func(h *Heading) string
-
-	// DocLinkURL is a function that computes the URL for the given DocLink.
-	// If DocLinkURL is nil, then link.DefaultURL(p.DocLinkBaseURL) is used.
-	DocLinkURL func(link *DocLink) string
-
-	// DocLinkBaseURL is used when DocLinkURL is nil,
-	// passed to [DocLink.DefaultURL] to construct a DocLink's URL.
-	// See that method's documentation for details.
-	DocLinkBaseURL string
-
-	// TextPrefix is a prefix to print at the start of every line
-	// when generating text output using the Text method.
-	TextPrefix string
-
-	// TextCodePrefix is the prefix to print at the start of each
-	// preformatted (code block) line when generating text output,
-	// instead of (not in addition to) TextPrefix.
-	// If TextCodePrefix is the empty string, it defaults to TextPrefix+"\t".
-	TextCodePrefix string
-
-	// TextWidth is the maximum width text line to generate,
-	// measured in Unicode code points,
-	// excluding TextPrefix and the newline character.
-	// If TextWidth is zero, it defaults to 80 minus the number of code points in TextPrefix.
-	// If TextWidth is negative, there is no limit.
-	TextWidth int
-}
-
-func (p *Printer) headingLevel() int {
-	if p.HeadingLevel <= 0 {
-		return 3
-	}
-	return p.HeadingLevel
-}
-
-func (p *Printer) headingID(h *Heading) string {
-	if p.HeadingID == nil {
-		return h.DefaultID()
-	}
-	return p.HeadingID(h)
-}
-
-func (p *Printer) docLinkURL(link *DocLink) string {
-	if p.DocLinkURL != nil {
-		return p.DocLinkURL(link)
-	}
-	return link.DefaultURL(p.DocLinkBaseURL)
-}
-
-// DefaultURL constructs and returns the documentation URL for l,
-// using baseURL as a prefix for links to other packages.
-//
-// The possible forms returned by DefaultURL are:
-//   - baseURL/ImportPath, for a link to another package
-//   - baseURL/ImportPath#Name, for a link to a const, func, type, or var in another package
-//   - baseURL/ImportPath#Recv.Name, for a link to a method in another package
-//   - #Name, for a link to a const, func, type, or var in this package
-//   - #Recv.Name, for a link to a method in this package
-//
-// If baseURL ends in a trailing slash, then DefaultURL inserts
-// a slash between ImportPath and # in the anchored forms.
-// For example, here are some baseURL values and URLs they can generate:
-//
-//	"/pkg/" → "/pkg/math/#Sqrt"
-//	"/pkg"  → "/pkg/math#Sqrt"
-//	"/"     → "/math/#Sqrt"
-//	""      → "/math#Sqrt"
-func (l *DocLink) DefaultURL(baseURL string) string {
-	if l.ImportPath != "" {
-		slash := ""
-		if strings.HasSuffix(baseURL, "/") {
-			slash = "/"
-		} else {
-			baseURL += "/"
-		}
-		switch {
-		case l.Name == "":
-			return baseURL + l.ImportPath + slash
-		case l.Recv != "":
-			return baseURL + l.ImportPath + slash + "#" + l.Recv + "." + l.Name
-		default:
-			return baseURL + l.ImportPath + slash + "#" + l.Name
-		}
-	}
-	if l.Recv != "" {
-		return "#" + l.Recv + "." + l.Name
-	}
-	return "#" + l.Name
-}
-
-// DefaultID returns the default anchor ID for the heading h.
-//
-// The default anchor ID is constructed by converting every
-// rune that is not alphanumeric ASCII to an underscore
-// and then adding the prefix “hdr-”.
-// For example, if the heading text is “Go Doc Comments”,
-// the default ID is “hdr-Go_Doc_Comments”.
-func (h *Heading) DefaultID() string {
-	// Note: The “hdr-” prefix is important to avoid DOM clobbering attacks.
-	// See https://pkg.go.dev/github.com/google/safehtml#Identifier.
-	var out strings.Builder
-	var p textPrinter
-	p.oneLongLine(&out, h.Text)
-	s := strings.TrimSpace(out.String())
-	if s == "" {
-		return ""
-	}
-	out.Reset()
-	out.WriteString("hdr-")
-	for _, r := range s {
-		if r < 0x80 && isIdentASCII(byte(r)) {
-			out.WriteByte(byte(r))
-		} else {
-			out.WriteByte('_')
-		}
-	}
-	return out.String()
-}
-
-type commentPrinter struct {
-	*Printer
-	headingPrefix string
-	needDoc       map[string]bool
-}
-
-// Comment returns the standard Go formatting of the Doc,
-// without any comment markers.
-func (p *Printer) Comment(d *Doc) []byte {
-	cp := &commentPrinter{Printer: p}
-	var out bytes.Buffer
-	for i, x := range d.Content {
-		if i > 0 && blankBefore(x) {
-			out.WriteString("\n")
-		}
-		cp.block(&out, x)
-	}
-
-	// Print one block containing all the link definitions that were used,
-	// and then a second block containing all the unused ones.
-	// This makes it easy to clean up the unused ones: gofmt and
-	// delete the final block. And it's a nice visual signal without
-	// affecting the way the comment formats for users.
-	for i := 0; i < 2; i++ {
-		used := i == 0
-		first := true
-		for _, def := range d.Links {
-			if def.Used == used {
-				if first {
-					out.WriteString("\n")
-					first = false
-				}
-				out.WriteString("[")
-				out.WriteString(def.Text)
-				out.WriteString("]: ")
-				out.WriteString(def.URL)
-				out.WriteString("\n")
-			}
-		}
-	}
-
-	return out.Bytes()
-}
-
-// blankBefore reports whether the block x requires a blank line before it.
-// All blocks do, except for Lists that return false from x.BlankBefore().
-func blankBefore(x Block) bool {
-	if x, ok := x.(*List); ok {
-		return x.BlankBefore()
-	}
-	return true
-}
-
-// block prints the block x to out.
-func (p *commentPrinter) block(out *bytes.Buffer, x Block) {
-	switch x := x.(type) {
-	default:
-		fmt.Fprintf(out, "?%T", x)
-
-	case *Paragraph:
-		p.text(out, "", x.Text)
-		out.WriteString("\n")
-
-	case *Heading:
-		out.WriteString("# ")
-		p.text(out, "", x.Text)
-		out.WriteString("\n")
-
-	case *Code:
-		md := x.Text
-		for md != "" {
-			var line string
-			line, md, _ = stringsCut(md, "\n")
-			if line != "" {
-				out.WriteString("\t")
-				out.WriteString(line)
-			}
-			out.WriteString("\n")
-		}
-
-	case *List:
-		loose := x.BlankBetween()
-		for i, item := range x.Items {
-			if i > 0 && loose {
-				out.WriteString("\n")
-			}
-			out.WriteString(" ")
-			if item.Number == "" {
-				out.WriteString(" - ")
-			} else {
-				out.WriteString(item.Number)
-				out.WriteString(". ")
-			}
-			for i, blk := range item.Content {
-				const fourSpace = "    "
-				if i > 0 {
-					out.WriteString("\n" + fourSpace)
-				}
-				p.text(out, fourSpace, blk.(*Paragraph).Text)
-				out.WriteString("\n")
-			}
-		}
-	}
-}
-
-// text prints the text sequence x to out.
-func (p *commentPrinter) text(out *bytes.Buffer, indent string, x []Text) {
-	for _, t := range x {
-		switch t := t.(type) {
-		case Plain:
-			p.indent(out, indent, string(t))
-		case Italic:
-			p.indent(out, indent, string(t))
-		case *Link:
-			if t.Auto {
-				p.text(out, indent, t.Text)
-			} else {
-				out.WriteString("[")
-				p.text(out, indent, t.Text)
-				out.WriteString("]")
-			}
-		case *DocLink:
-			out.WriteString("[")
-			p.text(out, indent, t.Text)
-			out.WriteString("]")
-		}
-	}
-}
-
-// indent prints s to out, indenting with the indent string
-// after each newline in s.
-func (p *commentPrinter) indent(out *bytes.Buffer, indent, s string) {
-	for s != "" {
-		line, rest, ok := stringsCut(s, "\n")
-		out.WriteString(line)
-		if ok {
-			out.WriteString("\n")
-			out.WriteString(indent)
-		}
-		s = rest
-	}
-}
diff --git a/internal/backport/go/doc/comment/std.go b/internal/backport/go/doc/comment/std.go
deleted file mode 100644
index 71f15f4..0000000
--- a/internal/backport/go/doc/comment/std.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by 'go generate' DO NOT EDIT.
-//go:generate ./mkstd.sh
-
-package comment
-
-var stdPkgs = []string{
-	"bufio",
-	"bytes",
-	"context",
-	"crypto",
-	"embed",
-	"encoding",
-	"errors",
-	"expvar",
-	"flag",
-	"fmt",
-	"hash",
-	"html",
-	"image",
-	"io",
-	"log",
-	"math",
-	"mime",
-	"net",
-	"os",
-	"path",
-	"plugin",
-	"reflect",
-	"regexp",
-	"runtime",
-	"sort",
-	"strconv",
-	"strings",
-	"sync",
-	"syscall",
-	"testing",
-	"time",
-	"unicode",
-	"unsafe",
-}
diff --git a/internal/backport/go/doc/comment/std_test.go b/internal/backport/go/doc/comment/std_test.go
deleted file mode 100644
index 7f3aba8..0000000
--- a/internal/backport/go/doc/comment/std_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"os/exec"
-	"sort"
-	"strings"
-	"testing"
-
-	"golang.org/x/website/internal/backport/diff"
-)
-
-func TestStd(t *testing.T) {
-	out, err := exec.Command("go", "list", "std").CombinedOutput()
-	if err != nil {
-		t.Fatalf("%v\n%s", err, out)
-	}
-
-	var list []string
-	for _, pkg := range strings.Fields(string(out)) {
-		if !strings.Contains(pkg, "/") {
-			list = append(list, pkg)
-		}
-	}
-	sort.Strings(list)
-
-	have := strings.Join(stdPkgs, "\n") + "\n"
-	want := strings.Join(list, "\n") + "\n"
-	if have != want {
-		t.Errorf("stdPkgs is out of date: regenerate with 'go generate'\n%s", diff.Diff("stdPkgs", []byte(have), "want", []byte(want)))
-	}
-}
diff --git a/internal/backport/go/doc/comment/testdata/README.md b/internal/backport/go/doc/comment/testdata/README.md
deleted file mode 100644
index d6f2c54..0000000
--- a/internal/backport/go/doc/comment/testdata/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-This directory contains test files (*.txt) for the comment parser.
-
-The files are in [txtar format](https://pkg.go.dev/golang.org/x/tools/txtar).
-Consider this example:
-
-	-- input --
-	Hello.
-	-- gofmt --
-	Hello.
-	-- html --
-	<p>Hello.
-	-- markdown --
-	Hello.
-	-- text --
-	Hello.
-
-Each `-- name --` line introduces a new file with the given name.
-The file named “input” must be first and contains the input to
-[comment.Parser](https://pkg.go.dev/go/doc/comment/#Parser).
-
-The remaining files contain the expected output for the named format generated by
-[comment.Printer](https://pkg.go.dev/go/doc/comment/#Printer):
-“gofmt” for Printer.Comment (Go comment format, as used by gofmt),
-“html” for Printer.HTML, “markdown” for Printer.Markdown, and “text” for Printer.Text.
-The format can also be “dump” for a textual dump of the raw data structures.
-
-The text before the `-- input --` line, if present, is JSON to be unmarshalled
-to initialize a comment.Printer. For example, this test case sets the Printer's
-TextWidth field to 20:
-
-	{"TextWidth": 20}
-	-- input --
-	Package gob manages streams of gobs - binary values exchanged between an
-	Encoder (transmitter) and a Decoder (receiver).
-	-- text --
-	Package gob
-	manages streams
-	of gobs - binary
-	values exchanged
-	between an Encoder
-	(transmitter) and a
-	Decoder (receiver).
diff --git a/internal/backport/go/doc/comment/testdata/blank.txt b/internal/backport/go/doc/comment/testdata/blank.txt
deleted file mode 100644
index 9049fde..0000000
--- a/internal/backport/go/doc/comment/testdata/blank.txt
+++ /dev/null
@@ -1,12 +0,0 @@
--- input --
-	$
-	Blank line at start and end.
-	$
--- gofmt --
-Blank line at start and end.
--- text --
-Blank line at start and end.
--- markdown --
-Blank line at start and end.
--- html --
-<p>Blank line at start and end.
diff --git a/internal/backport/go/doc/comment/testdata/code.txt b/internal/backport/go/doc/comment/testdata/code.txt
deleted file mode 100644
index 06b1519..0000000
--- a/internal/backport/go/doc/comment/testdata/code.txt
+++ /dev/null
@@ -1,94 +0,0 @@
--- input --
-Text.
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-More text.
- One space
-  is
-   enough
-    to
-     start
-      a
-       block.
-More text.
-
-      Blocks
-    can
-
-  have
-    blank
-      lines.
--- gofmt --
-Text.
-
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-
-More text.
-
-	One space
-	 is
-	  enough
-	   to
-	    start
-	     a
-	      block.
-
-More text.
-
-	    Blocks
-	  can
-
-	have
-	  blank
-	    lines.
--- markdown --
-Text.
-
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-
-More text.
-
-	One space
-	 is
-	  enough
-	   to
-	    start
-	     a
-	      block.
-
-More text.
-
-	    Blocks
-	  can
-
-	have
-	  blank
-	    lines.
--- html --
-<p>Text.
-<pre>A tab-indented
-(no, not eight-space indented)
-code block and haiku.
-</pre>
-<p>More text.
-<pre>One space
- is
-  enough
-   to
-    start
-     a
-      block.
-</pre>
-<p>More text.
-<pre>    Blocks
-  can
-
-have
-  blank
-    lines.
-</pre>
diff --git a/internal/backport/go/doc/comment/testdata/code2.txt b/internal/backport/go/doc/comment/testdata/code2.txt
deleted file mode 100644
index 0810bed..0000000
--- a/internal/backport/go/doc/comment/testdata/code2.txt
+++ /dev/null
@@ -1,31 +0,0 @@
--- input --
-Text.
-
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-
-More text.
--- gofmt --
-Text.
-
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-
-More text.
--- markdown --
-Text.
-
-	A tab-indented
-	(no, not eight-space indented)
-	code block and haiku.
-
-More text.
--- html --
-<p>Text.
-<pre>A tab-indented
-(no, not eight-space indented)
-code block and haiku.
-</pre>
-<p>More text.
diff --git a/internal/backport/go/doc/comment/testdata/code3.txt b/internal/backport/go/doc/comment/testdata/code3.txt
deleted file mode 100644
index 4a96a0e..0000000
--- a/internal/backport/go/doc/comment/testdata/code3.txt
+++ /dev/null
@@ -1,33 +0,0 @@
--- input --
-Text.
-
-	$
-	A tab-indented
-	(surrounded by more blank lines)
-	code block and haiku.
-	$
-
-More text.
--- gofmt --
-Text.
-
-	A tab-indented
-	(surrounded by more blank lines)
-	code block and haiku.
-
-More text.
--- markdown --
-Text.
-
-	A tab-indented
-	(surrounded by more blank lines)
-	code block and haiku.
-
-More text.
--- html --
-<p>Text.
-<pre>A tab-indented
-(surrounded by more blank lines)
-code block and haiku.
-</pre>
-<p>More text.
diff --git a/internal/backport/go/doc/comment/testdata/code4.txt b/internal/backport/go/doc/comment/testdata/code4.txt
deleted file mode 100644
index f128c9a..0000000
--- a/internal/backport/go/doc/comment/testdata/code4.txt
+++ /dev/null
@@ -1,38 +0,0 @@
--- input --
-To test, run this command:
-  go test -more
-
-Or, to test specific things, run this command:
-
-go test -more \
-  -pkg first/package \
-  -pkg second/package \
-  -pkg third/package
-
-Happy testing!
--- gofmt --
-To test, run this command:
-
-	go test -more
-
-Or, to test specific things, run this command:
-
-	go test -more \
-	  -pkg first/package \
-	  -pkg second/package \
-	  -pkg third/package
-
-Happy testing!
--- markdown --
-To test, run this command:
-
-	go test -more
-
-Or, to test specific things, run this command:
-
-	go test -more \
-	  -pkg first/package \
-	  -pkg second/package \
-	  -pkg third/package
-
-Happy testing!
diff --git a/internal/backport/go/doc/comment/testdata/code5.txt b/internal/backport/go/doc/comment/testdata/code5.txt
deleted file mode 100644
index 0e340dd..0000000
--- a/internal/backport/go/doc/comment/testdata/code5.txt
+++ /dev/null
@@ -1,21 +0,0 @@
--- input --
-L1
-L2
-L3
-L4
-L5
-- L6 {
-	L7
-}
-L8
--- gofmt --
-L1
-L2
-L3
-L4
-L5
-  - L6 {
-    L7
-    }
-
-L8
diff --git a/internal/backport/go/doc/comment/testdata/code6.txt b/internal/backport/go/doc/comment/testdata/code6.txt
deleted file mode 100644
index d2915d1..0000000
--- a/internal/backport/go/doc/comment/testdata/code6.txt
+++ /dev/null
@@ -1,24 +0,0 @@
--- input --
-Run this program:
-
-func main() {
-	fmt.Println("hello, world")
-}
-
-Or this:
-
-go func() {
-	fmt.Println("hello, world")
-}()
--- gofmt --
-Run this program:
-
-	func main() {
-		fmt.Println("hello, world")
-	}
-
-Or this:
-
-	go func() {
-		fmt.Println("hello, world")
-	}()
diff --git a/internal/backport/go/doc/comment/testdata/crash1.txt b/internal/backport/go/doc/comment/testdata/crash1.txt
deleted file mode 100644
index 6bb2f6f..0000000
--- a/internal/backport/go/doc/comment/testdata/crash1.txt
+++ /dev/null
@@ -1,16 +0,0 @@
--- input --
-[]
-
-[]: http://
--- gofmt --
-[]
-
-[]: http://
--- html --
-<p><a href="http://"></a>
--- markdown --
-[](http://)
--- text --
-
-
-[]: http://
diff --git a/internal/backport/go/doc/comment/testdata/doclink.txt b/internal/backport/go/doc/comment/testdata/doclink.txt
deleted file mode 100644
index a932347..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink.txt
+++ /dev/null
@@ -1,21 +0,0 @@
--- input --
-In this package, see [Doc] and [Parser.Parse].
-There is no [Undef] or [Undef.Method].
-See also the [comment] package,
-especially [comment.Doc] and [comment.Parser.Parse].
--- gofmt --
-In this package, see [Doc] and [Parser.Parse].
-There is no [Undef] or [Undef.Method].
-See also the [comment] package,
-especially [comment.Doc] and [comment.Parser.Parse].
--- text --
-In this package, see Doc and Parser.Parse. There is no [Undef] or
-[Undef.Method]. See also the comment package, especially comment.Doc and
-comment.Parser.Parse.
--- markdown --
-In this package, see [Doc](#Doc) and [Parser.Parse](#Parser.Parse). There is no \[Undef] or \[Undef.Method]. See also the [comment](/go/doc/comment) package, especially [comment.Doc](/go/doc/comment#Doc) and [comment.Parser.Parse](/go/doc/comment#Parser.Parse).
--- html --
-<p>In this package, see <a href="#Doc">Doc</a> and <a href="#Parser.Parse">Parser.Parse</a>.
-There is no [Undef] or [Undef.Method].
-See also the <a href="/go/doc/comment">comment</a> package,
-especially <a href="/go/doc/comment#Doc">comment.Doc</a> and <a href="/go/doc/comment#Parser.Parse">comment.Parser.Parse</a>.
diff --git a/internal/backport/go/doc/comment/testdata/doclink2.txt b/internal/backport/go/doc/comment/testdata/doclink2.txt
deleted file mode 100644
index ecd8e4e..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink2.txt
+++ /dev/null
@@ -1,8 +0,0 @@
--- input --
-We use [io.Reader] a lot, and also a few map[io.Reader]string.
-
-Never [io.Reader]int or Slice[io.Reader] though.
--- markdown --
-We use [io.Reader](/io#Reader) a lot, and also a few map\[io.Reader]string.
-
-Never \[io.Reader]int or Slice\[io.Reader] though.
diff --git a/internal/backport/go/doc/comment/testdata/doclink3.txt b/internal/backport/go/doc/comment/testdata/doclink3.txt
deleted file mode 100644
index 0ccfb3d..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink3.txt
+++ /dev/null
@@ -1,8 +0,0 @@
--- input --
-[encoding/json.Marshal] is a doc link.
-
-[rot13.Marshal] is not.
--- markdown --
-[encoding/json.Marshal](/encoding/json#Marshal) is a doc link.
-
-\[rot13.Marshal] is not.
diff --git a/internal/backport/go/doc/comment/testdata/doclink4.txt b/internal/backport/go/doc/comment/testdata/doclink4.txt
deleted file mode 100644
index c709527..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink4.txt
+++ /dev/null
@@ -1,7 +0,0 @@
--- input --
-[io] at start of comment.
-[io] at start of line.
-At end of line: [io]
-At end of comment: [io]
--- markdown --
-[io](/io) at start of comment. [io](/io) at start of line. At end of line: [io](/io) At end of comment: [io](/io)
diff --git a/internal/backport/go/doc/comment/testdata/doclink5.txt b/internal/backport/go/doc/comment/testdata/doclink5.txt
deleted file mode 100644
index ac7b3ae..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink5.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-{"DocLinkBaseURL": "https://pkg.go.dev"}
--- input --
-[encoding/json.Marshal] is a doc link.
--- markdown --
-[encoding/json.Marshal](https://pkg.go.dev/encoding/json#Marshal) is a doc link.
diff --git a/internal/backport/go/doc/comment/testdata/doclink6.txt b/internal/backport/go/doc/comment/testdata/doclink6.txt
deleted file mode 100644
index 1acd03b..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink6.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-{"DocLinkBaseURL": "https://go.dev/pkg/"}
--- input --
-[encoding/json.Marshal] is a doc link, and so is [rsc.io/quote.NonExist].
--- markdown --
-[encoding/json.Marshal](https://go.dev/pkg/encoding/json/#Marshal) is a doc link, and so is [rsc.io/quote.NonExist](https://go.dev/pkg/rsc.io/quote/#NonExist).
diff --git a/internal/backport/go/doc/comment/testdata/doclink7.txt b/internal/backport/go/doc/comment/testdata/doclink7.txt
deleted file mode 100644
index d34979a..0000000
--- a/internal/backport/go/doc/comment/testdata/doclink7.txt
+++ /dev/null
@@ -1,4 +0,0 @@
--- input --
-You see more [*bytes.Buffer] than [bytes.Buffer].
--- markdown --
-You see more [\*bytes.Buffer](/bytes#Buffer) than [bytes.Buffer](/bytes#Buffer).
diff --git a/internal/backport/go/doc/comment/testdata/escape.txt b/internal/backport/go/doc/comment/testdata/escape.txt
deleted file mode 100644
index f54663f..0000000
--- a/internal/backport/go/doc/comment/testdata/escape.txt
+++ /dev/null
@@ -1,55 +0,0 @@
--- input --
-What the ~!@#$%^&*()_+-=`{}|[]\:";',./<>?
-
-+ Line
-
-- Line
-
-* Line
-
-999. Line
-
-## Line
--- gofmt --
-What the ~!@#$%^&*()_+-=`{}|[]\:";',./<>?
-
-+ Line
-
-- Line
-
-* Line
-
-999. Line
-
-## Line
--- text --
-What the ~!@#$%^&*()_+-=`{}|[]\:";',./<>?
-
-+ Line
-
-- Line
-
-* Line
-
-999. Line
-
-## Line
--- markdown --
-What the ~!@#$%^&\*()\_+-=\`{}|\[]\\:";',./\<>?
-
-\+ Line
-
-\- Line
-
-\* Line
-
-999\. Line
-
-\## Line
--- html --
-<p>What the ~!@#$%^&amp;*()_+-=`{}|[]\:&quot;;&apos;,./&lt;&gt;?
-<p>+ Line
-<p>- Line
-<p>* Line
-<p>999. Line
-<p>## Line
diff --git a/internal/backport/go/doc/comment/testdata/head.txt b/internal/backport/go/doc/comment/testdata/head.txt
deleted file mode 100644
index b99a8c5..0000000
--- a/internal/backport/go/doc/comment/testdata/head.txt
+++ /dev/null
@@ -1,92 +0,0 @@
--- input --
-Some text.
-
-An Old Heading
-
-Not An Old Heading.
-
-And some text.
-
-# A New Heading.
-
-And some more text.
-
-# Not a heading,
-because text follows it.
-
-Because text precedes it,
-# not a heading.
-
-## Not a heading either.
-
--- gofmt --
-Some text.
-
-# An Old Heading
-
-Not An Old Heading.
-
-And some text.
-
-# A New Heading.
-
-And some more text.
-
-# Not a heading,
-because text follows it.
-
-Because text precedes it,
-# not a heading.
-
-## Not a heading either.
-
--- text --
-Some text.
-
-# An Old Heading
-
-Not An Old Heading.
-
-And some text.
-
-# A New Heading.
-
-And some more text.
-
-# Not a heading, because text follows it.
-
-Because text precedes it, # not a heading.
-
-## Not a heading either.
-
--- markdown --
-Some text.
-
-### An Old Heading {#hdr-An_Old_Heading}
-
-Not An Old Heading.
-
-And some text.
-
-### A New Heading. {#hdr-A_New_Heading_}
-
-And some more text.
-
-\# Not a heading, because text follows it.
-
-Because text precedes it, # not a heading.
-
-\## Not a heading either.
-
--- html --
-<p>Some text.
-<h3 id="hdr-An_Old_Heading">An Old Heading</h3>
-<p>Not An Old Heading.
-<p>And some text.
-<h3 id="hdr-A_New_Heading_">A New Heading.</h3>
-<p>And some more text.
-<p># Not a heading,
-because text follows it.
-<p>Because text precedes it,
-# not a heading.
-<p>## Not a heading either.
diff --git a/internal/backport/go/doc/comment/testdata/head2.txt b/internal/backport/go/doc/comment/testdata/head2.txt
deleted file mode 100644
index d357632..0000000
--- a/internal/backport/go/doc/comment/testdata/head2.txt
+++ /dev/null
@@ -1,36 +0,0 @@
--- input --
-✦
-
-Almost a+heading
-
-✦
-
-Don't be a heading
-
-✦
-
-A.b is a heading
-
-✦
-
-A. b is not a heading
-
-✦
--- gofmt --
-✦
-
-Almost a+heading
-
-✦
-
-Don't be a heading
-
-✦
-
-# A.b is a heading
-
-✦
-
-A. b is not a heading
-
-✦
diff --git a/internal/backport/go/doc/comment/testdata/head3.txt b/internal/backport/go/doc/comment/testdata/head3.txt
deleted file mode 100644
index dbb7cb3..0000000
--- a/internal/backport/go/doc/comment/testdata/head3.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-{"HeadingLevel": 5}
--- input --
-# Heading
--- markdown --
-##### Heading {#hdr-Heading}
--- html --
-<h5 id="hdr-Heading">Heading</h5>
diff --git a/internal/backport/go/doc/comment/testdata/hello.txt b/internal/backport/go/doc/comment/testdata/hello.txt
deleted file mode 100644
index fb07f1e..0000000
--- a/internal/backport/go/doc/comment/testdata/hello.txt
+++ /dev/null
@@ -1,35 +0,0 @@
--- input --
-	Hello,
-	world
-
-	This is
-	a test.
--- dump --
-Doc
-	Paragraph
-		Plain
-			"Hello,\n"
-			"world"
-	Paragraph
-		Plain
-			"This is\n"
-			"a test."
--- gofmt --
-Hello,
-world
-
-This is
-a test.
--- html --
-<p>Hello,
-world
-<p>This is
-a test.
--- markdown --
-Hello, world
-
-This is a test.
--- text --
-Hello, world
-
-This is a test.
diff --git a/internal/backport/go/doc/comment/testdata/link.txt b/internal/backport/go/doc/comment/testdata/link.txt
deleted file mode 100644
index 551e306..0000000
--- a/internal/backport/go/doc/comment/testdata/link.txt
+++ /dev/null
@@ -1,17 +0,0 @@
--- input --
-The Go home page is https://go.dev/.
-It used to be https://golang.org.
-
--- gofmt --
-The Go home page is https://go.dev/.
-It used to be https://golang.org.
-
--- text --
-The Go home page is https://go.dev/. It used to be https://golang.org.
-
--- markdown --
-The Go home page is [https://go.dev/](https://go.dev/). It used to be [https://golang.org](https://golang.org).
-
--- html --
-<p>The Go home page is <a href="https://go.dev/">https://go.dev/</a>.
-It used to be <a href="https://golang.org">https://golang.org</a>.
diff --git a/internal/backport/go/doc/comment/testdata/link2.txt b/internal/backport/go/doc/comment/testdata/link2.txt
deleted file mode 100644
index 8637a32..0000000
--- a/internal/backport/go/doc/comment/testdata/link2.txt
+++ /dev/null
@@ -1,31 +0,0 @@
--- input --
-The Go home page is https://go.dev/.
-It used to be https://golang.org.
-https:// is not a link.
-Nor is https://
-https://☺ is not a link.
-https://:80 is not a link.
-
--- gofmt --
-The Go home page is https://go.dev/.
-It used to be https://golang.org.
-https:// is not a link.
-Nor is https://
-https://☺ is not a link.
-https://:80 is not a link.
-
--- text --
-The Go home page is https://go.dev/. It used to be https://golang.org. https://
-is not a link. Nor is https:// https://☺ is not a link. https://:80 is not a
-link.
-
--- markdown --
-The Go home page is [https://go.dev/](https://go.dev/). It used to be [https://golang.org](https://golang.org). https:// is not a link. Nor is https:// https://☺ is not a link. https://:80 is not a link.
-
--- html --
-<p>The Go home page is <a href="https://go.dev/">https://go.dev/</a>.
-It used to be <a href="https://golang.org">https://golang.org</a>.
-https:// is not a link.
-Nor is https://
-https://☺ is not a link.
-https://:80 is not a link.
diff --git a/internal/backport/go/doc/comment/testdata/link3.txt b/internal/backport/go/doc/comment/testdata/link3.txt
deleted file mode 100644
index 5a115b5..0000000
--- a/internal/backport/go/doc/comment/testdata/link3.txt
+++ /dev/null
@@ -1,14 +0,0 @@
--- input --
-Doc text.
-
-[Go home page]: https://go.dev
--- gofmt --
-Doc text.
-
-[Go home page]: https://go.dev
--- text --
-Doc text.
--- markdown --
-Doc text.
--- html --
-<p>Doc text.
diff --git a/internal/backport/go/doc/comment/testdata/link4.txt b/internal/backport/go/doc/comment/testdata/link4.txt
deleted file mode 100644
index 75f194c..0000000
--- a/internal/backport/go/doc/comment/testdata/link4.txt
+++ /dev/null
@@ -1,77 +0,0 @@
--- input --
-These are not links.
-
-[x
-
-[x]:
-
-[x]:https://go.dev
-
-[x]https://go.dev
-
-[x]: surprise://go.dev
-
-[x]: surprise!
-
-But this is, with a tab (although it's unused).
-
-[z]:	https://go.dev
--- gofmt --
-These are not links.
-
-[x
-
-[x]:
-
-[x]:https://go.dev
-
-[x]https://go.dev
-
-[x]: surprise://go.dev
-
-[x]: surprise!
-
-But this is, with a tab (although it's unused).
-
-[z]: https://go.dev
--- text --
-These are not links.
-
-[x
-
-[x]:
-
-[x]:https://go.dev
-
-[x]https://go.dev
-
-[x]: surprise://go.dev
-
-[x]: surprise!
-
-But this is, with a tab (although it's unused).
--- markdown --
-These are not links.
-
-\[x
-
-\[x]:
-
-\[x]:[https://go.dev](https://go.dev)
-
-\[x][https://go.dev](https://go.dev)
-
-\[x]: surprise://go.dev
-
-\[x]: surprise!
-
-But this is, with a tab (although it's unused).
--- html --
-<p>These are not links.
-<p>[x
-<p>[x]:
-<p>[x]:<a href="https://go.dev">https://go.dev</a>
-<p>[x]<a href="https://go.dev">https://go.dev</a>
-<p>[x]: surprise://go.dev
-<p>[x]: surprise!
-<p>But this is, with a tab (although it&apos;s unused).
diff --git a/internal/backport/go/doc/comment/testdata/link5.txt b/internal/backport/go/doc/comment/testdata/link5.txt
deleted file mode 100644
index b4fb588..0000000
--- a/internal/backport/go/doc/comment/testdata/link5.txt
+++ /dev/null
@@ -1,36 +0,0 @@
--- input --
-See the [Go home page] and the [pkg
-site].
-
-[Go home page]: https://go.dev/
-[pkg site]: https://pkg.go.dev
-[Go home page]: https://duplicate.ignored
-
-They're really great!
-
--- gofmt --
-See the [Go home page] and the [pkg
-site].
-
-They're really great!
-
-[Go home page]: https://go.dev/
-[pkg site]: https://pkg.go.dev
-
-[Go home page]: https://duplicate.ignored
-
--- text --
-See the Go home page and the pkg site.
-
-They're really great!
-
-[Go home page]: https://go.dev/
-[pkg site]: https://pkg.go.dev
--- markdown --
-See the [Go home page](https://go.dev/) and the [pkg site](https://pkg.go.dev).
-
-They're really great!
--- html --
-<p>See the <a href="https://go.dev/">Go home page</a> and the <a href="https://pkg.go.dev">pkg
-site</a>.
-<p>They&apos;re really great!
diff --git a/internal/backport/go/doc/comment/testdata/link6.txt b/internal/backport/go/doc/comment/testdata/link6.txt
deleted file mode 100644
index ff629b4..0000000
--- a/internal/backport/go/doc/comment/testdata/link6.txt
+++ /dev/null
@@ -1,50 +0,0 @@
--- input --
-URLs with punctuation are hard.
-We don't want to consume the end-of-sentence punctuation.
-
-For example, https://en.wikipedia.org/wiki/John_Adams_(miniseries).
-And https://example.com/[foo]/bar{.
-And https://example.com/(foo)/bar!
-And https://example.com/{foo}/bar{.
-And https://example.com/)baz{foo}.
-
-[And https://example.com/].
-
--- gofmt --
-URLs with punctuation are hard.
-We don't want to consume the end-of-sentence punctuation.
-
-For example, https://en.wikipedia.org/wiki/John_Adams_(miniseries).
-And https://example.com/[foo]/bar{.
-And https://example.com/(foo)/bar!
-And https://example.com/{foo}/bar{.
-And https://example.com/)baz{foo}.
-
-[And https://example.com/].
-
--- text --
-URLs with punctuation are hard. We don't want to consume the end-of-sentence
-punctuation.
-
-For example, https://en.wikipedia.org/wiki/John_Adams_(miniseries).
-And https://example.com/[foo]/bar{. And https://example.com/(foo)/bar! And
-https://example.com/{foo}/bar{. And https://example.com/)baz{foo}.
-
-[And https://example.com/].
-
--- markdown --
-URLs with punctuation are hard. We don't want to consume the end-of-sentence punctuation.
-
-For example, [https://en.wikipedia.org/wiki/John\_Adams\_(miniseries)](https://en.wikipedia.org/wiki/John_Adams_(miniseries)). And [https://example.com/\[foo]/bar](https://example.com/[foo]/bar){. And [https://example.com/(foo)/bar](https://example.com/(foo)/bar)! And [https://example.com/{foo}/bar](https://example.com/{foo}/bar){. And [https://example.com/](https://example.com/))baz{foo}.
-
-\[And [https://example.com/](https://example.com/)].
-
--- html --
-<p>URLs with punctuation are hard.
-We don&apos;t want to consume the end-of-sentence punctuation.
-<p>For example, <a href="https://en.wikipedia.org/wiki/John_Adams_(miniseries)">https://en.wikipedia.org/wiki/John_Adams_(miniseries)</a>.
-And <a href="https://example.com/[foo]/bar">https://example.com/[foo]/bar</a>{.
-And <a href="https://example.com/(foo)/bar">https://example.com/(foo)/bar</a>!
-And <a href="https://example.com/{foo}/bar">https://example.com/{foo}/bar</a>{.
-And <a href="https://example.com/">https://example.com/</a>)baz{foo}.
-<p>[And <a href="https://example.com/">https://example.com/</a>].
diff --git a/internal/backport/go/doc/comment/testdata/link7.txt b/internal/backport/go/doc/comment/testdata/link7.txt
deleted file mode 100644
index 89a8b31..0000000
--- a/internal/backport/go/doc/comment/testdata/link7.txt
+++ /dev/null
@@ -1,25 +0,0 @@
--- input --
-[math] is a package but this is not a doc link.
-
-[io] is a doc link.
-
-[math]: https://example.com
--- gofmt --
-[math] is a package but this is not a doc link.
-
-[io] is a doc link.
-
-[math]: https://example.com
--- text --
-math is a package but this is not a doc link.
-
-io is a doc link.
-
-[math]: https://example.com
--- markdown --
-[math](https://example.com) is a package but this is not a doc link.
-
-[io](/io) is a doc link.
--- html --
-<p><a href="https://example.com">math</a> is a package but this is not a doc link.
-<p><a href="/io">io</a> is a doc link.
diff --git a/internal/backport/go/doc/comment/testdata/linklist.txt b/internal/backport/go/doc/comment/testdata/linklist.txt
deleted file mode 100644
index baf4062..0000000
--- a/internal/backport/go/doc/comment/testdata/linklist.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-{"DocLinkBaseURL": "https://pkg.go.dev"}
--- input --
-Did you know?
-
-  - [encoding/json.Marshal] is a doc link. So is [encoding/json.Unmarshal].
--- text --
-Did you know?
-
-  - encoding/json.Marshal is a doc link. So is encoding/json.Unmarshal.
--- markdown --
-Did you know?
-
-  - [encoding/json.Marshal](https://pkg.go.dev/encoding/json#Marshal) is a doc link. So is [encoding/json.Unmarshal](https://pkg.go.dev/encoding/json#Unmarshal).
--- html --
-<p>Did you know?
-<ul>
-<li><a href="https://pkg.go.dev/encoding/json#Marshal">encoding/json.Marshal</a> is a doc link. So is <a href="https://pkg.go.dev/encoding/json#Unmarshal">encoding/json.Unmarshal</a>.
-</ul>
diff --git a/internal/backport/go/doc/comment/testdata/linklist2.txt b/internal/backport/go/doc/comment/testdata/linklist2.txt
deleted file mode 100644
index 81b3061..0000000
--- a/internal/backport/go/doc/comment/testdata/linklist2.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-{"DocLinkBaseURL": "https://pkg.go.dev"}
--- input --
-Did you know?
-
-  - [testing.T] is one doc link.
-  - So is [testing.M].
-  - So is [testing.B].
-    This is the same list paragraph.
-
-    There is [testing.PB] in this list item, too!
--- text --
-Did you know?
-
-  - testing.T is one doc link.
-
-  - So is testing.M.
-
-  - So is testing.B. This is the same list paragraph.
-
-    There is testing.PB in this list item, too!
--- markdown --
-Did you know?
-
-  - [testing.T](https://pkg.go.dev/testing#T) is one doc link.
-
-  - So is [testing.M](https://pkg.go.dev/testing#M).
-
-  - So is [testing.B](https://pkg.go.dev/testing#B). This is the same list paragraph.
-
-    There is [testing.PB](https://pkg.go.dev/testing#PB) in this list item, too!
--- html --
-<p>Did you know?
-<ul>
-<li><p><a href="https://pkg.go.dev/testing#T">testing.T</a> is one doc link.
-<li><p>So is <a href="https://pkg.go.dev/testing#M">testing.M</a>.
-<li><p>So is <a href="https://pkg.go.dev/testing#B">testing.B</a>.
-This is the same list paragraph.
-<p>There is <a href="https://pkg.go.dev/testing#PB">testing.PB</a> in this list item, too!
-</ul>
diff --git a/internal/backport/go/doc/comment/testdata/linklist3.txt b/internal/backport/go/doc/comment/testdata/linklist3.txt
deleted file mode 100644
index 701a54e..0000000
--- a/internal/backport/go/doc/comment/testdata/linklist3.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-{"DocLinkBaseURL": "https://pkg.go.dev"}
--- input --
-Cool things:
-
-  - Foo
-  - [Go]
-  - Bar
-
-[Go]: https://go.dev/
--- text --
-Cool things:
-
-  - Foo
-  - Go
-  - Bar
-
-[Go]: https://go.dev/
--- markdown --
-Cool things:
-
-  - Foo
-  - [Go](https://go.dev/)
-  - Bar
-
--- html --
-<p>Cool things:
-<ul>
-<li>Foo
-<li><a href="https://go.dev/">Go</a>
-<li>Bar
-</ul>
diff --git a/internal/backport/go/doc/comment/testdata/linklist4.txt b/internal/backport/go/doc/comment/testdata/linklist4.txt
deleted file mode 100644
index db39ec4..0000000
--- a/internal/backport/go/doc/comment/testdata/linklist4.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-{"DocLinkBaseURL": "https://pkg.go.dev"}
--- input --
-Cool things:
-
-  - Foo
-  - [Go] is great
-    
-    [Go]: https://go.dev/
-  - Bar
-
--- text --
-Cool things:
-
-  - Foo
-
-  - Go is great
-
-  - Bar
-
-[Go]: https://go.dev/
--- markdown --
-Cool things:
-
-  - Foo
-
-  - [Go](https://go.dev/) is great
-
-  - Bar
-
--- html --
-<p>Cool things:
-<ul>
-<li><p>Foo
-<li><p><a href="https://go.dev/">Go</a> is great
-<li><p>Bar
-</ul>
diff --git a/internal/backport/go/doc/comment/testdata/list.txt b/internal/backport/go/doc/comment/testdata/list.txt
deleted file mode 100644
index 455782f..0000000
--- a/internal/backport/go/doc/comment/testdata/list.txt
+++ /dev/null
@@ -1,48 +0,0 @@
--- input --
-Text.
-- Not a list.
- - Here is the list.
-     • Using multiple bullets.
-          * Indentation does not matter.
-     + Lots of bullets.
-More text.
-
--- gofmt --
-Text.
-- Not a list.
-  - Here is the list.
-  - Using multiple bullets.
-  - Indentation does not matter.
-  - Lots of bullets.
-
-More text.
-
--- text --
-Text. - Not a list.
-  - Here is the list.
-  - Using multiple bullets.
-  - Indentation does not matter.
-  - Lots of bullets.
-
-More text.
-
--- markdown --
-Text. - Not a list.
-
-  - Here is the list.
-  - Using multiple bullets.
-  - Indentation does not matter.
-  - Lots of bullets.
-
-More text.
-
--- html --
-<p>Text.
-- Not a list.
-<ul>
-<li>Here is the list.
-<li>Using multiple bullets.
-<li>Indentation does not matter.
-<li>Lots of bullets.
-</ul>
-<p>More text.
diff --git a/internal/backport/go/doc/comment/testdata/list10.txt b/internal/backport/go/doc/comment/testdata/list10.txt
deleted file mode 100644
index 9c49083..0000000
--- a/internal/backport/go/doc/comment/testdata/list10.txt
+++ /dev/null
@@ -1,13 +0,0 @@
--- input --
-
-	1. This list
-	2. Starts the comment
-	3. And also has a blank line before it.
-
-All of which is a little weird.
--- gofmt --
- 1. This list
- 2. Starts the comment
- 3. And also has a blank line before it.
-
-All of which is a little weird.
diff --git a/internal/backport/go/doc/comment/testdata/list2.txt b/internal/backport/go/doc/comment/testdata/list2.txt
deleted file mode 100644
index c390b3d..0000000
--- a/internal/backport/go/doc/comment/testdata/list2.txt
+++ /dev/null
@@ -1,57 +0,0 @@
--- input --
-Text.
- 1. Uno
-   2) Dos
- 3. Tres
-   5. Cinco
- 7. Siete
-   11. Once
- 12. Doce
- 13. Trece.
-
--- gofmt --
-Text.
- 1. Uno
- 2. Dos
- 3. Tres
- 5. Cinco
- 7. Siete
- 11. Once
- 12. Doce
- 13. Trece.
-
--- text --
-Text.
- 1. Uno
- 2. Dos
- 3. Tres
- 5. Cinco
- 7. Siete
- 11. Once
- 12. Doce
- 13. Trece.
-
--- markdown --
-Text.
-
- 1. Uno
- 2. Dos
- 3. Tres
- 5. Cinco
- 7. Siete
- 11. Once
- 12. Doce
- 13. Trece.
-
--- html --
-<p>Text.
-<ol>
-<li>Uno
-<li>Dos
-<li>Tres
-<li value="5">Cinco
-<li value="7">Siete
-<li value="11">Once
-<li>Doce
-<li>Trece.
-</ol>
diff --git a/internal/backport/go/doc/comment/testdata/list3.txt b/internal/backport/go/doc/comment/testdata/list3.txt
deleted file mode 100644
index d7d345d..0000000
--- a/internal/backport/go/doc/comment/testdata/list3.txt
+++ /dev/null
@@ -1,32 +0,0 @@
--- input --
-Text.
-
- 1. Uno
- 1. Dos
- 1. Tres
- 1. Quatro
-
--- gofmt --
-Text.
-
- 1. Uno
- 1. Dos
- 1. Tres
- 1. Quatro
-
--- markdown --
-Text.
-
- 1. Uno
- 1. Dos
- 1. Tres
- 1. Quatro
-
--- html --
-<p>Text.
-<ol>
-<li>Uno
-<li value="1">Dos
-<li value="1">Tres
-<li value="1">Quatro
-</ol>
diff --git a/internal/backport/go/doc/comment/testdata/list4.txt b/internal/backport/go/doc/comment/testdata/list4.txt
deleted file mode 100644
index 9c28d65..0000000
--- a/internal/backport/go/doc/comment/testdata/list4.txt
+++ /dev/null
@@ -1,38 +0,0 @@
--- input --
-Text.
-  1. List
-2. Not indented, not a list.
-  3. Another list.
-
--- gofmt --
-Text.
- 1. List
-
-2. Not indented, not a list.
- 3. Another list.
-
--- text --
-Text.
- 1. List
-
-2. Not indented, not a list.
- 3. Another list.
-
--- markdown --
-Text.
-
- 1. List
-
-2\. Not indented, not a list.
-
- 3. Another list.
-
--- html --
-<p>Text.
-<ol>
-<li>List
-</ol>
-<p>2. Not indented, not a list.
-<ol>
-<li value="3">Another list.
-</ol>
diff --git a/internal/backport/go/doc/comment/testdata/list5.txt b/internal/backport/go/doc/comment/testdata/list5.txt
deleted file mode 100644
index a5128e5..0000000
--- a/internal/backport/go/doc/comment/testdata/list5.txt
+++ /dev/null
@@ -1,40 +0,0 @@
--- input --
-Text.
-
-  1. One
-  999999999999999999999. Big
-  1000000000000000000000. Bigger
-  1000000000000000000001. Biggest
-
--- gofmt --
-Text.
-
- 1. One
- 999999999999999999999. Big
- 1000000000000000000000. Bigger
- 1000000000000000000001. Biggest
-
--- text --
-Text.
-
- 1. One
- 999999999999999999999. Big
- 1000000000000000000000. Bigger
- 1000000000000000000001. Biggest
-
--- markdown --
-Text.
-
- 1. One
- 999999999999999999999. Big
- 1000000000000000000000. Bigger
- 1000000000000000000001. Biggest
-
--- html --
-<p>Text.
-<ol>
-<li>One
-<li value="999999999999999999999">Big
-<li>Bigger
-<li>Biggest
-</ol>
diff --git a/internal/backport/go/doc/comment/testdata/list6.txt b/internal/backport/go/doc/comment/testdata/list6.txt
deleted file mode 100644
index ffc0122..0000000
--- a/internal/backport/go/doc/comment/testdata/list6.txt
+++ /dev/null
@@ -1,129 +0,0 @@
--- input --
-Text.
- - List immediately after.
- - Another.
-
-More text.
-
- - List after blank line.
- - Another.
-
-Even more text.
- - List immediately after.
-
- - Blank line between items.
-
-Yet more text.
-
- - Another list after blank line.
-
- - Blank line between items.
-
-Still more text.
- - One list item.
-
-   Multiple paragraphs.
--- dump --
-Doc
-	Paragraph
-		Plain "Text."
-	List ForceBlankBefore=false ForceBlankBetween=false
-		Item Number=""
-			Paragraph
-				Plain "List immediately after."
-		Item Number=""
-			Paragraph
-				Plain "Another."
-	Paragraph
-		Plain "More text."
-	List ForceBlankBefore=true ForceBlankBetween=false
-		Item Number=""
-			Paragraph
-				Plain "List after blank line."
-		Item Number=""
-			Paragraph
-				Plain "Another."
-	Paragraph
-		Plain "Even more text."
-	List ForceBlankBefore=false ForceBlankBetween=true
-		Item Number=""
-			Paragraph
-				Plain "List immediately after."
-		Item Number=""
-			Paragraph
-				Plain "Blank line between items."
-	Paragraph
-		Plain "Yet more text."
-	List ForceBlankBefore=true ForceBlankBetween=true
-		Item Number=""
-			Paragraph
-				Plain "Another list after blank line."
-		Item Number=""
-			Paragraph
-				Plain "Blank line between items."
-	Paragraph
-		Plain "Still more text."
-	List ForceBlankBefore=false ForceBlankBetween=true
-		Item Number=""
-			Paragraph
-				Plain "One list item."
-			Paragraph
-				Plain "Multiple paragraphs."
-
--- gofmt --
-Text.
-  - List immediately after.
-  - Another.
-
-More text.
-
-  - List after blank line.
-  - Another.
-
-Even more text.
-
-  - List immediately after.
-
-  - Blank line between items.
-
-Yet more text.
-
-  - Another list after blank line.
-
-  - Blank line between items.
-
-Still more text.
-
-  - One list item.
-
-    Multiple paragraphs.
-
--- markdown --
-Text.
-
-  - List immediately after.
-  - Another.
-
-More text.
-
-  - List after blank line.
-  - Another.
-
-Even more text.
-
-  - List immediately after.
-
-  - Blank line between items.
-
-Yet more text.
-
-  - Another list after blank line.
-
-  - Blank line between items.
-
-Still more text.
-
-  - One list item.
-
-    Multiple paragraphs.
-
diff --git a/internal/backport/go/doc/comment/testdata/list7.txt b/internal/backport/go/doc/comment/testdata/list7.txt
deleted file mode 100644
index 4466050..0000000
--- a/internal/backport/go/doc/comment/testdata/list7.txt
+++ /dev/null
@@ -1,98 +0,0 @@
--- input --
-Almost list markers (but not quite):
-
- -
-
-❦
-
- - $
-
-❦
-
- - $
-
-❦
-
-  $
-   $
-
-❦
-
- 1! List.
-
-❦
--- gofmt --
-Almost list markers (but not quite):
-
-	-
-
-❦
-
-	- $
-
-❦
-
-	- $
-
-❦
-
-❦
-
-	1! List.
-
-❦
--- text --
-Almost list markers (but not quite):
-
-	-
-
-❦
-
-	-
-
-❦
-
-	-
-
-❦
-
-❦
-
-	1! List.
-
-❦
--- markdown --
-Almost list markers (but not quite):
-
-	-
-
-❦
-
-	- $
-
-❦
-
-	- $
-
-❦
-
-❦
-
-	1! List.
-
-❦
--- html --
-<p>Almost list markers (but not quite):
-<pre>-
-</pre>
-<p>❦
-<pre>- $
-</pre>
-<p>❦
-<pre>- $
-</pre>
-<p>❦
-<p>❦
-<pre>1! List.
-</pre>
-<p>❦
diff --git a/internal/backport/go/doc/comment/testdata/list8.txt b/internal/backport/go/doc/comment/testdata/list8.txt
deleted file mode 100644
index fc46b0d..0000000
--- a/internal/backport/go/doc/comment/testdata/list8.txt
+++ /dev/null
@@ -1,56 +0,0 @@
--- input --
-Loose lists.
-  - A
-
-    B
-  - C
-    D
-  - E
-  - F
--- gofmt --
-Loose lists.
-
-  - A
-
-    B
-
-  - C
-    D
-
-  - E
-
-  - F
--- text --
-Loose lists.
-
-  - A
-
-    B
-
-  - C D
-
-  - E
-
-  - F
--- markdown --
-Loose lists.
-
-  - A
-
-    B
-
-  - C D
-
-  - E
-
-  - F
--- html --
-<p>Loose lists.
-<ul>
-<li><p>A
-<p>B
-<li><p>C
-D
-<li><p>E
-<li><p>F
-</ul>
diff --git a/internal/backport/go/doc/comment/testdata/list9.txt b/internal/backport/go/doc/comment/testdata/list9.txt
deleted file mode 100644
index 48e4673..0000000
--- a/internal/backport/go/doc/comment/testdata/list9.txt
+++ /dev/null
@@ -1,30 +0,0 @@
--- input --
-Text.
-
-1. Not a list
-2. because it is
-3. unindented.
-
-4. This one
-  is a list
-  because of the indented text.
-5. More wrapped
-  items.
-6. And unwrapped.
-
-7. The blank line stops the heuristic.
--- gofmt --
-Text.
-
-1. Not a list
-2. because it is
-3. unindented.
-
- 4. This one
-    is a list
-    because of the indented text.
- 5. More wrapped
-    items.
- 6. And unwrapped.
-
-7. The blank line stops the heuristic.
diff --git a/internal/backport/go/doc/comment/testdata/para.txt b/internal/backport/go/doc/comment/testdata/para.txt
deleted file mode 100644
index 2355fa8..0000000
--- a/internal/backport/go/doc/comment/testdata/para.txt
+++ /dev/null
@@ -1,17 +0,0 @@
--- input --
-Hello, world.
-This is a paragraph.
-
--- gofmt --
-Hello, world.
-This is a paragraph.
-
--- text --
-Hello, world. This is a paragraph.
-
--- markdown --
-Hello, world. This is a paragraph.
-
--- html --
-<p>Hello, world.
-This is a paragraph.
diff --git a/internal/backport/go/doc/comment/testdata/quote.txt b/internal/backport/go/doc/comment/testdata/quote.txt
deleted file mode 100644
index b64adae..0000000
--- a/internal/backport/go/doc/comment/testdata/quote.txt
+++ /dev/null
@@ -1,15 +0,0 @@
--- input --
-Doubled single quotes like `` and '' turn into Unicode double quotes,
-but single quotes ` and ' do not.
-Misplaced markdown fences ``` do not either.
--- gofmt --
-Doubled single quotes like “ and ” turn into Unicode double quotes,
-but single quotes ` and ' do not.
-Misplaced markdown fences ``` do not either.
--- text --
-Doubled single quotes like “ and ” turn into Unicode double quotes, but single
-quotes ` and ' do not. Misplaced markdown fences ``` do not either.
--- html --
-<p>Doubled single quotes like “ and ” turn into Unicode double quotes,
-but single quotes ` and &apos; do not.
-Misplaced markdown fences ``` do not either.
diff --git a/internal/backport/go/doc/comment/testdata/text.txt b/internal/backport/go/doc/comment/testdata/text.txt
deleted file mode 100644
index c4de6e2..0000000
--- a/internal/backport/go/doc/comment/testdata/text.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-{"TextPrefix":"|", "TextCodePrefix": "@"}
--- input --
-Hello, world
- Code block here.
-More text.
-Tight list
- - one
- - two
- - three
-Loose list
- - one
-
- - two
-
- - three
-
-# Heading
-
-More text.
--- gofmt --
-Hello, world
-
-	Code block here.
-
-More text.
-Tight list
-  - one
-  - two
-  - three
-
-Loose list
-
-  - one
-
-  - two
-
-  - three
-
-# Heading
-
-More text.
--- text --
-|Hello, world
-|
-@Code block here.
-|
-|More text. Tight list
-|  - one
-|  - two
-|  - three
-|
-|Loose list
-|
-|  - one
-|
-|  - two
-|
-|  - three
-|
-|# Heading
-|
-|More text.
diff --git a/internal/backport/go/doc/comment/testdata/text2.txt b/internal/backport/go/doc/comment/testdata/text2.txt
deleted file mode 100644
index a099d0b..0000000
--- a/internal/backport/go/doc/comment/testdata/text2.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-{"TextWidth": -1}
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-Package gob manages streams of gobs - binary values exchanged between an Encoder (transmitter) and a Decoder (receiver). A typical use is transporting arguments and results of remote procedure calls (RPCs) such as those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream and is most efficient when a single Encoder is used to transmit a stream of values, amortizing the cost of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text3.txt b/internal/backport/go/doc/comment/testdata/text3.txt
deleted file mode 100644
index 75d2c37..0000000
--- a/internal/backport/go/doc/comment/testdata/text3.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-{"TextWidth": 30}
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-Package gob manages streams
-of gobs - binary values
-exchanged between an Encoder
-(transmitter) and a Decoder
-(receiver). A typical use is
-transporting arguments and
-results of remote procedure
-calls (RPCs) such as those
-provided by package "net/rpc".
-
-The implementation compiles
-a custom codec for each data
-type in the stream and is
-most efficient when a single
-Encoder is used to transmit a
-stream of values, amortizing
-the cost of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text4.txt b/internal/backport/go/doc/comment/testdata/text4.txt
deleted file mode 100644
index e429985..0000000
--- a/internal/backport/go/doc/comment/testdata/text4.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-{"TextWidth": 29}
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-Package gob manages streams
-of gobs - binary values
-exchanged between an Encoder
-(transmitter) and a Decoder
-(receiver). A typical use
-is transporting arguments
-and results of remote
-procedure calls (RPCs) such
-as those provided by package
-"net/rpc".
-
-The implementation compiles
-a custom codec for each data
-type in the stream and is
-most efficient when a single
-Encoder is used to transmit a
-stream of values, amortizing
-the cost of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text5.txt b/internal/backport/go/doc/comment/testdata/text5.txt
deleted file mode 100644
index 2408fc5..0000000
--- a/internal/backport/go/doc/comment/testdata/text5.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-{"TextWidth": 20}
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-Package gob
-manages streams
-of gobs - binary
-values exchanged
-between an Encoder
-(transmitter) and a
-Decoder (receiver).
-A typical use
-is transporting
-arguments and
-results of remote
-procedure calls
-(RPCs) such as those
-provided by package
-"net/rpc".
-
-The implementation
-compiles a custom
-codec for each
-data type in the
-stream and is most
-efficient when a
-single Encoder is
-used to transmit a
-stream of values,
-amortizing the cost
-of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text6.txt b/internal/backport/go/doc/comment/testdata/text6.txt
deleted file mode 100644
index d6deff5..0000000
--- a/internal/backport/go/doc/comment/testdata/text6.txt
+++ /dev/null
@@ -1,18 +0,0 @@
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-Package gob manages streams of gobs - binary values exchanged between an Encoder
-(transmitter) and a Decoder (receiver). A typical use is transporting arguments
-and results of remote procedure calls (RPCs) such as those provided by package
-"net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream and
-is most efficient when a single Encoder is used to transmit a stream of values,
-amortizing the cost of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text7.txt b/internal/backport/go/doc/comment/testdata/text7.txt
deleted file mode 100644
index c9fb6d3..0000000
--- a/internal/backport/go/doc/comment/testdata/text7.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-{"TextPrefix": "                    "}
--- input --
-Package gob manages streams of gobs - binary values exchanged between an
-Encoder (transmitter) and a Decoder (receiver). A typical use is
-transporting arguments and results of remote procedure calls (RPCs) such as
-those provided by package "net/rpc".
-
-The implementation compiles a custom codec for each data type in the stream
-and is most efficient when a single Encoder is used to transmit a stream of
-values, amortizing the cost of compilation.
--- text --
-                    Package gob manages streams of gobs - binary values
-                    exchanged between an Encoder (transmitter) and a Decoder
-                    (receiver). A typical use is transporting arguments and
-                    results of remote procedure calls (RPCs) such as those
-                    provided by package "net/rpc".
-
-                    The implementation compiles a custom codec for each data
-                    type in the stream and is most efficient when a single
-                    Encoder is used to transmit a stream of values, amortizing
-                    the cost of compilation.
diff --git a/internal/backport/go/doc/comment/testdata/text8.txt b/internal/backport/go/doc/comment/testdata/text8.txt
deleted file mode 100644
index 560ac95..0000000
--- a/internal/backport/go/doc/comment/testdata/text8.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-{"TextWidth": 40}
--- input --
-If the arguments have version suffixes (like @latest or @v1.0.0), "go install"
-builds packages in module-aware mode, ignoring the go.mod file in the current
-directory or any parent directory, if there is one. This is useful for
-installing executables without affecting the dependencies of the main module.
-To eliminate ambiguity about which module versions are used in the build, the
-arguments must satisfy the following constraints:
-
- - Arguments must be package paths or package patterns (with "..." wildcards).
- They must not be standard packages (like fmt), meta-patterns (std, cmd,
- all), or relative or absolute file paths.
-
- - All arguments must have the same version suffix. Different queries are not
- allowed, even if they refer to the same version.
-
- - All arguments must refer to packages in the same module at the same version.
-
- - Package path arguments must refer to main packages. Pattern arguments
- will only match main packages.
-
- - No module is considered the "main" module. If the module containing
- packages named on the command line has a go.mod file, it must not contain
- directives (replace and exclude) that would cause it to be interpreted
- differently than if it were the main module. The module must not require
- a higher version of itself.
-
- - Vendor directories are not used in any module. (Vendor directories are not
- included in the module zip files downloaded by 'go install'.)
-
-If the arguments don't have version suffixes, "go install" may run in
-module-aware mode or GOPATH mode, depending on the GO111MODULE environment
-variable and the presence of a go.mod file. See 'go help modules' for details.
-If module-aware mode is enabled, "go install" runs in the context of the main
-module.
--- text --
-If the arguments have version suffixes
-(like @latest or @v1.0.0), "go install"
-builds packages in module-aware mode,
-ignoring the go.mod file in the current
-directory or any parent directory,
-if there is one. This is useful for
-installing executables without affecting
-the dependencies of the main module.
-To eliminate ambiguity about which
-module versions are used in the build,
-the arguments must satisfy the following
-constraints:
-
-  - Arguments must be package paths
-    or package patterns (with "..."
-    wildcards). They must not be
-    standard packages (like fmt),
-    meta-patterns (std, cmd, all),
-    or relative or absolute file paths.
-
-  - All arguments must have the same
-    version suffix. Different queries
-    are not allowed, even if they refer
-    to the same version.
-
-  - All arguments must refer to packages
-    in the same module at the same
-    version.
-
-  - Package path arguments must refer
-    to main packages. Pattern arguments
-    will only match main packages.
-
-  - No module is considered the "main"
-    module. If the module containing
-    packages named on the command line
-    has a go.mod file, it must not
-    contain directives (replace and
-    exclude) that would cause it to be
-    interpreted differently than if it
-    were the main module. The module
-    must not require a higher version of
-    itself.
-
-  - Vendor directories are not used in
-    any module. (Vendor directories are
-    not included in the module zip files
-    downloaded by 'go install'.)
-
-If the arguments don't have version
-suffixes, "go install" may run in
-module-aware mode or GOPATH mode,
-depending on the GO111MODULE environment
-variable and the presence of a go.mod
-file. See 'go help modules' for details.
-If module-aware mode is enabled,
-"go install" runs in the context of the
-main module.
diff --git a/internal/backport/go/doc/comment/testdata/text9.txt b/internal/backport/go/doc/comment/testdata/text9.txt
deleted file mode 100644
index 07a64aa..0000000
--- a/internal/backport/go/doc/comment/testdata/text9.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-{"TextPrefix":"|", "TextCodePrefix": "@"}
--- input --
-Hello, world
- Code block here.
--- gofmt --
-Hello, world
-
-	Code block here.
--- text --
-|Hello, world
-|
-@Code block here.
diff --git a/internal/backport/go/doc/comment/testdata/words.txt b/internal/backport/go/doc/comment/testdata/words.txt
deleted file mode 100644
index 63c7e1a..0000000
--- a/internal/backport/go/doc/comment/testdata/words.txt
+++ /dev/null
@@ -1,10 +0,0 @@
--- input --
-This is an italicword and a linkedword and Unicöde.
--- gofmt --
-This is an italicword and a linkedword and Unicöde.
--- text --
-This is an italicword and a linkedword and Unicöde.
--- markdown --
-This is an *italicword* and a [*linkedword*](https://example.com/linkedword) and Unicöde.
--- html --
-<p>This is an <i>italicword</i> and a <a href="https://example.com/linkedword"><i>linkedword</i></a> and Unicöde.
diff --git a/internal/backport/go/doc/comment/testdata_test.go b/internal/backport/go/doc/comment/testdata_test.go
deleted file mode 100644
index fa952d2..0000000
--- a/internal/backport/go/doc/comment/testdata_test.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"path/filepath"
-	"strings"
-	"testing"
-
-	"golang.org/x/tools/txtar"
-	"golang.org/x/website/internal/backport/diff"
-)
-
-func TestTestdata(t *testing.T) {
-	files, _ := filepath.Glob("testdata/*.txt")
-	if len(files) == 0 {
-		t.Fatalf("no testdata")
-	}
-	var p Parser
-	p.Words = map[string]string{
-		"italicword": "",
-		"linkedword": "https://example.com/linkedword",
-	}
-	p.LookupPackage = func(name string) (importPath string, ok bool) {
-		if name == "comment" {
-			return "go/doc/comment", true
-		}
-		return DefaultLookupPackage(name)
-	}
-	p.LookupSym = func(recv, name string) (ok bool) {
-		if recv == "Parser" && name == "Parse" ||
-			recv == "" && name == "Doc" ||
-			recv == "" && name == "NoURL" {
-			return true
-		}
-		return false
-	}
-
-	stripDollars := func(b []byte) []byte {
-		// Remove trailing $ on lines.
-		// They make it easier to see lines with trailing spaces,
-		// as well as turning them into lines without trailing spaces,
-		// in case editors remove trailing spaces.
-		return bytes.ReplaceAll(b, []byte("$\n"), []byte("\n"))
-	}
-	for _, file := range files {
-		t.Run(filepath.Base(file), func(t *testing.T) {
-			var pr Printer
-			a, err := txtar.ParseFile(file)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if len(a.Comment) > 0 {
-				err := json.Unmarshal(a.Comment, &pr)
-				if err != nil {
-					t.Fatalf("unmarshalling top json: %v", err)
-				}
-			}
-			if len(a.Files) < 1 || a.Files[0].Name != "input" {
-				t.Fatalf("first file is not %q", "input")
-			}
-			d := p.Parse(string(stripDollars(a.Files[0].Data)))
-			for _, f := range a.Files[1:] {
-				want := stripDollars(f.Data)
-				for len(want) >= 2 && want[len(want)-1] == '\n' && want[len(want)-2] == '\n' {
-					want = want[:len(want)-1]
-				}
-				var out []byte
-				switch f.Name {
-				default:
-					t.Fatalf("unknown output file %q", f.Name)
-				case "dump":
-					out = dump(d)
-				case "gofmt":
-					out = pr.Comment(d)
-				case "html":
-					out = pr.HTML(d)
-				case "markdown":
-					out = pr.Markdown(d)
-				case "text":
-					out = pr.Text(d)
-				}
-				if string(out) != string(want) {
-					t.Errorf("%s: %s", file, diff.Diff(f.Name, want, "have", out))
-				}
-			}
-		})
-	}
-}
-
-func dump(d *Doc) []byte {
-	var out bytes.Buffer
-	dumpTo(&out, 0, d)
-	return out.Bytes()
-}
-
-func dumpTo(out *bytes.Buffer, indent int, x interface{}) {
-	switch x := x.(type) {
-	default:
-		fmt.Fprintf(out, "?%T", x)
-
-	case *Doc:
-		fmt.Fprintf(out, "Doc")
-		dumpTo(out, indent+1, x.Content)
-		if len(x.Links) > 0 {
-			dumpNL(out, indent+1)
-			fmt.Fprintf(out, "Links")
-			dumpTo(out, indent+2, x.Links)
-		}
-		fmt.Fprintf(out, "\n")
-
-	case []*LinkDef:
-		for _, def := range x {
-			dumpNL(out, indent)
-			dumpTo(out, indent, def)
-		}
-
-	case *LinkDef:
-		fmt.Fprintf(out, "LinkDef Used:%v Text:%q URL:%s", x.Used, x.Text, x.URL)
-
-	case []Block:
-		for _, blk := range x {
-			dumpNL(out, indent)
-			dumpTo(out, indent, blk)
-		}
-
-	case *Heading:
-		fmt.Fprintf(out, "Heading")
-		dumpTo(out, indent+1, x.Text)
-
-	case *List:
-		fmt.Fprintf(out, "List ForceBlankBefore=%v ForceBlankBetween=%v", x.ForceBlankBefore, x.ForceBlankBetween)
-		dumpTo(out, indent+1, x.Items)
-
-	case []*ListItem:
-		for _, item := range x {
-			dumpNL(out, indent)
-			dumpTo(out, indent, item)
-		}
-
-	case *ListItem:
-		fmt.Fprintf(out, "Item Number=%q", x.Number)
-		dumpTo(out, indent+1, x.Content)
-
-	case *Paragraph:
-		fmt.Fprintf(out, "Paragraph")
-		dumpTo(out, indent+1, x.Text)
-
-	case *Code:
-		fmt.Fprintf(out, "Code")
-		dumpTo(out, indent+1, x.Text)
-
-	case []Text:
-		for _, t := range x {
-			dumpNL(out, indent)
-			dumpTo(out, indent, t)
-		}
-
-	case Plain:
-		if !strings.Contains(string(x), "\n") {
-			fmt.Fprintf(out, "Plain %q", string(x))
-		} else {
-			fmt.Fprintf(out, "Plain")
-			dumpTo(out, indent+1, string(x))
-		}
-
-	case Italic:
-		if !strings.Contains(string(x), "\n") {
-			fmt.Fprintf(out, "Italic %q", string(x))
-		} else {
-			fmt.Fprintf(out, "Italic")
-			dumpTo(out, indent+1, string(x))
-		}
-
-	case string:
-		for _, line := range strings.SplitAfter(x, "\n") {
-			if line != "" {
-				dumpNL(out, indent)
-				fmt.Fprintf(out, "%q", line)
-			}
-		}
-
-	case *Link:
-		fmt.Fprintf(out, "Link %q", x.URL)
-		dumpTo(out, indent+1, x.Text)
-
-	case *DocLink:
-		fmt.Fprintf(out, "DocLink pkg:%q, recv:%q, name:%q", x.ImportPath, x.Recv, x.Name)
-		dumpTo(out, indent+1, x.Text)
-	}
-}
-
-func dumpNL(out *bytes.Buffer, n int) {
-	out.WriteByte('\n')
-	for i := 0; i < n; i++ {
-		out.WriteByte('\t')
-	}
-}
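
A quick sketch, for illustration only: each case consumed by the deleted harness is a txtar archive whose comment holds a JSON-encoded Printer configuration, followed by an "input" file and one golden file per output format. The archive below is made up (including the TextWidth value); txtar is the same golang.org/x/tools/txtar package the deleted test imported.

package main

import (
	"fmt"

	"golang.org/x/tools/txtar"
)

func main() {
	a := txtar.Parse([]byte(`{"TextWidth": 20}
-- input --
Hello, world
-- text --
Hello, world
`))

	fmt.Printf("printer config: %s", a.Comment) // the JSON Printer settings
	for _, f := range a.Files {
		fmt.Printf("%s: %q\n", f.Name, f.Data) // "input", then the golden outputs
	}
}
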
diff --git a/internal/backport/go/doc/comment/text.go b/internal/backport/go/doc/comment/text.go
deleted file mode 100644
index 4ab2da4..0000000
--- a/internal/backport/go/doc/comment/text.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
-	"unicode/utf8"
-)
-
-// A textPrinter holds the state needed for printing a Doc as plain text.
-type textPrinter struct {
-	*Printer
-	long       strings.Builder
-	prefix     string
-	codePrefix string
-	width      int
-}
-
-// Text returns a textual formatting of the Doc.
-// See the [Printer] documentation for ways to customize the text output.
-func (p *Printer) Text(d *Doc) []byte {
-	tp := &textPrinter{
-		Printer:    p,
-		prefix:     p.TextPrefix,
-		codePrefix: p.TextCodePrefix,
-		width:      p.TextWidth,
-	}
-	if tp.codePrefix == "" {
-		tp.codePrefix = p.TextPrefix + "\t"
-	}
-	if tp.width == 0 {
-		tp.width = 80 - utf8.RuneCountInString(tp.prefix)
-	}
-
-	var out bytes.Buffer
-	for i, x := range d.Content {
-		if i > 0 && blankBefore(x) {
-			out.WriteString(tp.prefix)
-			writeNL(&out)
-		}
-		tp.block(&out, x)
-	}
-	anyUsed := false
-	for _, def := range d.Links {
-		if def.Used {
-			anyUsed = true
-			break
-		}
-	}
-	if anyUsed {
-		writeNL(&out)
-		for _, def := range d.Links {
-			if def.Used {
-				fmt.Fprintf(&out, "[%s]: %s\n", def.Text, def.URL)
-			}
-		}
-	}
-	return out.Bytes()
-}
-
-// writeNL calls out.WriteByte('\n')
-// but first trims trailing spaces on the previous line.
-func writeNL(out *bytes.Buffer) {
-	// Trim trailing spaces.
-	data := out.Bytes()
-	n := 0
-	for n < len(data) && (data[len(data)-n-1] == ' ' || data[len(data)-n-1] == '\t') {
-		n++
-	}
-	if n > 0 {
-		out.Truncate(len(data) - n)
-	}
-	out.WriteByte('\n')
-}
-
-// block prints the block x to out.
-func (p *textPrinter) block(out *bytes.Buffer, x Block) {
-	switch x := x.(type) {
-	default:
-		fmt.Fprintf(out, "?%T\n", x)
-
-	case *Paragraph:
-		out.WriteString(p.prefix)
-		p.text(out, "", x.Text)
-
-	case *Heading:
-		out.WriteString(p.prefix)
-		out.WriteString("# ")
-		p.text(out, "", x.Text)
-
-	case *Code:
-		text := x.Text
-		for text != "" {
-			var line string
-			line, text, _ = stringsCut(text, "\n")
-			if line != "" {
-				out.WriteString(p.codePrefix)
-				out.WriteString(line)
-			}
-			writeNL(out)
-		}
-
-	case *List:
-		loose := x.BlankBetween()
-		for i, item := range x.Items {
-			if i > 0 && loose {
-				out.WriteString(p.prefix)
-				writeNL(out)
-			}
-			out.WriteString(p.prefix)
-			out.WriteString(" ")
-			if item.Number == "" {
-				out.WriteString(" - ")
-			} else {
-				out.WriteString(item.Number)
-				out.WriteString(". ")
-			}
-			for i, blk := range item.Content {
-				const fourSpace = "    "
-				if i > 0 {
-					writeNL(out)
-					out.WriteString(p.prefix)
-					out.WriteString(fourSpace)
-				}
-				p.text(out, fourSpace, blk.(*Paragraph).Text)
-			}
-		}
-	}
-}
-
-// text prints the text sequence x to out.
-func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) {
-	p.oneLongLine(&p.long, x)
-	words := strings.Fields(p.long.String())
-	p.long.Reset()
-
-	var seq []int
-	if p.width < 0 || len(words) == 0 {
-		seq = []int{0, len(words)} // one long line
-	} else {
-		seq = wrap(words, p.width-utf8.RuneCountInString(indent))
-	}
-	for i := 0; i+1 < len(seq); i++ {
-		if i > 0 {
-			out.WriteString(p.prefix)
-			out.WriteString(indent)
-		}
-		for j, w := range words[seq[i]:seq[i+1]] {
-			if j > 0 {
-				out.WriteString(" ")
-			}
-			out.WriteString(w)
-		}
-		writeNL(out)
-	}
-}
-
-// oneLongLine prints the text sequence x to out as one long line,
-// without worrying about line wrapping.
-// Explicit links have the [ ] dropped to improve readability.
-func (p *textPrinter) oneLongLine(out *strings.Builder, x []Text) {
-	for _, t := range x {
-		switch t := t.(type) {
-		case Plain:
-			out.WriteString(string(t))
-		case Italic:
-			out.WriteString(string(t))
-		case *Link:
-			p.oneLongLine(out, t.Text)
-		case *DocLink:
-			p.oneLongLine(out, t.Text)
-		}
-	}
-}
-
-// wrap wraps words into lines of at most max runes,
-// minimizing the sum of the squares of the leftover lengths
-// at the end of each line (except the last, of course),
-// with a preference for ending lines at punctuation (.,:;).
-//
-// The returned slice gives the indexes of the first words
-// on each line in the wrapped text with a final entry of len(words).
-// Thus the lines are words[seq[0]:seq[1]], words[seq[1]:seq[2]],
-// ..., words[seq[len(seq)-2]:seq[len(seq)-1]].
-//
-// The implementation runs in O(n log n) time, where n = len(words),
-// using the algorithm described in D. S. Hirschberg and L. L. Larmore,
-// “[The least weight subsequence problem],” FOCS 1985, pp. 137-143.
-//
-// [The least weight subsequence problem]: https://doi.org/10.1109/SFCS.1985.60
-func wrap(words []string, max int) (seq []int) {
-	// The algorithm requires that our scoring function be concave,
-	// meaning that for all i₀ ≤ i₁ < j₀ ≤ j₁,
-	// weight(i₀, j₀) + weight(i₁, j₁) ≤ weight(i₀, j₁) + weight(i₁, j₀).
-	//
-	// Our weights are two-element pairs [hi, lo]
-	// ordered by elementwise comparison.
-	// The hi entry counts the weight for lines that are longer than max,
-	// and the lo entry counts the weight for lines that are not.
-	// This forces the algorithm to first minimize the number of lines
-	// that are longer than max, which correspond to lines with
-	// single very long words. Having done that, it can move on to
-	// minimizing the lo score, which is more interesting.
-	//
-	// The lo score is the sum for each line of the square of the
-	// number of spaces remaining at the end of the line and a
-	// penalty of 64 given out for not ending the line in a
-	// punctuation character (.,:;).
-	// The penalty is somewhat arbitrarily chosen by trying
-	// different amounts and judging how nice the wrapped text looks.
-	// Roughly speaking, using 64 means that we are willing to
-	// end a line with eight blank spaces in order to end at a
-	// punctuation character, even if the next word would fit in
-	// those spaces.
-	//
-	// We care about ending in punctuation characters because
-	// it makes the text easier to skim if not too many sentences
-	// or phrases begin with a single word on the previous line.
-
-	// A score is the score (also called weight) for a given line.
-	// add and cmp add and compare scores.
-	type score struct {
-		hi int64
-		lo int64
-	}
-	add := func(s, t score) score { return score{s.hi + t.hi, s.lo + t.lo} }
-	cmp := func(s, t score) int {
-		switch {
-		case s.hi < t.hi:
-			return -1
-		case s.hi > t.hi:
-			return +1
-		case s.lo < t.lo:
-			return -1
-		case s.lo > t.lo:
-			return +1
-		}
-		return 0
-	}
-
-	// total[j] is the total number of runes
-	// (including separating spaces) in words[:j].
-	total := make([]int, len(words)+1)
-	total[0] = 0
-	for i, s := range words {
-		total[1+i] = total[i] + utf8.RuneCountInString(s) + 1
-	}
-
-	// weight returns weight(i, j).
-	weight := func(i, j int) score {
-		// On the last line, there is zero weight for being too short.
-		n := total[j] - 1 - total[i]
-		if j == len(words) && n <= max {
-			return score{0, 0}
-		}
-
-		// Otherwise the weight is the penalty plus the square of the number of
-		// characters remaining on the line or by which the line goes over.
-		// In the latter case, that value goes in the hi part of the score.
-		// (See note above.)
-		p := wrapPenalty(words[j-1])
-		v := int64(max-n) * int64(max-n)
-		if n > max {
-			return score{v, p}
-		}
-		return score{0, v + p}
-	}
-
-	// The rest of this function is “The Basic Algorithm” from
-	// Hirschberg and Larmore's conference paper,
-	// using the same names as in the paper.
-	f := []score{{0, 0}}
-	g := func(i, j int) score { return add(f[i], weight(i, j)) }
-
-	bridge := func(a, b, c int) bool {
-		k := c + sort.Search(len(words)+1-c, func(k int) bool {
-			k += c
-			return cmp(g(a, k), g(b, k)) > 0
-		})
-		if k > len(words) {
-			return true
-		}
-		return cmp(g(c, k), g(b, k)) <= 0
-	}
-
-	// d is a one-ended deque implemented as a slice.
-	d := make([]int, 1, len(words))
-	d[0] = 0
-	bestleft := make([]int, 1, len(words))
-	bestleft[0] = -1
-	for m := 1; m < len(words); m++ {
-		f = append(f, g(d[0], m))
-		bestleft = append(bestleft, d[0])
-		for len(d) > 1 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 {
-			d = d[1:] // “Retire”
-		}
-		for len(d) > 1 && bridge(d[len(d)-2], d[len(d)-1], m) {
-			d = d[:len(d)-1] // “Fire”
-		}
-		if cmp(g(m, len(words)), g(d[len(d)-1], len(words))) < 0 {
-			d = append(d, m) // “Hire”
-			// The next few lines are not in the paper but are necessary
-			// to handle two-word inputs correctly. It appears to be
-			// just a bug in the paper's pseudocode.
-			if len(d) == 2 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 {
-				d = d[1:]
-			}
-		}
-	}
-	bestleft = append(bestleft, d[0])
-
-	// Recover least weight sequence from bestleft.
-	n := 1
-	for m := len(words); m > 0; m = bestleft[m] {
-		n++
-	}
-	seq = make([]int, n)
-	for m := len(words); m > 0; m = bestleft[m] {
-		n--
-		seq[n] = m
-	}
-	return seq
-}
-
-// wrapPenalty is the penalty for inserting a line break after word s.
-func wrapPenalty(s string) int64 {
-	switch s[len(s)-1] {
-	case '.', ',', ':', ';':
-		return 0
-	}
-	return 64
-}
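
A minimal sketch of the per-line score described in the wrap comments above: the square of the unused columns plus a 64-point penalty unless the line ends in one of .,:; (illustration only; the words and width below are invented).

package main

import (
	"fmt"
	"unicode/utf8"
)

// lineScore mirrors the scoring of a single non-final line that fits
// within max columns, as described in the deleted text.go.
func lineScore(words []string, max int) int64 {
	n := -1 // runes used, counting one separating space per word
	for _, w := range words {
		n += 1 + utf8.RuneCountInString(w)
	}
	var penalty int64 = 64
	switch last := words[len(words)-1]; last[len(last)-1] {
	case '.', ',', ':', ';':
		penalty = 0
	}
	return int64(max-n)*int64(max-n) + penalty
}

func main() {
	fmt.Println(lineScore([]string{"ends", "with", "period."}, 20))    // (20-17)² + 0 = 9
	fmt.Println(lineScore([]string{"no", "punctuation", "here!"}, 20)) // (20-20)² + 64; '!' earns no discount
}
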
diff --git a/internal/backport/go/doc/comment/wrap_test.go b/internal/backport/go/doc/comment/wrap_test.go
deleted file mode 100644
index f9802c9..0000000
--- a/internal/backport/go/doc/comment/wrap_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package comment
-
-import (
-	"flag"
-	"fmt"
-	"math/rand"
-	"testing"
-	"time"
-	"unicode/utf8"
-)
-
-var wrapSeed = flag.Int64("wrapseed", 0, "use `seed` for wrap test (default auto-seeds)")
-
-func TestWrap(t *testing.T) {
-	if *wrapSeed == 0 {
-		*wrapSeed = time.Now().UnixNano()
-	}
-	t.Logf("-wrapseed=%#x\n", *wrapSeed)
-	r := rand.New(rand.NewSource(*wrapSeed))
-
-	// Generate words of random length.
-	s := "1234567890αβcdefghijklmnopqrstuvwxyz"
-	sN := utf8.RuneCountInString(s)
-	var words []string
-	for i := 0; i < 100; i++ {
-		n := 1 + r.Intn(sN-1)
-		if n >= 12 {
-			n++ // extra byte for β
-		}
-		if n >= 11 {
-			n++ // extra byte for α
-		}
-		words = append(words, s[:n])
-	}
-
-	for n := 1; n <= len(words) && !t.Failed(); n++ {
-		t.Run(fmt.Sprint("n=", n), func(t *testing.T) {
-			words := words[:n]
-			t.Logf("words: %v", words)
-			for max := 1; max < 100 && !t.Failed(); max++ {
-				t.Run(fmt.Sprint("max=", max), func(t *testing.T) {
-					seq := wrap(words, max)
-
-					// Compute score for seq.
-					start := 0
-					score := int64(0)
-					if len(seq) == 0 {
-						t.Fatalf("wrap seq is empty")
-					}
-					if seq[0] != 0 {
-						t.Fatalf("wrap seq does not start with 0")
-					}
-					for _, n := range seq[1:] {
-						if n <= start {
-							t.Fatalf("wrap seq is non-increasing: %v", seq)
-						}
-						if n > len(words) {
-							t.Fatalf("wrap seq contains %d > %d: %v", n, len(words), seq)
-						}
-						size := -1
-						for _, s := range words[start:n] {
-							size += 1 + utf8.RuneCountInString(s)
-						}
-						if n-start == 1 && size >= max {
-							// no score
-						} else if size > max {
-							t.Fatalf("wrap used overlong line %d:%d: %v", start, n, words[start:n])
-						} else if n != len(words) {
-							score += int64(max-size)*int64(max-size) + wrapPenalty(words[n-1])
-						}
-						start = n
-					}
-					if start != len(words) {
-						t.Fatalf("wrap seq does not use all words (%d < %d): %v", start, len(words), seq)
-					}
-
-					// Check that score matches slow reference implementation.
-					slowSeq, slowScore := wrapSlow(words, max)
-					if score != slowScore {
-						t.Fatalf("wrap score = %d != wrapSlow score %d\nwrap: %v\nslow: %v", score, slowScore, seq, slowSeq)
-					}
-				})
-			}
-		})
-	}
-}
-
-// wrapSlow is an O(n²) reference implementation for wrap.
-// It returns a minimal-score sequence along with the score.
-// It is OK if wrap returns a different sequence as long as that
-// sequence has the same score.
-func wrapSlow(words []string, max int) (seq []int, score int64) {
-	// Quadratic dynamic programming algorithm for line wrapping problem.
-	// best[i] tracks the best score possible for words[:i],
-	// assuming that for i < len(words) the line breaks after those words.
-	// bestleft[i] tracks the previous line break for best[i].
-	best := make([]int64, len(words)+1)
-	bestleft := make([]int, len(words)+1)
-	best[0] = 0
-	for i, w := range words {
-		if utf8.RuneCountInString(w) >= max {
-			// Overlong word must appear on line by itself. No effect on score.
-			best[i+1] = best[i]
-			continue
-		}
-		best[i+1] = 1e18
-		p := wrapPenalty(w)
-		n := -1
-		for j := i; j >= 0; j-- {
-			n += 1 + utf8.RuneCountInString(words[j])
-			if n > max {
-				break
-			}
-			line := int64(n-max)*int64(n-max) + p
-			if i == len(words)-1 {
-				line = 0 // no score for final line being too short
-			}
-			s := best[j] + line
-			if best[i+1] > s {
-				best[i+1] = s
-				bestleft[i+1] = j
-			}
-		}
-	}
-
-	// Recover least weight sequence from bestleft.
-	n := 1
-	for m := len(words); m > 0; m = bestleft[m] {
-		n++
-	}
-	seq = make([]int, n)
-	for m := len(words); m > 0; m = bestleft[m] {
-		n--
-		seq[n] = m
-	}
-	return seq, best[len(words)]
-}
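
A minimal sketch, for illustration only: both the fast wrap and the wrapSlow reference above return break points in the convention documented on wrap, where line i is words[seq[i]:seq[i+1]]. The seq below is hand-picked, not computed.

package main

import (
	"fmt"
	"strings"
)

func main() {
	words := strings.Fields("the quick brown fox jumps over the lazy dog")
	seq := []int{0, 4, 7, 9} // hypothetical break points for a narrow width

	// Reassemble the wrapped lines from the break points.
	for i := 0; i+1 < len(seq); i++ {
		fmt.Println(strings.Join(words[seq[i]:seq[i+1]], " "))
	}
	// the quick brown fox
	// jumps over the
	// lazy dog
}
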
diff --git a/internal/backport/go/doc/comment_test.go b/internal/backport/go/doc/comment_test.go
deleted file mode 100644
index fba8a75..0000000
--- a/internal/backport/go/doc/comment_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"bytes"
-	"testing"
-
-	"golang.org/x/website/internal/backport/diff"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func TestComment(t *testing.T) {
-	fset := token.NewFileSet()
-	pkgs, err := parser.ParseDir(fset, "testdata/pkgdoc", nil, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if pkgs["pkgdoc"] == nil {
-		t.Fatal("missing package pkgdoc")
-	}
-	pkg := New(pkgs["pkgdoc"], "testdata/pkgdoc", 0)
-
-	var (
-		input           = "[T] and [U] are types, and [T.M] is a method, but [V] is a broken link. [rand.Int] and [crand.Reader] are things.\n"
-		wantHTML        = `<p><a href="#T">T</a> and <a href="#U">U</a> are types, and <a href="#T.M">T.M</a> is a method, but [V] is a broken link. <a href="/math/rand#Int">rand.Int</a> and <a href="/crypto/rand#Reader">crand.Reader</a> are things.` + "\n"
-		wantOldHTML     = "<p>[T] and [U] are <i>types</i>, and [T.M] is a method, but [V] is a broken link. [rand.Int] and [crand.Reader] are things.\n"
-		wantMarkdown    = "[T](#T) and [U](#U) are types, and [T.M](#T.M) is a method, but \\[V] is a broken link. [rand.Int](/math/rand#Int) and [crand.Reader](/crypto/rand#Reader) are things.\n"
-		wantText        = "T and U are types, and T.M is a method, but [V] is a broken link. rand.Int and\ncrand.Reader are things.\n"
-		wantOldText     = "[T] and [U] are types, and [T.M] is a method, but [V] is a broken link.\n[rand.Int] and [crand.Reader] are things.\n"
-		wantSynopsis    = "T and U are types, and T.M is a method, but [V] is a broken link."
-		wantOldSynopsis = "[T] and [U] are types, and [T.M] is a method, but [V] is a broken link."
-	)
-
-	if b := pkg.HTML(input); string(b) != wantHTML {
-		t.Errorf("%s", diff.Diff("pkg.HTML", b, "want", []byte(wantHTML)))
-	}
-	if b := pkg.Markdown(input); string(b) != wantMarkdown {
-		t.Errorf("%s", diff.Diff("pkg.Markdown", b, "want", []byte(wantMarkdown)))
-	}
-	if b := pkg.Text(input); string(b) != wantText {
-		t.Errorf("%s", diff.Diff("pkg.Text", b, "want", []byte(wantText)))
-	}
-	if b := pkg.Synopsis(input); b != wantSynopsis {
-		t.Errorf("%s", diff.Diff("pkg.Synopsis", []byte(b), "want", []byte(wantSynopsis)))
-	}
-
-	var buf bytes.Buffer
-
-	buf.Reset()
-	ToHTML(&buf, input, map[string]string{"types": ""})
-	if b := buf.Bytes(); string(b) != wantOldHTML {
-		t.Errorf("%s", diff.Diff("ToHTML", b, "want", []byte(wantOldHTML)))
-	}
-
-	buf.Reset()
-	ToText(&buf, input, "", "\t", 80)
-	if b := buf.Bytes(); string(b) != wantOldText {
-		t.Errorf("%s", diff.Diff("ToText", b, "want", []byte(wantOldText)))
-	}
-
-	if b := Synopsis(input); b != wantOldSynopsis {
-		t.Errorf("%s", diff.Diff("Synopsis", []byte(b), "want", []byte(wantOldSynopsis)))
-	}
-}
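
A minimal sketch, for illustration only: the [T]-style doc links asserted above are handled by go/doc/comment in the standard library. With no LookupPackage hook, the parser falls back to recognizing single-element standard-library import paths such as fmt, and anything it cannot resolve keeps its brackets, like [V] above.

package main

import (
	"fmt"
	"go/doc/comment"
)

func main() {
	var p comment.Parser // nil LookupPackage falls back to DefaultLookupPackage
	d := p.Parse("[fmt.Println] is linked, but [V] is a broken link.\n")

	var pr comment.Printer // doc links point at https://pkg.go.dev by default
	fmt.Print(string(pr.Markdown(d)))
	// Prints roughly:
	// [fmt.Println](https://pkg.go.dev/fmt#Println) is linked, but \[V] is a broken link.
}
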
diff --git a/internal/backport/go/doc/doc.go b/internal/backport/go/doc/doc.go
deleted file mode 100644
index ba1dd39..0000000
--- a/internal/backport/go/doc/doc.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package doc extracts source code documentation from a Go AST.
-package doc
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc/comment"
-	"golang.org/x/website/internal/backport/go/token"
-	"strings"
-)
-
-// Package is the documentation for an entire package.
-type Package struct {
-	Doc        string
-	Name       string
-	ImportPath string
-	Imports    []string
-	Filenames  []string
-	Notes      map[string][]*Note
-
-	// Deprecated: For backward compatibility Bugs is still populated,
-	// but all new code should use Notes instead.
-	Bugs []string
-
-	// declarations
-	Consts []*Value
-	Types  []*Type
-	Vars   []*Value
-	Funcs  []*Func
-
-	// Examples is a sorted list of examples associated with
-	// the package. Examples are extracted from _test.go files
-	// provided to NewFromFiles.
-	Examples []*Example
-
-	importByName map[string]string
-	syms         map[string]bool
-}
-
-// Value is the documentation for a (possibly grouped) var or const declaration.
-type Value struct {
-	Doc   string
-	Names []string // var or const names in declaration order
-	Decl  *ast.GenDecl
-
-	order int
-}
-
-// Type is the documentation for a type declaration.
-type Type struct {
-	Doc  string
-	Name string
-	Decl *ast.GenDecl
-
-	// associated declarations
-	Consts  []*Value // sorted list of constants of (mostly) this type
-	Vars    []*Value // sorted list of variables of (mostly) this type
-	Funcs   []*Func  // sorted list of functions returning this type
-	Methods []*Func  // sorted list of methods (including embedded ones) of this type
-
-	// Examples is a sorted list of examples associated with
-	// this type. Examples are extracted from _test.go files
-	// provided to NewFromFiles.
-	Examples []*Example
-}
-
-// Func is the documentation for a func declaration.
-type Func struct {
-	Doc  string
-	Name string
-	Decl *ast.FuncDecl
-
-	// methods
-	// (for functions, these fields have the respective zero value)
-	Recv  string // actual   receiver "T" or "*T"
-	Orig  string // original receiver "T" or "*T"
-	Level int    // embedding level; 0 means not embedded
-
-	// Examples is a sorted list of examples associated with this
-	// function or method. Examples are extracted from _test.go files
-	// provided to NewFromFiles.
-	Examples []*Example
-}
-
-// A Note represents a marked comment starting with "MARKER(uid): note body".
-// Any note with a marker of 2 or more upper case [A-Z] letters and a uid of
-// at least one character is recognized. The ":" following the uid is optional.
-// Notes are collected in the Package.Notes map indexed by the notes marker.
-type Note struct {
-	Pos, End token.Pos // position range of the comment containing the marker
-	UID      string    // uid found with the marker
-	Body     string    // note body text
-}
-
-// Mode values control the operation of New and NewFromFiles.
-type Mode int
-
-const (
-	// AllDecls says to extract documentation for all package-level
-	// declarations, not just exported ones.
-	AllDecls Mode = 1 << iota
-
-	// AllMethods says to show all embedded methods, not just the ones of
-	// invisible (unexported) anonymous fields.
-	AllMethods
-
-	// PreserveAST says to leave the AST unmodified. Originally, pieces of
-	// the AST such as function bodies were nil-ed out to save memory in
-	// godoc, but not all programs want that behavior.
-	PreserveAST
-)
-
-// New computes the package documentation for the given package AST.
-// New takes ownership of the AST pkg and may edit or overwrite it.
-// To have the Examples fields populated, use NewFromFiles and include
-// the package's _test.go files.
-func New(pkg *ast.Package, importPath string, mode Mode) *Package {
-	var r reader
-	r.readPackage(pkg, mode)
-	r.computeMethodSets()
-	r.cleanupTypes()
-	p := &Package{
-		Doc:        r.doc,
-		Name:       pkg.Name,
-		ImportPath: importPath,
-		Imports:    sortedKeys(r.imports),
-		Filenames:  r.filenames,
-		Notes:      r.notes,
-		Bugs:       noteBodies(r.notes["BUG"]),
-		Consts:     sortedValues(r.values, token.CONST),
-		Types:      sortedTypes(r.types, mode&AllMethods != 0),
-		Vars:       sortedValues(r.values, token.VAR),
-		Funcs:      sortedFuncs(r.funcs, true),
-
-		importByName: r.importByName,
-		syms:         make(map[string]bool),
-	}
-
-	p.collectValues(p.Consts)
-	p.collectValues(p.Vars)
-	p.collectTypes(p.Types)
-	p.collectFuncs(p.Funcs)
-
-	return p
-}
-
-func (p *Package) collectValues(values []*Value) {
-	for _, v := range values {
-		for _, name := range v.Names {
-			p.syms[name] = true
-		}
-	}
-}
-
-func (p *Package) collectTypes(types []*Type) {
-	for _, t := range types {
-		if p.syms[t.Name] {
-			// Shouldn't be any cycles but stop just in case.
-			continue
-		}
-		p.syms[t.Name] = true
-		p.collectValues(t.Consts)
-		p.collectValues(t.Vars)
-		p.collectFuncs(t.Funcs)
-		p.collectFuncs(t.Methods)
-	}
-}
-
-func (p *Package) collectFuncs(funcs []*Func) {
-	for _, f := range funcs {
-		if f.Recv != "" {
-			p.syms[strings.TrimPrefix(f.Recv, "*")+"."+f.Name] = true
-		} else {
-			p.syms[f.Name] = true
-		}
-	}
-}
-
-// NewFromFiles computes documentation for a package.
-//
-// The package is specified by a list of *ast.Files and corresponding
-// file set, which must not be nil.
-// NewFromFiles uses all provided files when computing documentation,
-// so it is the caller's responsibility to provide only the files that
-// match the desired build context. "go/build".Context.MatchFile can
-// be used for determining whether a file matches a build context with
-// the desired GOOS and GOARCH values, and other build constraints.
-// The import path of the package is specified by importPath.
-//
-// Examples found in _test.go files are associated with the corresponding
-// type, function, method, or the package, based on their name.
-// If the example has a suffix in its name, it is set in the
-// Example.Suffix field. Examples with malformed names are skipped.
-//
-// Optionally, a single extra argument of type Mode can be provided to
-// control low-level aspects of the documentation extraction behavior.
-//
-// NewFromFiles takes ownership of the AST files and may edit them,
-// unless the PreserveAST Mode bit is on.
-func NewFromFiles(fset *token.FileSet, files []*ast.File, importPath string, opts ...interface{}) (*Package, error) {
-	// Check for invalid API usage.
-	if fset == nil {
-		panic(fmt.Errorf("doc.NewFromFiles: no token.FileSet provided (fset == nil)"))
-	}
-	var mode Mode
-	switch len(opts) { // There can only be 0 or 1 options, so a simple switch works for now.
-	case 0:
-		// Nothing to do.
-	case 1:
-		m, ok := opts[0].(Mode)
-		if !ok {
-			panic(fmt.Errorf("doc.NewFromFiles: option argument type must be doc.Mode"))
-		}
-		mode = m
-	default:
-		panic(fmt.Errorf("doc.NewFromFiles: there must not be more than 1 option argument"))
-	}
-
-	// Collect .go and _test.go files.
-	var (
-		goFiles     = make(map[string]*ast.File)
-		testGoFiles []*ast.File
-	)
-	for i := range files {
-		f := fset.File(files[i].Pos())
-		if f == nil {
-			return nil, fmt.Errorf("file files[%d] is not found in the provided file set", i)
-		}
-		switch name := f.Name(); {
-		case strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go"):
-			goFiles[name] = files[i]
-		case strings.HasSuffix(name, "_test.go"):
-			testGoFiles = append(testGoFiles, files[i])
-		default:
-			return nil, fmt.Errorf("file files[%d] filename %q does not have a .go extension", i, name)
-		}
-	}
-
-	// TODO(dmitshur,gri): A relatively high level call to ast.NewPackage with a simpleImporter
-	// ast.Importer implementation is made below. It might be possible to short-circuit and simplify.
-
-	// Compute package documentation.
-	pkg, _ := ast.NewPackage(fset, goFiles, simpleImporter, nil) // Ignore errors that can happen due to unresolved identifiers.
-	p := New(pkg, importPath, mode)
-	classifyExamples(p, Examples(testGoFiles...))
-	return p, nil
-}
-
-// simpleImporter returns a (dummy) package object named by the last path
-// component of the provided package path (as is the convention for packages).
-// This is sufficient to resolve package identifiers without doing an actual
-// import. It never returns an error.
-func simpleImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
-	pkg := imports[path]
-	if pkg == nil {
-		// note that strings.LastIndex returns -1 if there is no "/"
-		pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:])
-		pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import
-		imports[path] = pkg
-	}
-	return pkg, nil
-}
-
-// lookupSym reports whether the package has a given symbol or method.
-//
-// If recv == "", lookupSym reports whether the package has a top-level
-// const, func, type, or var named name.
-//
-// If recv != "", lookupSym reports whether the package has a type
-// named recv with a method named name.
-func (p *Package) lookupSym(recv, name string) bool {
-	if recv != "" {
-		return p.syms[recv+"."+name]
-	}
-	return p.syms[name]
-}
-
-// lookupPackage returns the import path identified by name
-// in the given package. If name uniquely identifies a single import,
-// then lookupPackage returns that import.
-// If multiple packages are imported as name, importPath returns "", false.
-// Otherwise, if name is the name of p itself, importPath returns "", true,
-// to signal a reference to p.
-// Otherwise, importPath returns "", false.
-func (p *Package) lookupPackage(name string) (importPath string, ok bool) {
-	if path, ok := p.importByName[name]; ok {
-		if path == "" {
-			return "", false // multiple imports used the name
-		}
-		return path, true // found import
-	}
-	if p.Name == name {
-		return "", true // allow reference to this package
-	}
-	return "", false // unknown name
-}
-
-// Parser returns a doc comment parser configured
-// for parsing doc comments from package p.
-// Each call returns a new parser, so that the caller may
-// customize it before use.
-func (p *Package) Parser() *comment.Parser {
-	return &comment.Parser{
-		LookupPackage: p.lookupPackage,
-		LookupSym:     p.lookupSym,
-	}
-}
-
-// Printer returns a doc comment printer configured
-// for printing doc comments from package p.
-// Each call returns a new printer, so that the caller may
-// customize it before use.
-func (p *Package) Printer() *comment.Printer {
-	// No customization today, but having p.Printer()
-	// gives us flexibility in the future, and it is convenient for callers.
-	return &comment.Printer{}
-}
-
-// HTML returns formatted HTML for the doc comment text.
-//
-// To customize details of the HTML, use [Package.Printer]
-// to obtain a [comment.Printer], and configure it
-// before calling its HTML method.
-func (p *Package) HTML(text string) []byte {
-	return p.Printer().HTML(p.Parser().Parse(text))
-}
-
-// Markdown returns formatted Markdown for the doc comment text.
-//
-// To customize details of the Markdown, use [Package.Printer]
-// to obtain a [comment.Printer], and configure it
-// before calling its Markdown method.
-func (p *Package) Markdown(text string) []byte {
-	return p.Printer().Markdown(p.Parser().Parse(text))
-}
-
-// Text returns formatted text for the doc comment text,
-// wrapped to 80 Unicode code points and using tabs for
-// code block indentation.
-//
-// To customize details of the formatting, use [Package.Printer]
-// to obtain a [comment.Printer], and configure it
-// before calling its Text method.
-func (p *Package) Text(text string) []byte {
-	return p.Printer().Text(p.Parser().Parse(text))
-}
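
A minimal sketch, for illustration only: on Go 1.19+ the same entry points are available from go/doc in the standard library, which is what the rest of this CL switches to. The demo source below is invented.

package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

func main() {
	const src = `// Package demo is a tiny example package.
package demo

// Answer is the canonical answer. See [Question] for context.
const Answer = 42

// Question is left as an exercise.
var Question string
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// NewFromFiles computes the documentation; [Question]-style doc links
	// resolve against the package's own declarations.
	p, err := doc.NewFromFiles(fset, []*ast.File{f}, "example.com/demo")
	if err != nil {
		panic(err)
	}

	fmt.Println("package:", p.Name)
	fmt.Print(string(p.Text(p.Doc))) // package comment, wrapped for plain text
	for _, c := range p.Consts {
		fmt.Println(c.Names, "-", p.Synopsis(c.Doc))
	}
}
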
diff --git a/internal/backport/go/doc/doc_test.go b/internal/backport/go/doc/doc_test.go
deleted file mode 100644
index 8448fb3..0000000
--- a/internal/backport/go/doc/doc_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-	"testing"
-	"text/template"
-
-	"golang.org/x/website/internal/backport/diff"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-var update = flag.Bool("update", false, "update golden (.out) files")
-var files = flag.String("files", "", "consider only Go test files matching this regular expression")
-
-const dataDir = "testdata"
-
-var templateTxt = readTemplate("template.txt")
-
-func readTemplate(filename string) *template.Template {
-	t := template.New(filename)
-	t.Funcs(template.FuncMap{
-		"node":     nodeFmt,
-		"synopsis": synopsisFmt,
-		"indent":   indentFmt,
-	})
-	return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
-}
-
-func nodeFmt(node interface{}, fset *token.FileSet) string {
-	var buf bytes.Buffer
-	printer.Fprint(&buf, fset, node)
-	return strings.ReplaceAll(strings.TrimSpace(buf.String()), "\n", "\n\t")
-}
-
-func synopsisFmt(s string) string {
-	const n = 64
-	if len(s) > n {
-		// cut off excess text and go back to a word boundary
-		s = s[0:n]
-		if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
-			s = s[0:i]
-		}
-		s = strings.TrimSpace(s) + " ..."
-	}
-	return "// " + strings.ReplaceAll(s, "\n", " ")
-}
-
-func indentFmt(indent, s string) string {
-	end := ""
-	if strings.HasSuffix(s, "\n") {
-		end = "\n"
-		s = s[:len(s)-1]
-	}
-	return indent + strings.ReplaceAll(s, "\n", "\n"+indent) + end
-}
-
-func isGoFile(fi fs.FileInfo) bool {
-	name := fi.Name()
-	return !fi.IsDir() &&
-		len(name) > 0 && name[0] != '.' && // ignore .files
-		filepath.Ext(name) == ".go"
-}
-
-type bundle struct {
-	*Package
-	FSet *token.FileSet
-}
-
-func test(t *testing.T, mode Mode) {
-	// determine file filter
-	filter := isGoFile
-	if *files != "" {
-		rx, err := regexp.Compile(*files)
-		if err != nil {
-			t.Fatal(err)
-		}
-		filter = func(fi fs.FileInfo) bool {
-			return isGoFile(fi) && rx.MatchString(fi.Name())
-		}
-	}
-
-	// get packages
-	fset := token.NewFileSet()
-	pkgs, err := parser.ParseDir(fset, dataDir, filter, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// test packages
-	for _, pkg := range pkgs {
-		t.Run(pkg.Name, func(t *testing.T) {
-			importPath := dataDir + "/" + pkg.Name
-			var files []*ast.File
-			for _, f := range pkg.Files {
-				files = append(files, f)
-			}
-			doc, err := NewFromFiles(fset, files, importPath, mode)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			// golden files always use / in filenames - canonicalize them
-			for i, filename := range doc.Filenames {
-				doc.Filenames[i] = filepath.ToSlash(filename)
-			}
-
-			// print documentation
-			var buf bytes.Buffer
-			if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
-				t.Fatal(err)
-			}
-			got := buf.Bytes()
-
-			// update golden file if necessary
-			golden := filepath.Join(dataDir, fmt.Sprintf("%s.%d.golden", pkg.Name, mode))
-			if *update {
-				err := os.WriteFile(golden, got, 0644)
-				if err != nil {
-					t.Fatal(err)
-				}
-			}
-
-			// get golden file
-			want, err := os.ReadFile(golden)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			// compare
-			if !bytes.Equal(got, want) {
-				t.Errorf("package %s\n\t%s", pkg.Name, diff.Diff("got", got, "want", want))
-			}
-		})
-	}
-}
-
-func Test(t *testing.T) {
-	t.Run("default", func(t *testing.T) { test(t, 0) })
-	t.Run("AllDecls", func(t *testing.T) { test(t, AllDecls) })
-	t.Run("AllMethods", func(t *testing.T) { test(t, AllMethods) })
-}
-
-/* generics
-
-func TestFuncs(t *testing.T) {
-	fset := token.NewFileSet()
-	file, err := parser.ParseFile(fset, "funcs.go", strings.NewReader(funcsTestFile), parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	doc, err := NewFromFiles(fset, []*ast.File{file}, "importPath", Mode(0))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for _, f := range doc.Funcs {
-		f.Decl = nil
-	}
-	for _, ty := range doc.Types {
-		for _, f := range ty.Funcs {
-			f.Decl = nil
-		}
-		for _, m := range ty.Methods {
-			m.Decl = nil
-		}
-	}
-
-	compareFuncs := func(t *testing.T, msg string, got, want *Func) {
-		// ignore Decl and Examples
-		got.Decl = nil
-		got.Examples = nil
-		if !(got.Doc == want.Doc &&
-			got.Name == want.Name &&
-			got.Recv == want.Recv &&
-			got.Orig == want.Orig &&
-			got.Level == want.Level) {
-			t.Errorf("%s:\ngot  %+v\nwant %+v", msg, got, want)
-		}
-	}
-
-	compareSlices(t, "Funcs", doc.Funcs, funcsPackage.Funcs, compareFuncs)
-	compareSlices(t, "Types", doc.Types, funcsPackage.Types, func(t *testing.T, msg string, got, want *Type) {
-		if got.Name != want.Name {
-			t.Errorf("%s.Name: got %q, want %q", msg, got.Name, want.Name)
-		} else {
-			compareSlices(t, got.Name+".Funcs", got.Funcs, want.Funcs, compareFuncs)
-			compareSlices(t, got.Name+".Methods", got.Methods, want.Methods, compareFuncs)
-		}
-	})
-}
-
-func compareSlices[E interface{}](t *testing.T, name string, got, want []E, compareElem func(*testing.T, string, E, E)) {
-	if len(got) != len(want) {
-		t.Errorf("%s: got %d, want %d", name, len(got), len(want))
-	}
-	for i := 0; i < len(got) && i < len(want); i++ {
-		compareElem(t, fmt.Sprintf("%s[%d]", name, i), got[i], want[i])
-	}
-}
-*/
-
-const funcsTestFile = `
-package funcs
-
-func F() {}
-
-type S1 struct {
-	S2  // embedded, exported
-	s3  // embedded, unexported
-}
-
-func NewS1()  S1 {return S1{} }
-func NewS1p() *S1 { return &S1{} }
-
-func (S1) M1() {}
-func (r S1) M2() {}
-func(S1) m3() {}		// unexported not shown
-func (*S1) P1() {}		// pointer receiver
-
-type S2 int
-func (S2) M3() {}		// shown on S2
-
-type s3 int
-func (s3) M4() {}		// shown on S1
-
-type G1[T interface{}] struct {
-	*s3
-}
-
-func NewG1[T interface{}]() G1[T] { return G1[T]{} }
-
-func (G1[T]) MG1() {}
-func (*G1[U]) MG2() {}
-
-type G2[T, U interface{}] struct {}
-
-func NewG2[T, U interface{}]() G2[T, U] { return G2[T, U]{} }
-
-func (G2[T, U]) MG3() {}
-func (*G2[A, B]) MG4() {}
-
-
-`
-
-var funcsPackage = &Package{
-	Funcs: []*Func{{Name: "F"}},
-	Types: []*Type{
-		{
-			Name:  "G1",
-			Funcs: []*Func{{Name: "NewG1"}},
-			Methods: []*Func{
-				{Name: "M4", Recv: "G1", // TODO: synthesize a param for G1?
-					Orig: "s3", Level: 1},
-				{Name: "MG1", Recv: "G1[T]", Orig: "G1[T]", Level: 0},
-				{Name: "MG2", Recv: "*G1[U]", Orig: "*G1[U]", Level: 0},
-			},
-		},
-		{
-			Name:  "G2",
-			Funcs: []*Func{{Name: "NewG2"}},
-			Methods: []*Func{
-				{Name: "MG3", Recv: "G2[T, U]", Orig: "G2[T, U]", Level: 0},
-				{Name: "MG4", Recv: "*G2[A, B]", Orig: "*G2[A, B]", Level: 0},
-			},
-		},
-		{
-			Name:  "S1",
-			Funcs: []*Func{{Name: "NewS1"}, {Name: "NewS1p"}},
-			Methods: []*Func{
-				{Name: "M1", Recv: "S1", Orig: "S1", Level: 0},
-				{Name: "M2", Recv: "S1", Orig: "S1", Level: 0},
-				{Name: "M4", Recv: "S1", Orig: "s3", Level: 1},
-				{Name: "P1", Recv: "*S1", Orig: "*S1", Level: 0},
-			},
-		},
-		{
-			Name: "S2",
-			Methods: []*Func{
-				{Name: "M3", Recv: "S2", Orig: "S2", Level: 0},
-			},
-		},
-	},
-}
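
A quick sketch, for illustration only: the golden file names above embed the Mode bits as a decimal, and the deleted -update flag regenerates them. The same Mode constants exist in the standard library's go/doc; the paths below are placeholders.

package main

import (
	"fmt"
	"go/doc"
)

func main() {
	// AllDecls == 1 and AllMethods == 2, so the three test variants above
	// read pkg.0.golden, pkg.1.golden, and pkg.2.golden respectively.
	for _, mode := range []doc.Mode{0, doc.AllDecls, doc.AllMethods} {
		fmt.Printf("testdata/pkg.%d.golden\n", mode)
	}
}
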
diff --git a/internal/backport/go/doc/example.go b/internal/backport/go/doc/example.go
deleted file mode 100644
index 959c4f2..0000000
--- a/internal/backport/go/doc/example.go
+++ /dev/null
@@ -1,723 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Extract example functions from file ASTs.
-
-package doc
-
-import (
-	"path"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// An Example represents an example function found in a test source file.
-type Example struct {
-	Name        string // name of the item being exemplified (including optional suffix)
-	Suffix      string // example suffix, without leading '_' (only populated by NewFromFiles)
-	Doc         string // example function doc string
-	Code        ast.Node
-	Play        *ast.File // a whole program version of the example
-	Comments    []*ast.CommentGroup
-	Output      string // expected output
-	Unordered   bool
-	EmptyOutput bool // expect empty output
-	Order       int  // original source code order
-}
-
-// Examples returns the examples found in testFiles, sorted by Name field.
-// The Order fields record the order in which the examples were encountered.
-// The Suffix field is not populated when Examples is called directly; it is
-// only populated by NewFromFiles for examples it finds in _test.go files.
-//
-// Playable Examples must be in a package whose name ends in "_test".
-// An Example is "playable" (the Play field is non-nil) in either of these
-// circumstances:
-//   - The example function is self-contained: the function references only
-//     identifiers from other packages (or predeclared identifiers, such as
-//     "int") and the test file does not include a dot import.
-//   - The entire test file is the example: the file contains exactly one
-//     example function, zero test, fuzz test, or benchmark function, and at
-//     least one top-level function, type, variable, or constant declaration
-//     other than the example function.
-func Examples(testFiles ...*ast.File) []*Example {
-	var list []*Example
-	for _, file := range testFiles {
-		hasTests := false // file contains tests, fuzz test, or benchmarks
-		numDecl := 0      // number of non-import declarations in the file
-		var flist []*Example
-		for _, decl := range file.Decls {
-			if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
-				numDecl++
-				continue
-			}
-			f, ok := decl.(*ast.FuncDecl)
-			if !ok || f.Recv != nil {
-				continue
-			}
-			numDecl++
-			name := f.Name.Name
-			if isTest(name, "Test") || isTest(name, "Benchmark") || isTest(name, "Fuzz") {
-				hasTests = true
-				continue
-			}
-			if !isTest(name, "Example") {
-				continue
-			}
-			if params := f.Type.Params; len(params.List) != 0 {
-				continue // function has params; not a valid example
-			}
-			if f.Body == nil { // ast.File.Body nil dereference (see issue 28044)
-				continue
-			}
-			var doc string
-			if f.Doc != nil {
-				doc = f.Doc.Text()
-			}
-			output, unordered, hasOutput := exampleOutput(f.Body, file.Comments)
-			flist = append(flist, &Example{
-				Name:        name[len("Example"):],
-				Doc:         doc,
-				Code:        f.Body,
-				Play:        playExample(file, f),
-				Comments:    file.Comments,
-				Output:      output,
-				Unordered:   unordered,
-				EmptyOutput: output == "" && hasOutput,
-				Order:       len(flist),
-			})
-		}
-		if !hasTests && numDecl > 1 && len(flist) == 1 {
-			// If this file only has one example function, some
-			// other top-level declarations, and no tests or
-			// benchmarks, use the whole file as the example.
-			flist[0].Code = file
-			flist[0].Play = playExampleFile(file)
-		}
-		list = append(list, flist...)
-	}
-	// sort by name
-	sort.Slice(list, func(i, j int) bool {
-		return list[i].Name < list[j].Name
-	})
-	return list
-}
-
-var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*(unordered )?output:`)
-
-// Extracts the expected output and whether there was a valid output comment
-func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
-	if _, last := lastComment(b, comments); last != nil {
-		// test that it begins with the correct prefix
-		text := last.Text()
-		if loc := outputPrefix.FindStringSubmatchIndex(text); loc != nil {
-			if loc[2] != -1 {
-				unordered = true
-			}
-			text = text[loc[1]:]
-			// Strip zero or more spaces followed by \n or a single space.
-			text = strings.TrimLeft(text, " ")
-			if len(text) > 0 && text[0] == '\n' {
-				text = text[1:]
-			}
-			return text, unordered, true
-		}
-	}
-	return "", false, false // no suitable comment found
-}
-
-// isTest tells whether name looks like a test, example, fuzz test, or
-// benchmark. It is a Test (say) if there is a character after Test that is not
-// a lower-case letter. (We don't want Testiness.)
-func isTest(name, prefix string) bool {
-	if !strings.HasPrefix(name, prefix) {
-		return false
-	}
-	if len(name) == len(prefix) { // "Test" is ok
-		return true
-	}
-	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
-	return !unicode.IsLower(rune)
-}
-
-// playExample synthesizes a new *ast.File based on the provided
-// file with the provided function body as the body of main.
-func playExample(file *ast.File, f *ast.FuncDecl) *ast.File {
-	body := f.Body
-
-	if !strings.HasSuffix(file.Name.Name, "_test") {
-		// We don't support examples that are part of the
-		// greater package (yet).
-		return nil
-	}
-
-	// Collect top-level declarations in the file.
-	topDecls := make(map[*ast.Object]ast.Decl)
-	typMethods := make(map[string][]ast.Decl)
-
-	for _, decl := range file.Decls {
-		switch d := decl.(type) {
-		case *ast.FuncDecl:
-			if d.Recv == nil {
-				topDecls[d.Name.Obj] = d
-			} else {
-				if len(d.Recv.List) == 1 {
-					t := d.Recv.List[0].Type
-					tname, _ := baseTypeName(t)
-					typMethods[tname] = append(typMethods[tname], d)
-				}
-			}
-		case *ast.GenDecl:
-			for _, spec := range d.Specs {
-				switch s := spec.(type) {
-				case *ast.TypeSpec:
-					topDecls[s.Name.Obj] = d
-				case *ast.ValueSpec:
-					for _, name := range s.Names {
-						topDecls[name.Obj] = d
-					}
-				}
-			}
-		}
-	}
-
-	// Find unresolved identifiers and uses of top-level declarations.
-	depDecls, unresolved := findDeclsAndUnresolved(body, topDecls, typMethods)
-
-	// Remove predeclared identifiers from unresolved list.
-	for n := range unresolved {
-		if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
-			delete(unresolved, n)
-		}
-	}
-
-	// Use unresolved identifiers to determine the imports used by this
-	// example. The heuristic assumes package names match base import
-	// paths for imports w/o renames (should be good enough most of the time).
-	var namedImports []ast.Spec
-	var blankImports []ast.Spec // _ imports
-
-	// To preserve the blank lines between groups of imports, find the
-	// start position of each group, and assign that position to all
-	// imports from that group.
-	groupStarts := findImportGroupStarts(file.Imports)
-	groupStart := func(s *ast.ImportSpec) token.Pos {
-		for i, start := range groupStarts {
-			if s.Path.ValuePos < start {
-				return groupStarts[i-1]
-			}
-		}
-		return groupStarts[len(groupStarts)-1]
-	}
-
-	for _, s := range file.Imports {
-		p, err := strconv.Unquote(s.Path.Value)
-		if err != nil {
-			continue
-		}
-		if p == "syscall/js" {
-			// We don't support examples that import syscall/js,
-			// because the package syscall/js is not available in the playground.
-			return nil
-		}
-		n := path.Base(p)
-		if s.Name != nil {
-			n = s.Name.Name
-			switch n {
-			case "_":
-				blankImports = append(blankImports, s)
-				continue
-			case ".":
-				// We can't resolve dot imports (yet).
-				return nil
-			}
-		}
-		if unresolved[n] {
-			// Copy the spec and its path to avoid modifying the original.
-			spec := *s
-			path := *s.Path
-			spec.Path = &path
-			spec.Path.ValuePos = groupStart(&spec)
-			namedImports = append(namedImports, &spec)
-			delete(unresolved, n)
-		}
-	}
-
-	// If there are other unresolved identifiers, give up because this
-	// synthesized file is not going to build.
-	if len(unresolved) > 0 {
-		return nil
-	}
-
-	// Include documentation belonging to blank imports.
-	var comments []*ast.CommentGroup
-	for _, s := range blankImports {
-		if c := s.(*ast.ImportSpec).Doc; c != nil {
-			comments = append(comments, c)
-		}
-	}
-
-	// Include comments that are inside the function body.
-	for _, c := range file.Comments {
-		if body.Pos() <= c.Pos() && c.End() <= body.End() {
-			comments = append(comments, c)
-		}
-	}
-
-	// Strip the "Output:" or "Unordered output:" comment and adjust body
-	// end position.
-	body, comments = stripOutputComment(body, comments)
-
-	// Include documentation belonging to dependent declarations.
-	for _, d := range depDecls {
-		switch d := d.(type) {
-		case *ast.GenDecl:
-			if d.Doc != nil {
-				comments = append(comments, d.Doc)
-			}
-		case *ast.FuncDecl:
-			if d.Doc != nil {
-				comments = append(comments, d.Doc)
-			}
-		}
-	}
-
-	// Synthesize import declaration.
-	importDecl := &ast.GenDecl{
-		Tok:    token.IMPORT,
-		Lparen: 1, // Need non-zero Lparen and Rparen so that printer
-		Rparen: 1, // treats this as a factored import.
-	}
-	importDecl.Specs = append(namedImports, blankImports...)
-
-	// Synthesize main function.
-	funcDecl := &ast.FuncDecl{
-		Name: ast.NewIdent("main"),
-		Type: f.Type,
-		Body: body,
-	}
-
-	decls := make([]ast.Decl, 0, 2+len(depDecls))
-	decls = append(decls, importDecl)
-	decls = append(decls, depDecls...)
-	decls = append(decls, funcDecl)
-
-	sort.Slice(decls, func(i, j int) bool {
-		return decls[i].Pos() < decls[j].Pos()
-	})
-	sort.Slice(comments, func(i, j int) bool {
-		return comments[i].Pos() < comments[j].Pos()
-	})
-
-	// Synthesize file.
-	return &ast.File{
-		Name:     ast.NewIdent("main"),
-		Decls:    decls,
-		Comments: comments,
-	}
-}
-
-// findDeclsAndUnresolved returns all the top-level declarations mentioned in
-// the body, and a set of unresolved symbols (those that appear in the body but
-// have no declaration in the program).
-//
-// topDecls maps objects to the top-level declaration declaring them (not
-// necessarily obj.Decl, as obj.Decl will be a Spec for GenDecls, but
-// topDecls[obj] will be the GenDecl itself).
-func findDeclsAndUnresolved(body ast.Node, topDecls map[*ast.Object]ast.Decl, typMethods map[string][]ast.Decl) ([]ast.Decl, map[string]bool) {
-	// This function recursively finds every top-level declaration used
-	// transitively by the body, populating usedDecls and usedObjs. Then it
-	// trims down the declarations to include only the symbols actually
-	// referenced by the body.
-
-	unresolved := make(map[string]bool)
-	var depDecls []ast.Decl
-	usedDecls := make(map[ast.Decl]bool)   // set of top-level decls reachable from the body
-	usedObjs := make(map[*ast.Object]bool) // set of objects reachable from the body (each declared by a usedDecl)
-
-	var inspectFunc func(ast.Node) bool
-	inspectFunc = func(n ast.Node) bool {
-		switch e := n.(type) {
-		case *ast.Ident:
-			if e.Obj == nil && e.Name != "_" {
-				unresolved[e.Name] = true
-			} else if d := topDecls[e.Obj]; d != nil {
-
-				usedObjs[e.Obj] = true
-				if !usedDecls[d] {
-					usedDecls[d] = true
-					depDecls = append(depDecls, d)
-				}
-			}
-			return true
-		case *ast.SelectorExpr:
-			// For selector expressions, only inspect the left hand side.
-			// (For an expression like fmt.Println, only add "fmt" to the
-			// set of unresolved names, not "Println".)
-			ast.Inspect(e.X, inspectFunc)
-			return false
-		case *ast.KeyValueExpr:
-			// For key value expressions, only inspect the value
-			// as the key should be resolved by the type of the
-			// composite literal.
-			ast.Inspect(e.Value, inspectFunc)
-			return false
-		}
-		return true
-	}
-
-	inspectFieldList := func(fl *ast.FieldList) {
-		if fl != nil {
-			for _, f := range fl.List {
-				ast.Inspect(f.Type, inspectFunc)
-			}
-		}
-	}
-
-	// Find the decls immediately referenced by body.
-	ast.Inspect(body, inspectFunc)
-	// Now loop over them, adding to the list when we find a new decl that the
-	// body depends on. Keep going until we don't find anything new.
-	for i := 0; i < len(depDecls); i++ {
-		switch d := depDecls[i].(type) {
-		case *ast.FuncDecl:
-			// Inspect type parameters.
-			inspectFieldList(d.Type.TypeParams)
-			// Inspect types of parameters and results. See #28492.
-			inspectFieldList(d.Type.Params)
-			inspectFieldList(d.Type.Results)
-
-			// Functions might not have a body. See #42706.
-			if d.Body != nil {
-				ast.Inspect(d.Body, inspectFunc)
-			}
-		case *ast.GenDecl:
-			for _, spec := range d.Specs {
-				switch s := spec.(type) {
-				case *ast.TypeSpec:
-					inspectFieldList(s.TypeParams)
-					ast.Inspect(s.Type, inspectFunc)
-					depDecls = append(depDecls, typMethods[s.Name.Name]...)
-				case *ast.ValueSpec:
-					if s.Type != nil {
-						ast.Inspect(s.Type, inspectFunc)
-					}
-					for _, val := range s.Values {
-						ast.Inspect(val, inspectFunc)
-					}
-				}
-			}
-		}
-	}
-
-	// Some decls include multiple specs, such as a variable declaration with
-	// multiple variables on the same line, or a parenthesized declaration. Trim
-	// the declarations to include only the specs that are actually mentioned.
-	// However, if there is a constant group with iota, leave it all: later
-	// constant declarations in the group may have no value and so cannot stand
-	// on their own, and removing any constant from the group could change the
-	// values of subsequent ones.
-	// See testdata/examples/iota.go for a minimal example.
-	var ds []ast.Decl
-	for _, d := range depDecls {
-		switch d := d.(type) {
-		case *ast.FuncDecl:
-			ds = append(ds, d)
-		case *ast.GenDecl:
-			containsIota := false // does any spec have iota?
-			// Collect all Specs that were mentioned in the example.
-			var specs []ast.Spec
-			for _, s := range d.Specs {
-				switch s := s.(type) {
-				case *ast.TypeSpec:
-					if usedObjs[s.Name.Obj] {
-						specs = append(specs, s)
-					}
-				case *ast.ValueSpec:
-					if !containsIota {
-						containsIota = hasIota(s)
-					}
-					// A ValueSpec may have multiple names (e.g. "var a, b int").
-					// Keep only the names that were mentioned in the example.
-					// Exception: the multiple names have a single initializer (which
-					// would be a function call with multiple return values). In that
-					// case, keep everything.
-					if len(s.Names) > 1 && len(s.Values) == 1 {
-						specs = append(specs, s)
-						continue
-					}
-					ns := *s
-					ns.Names = nil
-					ns.Values = nil
-					for i, n := range s.Names {
-						if usedObjs[n.Obj] {
-							ns.Names = append(ns.Names, n)
-							if s.Values != nil {
-								ns.Values = append(ns.Values, s.Values[i])
-							}
-						}
-					}
-					if len(ns.Names) > 0 {
-						specs = append(specs, &ns)
-					}
-				}
-			}
-			if len(specs) > 0 {
-				// Constant with iota? Keep it all.
-				if d.Tok == token.CONST && containsIota {
-					ds = append(ds, d)
-				} else {
-					// Synthesize a GenDecl with just the Specs we need.
-					nd := *d // copy the GenDecl
-					nd.Specs = specs
-					if len(specs) == 1 {
-						// Remove grouping parens if there is only one spec.
-						nd.Lparen = 0
-					}
-					ds = append(ds, &nd)
-				}
-			}
-		}
-	}
-	return ds, unresolved
-}
-
-func hasIota(s ast.Spec) bool {
-	has := false
-	ast.Inspect(s, func(n ast.Node) bool {
-		// Check that this is the special built-in "iota" identifier, not
-		// a user-defined shadow.
-		if id, ok := n.(*ast.Ident); ok && id.Name == "iota" && id.Obj == nil {
-			has = true
-			return false
-		}
-		return true
-	})
-	return has
-}
-
-// findImportGroupStarts finds the start positions of each sequence of import
-// specs that are not separated by a blank line.
-func findImportGroupStarts(imps []*ast.ImportSpec) []token.Pos {
-	startImps := findImportGroupStarts1(imps)
-	groupStarts := make([]token.Pos, len(startImps))
-	for i, imp := range startImps {
-		groupStarts[i] = imp.Pos()
-	}
-	return groupStarts
-}
-
-// Helper for findImportGroupStarts to ease testing.
-func findImportGroupStarts1(origImps []*ast.ImportSpec) []*ast.ImportSpec {
-	// Copy to avoid mutation.
-	imps := make([]*ast.ImportSpec, len(origImps))
-	copy(imps, origImps)
-	// Assume the imports are sorted by position.
-	sort.Slice(imps, func(i, j int) bool { return imps[i].Pos() < imps[j].Pos() })
-	// Assume gofmt has been applied, so there is a blank line between adjacent imps
-	// if and only if they are more than 2 positions apart (newline, tab).
-	var groupStarts []*ast.ImportSpec
-	prevEnd := token.Pos(-2)
-	for _, imp := range imps {
-		if imp.Pos()-prevEnd > 2 {
-			groupStarts = append(groupStarts, imp)
-		}
-		prevEnd = imp.End()
-		// Account for end-of-line comments.
-		if imp.Comment != nil {
-			prevEnd = imp.Comment.End()
-		}
-	}
-	return groupStarts
-}
-
-// playExampleFile takes a whole file example and synthesizes a new *ast.File
-// such that the example is function main in package main.
-func playExampleFile(file *ast.File) *ast.File {
-	// Strip copyright comment if present.
-	comments := file.Comments
-	if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
-		comments = comments[1:]
-	}
-
-	// Copy declaration slice, rewriting the ExampleX function to main.
-	var decls []ast.Decl
-	for _, d := range file.Decls {
-		if f, ok := d.(*ast.FuncDecl); ok && isTest(f.Name.Name, "Example") {
-			// Copy the FuncDecl, as it may be used elsewhere.
-			newF := *f
-			newF.Name = ast.NewIdent("main")
-			newF.Body, comments = stripOutputComment(f.Body, comments)
-			d = &newF
-		}
-		decls = append(decls, d)
-	}
-
-	// Copy the File, as it may be used elsewhere.
-	f := *file
-	f.Name = ast.NewIdent("main")
-	f.Decls = decls
-	f.Comments = comments
-	return &f
-}
-
-// stripOutputComment finds and removes the "Output:" or "Unordered output:"
-// comment from body and comments, and adjusts the body block's end position.
-func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
-	// Do nothing if there is no "Output:" or "Unordered output:" comment.
-	i, last := lastComment(body, comments)
-	if last == nil || !outputPrefix.MatchString(last.Text()) {
-		return body, comments
-	}
-
-	// Copy body and comments, as the originals may be used elsewhere.
-	newBody := &ast.BlockStmt{
-		Lbrace: body.Lbrace,
-		List:   body.List,
-		Rbrace: last.Pos(),
-	}
-	newComments := make([]*ast.CommentGroup, len(comments)-1)
-	copy(newComments, comments[:i])
-	copy(newComments[i:], comments[i+1:])
-	return newBody, newComments
-}
-
-// lastComment returns the last comment inside the provided block.
-func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
-	if b == nil {
-		return
-	}
-	pos, end := b.Pos(), b.End()
-	for j, cg := range c {
-		if cg.Pos() < pos {
-			continue
-		}
-		if cg.End() > end {
-			break
-		}
-		i, last = j, cg
-	}
-	return
-}
-
-// classifyExamples classifies examples and assigns them to the Examples field
-// of the relevant Func, Type, or Package that the example is associated with.
-//
-// The classification process is ambiguous in some cases:
-//
-//   - ExampleFoo_Bar matches a type named Foo_Bar
-//     or a method named Foo.Bar.
-//   - ExampleFoo_bar matches a type named Foo_bar
-//     or Foo (with a "bar" suffix).
-//
-// Examples with malformed names are not associated with anything.
-func classifyExamples(p *Package, examples []*Example) {
-	if len(examples) == 0 {
-		return
-	}
-	// Mapping of names for funcs, types, and methods to the example listing.
-	ids := make(map[string]*[]*Example)
-	ids[""] = &p.Examples // package-level examples have an empty name
-	for _, f := range p.Funcs {
-		if !token.IsExported(f.Name) {
-			continue
-		}
-		ids[f.Name] = &f.Examples
-	}
-	for _, t := range p.Types {
-		if !token.IsExported(t.Name) {
-			continue
-		}
-		ids[t.Name] = &t.Examples
-		for _, f := range t.Funcs {
-			if !token.IsExported(f.Name) {
-				continue
-			}
-			ids[f.Name] = &f.Examples
-		}
-		for _, m := range t.Methods {
-			if !token.IsExported(m.Name) {
-				continue
-			}
-			ids[strings.TrimPrefix(nameWithoutInst(m.Recv), "*")+"_"+m.Name] = &m.Examples
-		}
-	}
-
-	// Group each example with the associated func, type, or method.
-	for _, ex := range examples {
-		// Consider all possible split points for the suffix
-		// by starting at the end of string (no suffix case),
-		// then trying all positions that contain a '_' character.
-		//
-		// An association is made on the first successful match.
-		// Examples with malformed names that match nothing are skipped.
-		for i := len(ex.Name); i >= 0; i = strings.LastIndexByte(ex.Name[:i], '_') {
-			prefix, suffix, ok := splitExampleName(ex.Name, i)
-			if !ok {
-				continue
-			}
-			exs, ok := ids[prefix]
-			if !ok {
-				continue
-			}
-			ex.Suffix = suffix
-			*exs = append(*exs, ex)
-			break
-		}
-	}
-
-	// Sort list of example according to the user-specified suffix name.
-	for _, exs := range ids {
-		sort.Slice((*exs), func(i, j int) bool {
-			return (*exs)[i].Suffix < (*exs)[j].Suffix
-		})
-	}
-}
-
-// nameWithoutInst returns name if name has no brackets. If name contains
-// brackets, then it returns name with all the contents between (and including)
-// the outermost left and right bracket removed.
-//
-// Adapted from debug/gosym/symtab.go:Sym.nameWithoutInst.
-func nameWithoutInst(name string) string {
-	start := strings.Index(name, "[")
-	if start < 0 {
-		return name
-	}
-	end := strings.LastIndex(name, "]")
-	if end < 0 {
-		// Malformed name, should contain closing bracket too.
-		return name
-	}
-	return name[0:start] + name[end+1:]
-}
-
-// splitExampleName attempts to split example name s at index i,
-// and reports if that produces a valid split. The suffix may be
-// absent. Otherwise, it must start with a lower-case letter and
-// be preceded by '_'.
-//
-// One of i == len(s) or s[i] == '_' must be true.
-func splitExampleName(s string, i int) (prefix, suffix string, ok bool) {
-	if i == len(s) {
-		return s, "", true
-	}
-	if i == len(s)-1 {
-		return "", "", false
-	}
-	prefix, suffix = s[:i], s[i+1:]
-	return prefix, suffix, isExampleSuffix(suffix)
-}
-
-func isExampleSuffix(s string) bool {
-	r, size := utf8.DecodeRuneInString(s)
-	return size > 0 && unicode.IsLower(r)
-}
diff --git a/internal/backport/go/doc/example_internal_test.go b/internal/backport/go/doc/example_internal_test.go
deleted file mode 100644
index 82ce982..0000000
--- a/internal/backport/go/doc/example_internal_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-	"reflect"
-	"strconv"
-	"strings"
-	"testing"
-)
-
-func TestImportGroupStarts(t *testing.T) {
-	for _, test := range []struct {
-		name string
-		in   string
-		want []string // paths of group-starting imports
-	}{
-		{
-			name: "one group",
-			in: `package p
-import (
-	"a"
-	"b"
-	"c"
-	"d"
-)
-`,
-			want: []string{"a"},
-		},
-		{
-			name: "several groups",
-			in: `package p
-import (
-	"a"
-
-	"b"
-	"c"
-
-	"d"
-)
-`,
-			want: []string{"a", "b", "d"},
-		},
-		{
-			name: "extra space",
-			in: `package p
-import (
-	"a"
-
-
-	"b"
-	"c"
-
-
-	"d"
-)
-`,
-			want: []string{"a", "b", "d"},
-		},
-		{
-			name: "line comment",
-			in: `package p
-import (
-	"a" // comment
-	"b" // comment
-
-	"c"
-)`,
-			want: []string{"a", "c"},
-		},
-		{
-			name: "named import",
-			in: `package p
-import (
-	"a"
-	n "b"
-
-	m "c"
-	"d"
-)`,
-			want: []string{"a", "c"},
-		},
-		{
-			name: "blank import",
-			in: `package p
-import (
-	"a"
-
-	_ "b"
-
-	_ "c"
-	"d"
-)`,
-			want: []string{"a", "b", "c"},
-		},
-	} {
-		t.Run(test.name, func(t *testing.T) {
-			fset := token.NewFileSet()
-			file, err := parser.ParseFile(fset, "test.go", strings.NewReader(test.in), parser.ParseComments)
-			if err != nil {
-				t.Fatal(err)
-			}
-			imps := findImportGroupStarts1(file.Imports)
-			got := make([]string, len(imps))
-			for i, imp := range imps {
-				got[i], err = strconv.Unquote(imp.Path.Value)
-				if err != nil {
-					t.Fatal(err)
-				}
-			}
-			if !reflect.DeepEqual(got, test.want) {
-				t.Errorf("got %v, want %v", got, test.want)
-			}
-		})
-	}
-
-}
diff --git a/internal/backport/go/doc/example_test.go b/internal/backport/go/doc/example_test.go
deleted file mode 100644
index ecb0bf8..0000000
--- a/internal/backport/go/doc/example_test.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc_test
-
-import (
-	"bytes"
-	"fmt"
-	"path/filepath"
-	"reflect"
-	"strings"
-	"testing"
-
-	"golang.org/x/tools/txtar"
-	"golang.org/x/website/internal/backport/diff"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func TestExamples(t *testing.T) {
-	dir := filepath.Join("testdata", "examples")
-	filenames, err := filepath.Glob(filepath.Join(dir, "*.go"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, filename := range filenames {
-		t.Run(strings.TrimSuffix(filepath.Base(filename), ".go"), func(t *testing.T) {
-			fset := token.NewFileSet()
-			astFile, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
-			if err != nil {
-				t.Fatal(err)
-			}
-			goldenFilename := strings.TrimSuffix(filename, ".go") + ".golden"
-			archive, err := txtar.ParseFile(goldenFilename)
-			if err != nil {
-				t.Fatal(err)
-			}
-			golden := map[string]string{}
-			for _, f := range archive.Files {
-				golden[f.Name] = strings.TrimSpace(string(f.Data))
-			}
-
-			// Collect the results of doc.Examples in a map keyed by example name.
-			examples := map[string]*doc.Example{}
-			for _, e := range doc.Examples(astFile) {
-				examples[e.Name] = e
-				// Treat missing sections in the golden as empty.
-				for _, kind := range []string{"Play", "Output"} {
-					key := e.Name + "." + kind
-					if _, ok := golden[key]; !ok {
-						golden[key] = ""
-					}
-				}
-			}
-
-			// Each section in the golden file corresponds to an example we expect
-			// to see.
-			for sectionName, want := range golden {
-				name, kind, found := stringsCut(sectionName, ".")
-				if !found {
-					t.Fatalf("bad section name %q, want EXAMPLE_NAME.KIND", sectionName)
-				}
-				ex := examples[name]
-				if ex == nil {
-					t.Fatalf("no example named %q", name)
-				}
-
-				var got string
-				switch kind {
-				case "Play":
-					got = strings.TrimSpace(formatFile(t, fset, ex.Play))
-
-				case "Output":
-					got = strings.TrimSpace(ex.Output)
-				default:
-					t.Fatalf("bad section kind %q", kind)
-				}
-
-				if got != want {
-					t.Errorf("%s mismatch:\n%s", sectionName,
-						diff.Diff("want", []byte(want), "got", []byte(got)))
-				}
-			}
-		})
-	}
-}
-
-func formatFile(t *testing.T, fset *token.FileSet, n *ast.File) string {
-	t.Helper()
-	if n == nil {
-		return "<nil>"
-	}
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, n); err != nil {
-		t.Fatal(err)
-	}
-	return buf.String()
-}
-
-// This example illustrates how to use NewFromFiles
-// to compute package documentation with examples.
-func ExampleNewFromFiles() {
-	// src and test are two source files that make up
-	// a package whose documentation will be computed.
-	const src = `
-// This is the package comment.
-package p
-
-import "fmt"
-
-// This comment is associated with the Greet function.
-func Greet(who string) {
-	fmt.Printf("Hello, %s!\n", who)
-}
-`
-	const test = `
-package p_test
-
-// This comment is associated with the ExampleGreet_world example.
-func ExampleGreet_world() {
-	Greet("world")
-}
-`
-
-	// Create the AST by parsing src and test.
-	fset := token.NewFileSet()
-	files := []*ast.File{
-		mustParse(fset, "src.go", src),
-		mustParse(fset, "src_test.go", test),
-	}
-
-	// Compute package documentation with examples.
-	p, err := doc.NewFromFiles(fset, files, "example.com/p")
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Printf("package %s - %s", p.Name, p.Doc)
-	fmt.Printf("func %s - %s", p.Funcs[0].Name, p.Funcs[0].Doc)
-	fmt.Printf(" ⤷ example with suffix %q - %s", p.Funcs[0].Examples[0].Suffix, p.Funcs[0].Examples[0].Doc)
-
-	// Output:
-	// package p - This is the package comment.
-	// func Greet - This comment is associated with the Greet function.
-	//  ⤷ example with suffix "world" - This comment is associated with the ExampleGreet_world example.
-}
-
-func TestClassifyExamples(t *testing.T) {
-	const src = `
-package p
-
-const Const1 = 0
-var   Var1   = 0
-
-type (
-	Type1     int
-	Type1_Foo int
-	Type1_foo int
-	type2     int
-
-	Embed struct { Type1 }
-	Uembed struct { type2 }
-)
-
-func Func1()     {}
-func Func1_Foo() {}
-func Func1_foo() {}
-func func2()     {}
-
-func (Type1) Func1() {}
-func (Type1) Func1_Foo() {}
-func (Type1) Func1_foo() {}
-func (Type1) func2() {}
-
-func (type2) Func1() {}
-
-type (
-	Conflict          int
-	Conflict_Conflict int
-	Conflict_conflict int
-)
-
-func (Conflict) Conflict() {}
-
-func GFunc[T interface{}]() {}
-
-type GType[T interface{}] int
-
-func (GType[T]) M() {}
-`
-	const test = `
-package p_test
-
-func ExampleConst1() {} // invalid - no support for consts and vars
-func ExampleVar1()   {} // invalid - no support for consts and vars
-
-func Example()               {}
-func Example_()              {} // invalid - suffix must start with a lower-case letter
-func Example_suffix()        {}
-func Example_suffix_xX_X_x() {}
-func Example_世界()           {} // invalid - suffix must start with a lower-case letter
-func Example_123()           {} // invalid - suffix must start with a lower-case letter
-func Example_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
-
-func ExampleType1()               {}
-func ExampleType1_()              {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_suffix()        {}
-func ExampleType1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_Foo()           {}
-func ExampleType1_Foo_suffix()    {}
-func ExampleType1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_foo()           {}
-func ExampleType1_foo_suffix()    {}
-func ExampleType1_foo_Suffix()    {} // matches Type1, instead of Type1_foo
-func Exampletype2()               {} // invalid - cannot match unexported
-
-func ExampleFunc1()               {}
-func ExampleFunc1_()              {} // invalid - suffix must start with a lower-case letter
-func ExampleFunc1_suffix()        {}
-func ExampleFunc1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
-func ExampleFunc1_Foo()           {}
-func ExampleFunc1_Foo_suffix()    {}
-func ExampleFunc1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
-func ExampleFunc1_foo()           {}
-func ExampleFunc1_foo_suffix()    {}
-func ExampleFunc1_foo_Suffix()    {} // matches Func1, instead of Func1_foo
-func Examplefunc1()               {} // invalid - cannot match unexported
-
-func ExampleType1_Func1()               {}
-func ExampleType1_Func1_()              {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_Func1_suffix()        {}
-func ExampleType1_Func1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_Func1_Foo()           {}
-func ExampleType1_Func1_Foo_suffix()    {}
-func ExampleType1_Func1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
-func ExampleType1_Func1_foo()           {}
-func ExampleType1_Func1_foo_suffix()    {}
-func ExampleType1_Func1_foo_Suffix()    {} // matches Type1.Func1, instead of Type1.Func1_foo
-func ExampleType1_func2()               {} // matches Type1, instead of Type1.func2
-
-func ExampleEmbed_Func1()         {} // invalid - no support for forwarded methods from embedding exported type
-func ExampleUembed_Func1()        {} // methods from embedding unexported types are OK
-func ExampleUembed_Func1_suffix() {}
-
-func ExampleConflict_Conflict()        {} // ambiguous with either Conflict or Conflict_Conflict type
-func ExampleConflict_conflict()        {} // ambiguous with either Conflict or Conflict_conflict type
-func ExampleConflict_Conflict_suffix() {} // ambiguous with either Conflict or Conflict_Conflict type
-func ExampleConflict_conflict_suffix() {} // ambiguous with either Conflict or Conflict_conflict type
-
-func ExampleGFunc() {}
-func ExampleGFunc_suffix() {}
-
-func ExampleGType_M() {}
-func ExampleGType_M_suffix() {}
-`
-
-	// Parse literal source code as a *doc.Package.
-	fset := token.NewFileSet()
-	files := []*ast.File{
-		mustParse(fset, "src.go", src),
-		mustParse(fset, "src_test.go", test),
-	}
-	p, err := doc.NewFromFiles(fset, files, "example.com/p")
-	if err != nil {
-		t.Fatalf("doc.NewFromFiles: %v", err)
-	}
-
-	// Collect the association of examples to top-level identifiers.
-	got := map[string][]string{}
-	got[""] = exampleNames(p.Examples)
-	for _, f := range p.Funcs {
-		got[f.Name] = exampleNames(f.Examples)
-	}
-	for _, t := range p.Types {
-		got[t.Name] = exampleNames(t.Examples)
-		for _, f := range t.Funcs {
-			got[f.Name] = exampleNames(f.Examples)
-		}
-		for _, m := range t.Methods {
-			got[t.Name+"."+m.Name] = exampleNames(m.Examples)
-		}
-	}
-
-	want := map[string][]string{
-		"": {"", "suffix", "suffix_xX_X_x"}, // Package-level examples.
-
-		"Type1":     {"", "foo_Suffix", "func2", "suffix"},
-		"Type1_Foo": {"", "suffix"},
-		"Type1_foo": {"", "suffix"},
-
-		"Func1":     {"", "foo_Suffix", "suffix"},
-		"Func1_Foo": {"", "suffix"},
-		"Func1_foo": {"", "suffix"},
-
-		"Type1.Func1":     {"", "foo_Suffix", "suffix"},
-		"Type1.Func1_Foo": {"", "suffix"},
-		"Type1.Func1_foo": {"", "suffix"},
-
-		"Uembed.Func1": {"", "suffix"},
-
-		// These are implementation dependent due to the ambiguous parsing.
-		"Conflict_Conflict": {"", "suffix"},
-		"Conflict_conflict": {"", "suffix"},
-
-		"GFunc":   {"", "suffix"},
-		"GType.M": {"", "suffix"},
-	}
-
-	for id := range got {
-		if !reflect.DeepEqual(got[id], want[id]) {
-			t.Errorf("classification mismatch for %q:\ngot  %q\nwant %q", id, got[id], want[id])
-		}
-		delete(want, id)
-	}
-	if len(want) > 0 {
-		t.Errorf("did not find:\n%q", want)
-	}
-}
-
-func exampleNames(exs []*doc.Example) (out []string) {
-	for _, ex := range exs {
-		out = append(out, ex.Suffix)
-	}
-	return out
-}
-
-func mustParse(fset *token.FileSet, filename, src string) *ast.File {
-	f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
-	if err != nil {
-		panic(err)
-	}
-	return f
-}
-
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
diff --git a/internal/backport/go/doc/exports.go b/internal/backport/go/doc/exports.go
deleted file mode 100644
index c2b91d5..0000000
--- a/internal/backport/go/doc/exports.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements export filtering of an AST.
-
-package doc
-
-import (
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// filterIdentList removes unexported names from list in place
-// and returns the resulting list.
-func filterIdentList(list []*ast.Ident) []*ast.Ident {
-	j := 0
-	for _, x := range list {
-		if token.IsExported(x.Name) {
-			list[j] = x
-			j++
-		}
-	}
-	return list[0:j]
-}
-
-var underscore = ast.NewIdent("_")
-
-func filterCompositeLit(lit *ast.CompositeLit, filter Filter, export bool) {
-	n := len(lit.Elts)
-	lit.Elts = filterExprList(lit.Elts, filter, export)
-	if len(lit.Elts) < n {
-		lit.Incomplete = true
-	}
-}
-
-func filterExprList(list []ast.Expr, filter Filter, export bool) []ast.Expr {
-	j := 0
-	for _, exp := range list {
-		switch x := exp.(type) {
-		case *ast.CompositeLit:
-			filterCompositeLit(x, filter, export)
-		case *ast.KeyValueExpr:
-			if x, ok := x.Key.(*ast.Ident); ok && !filter(x.Name) {
-				continue
-			}
-			if x, ok := x.Value.(*ast.CompositeLit); ok {
-				filterCompositeLit(x, filter, export)
-			}
-		}
-		list[j] = exp
-		j++
-	}
-	return list[0:j]
-}
-
-// updateIdentList replaces all unexported identifiers with underscore
-// and reports whether at least one exported name exists.
-func updateIdentList(list []*ast.Ident) (hasExported bool) {
-	for i, x := range list {
-		if token.IsExported(x.Name) {
-			hasExported = true
-		} else {
-			list[i] = underscore
-		}
-	}
-	return hasExported
-}
-
-// hasExportedName reports whether list contains any exported names.
-func hasExportedName(list []*ast.Ident) bool {
-	for _, x := range list {
-		if x.IsExported() {
-			return true
-		}
-	}
-	return false
-}
-
-// removeAnonymousField removes anonymous fields named name from an interface.
-func removeAnonymousField(name string, ityp *ast.InterfaceType) {
-	list := ityp.Methods.List // we know that ityp.Methods != nil
-	j := 0
-	for _, field := range list {
-		keepField := true
-		if n := len(field.Names); n == 0 {
-			// anonymous field
-			if fname, _ := baseTypeName(field.Type); fname == name {
-				keepField = false
-			}
-		}
-		if keepField {
-			list[j] = field
-			j++
-		}
-	}
-	if j < len(list) {
-		ityp.Incomplete = true
-	}
-	ityp.Methods.List = list[0:j]
-}
-
-// filterFieldList removes unexported fields (field names) from the field list
-// in place and reports whether fields were removed. Anonymous fields are
-// recorded with the parent type. filterType is called with the types of
-// all remaining fields.
-func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
-	if fields == nil {
-		return
-	}
-	list := fields.List
-	j := 0
-	for _, field := range list {
-		keepField := false
-		if n := len(field.Names); n == 0 {
-			// anonymous field or embedded type or union element
-			fname := r.recordAnonymousField(parent, field.Type)
-			if fname != "" {
-				if token.IsExported(fname) {
-					keepField = true
-				} else if ityp != nil && predeclaredTypes[fname] {
-					// possibly an embedded predeclared type; keep it for now but
-					// remember this interface so that it can be fixed if name is also
-					// defined locally
-					keepField = true
-					r.remember(fname, ityp)
-				}
-			} else {
-				// If we're operating on an interface, assume that this is an embedded
-				// type or union element.
-				//
-				// TODO(rfindley): consider traversing into approximation/unions
-				// elements to see if they are entirely unexported.
-				keepField = ityp != nil
-			}
-		} else {
-			field.Names = filterIdentList(field.Names)
-			if len(field.Names) < n {
-				removedFields = true
-			}
-			if len(field.Names) > 0 {
-				keepField = true
-			}
-		}
-		if keepField {
-			r.filterType(nil, field.Type)
-			list[j] = field
-			j++
-		}
-	}
-	if j < len(list) {
-		removedFields = true
-	}
-	fields.List = list[0:j]
-	return
-}
-
-// filterParamList applies filterType to each parameter type in fields.
-func (r *reader) filterParamList(fields *ast.FieldList) {
-	if fields != nil {
-		for _, f := range fields.List {
-			r.filterType(nil, f.Type)
-		}
-	}
-}
-
-// filterType strips any unexported struct fields or method types from typ
-// in place. If fields (or methods) have been removed, the corresponding
-// struct or interface type has the Incomplete field set to true.
-func (r *reader) filterType(parent *namedType, typ ast.Expr) {
-	switch t := typ.(type) {
-	case *ast.Ident:
-		// nothing to do
-	case *ast.ParenExpr:
-		r.filterType(nil, t.X)
-	case *ast.StarExpr: // possibly an embedded type literal
-		r.filterType(nil, t.X)
-	case *ast.UnaryExpr:
-		if t.Op == token.TILDE { // approximation element
-			r.filterType(nil, t.X)
-		}
-	case *ast.BinaryExpr:
-		if t.Op == token.OR { // union
-			r.filterType(nil, t.X)
-			r.filterType(nil, t.Y)
-		}
-	case *ast.ArrayType:
-		r.filterType(nil, t.Elt)
-	case *ast.StructType:
-		if r.filterFieldList(parent, t.Fields, nil) {
-			t.Incomplete = true
-		}
-	case *ast.FuncType:
-		r.filterParamList(t.TypeParams)
-		r.filterParamList(t.Params)
-		r.filterParamList(t.Results)
-	case *ast.InterfaceType:
-		if r.filterFieldList(parent, t.Methods, t) {
-			t.Incomplete = true
-		}
-	case *ast.MapType:
-		r.filterType(nil, t.Key)
-		r.filterType(nil, t.Value)
-	case *ast.ChanType:
-		r.filterType(nil, t.Value)
-	}
-}
-
-func (r *reader) filterSpec(spec ast.Spec) bool {
-	switch s := spec.(type) {
-	case *ast.ImportSpec:
-		// always keep imports so we can collect them
-		return true
-	case *ast.ValueSpec:
-		s.Values = filterExprList(s.Values, token.IsExported, true)
-		if len(s.Values) > 0 || s.Type == nil && len(s.Values) == 0 {
-			// If there are values declared on RHS, just replace the unexported
-			// identifiers on the LHS with underscore, so that it matches
-			// the sequence of expression on the RHS.
-			//
-			// Similarly, if there are no type and values, then this expression
-			// must be following an iota expression, where order matters.
-			if updateIdentList(s.Names) {
-				r.filterType(nil, s.Type)
-				return true
-			}
-		} else {
-			s.Names = filterIdentList(s.Names)
-			if len(s.Names) > 0 {
-				r.filterType(nil, s.Type)
-				return true
-			}
-		}
-	case *ast.TypeSpec:
-		// Don't filter type parameters here, by analogy with function parameters
-		// which are not filtered for top-level function declarations.
-		if name := s.Name.Name; token.IsExported(name) {
-			r.filterType(r.lookupType(s.Name.Name), s.Type)
-			return true
-		} else if IsPredeclared(name) {
-			if r.shadowedPredecl == nil {
-				r.shadowedPredecl = make(map[string]bool)
-			}
-			r.shadowedPredecl[name] = true
-		}
-	}
-	return false
-}
-
-// copyConstType returns a copy of typ with position pos.
-// typ must be a valid constant type.
-// In practice, only (possibly qualified) identifiers are possible.
-func copyConstType(typ ast.Expr, pos token.Pos) ast.Expr {
-	switch typ := typ.(type) {
-	case *ast.Ident:
-		return &ast.Ident{Name: typ.Name, NamePos: pos}
-	case *ast.SelectorExpr:
-		if id, ok := typ.X.(*ast.Ident); ok {
-			// presumably a qualified identifier
-			return &ast.SelectorExpr{
-				Sel: ast.NewIdent(typ.Sel.Name),
-				X:   &ast.Ident{Name: id.Name, NamePos: pos},
-			}
-		}
-	}
-	return nil // shouldn't happen, but be conservative and don't panic
-}
-
-func (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {
-	if tok == token.CONST {
-		// Propagate any type information that would get lost otherwise
-		// when unexported constants are filtered.
-		var prevType ast.Expr
-		for _, spec := range list {
-			spec := spec.(*ast.ValueSpec)
-			if spec.Type == nil && len(spec.Values) == 0 && prevType != nil {
-				// provide current spec with an explicit type
-				spec.Type = copyConstType(prevType, spec.Pos())
-			}
-			if hasExportedName(spec.Names) {
-				// exported names are preserved so there's no need to propagate the type
-				prevType = nil
-			} else {
-				prevType = spec.Type
-			}
-		}
-	}
-
-	j := 0
-	for _, s := range list {
-		if r.filterSpec(s) {
-			list[j] = s
-			j++
-		}
-	}
-	return list[0:j]
-}
-
-func (r *reader) filterDecl(decl ast.Decl) bool {
-	switch d := decl.(type) {
-	case *ast.GenDecl:
-		d.Specs = r.filterSpecList(d.Specs, d.Tok)
-		return len(d.Specs) > 0
-	case *ast.FuncDecl:
-		// ok to filter these methods early because any
-		// conflicting method will be filtered here, too -
-		// thus, removing these methods early will not lead
-		// to the false removal of possible conflicts
-		return token.IsExported(d.Name.Name)
-	}
-	return false
-}
-
-// fileExports removes unexported declarations from src in place.
-func (r *reader) fileExports(src *ast.File) {
-	j := 0
-	for _, d := range src.Decls {
-		if r.filterDecl(d) {
-			src.Decls[j] = d
-			j++
-		}
-	}
-	src.Decls = src.Decls[0:j]
-}
diff --git a/internal/backport/go/doc/filter.go b/internal/backport/go/doc/filter.go
deleted file mode 100644
index ddf0368..0000000
--- a/internal/backport/go/doc/filter.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import "golang.org/x/website/internal/backport/go/ast"
-
-type Filter func(string) bool
-
-func matchFields(fields *ast.FieldList, f Filter) bool {
-	if fields != nil {
-		for _, field := range fields.List {
-			for _, name := range field.Names {
-				if f(name.Name) {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
-
-func matchDecl(d *ast.GenDecl, f Filter) bool {
-	for _, d := range d.Specs {
-		switch v := d.(type) {
-		case *ast.ValueSpec:
-			for _, name := range v.Names {
-				if f(name.Name) {
-					return true
-				}
-			}
-		case *ast.TypeSpec:
-			if f(v.Name.Name) {
-				return true
-			}
-			// We don't match ordinary parameters in filterFuncs, so by analogy don't
-			// match type parameters here.
-			switch t := v.Type.(type) {
-			case *ast.StructType:
-				if matchFields(t.Fields, f) {
-					return true
-				}
-			case *ast.InterfaceType:
-				if matchFields(t.Methods, f) {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
-
-func filterValues(a []*Value, f Filter) []*Value {
-	w := 0
-	for _, vd := range a {
-		if matchDecl(vd.Decl, f) {
-			a[w] = vd
-			w++
-		}
-	}
-	return a[0:w]
-}
-
-func filterFuncs(a []*Func, f Filter) []*Func {
-	w := 0
-	for _, fd := range a {
-		if f(fd.Name) {
-			a[w] = fd
-			w++
-		}
-	}
-	return a[0:w]
-}
-
-func filterTypes(a []*Type, f Filter) []*Type {
-	w := 0
-	for _, td := range a {
-		n := 0 // number of matches
-		if matchDecl(td.Decl, f) {
-			n = 1
-		} else {
-			// type name doesn't match, but we may have matching consts, vars, factories or methods
-			td.Consts = filterValues(td.Consts, f)
-			td.Vars = filterValues(td.Vars, f)
-			td.Funcs = filterFuncs(td.Funcs, f)
-			td.Methods = filterFuncs(td.Methods, f)
-			n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
-		}
-		if n > 0 {
-			a[w] = td
-			w++
-		}
-	}
-	return a[0:w]
-}
-
-// Filter eliminates documentation for names that don't pass through the filter f.
-// TODO(gri): Recognize "Type.Method" as a name.
-func (p *Package) Filter(f Filter) {
-	p.Consts = filterValues(p.Consts, f)
-	p.Vars = filterValues(p.Vars, f)
-	p.Types = filterTypes(p.Types, f)
-	p.Funcs = filterFuncs(p.Funcs, f)
-	p.Doc = "" // don't show top-level package doc
-}
diff --git a/internal/backport/go/doc/headscan b/internal/backport/go/doc/headscan
deleted file mode 100755
index 1c20463..0000000
--- a/internal/backport/go/doc/headscan
+++ /dev/null
Binary files differ
diff --git a/internal/backport/go/doc/headscan.go b/internal/backport/go/doc/headscan.go
deleted file mode 100644
index 7529525..0000000
--- a/internal/backport/go/doc/headscan.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ignore
-// +build ignore
-
-/*
-The headscan command extracts comment headings from package files;
-it is used to detect false positives which may require an adjustment
-to the comment formatting heuristics in comment.go.
-
-Usage: headscan [-root root_directory]
-
-By default, the $GOROOT/src directory is scanned.
-*/
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"strings"
-
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-var (
-	root    = flag.String("root", filepath.Join(runtime.GOROOT(), "src"), "root of filesystem tree to scan")
-	verbose = flag.Bool("v", false, "verbose mode")
-)
-
-// ToHTML in comment.go assigns a (possibly blank) ID to each heading
-var html_h = regexp.MustCompile(`<h3 id="[^"]*">`)
-
-const html_endh = "</h3>\n"
-
-func isGoFile(fi fs.FileInfo) bool {
-	return strings.HasSuffix(fi.Name(), ".go") &&
-		!strings.HasSuffix(fi.Name(), "_test.go")
-}
-
-func appendHeadings(list []string, comment string) []string {
-	var buf bytes.Buffer
-	doc.ToHTML(&buf, comment, nil)
-	for s := buf.String(); s != ""; {
-		loc := html_h.FindStringIndex(s)
-		if len(loc) == 0 {
-			break
-		}
-		var inner string
-		inner, s, _ = strings.Cut(s[loc[1]:], html_endh)
-		list = append(list, inner)
-	}
-	return list
-}
-
-func main() {
-	flag.Parse()
-	fset := token.NewFileSet()
-	nheadings := 0
-	err := filepath.WalkDir(*root, func(path string, info fs.DirEntry, err error) error {
-		if !info.IsDir() {
-			return nil
-		}
-		pkgs, err := parser.ParseDir(fset, path, isGoFile, parser.ParseComments)
-		if err != nil {
-			if *verbose {
-				fmt.Fprintln(os.Stderr, err)
-			}
-			return nil
-		}
-		for _, pkg := range pkgs {
-			d := doc.New(pkg, path, doc.Mode(0))
-			list := appendHeadings(nil, d.Doc)
-			for _, d := range d.Consts {
-				list = appendHeadings(list, d.Doc)
-			}
-			for _, d := range d.Types {
-				list = appendHeadings(list, d.Doc)
-			}
-			for _, d := range d.Vars {
-				list = appendHeadings(list, d.Doc)
-			}
-			for _, d := range d.Funcs {
-				list = appendHeadings(list, d.Doc)
-			}
-			if len(list) > 0 {
-				// directories may contain multiple packages;
-				// print path and package name
-				fmt.Printf("%s (package %s)\n", path, pkg.Name)
-				for _, h := range list {
-					fmt.Printf("\t%s\n", h)
-				}
-				nheadings += len(list)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-	fmt.Println(nheadings, "headings found")
-}
diff --git a/internal/backport/go/doc/reader.go b/internal/backport/go/doc/reader.go
deleted file mode 100644
index 2c9ba03..0000000
--- a/internal/backport/go/doc/reader.go
+++ /dev/null
@@ -1,1030 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"fmt"
-	"path"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// ----------------------------------------------------------------------------
-// function/method sets
-//
-// Internally, we treat functions like methods and collect them in method sets.
-
-// A methodSet describes a set of methods. Entries where Decl == nil are conflict
-// entries (more than one method with the same name at the same embedding level).
-type methodSet map[string]*Func
-
-// recvString returns a string representation of recv of the form "T", "*T",
-// "T[A, ...]", "*T[A, ...]" or "BADRECV" (if not a proper receiver type).
-func recvString(recv ast.Expr) string {
-	switch t := recv.(type) {
-	case *ast.Ident:
-		return t.Name
-	case *ast.StarExpr:
-		return "*" + recvString(t.X)
-	case *ast.IndexExpr:
-		// Generic type with one parameter.
-		return fmt.Sprintf("%s[%s]", recvString(t.X), recvParam(t.Index))
-	case *ast.IndexListExpr:
-		// Generic type with multiple parameters.
-		if len(t.Indices) > 0 {
-			var b strings.Builder
-			b.WriteString(recvString(t.X))
-			b.WriteByte('[')
-			b.WriteString(recvParam(t.Indices[0]))
-			for _, e := range t.Indices[1:] {
-				b.WriteString(", ")
-				b.WriteString(recvParam(e))
-			}
-			b.WriteByte(']')
-			return b.String()
-		}
-	}
-	return "BADRECV"
-}
-
-func recvParam(p ast.Expr) string {
-	if id, ok := p.(*ast.Ident); ok {
-		return id.Name
-	}
-	return "BADPARAM"
-}
-
-// set creates the corresponding Func for f and adds it to mset.
-// If there are multiple f's with the same name, set keeps the first
-// one with documentation; conflicts are ignored. The boolean
-// specifies whether to leave the AST untouched.
-func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) {
-	name := f.Name.Name
-	if g := mset[name]; g != nil && g.Doc != "" {
-		// A function with the same name has already been registered;
-		// since it has documentation, assume f is simply another
-		// implementation and ignore it. This does not happen if the
-		// caller is using go/build.ScanDir to determine the list of
-		// files implementing a package.
-		return
-	}
-	// function doesn't exist or has no documentation; use f
-	recv := ""
-	if f.Recv != nil {
-		var typ ast.Expr
-		// be careful in case of incorrect ASTs
-		if list := f.Recv.List; len(list) == 1 {
-			typ = list[0].Type
-		}
-		recv = recvString(typ)
-	}
-	mset[name] = &Func{
-		Doc:  f.Doc.Text(),
-		Name: name,
-		Decl: f,
-		Recv: recv,
-		Orig: recv,
-	}
-	if !preserveAST {
-		f.Doc = nil // doc consumed - remove from AST
-	}
-}
-
-// add adds method m to the method set; m is ignored if the method set
-// already contains a method with the same name at the same or a higher
-// level than m.
-func (mset methodSet) add(m *Func) {
-	old := mset[m.Name]
-	if old == nil || m.Level < old.Level {
-		mset[m.Name] = m
-		return
-	}
-	if m.Level == old.Level {
-		// conflict - mark it using a method with nil Decl
-		mset[m.Name] = &Func{
-			Name:  m.Name,
-			Level: m.Level,
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Named types
-
-// baseTypeName returns the name of the base type of x (or "")
-// and whether the type is imported or not.
-func baseTypeName(x ast.Expr) (name string, imported bool) {
-	switch t := x.(type) {
-	case *ast.Ident:
-		return t.Name, false
-	case *ast.IndexExpr:
-		return baseTypeName(t.X)
-	case *ast.IndexListExpr:
-		return baseTypeName(t.X)
-	case *ast.SelectorExpr:
-		if _, ok := t.X.(*ast.Ident); ok {
-			// only possible for qualified type names;
-			// assume type is imported
-			return t.Sel.Name, true
-		}
-	case *ast.ParenExpr:
-		return baseTypeName(t.X)
-	case *ast.StarExpr:
-		return baseTypeName(t.X)
-	}
-	return "", false
-}
-
-// An embeddedSet describes a set of embedded types.
-type embeddedSet map[*namedType]bool
-
-// A namedType represents a named unqualified (package local, or possibly
-// predeclared) type. The namedType for a type name is always found via
-// reader.lookupType.
-type namedType struct {
-	doc  string       // doc comment for type
-	name string       // type name
-	decl *ast.GenDecl // nil if declaration hasn't been seen yet
-
-	isEmbedded bool        // true if this type is embedded
-	isStruct   bool        // true if this type is a struct
-	embedded   embeddedSet // true if the embedded type is a pointer
-
-	// associated declarations
-	values  []*Value // consts and vars
-	funcs   methodSet
-	methods methodSet
-}
-
-// ----------------------------------------------------------------------------
-// AST reader
-
-// reader accumulates documentation for a single package.
-// It modifies the AST: Comments (declaration documentation)
-// that have been collected by the reader are set to nil
-// in the respective AST nodes so that they are not printed
-// twice (once when printing the documentation and once when
-// printing the corresponding AST node).
-type reader struct {
-	mode Mode
-
-	// package properties
-	doc       string // package documentation, if any
-	filenames []string
-	notes     map[string][]*Note
-
-	// imports
-	imports      map[string]int
-	hasDotImp    bool // if set, package contains a dot import
-	importByName map[string]string
-
-	// declarations
-	values []*Value // consts and vars
-	order  int      // sort order of const and var declarations (when we can't use a name)
-	types  map[string]*namedType
-	funcs  methodSet
-
-	// support for package-local shadowing of predeclared types
-	shadowedPredecl map[string]bool
-	fixmap          map[string][]*ast.InterfaceType
-}
-
-func (r *reader) isVisible(name string) bool {
-	return r.mode&AllDecls != 0 || token.IsExported(name)
-}
-
-// lookupType returns the base type with the given name.
-// If the base type has not been encountered yet, a new
-// type with the given name but no associated declaration
-// is added to the type map.
-func (r *reader) lookupType(name string) *namedType {
-	if name == "" || name == "_" {
-		return nil // no type docs for anonymous types
-	}
-	if typ, found := r.types[name]; found {
-		return typ
-	}
-	// type not found - add one without declaration
-	typ := &namedType{
-		name:     name,
-		embedded: make(embeddedSet),
-		funcs:    make(methodSet),
-		methods:  make(methodSet),
-	}
-	r.types[name] = typ
-	return typ
-}
-
-// recordAnonymousField registers fieldType as the type of an
-// anonymous field in the parent type. If the field is imported
-// (qualified name) or the parent is nil, the field is ignored.
-// The function returns the field name.
-func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
-	fname, imp := baseTypeName(fieldType)
-	if parent == nil || imp {
-		return
-	}
-	if ftype := r.lookupType(fname); ftype != nil {
-		ftype.isEmbedded = true
-		_, ptr := fieldType.(*ast.StarExpr)
-		parent.embedded[ftype] = ptr
-	}
-	return
-}
-
-func (r *reader) readDoc(comment *ast.CommentGroup) {
-	// By convention there should be only one package comment
-	// but collect all of them if there are more than one.
-	text := comment.Text()
-	if r.doc == "" {
-		r.doc = text
-		return
-	}
-	r.doc += "\n" + text
-}
-
-func (r *reader) remember(predecl string, typ *ast.InterfaceType) {
-	if r.fixmap == nil {
-		r.fixmap = make(map[string][]*ast.InterfaceType)
-	}
-	r.fixmap[predecl] = append(r.fixmap[predecl], typ)
-}
-
-func specNames(specs []ast.Spec) []string {
-	names := make([]string, 0, len(specs)) // reasonable estimate
-	for _, s := range specs {
-		// s guaranteed to be an *ast.ValueSpec by readValue
-		for _, ident := range s.(*ast.ValueSpec).Names {
-			names = append(names, ident.Name)
-		}
-	}
-	return names
-}
-
-// readValue processes a const or var declaration.
-func (r *reader) readValue(decl *ast.GenDecl) {
-	// determine if decl should be associated with a type
-	// Heuristic: For each typed entry, determine the type name, if any.
-	//            If there is exactly one type name that is sufficiently
-	//            frequent, associate the decl with the respective type.
-	domName := ""
-	domFreq := 0
-	prev := ""
-	n := 0
-	for _, spec := range decl.Specs {
-		s, ok := spec.(*ast.ValueSpec)
-		if !ok {
-			continue // should not happen, but be conservative
-		}
-		name := ""
-		switch {
-		case s.Type != nil:
-			// a type is present; determine its name
-			if n, imp := baseTypeName(s.Type); !imp {
-				name = n
-			}
-		case decl.Tok == token.CONST && len(s.Values) == 0:
-			// no type or value is present but we have a constant declaration;
-			// use the previous type name (possibly the empty string)
-			name = prev
-		}
-		if name != "" {
-			// entry has a named type
-			if domName != "" && domName != name {
-				// more than one type name - do not associate
-				// with any type
-				domName = ""
-				break
-			}
-			domName = name
-			domFreq++
-		}
-		prev = name
-		n++
-	}
-
-	// nothing to do w/o a legal declaration
-	if n == 0 {
-		return
-	}
-
-	// determine values list with which to associate the Value for this decl
-	values := &r.values
-	const threshold = 0.75
-	if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) {
-		// typed entries are sufficiently frequent
-		if typ := r.lookupType(domName); typ != nil {
-			values = &typ.values // associate with that type
-		}
-	}
-
-	*values = append(*values, &Value{
-		Doc:   decl.Doc.Text(),
-		Names: specNames(decl.Specs),
-		Decl:  decl,
-		order: r.order,
-	})
-	if r.mode&PreserveAST == 0 {
-		decl.Doc = nil // doc consumed - remove from AST
-	}
-	// Note: It's important that the order used here is global because the cleanupTypes
-	// methods may move values associated with types back into the global list. If the
-	// order is list-specific, sorting is not deterministic because the same order value
-	// may appear multiple times (was bug, found when fixing #16153).
-	r.order++
-}
-
-// fields returns a struct's fields or an interface's methods.
-func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) {
-	var fields *ast.FieldList
-	switch t := typ.(type) {
-	case *ast.StructType:
-		fields = t.Fields
-		isStruct = true
-	case *ast.InterfaceType:
-		fields = t.Methods
-	}
-	if fields != nil {
-		list = fields.List
-	}
-	return
-}
-
-// readType processes a type declaration.
-func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
-	typ := r.lookupType(spec.Name.Name)
-	if typ == nil {
-		return // no name or blank name - ignore the type
-	}
-
-	// A type should be added at most once, so typ.decl
-	// should be nil - if it is not, simply overwrite it.
-	typ.decl = decl
-
-	// compute documentation
-	doc := spec.Doc
-	if doc == nil {
-		// no doc associated with the spec, use the declaration doc, if any
-		doc = decl.Doc
-	}
-	if r.mode&PreserveAST == 0 {
-		spec.Doc = nil // doc consumed - remove from AST
-		decl.Doc = nil // doc consumed - remove from AST
-	}
-	typ.doc = doc.Text()
-
-	// record anonymous fields (they may contribute methods)
-	// (some fields may have been recorded already when filtering
-	// exports, but that's ok)
-	var list []*ast.Field
-	list, typ.isStruct = fields(spec.Type)
-	for _, field := range list {
-		if len(field.Names) == 0 {
-			r.recordAnonymousField(typ, field.Type)
-		}
-	}
-}
-
-// isPredeclared reports whether n denotes a predeclared type.
-func (r *reader) isPredeclared(n string) bool {
-	return predeclaredTypes[n] && r.types[n] == nil
-}
-
-// readFunc processes a func or method declaration.
-func (r *reader) readFunc(fun *ast.FuncDecl) {
-	// strip function body if requested.
-	if r.mode&PreserveAST == 0 {
-		fun.Body = nil
-	}
-
-	// associate methods with the receiver type, if any
-	if fun.Recv != nil {
-		// method
-		if len(fun.Recv.List) == 0 {
-			// should not happen (incorrect AST); (See issue 17788)
-			// don't show this method
-			return
-		}
-		recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
-		if imp {
-			// should not happen (incorrect AST);
-			// don't show this method
-			return
-		}
-		if typ := r.lookupType(recvTypeName); typ != nil {
-			typ.methods.set(fun, r.mode&PreserveAST != 0)
-		}
-		// otherwise ignore the method
-		// TODO(gri): There may be exported methods of non-exported types
-		// that can be called because of exported values (consts, vars, or
-		// function results) of that type. Could determine if that is the
-		// case and then show those methods in an appropriate section.
-		return
-	}
-
-	// Associate factory functions with the first visible result type, as long as
-	// others are predeclared types.
-	if fun.Type.Results.NumFields() >= 1 {
-		var typ *namedType // type to associate the function with
-		numResultTypes := 0
-		for _, res := range fun.Type.Results.List {
-			factoryType := res.Type
-			if t, ok := factoryType.(*ast.ArrayType); ok {
-				// We consider functions that return slices or arrays of type
-				// T (or pointers to T) as factory functions of T.
-				factoryType = t.Elt
-			}
-			if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) {
-				if lookupTypeParam(n, fun.Type.TypeParams) != nil {
-					// Issue #49477: don't associate fun with its type parameter result.
-					// A type parameter is not a defined type.
-					continue
-				}
-				if t := r.lookupType(n); t != nil {
-					typ = t
-					numResultTypes++
-					if numResultTypes > 1 {
-						break
-					}
-				}
-			}
-		}
-		// If there is exactly one result type,
-		// associate the function with that type.
-		if numResultTypes == 1 {
-			typ.funcs.set(fun, r.mode&PreserveAST != 0)
-			return
-		}
-	}
-
-	// just an ordinary function
-	r.funcs.set(fun, r.mode&PreserveAST != 0)
-}
-
-// lookupTypeParam searches for type parameters named name within the tparams
-// field list, returning the relevant identifier if found, or nil if not.
-func lookupTypeParam(name string, tparams *ast.FieldList) *ast.Ident {
-	if tparams == nil {
-		return nil
-	}
-	for _, field := range tparams.List {
-		for _, id := range field.Names {
-			if id.Name == name {
-				return id
-			}
-		}
-	}
-	return nil
-}
-
-var (
-	noteMarker    = `([A-Z][A-Z]+)\(([^)]+)\):?`                    // MARKER(uid), MARKER at least 2 chars, uid at least 1 char
-	noteMarkerRx  = regexp.MustCompile(`^[ \t]*` + noteMarker)      // MARKER(uid) at text start
-	noteCommentRx = regexp.MustCompile(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start
-)
-
-// clean replaces each sequence of space, \r, or \t characters
-// with a single space and removes any trailing and leading spaces.
-func clean(s string) string {
-	var b []byte
-	p := byte(' ')
-	for i := 0; i < len(s); i++ {
-		q := s[i]
-		if q == '\r' || q == '\t' {
-			q = ' '
-		}
-		if q != ' ' || p != ' ' {
-			b = append(b, q)
-			p = q
-		}
-	}
-	// remove trailing blank, if any
-	if n := len(b); n > 0 && p == ' ' {
-		b = b[0 : n-1]
-	}
-	return string(b)
-}
-
-// readNote collects a single note from a sequence of comments.
-func (r *reader) readNote(list []*ast.Comment) {
-	text := (&ast.CommentGroup{List: list}).Text()
-	if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil {
-		// The note body starts after the marker.
-		// We remove any formatting so that we don't
-		// get spurious line breaks/indentation when
-		// showing the TODO body.
-		body := clean(text[m[1]:])
-		if body != "" {
-			marker := text[m[2]:m[3]]
-			r.notes[marker] = append(r.notes[marker], &Note{
-				Pos:  list[0].Pos(),
-				End:  list[len(list)-1].End(),
-				UID:  text[m[4]:m[5]],
-				Body: body,
-			})
-		}
-	}
-}
-
-// readNotes extracts notes from comments.
-// A note must start at the beginning of a comment with "MARKER(uid):"
-// and is followed by the note body (e.g., "// BUG(gri): fix this").
-// The note ends at the end of the comment group or at the start of
-// another note in the same comment group, whichever comes first.
-func (r *reader) readNotes(comments []*ast.CommentGroup) {
-	for _, group := range comments {
-		i := -1 // comment index of most recent note start, valid if >= 0
-		list := group.List
-		for j, c := range list {
-			if noteCommentRx.MatchString(c.Text) {
-				if i >= 0 {
-					r.readNote(list[i:j])
-				}
-				i = j
-			}
-		}
-		if i >= 0 {
-			r.readNote(list[i:])
-		}
-	}
-}
-
-// readFile adds the AST for a source file to the reader.
-func (r *reader) readFile(src *ast.File) {
-	// add package documentation
-	if src.Doc != nil {
-		r.readDoc(src.Doc)
-		if r.mode&PreserveAST == 0 {
-			src.Doc = nil // doc consumed - remove from AST
-		}
-	}
-
-	// add all declarations but for functions which are processed in a separate pass
-	for _, decl := range src.Decls {
-		switch d := decl.(type) {
-		case *ast.GenDecl:
-			switch d.Tok {
-			case token.IMPORT:
-				// imports are handled individually
-				for _, spec := range d.Specs {
-					if s, ok := spec.(*ast.ImportSpec); ok {
-						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
-							r.imports[import_] = 1
-							var name string
-							if s.Name != nil {
-								name = s.Name.Name
-								if name == "." {
-									r.hasDotImp = true
-								}
-							}
-							if name != "." {
-								if name == "" {
-									name = assumedPackageName(import_)
-								}
-								old, ok := r.importByName[name]
-								if !ok {
-									r.importByName[name] = import_
-								} else if old != import_ && old != "" {
-									r.importByName[name] = "" // ambiguous
-								}
-							}
-						}
-					}
-				}
-			case token.CONST, token.VAR:
-				// constants and variables are always handled as a group
-				r.readValue(d)
-			case token.TYPE:
-				// types are handled individually
-				if len(d.Specs) == 1 && !d.Lparen.IsValid() {
-					// common case: single declaration w/o parentheses
-					// (if a single declaration is parenthesized,
-					// create a new fake declaration below, so that
-					// go/doc type declarations always appear w/o
-					// parentheses)
-					if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
-						r.readType(d, s)
-					}
-					break
-				}
-				for _, spec := range d.Specs {
-					if s, ok := spec.(*ast.TypeSpec); ok {
-						// use an individual (possibly fake) declaration
-						// for each type; this also ensures that each type
-						// gets to (re-)use the declaration documentation
-						// if there's none associated with the spec itself
-						fake := &ast.GenDecl{
-							Doc: d.Doc,
-							// don't use the existing TokPos because it
-							// will lead to the wrong selection range for
-							// the fake declaration if there are more
-							// than one type in the group (this affects
-							// src/cmd/godoc/godoc.go's posLink_urlFunc)
-							TokPos: s.Pos(),
-							Tok:    token.TYPE,
-							Specs:  []ast.Spec{s},
-						}
-						r.readType(fake, s)
-					}
-				}
-			}
-		}
-	}
-
-	// collect MARKER(...): annotations
-	r.readNotes(src.Comments)
-	if r.mode&PreserveAST == 0 {
-		src.Comments = nil // consumed unassociated comments - remove from AST
-	}
-}
-
-func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
-	// initialize reader
-	r.filenames = make([]string, len(pkg.Files))
-	r.imports = make(map[string]int)
-	r.mode = mode
-	r.types = make(map[string]*namedType)
-	r.funcs = make(methodSet)
-	r.notes = make(map[string][]*Note)
-	r.importByName = make(map[string]string)
-
-	// sort package files before reading them so that the
-	// result does not depend on map iteration order
-	i := 0
-	for filename := range pkg.Files {
-		r.filenames[i] = filename
-		i++
-	}
-	sort.Strings(r.filenames)
-
-	// process files in sorted order
-	for _, filename := range r.filenames {
-		f := pkg.Files[filename]
-		if mode&AllDecls == 0 {
-			r.fileExports(f)
-		}
-		r.readFile(f)
-	}
-
-	for name, path := range r.importByName {
-		if path == "" {
-			delete(r.importByName, name)
-		}
-	}
-
-	// process functions now that we have better type information
-	for _, f := range pkg.Files {
-		for _, decl := range f.Decls {
-			if d, ok := decl.(*ast.FuncDecl); ok {
-				r.readFunc(d)
-			}
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
-	if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
-		return f // shouldn't happen, but be safe
-	}
-
-	// copy existing receiver field and set new type
-	newField := *f.Decl.Recv.List[0]
-	origPos := newField.Type.Pos()
-	_, origRecvIsPtr := newField.Type.(*ast.StarExpr)
-	newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName}
-	var typ ast.Expr = newIdent
-	if !embeddedIsPtr && origRecvIsPtr {
-		newIdent.NamePos++ // '*' is one character
-		typ = &ast.StarExpr{Star: origPos, X: newIdent}
-	}
-	newField.Type = typ
-
-	// copy existing receiver field list and set new receiver field
-	newFieldList := *f.Decl.Recv
-	newFieldList.List = []*ast.Field{&newField}
-
-	// copy existing function declaration and set new receiver field list
-	newFuncDecl := *f.Decl
-	newFuncDecl.Recv = &newFieldList
-
-	// copy existing function documentation and set new declaration
-	newF := *f
-	newF.Decl = &newFuncDecl
-	newF.Recv = recvString(typ)
-	// the Orig field never changes
-	newF.Level = level
-
-	return &newF
-}
-
-// collectEmbeddedMethods collects the embedded methods of typ in mset.
-func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) {
-	visited[typ] = true
-	for embedded, isPtr := range typ.embedded {
-		// Once an embedded type is embedded as a pointer type
-		// all embedded types in those types are treated like
-		// pointer types for the purpose of the receiver type
-		// computation; i.e., embeddedIsPtr is sticky for this
-		// embedding hierarchy.
-		thisEmbeddedIsPtr := embeddedIsPtr || isPtr
-		for _, m := range embedded.methods {
-			// only top-level methods are embedded
-			if m.Level == 0 {
-				mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
-			}
-		}
-		if !visited[embedded] {
-			r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited)
-		}
-	}
-	delete(visited, typ)
-}
-
-// computeMethodSets determines the actual method sets for each type encountered.
-func (r *reader) computeMethodSets() {
-	for _, t := range r.types {
-		// collect embedded methods for t
-		if t.isStruct {
-			// struct
-			r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet))
-		} else {
-			// interface
-			// TODO(gri) fix this
-		}
-	}
-
-	// For any predeclared names that are declared locally, don't treat them as
-	// exported fields anymore.
-	for predecl := range r.shadowedPredecl {
-		for _, ityp := range r.fixmap[predecl] {
-			removeAnonymousField(predecl, ityp)
-		}
-	}
-}
-
-// cleanupTypes removes the association of functions and methods with
-// types that have no declaration. Instead, these functions and methods
-// are shown at the package level. It also removes types with missing
-// declarations or which are not visible.
-func (r *reader) cleanupTypes() {
-	for _, t := range r.types {
-		visible := r.isVisible(t.name)
-		predeclared := predeclaredTypes[t.name]
-
-		if t.decl == nil && (predeclared || visible && (t.isEmbedded || r.hasDotImp)) {
-			// t.name is a predeclared type (and was not redeclared in this package),
-			// or it was embedded somewhere but its declaration is missing (because
-			// the AST is incomplete), or we have a dot-import (and all bets are off):
-			// move any associated values, funcs, and methods back to the top-level so
-			// that they are not lost.
-			// 1) move values
-			r.values = append(r.values, t.values...)
-			// 2) move factory functions
-			for name, f := range t.funcs {
-				// in a correct AST, package-level function names
-				// are all different - no need to check for conflicts
-				r.funcs[name] = f
-			}
-			// 3) move methods
-			if !predeclared {
-				for name, m := range t.methods {
-					// don't overwrite functions with the same name - drop them
-					if _, found := r.funcs[name]; !found {
-						r.funcs[name] = m
-					}
-				}
-			}
-		}
-		// remove types w/o declaration or which are not visible
-		if t.decl == nil || !visible {
-			delete(r.types, t.name)
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Sorting
-
-type data struct {
-	n    int
-	swap func(i, j int)
-	less func(i, j int) bool
-}
-
-func (d *data) Len() int           { return d.n }
-func (d *data) Swap(i, j int)      { d.swap(i, j) }
-func (d *data) Less(i, j int) bool { return d.less(i, j) }
-
-// sortBy is a helper function for sorting
-func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
-	sort.Sort(&data{n, swap, less})
-}
-
-func sortedKeys(m map[string]int) []string {
-	list := make([]string, len(m))
-	i := 0
-	for key := range m {
-		list[i] = key
-		i++
-	}
-	sort.Strings(list)
-	return list
-}
-
-// sortingName returns the name to use when sorting d into place.
-func sortingName(d *ast.GenDecl) string {
-	if len(d.Specs) == 1 {
-		if s, ok := d.Specs[0].(*ast.ValueSpec); ok {
-			return s.Names[0].Name
-		}
-	}
-	return ""
-}
-
-func sortedValues(m []*Value, tok token.Token) []*Value {
-	list := make([]*Value, len(m)) // big enough in any case
-	i := 0
-	for _, val := range m {
-		if val.Decl.Tok == tok {
-			list[i] = val
-			i++
-		}
-	}
-	list = list[0:i]
-
-	sortBy(
-		func(i, j int) bool {
-			if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj {
-				return ni < nj
-			}
-			return list[i].order < list[j].order
-		},
-		func(i, j int) { list[i], list[j] = list[j], list[i] },
-		len(list),
-	)
-
-	return list
-}
-
-func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
-	list := make([]*Type, len(m))
-	i := 0
-	for _, t := range m {
-		list[i] = &Type{
-			Doc:     t.doc,
-			Name:    t.name,
-			Decl:    t.decl,
-			Consts:  sortedValues(t.values, token.CONST),
-			Vars:    sortedValues(t.values, token.VAR),
-			Funcs:   sortedFuncs(t.funcs, true),
-			Methods: sortedFuncs(t.methods, allMethods),
-		}
-		i++
-	}
-
-	sortBy(
-		func(i, j int) bool { return list[i].Name < list[j].Name },
-		func(i, j int) { list[i], list[j] = list[j], list[i] },
-		len(list),
-	)
-
-	return list
-}
-
-func removeStar(s string) string {
-	if len(s) > 0 && s[0] == '*' {
-		return s[1:]
-	}
-	return s
-}
-
-func sortedFuncs(m methodSet, allMethods bool) []*Func {
-	list := make([]*Func, len(m))
-	i := 0
-	for _, m := range m {
-		// determine which methods to include
-		switch {
-		case m.Decl == nil:
-			// exclude conflict entry
-		case allMethods, m.Level == 0, !token.IsExported(removeStar(m.Orig)):
-			// forced inclusion, method not embedded, or method
-			// embedded but original receiver type not exported
-			list[i] = m
-			i++
-		}
-	}
-	list = list[0:i]
-	sortBy(
-		func(i, j int) bool { return list[i].Name < list[j].Name },
-		func(i, j int) { list[i], list[j] = list[j], list[i] },
-		len(list),
-	)
-	return list
-}
-
-// noteBodies returns a list of note body strings given a list of notes.
-// This is only used to populate the deprecated Package.Bugs field.
-func noteBodies(notes []*Note) []string {
-	var list []string
-	for _, n := range notes {
-		list = append(list, n.Body)
-	}
-	return list
-}
-
-// ----------------------------------------------------------------------------
-// Predeclared identifiers
-
-// IsPredeclared reports whether s is a predeclared identifier.
-func IsPredeclared(s string) bool {
-	return predeclaredTypes[s] || predeclaredFuncs[s] || predeclaredConstants[s]
-}
-
-var predeclaredTypes = map[string]bool{
-	"interface{}": true,
-	"bool":        true,
-	"byte":        true,
-	"comparable":  true,
-	"complex64":   true,
-	"complex128":  true,
-	"error":       true,
-	"float32":     true,
-	"float64":     true,
-	"int":         true,
-	"int8":        true,
-	"int16":       true,
-	"int32":       true,
-	"int64":       true,
-	"rune":        true,
-	"string":      true,
-	"uint":        true,
-	"uint8":       true,
-	"uint16":      true,
-	"uint32":      true,
-	"uint64":      true,
-	"uintptr":     true,
-}
-
-var predeclaredFuncs = map[string]bool{
-	"append":  true,
-	"cap":     true,
-	"close":   true,
-	"complex": true,
-	"copy":    true,
-	"delete":  true,
-	"imag":    true,
-	"len":     true,
-	"make":    true,
-	"new":     true,
-	"panic":   true,
-	"print":   true,
-	"println": true,
-	"real":    true,
-	"recover": true,
-}
-
-var predeclaredConstants = map[string]bool{
-	"false": true,
-	"iota":  true,
-	"nil":   true,
-	"true":  true,
-}
-
-// assumedPackageName returns the assumed package name
-// for a given import path. This is a copy of
-// golang.org/x/tools/internal/imports.ImportPathToAssumedName.
-func assumedPackageName(importPath string) string {
-	notIdentifier := func(ch rune) bool {
-		return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' ||
-			'0' <= ch && ch <= '9' ||
-			ch == '_' ||
-			ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch)))
-	}
-
-	base := path.Base(importPath)
-	if strings.HasPrefix(base, "v") {
-		if _, err := strconv.Atoi(base[1:]); err == nil {
-			dir := path.Dir(importPath)
-			if dir != "." {
-				base = path.Base(dir)
-			}
-		}
-	}
-	base = strings.TrimPrefix(base, "go-")
-	if i := strings.IndexFunc(base, notIdentifier); i >= 0 {
-		base = base[:i]
-	}
-	return base
-}
diff --git a/internal/backport/go/doc/synopsis.go b/internal/backport/go/doc/synopsis.go
deleted file mode 100644
index 7ac23e3..0000000
--- a/internal/backport/go/doc/synopsis.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import (
-	"strings"
-	"unicode"
-
-	"golang.org/x/website/internal/backport/go/doc/comment"
-)
-
-// firstSentence returns the first sentence in s.
-// The sentence ends after the first period followed by space and
-// not preceded by exactly one uppercase letter.
-func firstSentence(s string) string {
-	var ppp, pp, p rune
-	for i, q := range s {
-		if q == '\n' || q == '\r' || q == '\t' {
-			q = ' '
-		}
-		if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
-			return s[:i]
-		}
-		if p == '。' || p == '.' {
-			return s[:i]
-		}
-		ppp, pp, p = pp, p, q
-	}
-	return s
-}
-
-// Synopsis returns a cleaned version of the first sentence in text.
-//
-// Deprecated: New programs should use [Package.Synopsis] instead,
-// which handles links in text properly.
-func Synopsis(text string) string {
-	var p Package
-	return p.Synopsis(text)
-}
-
-// IllegalPrefixes is a list of lower-case prefixes that identify
-// a comment as not being a doc comment.
-// This helps to avoid misinterpreting the common mistake
-// of a copyright notice immediately before a package statement
-// as being a doc comment.
-var IllegalPrefixes = []string{
-	"copyright",
-	"all rights",
-	"author",
-}
-
-// Synopsis returns a cleaned version of the first sentence in text.
-// That sentence ends after the first period followed by space and not
-// preceded by exactly one uppercase letter, or at the first paragraph break.
-// The result string has no \n, \r, or \t characters and uses only single
-// spaces between words. If text starts with any of the IllegalPrefixes,
-// the result is the empty string.
-func (p *Package) Synopsis(text string) string {
-	text = firstSentence(text)
-	lower := strings.ToLower(text)
-	for _, prefix := range IllegalPrefixes {
-		if strings.HasPrefix(lower, prefix) {
-			return ""
-		}
-	}
-	pr := p.Printer()
-	pr.TextWidth = -1
-	d := p.Parser().Parse(text)
-	if len(d.Content) == 0 {
-		return ""
-	}
-	if _, ok := d.Content[0].(*comment.Paragraph); !ok {
-		return ""
-	}
-	d.Content = d.Content[:1] // might be blank lines, code blocks, etc in “first sentence”
-	return strings.TrimSpace(string(pr.Text(d)))
-}
diff --git a/internal/backport/go/doc/synopsis_test.go b/internal/backport/go/doc/synopsis_test.go
deleted file mode 100644
index 158c734..0000000
--- a/internal/backport/go/doc/synopsis_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package doc
-
-import "testing"
-
-var tests = []struct {
-	txt string
-	fsl int
-	syn string
-}{
-	{"", 0, ""},
-	{"foo", 3, "foo"},
-	{"foo.", 4, "foo."},
-	{"foo.bar", 7, "foo.bar"},
-	{"  foo.  ", 6, "foo."},
-	{"  foo\t  bar.\n", 12, "foo bar."},
-	{"  foo\t  bar.\n", 12, "foo bar."},
-	{"a  b\n\nc\r\rd\t\t", 12, "a b"},
-	{"a  b\n\nc\r\rd\t\t  . BLA", 15, "a b"},
-	{"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."},
-	{"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."},
-	{"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."},
-	{"Package\nfoo. ..", 12, "Package foo."},
-	{"P . Q.", 3, "P ."},
-	{"P. Q.   ", 8, "P. Q."},
-	{"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
-	{"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
-	{"Package こんにちは。世界", 26, "Package こんにちは。"},
-	{"Package 안녕.世界", 17, "Package 안녕."},
-	{"Package foo does bar.", 21, "Package foo does bar."},
-	{"Copyright 2012 Google, Inc. Package foo does bar.", 27, ""},
-	{"All Rights reserved. Package foo does bar.", 20, ""},
-	{"All rights reserved. Package foo does bar.", 20, ""},
-	{"Authors: foo@bar.com. Package foo does bar.", 21, ""},
-	{"typically invoked as ``go tool asm'',", 37, "typically invoked as “go tool asm”,"},
-}
-
-func TestSynopsis(t *testing.T) {
-	for _, e := range tests {
-		fs := firstSentence(e.txt)
-		if fs != e.txt[:e.fsl] {
-			t.Errorf("firstSentence(%q) = %q, want %q", e.txt, fs, e.txt[:e.fsl])
-		}
-		syn := Synopsis(e.txt)
-		if syn != e.syn {
-			t.Errorf("Synopsis(%q) = %q, want %q", e.txt, syn, e.syn)
-		}
-	}
-}
diff --git a/internal/backport/go/doc/testdata/a.0.golden b/internal/backport/go/doc/testdata/a.0.golden
deleted file mode 100644
index 7e680b8..0000000
--- a/internal/backport/go/doc/testdata/a.0.golden
+++ /dev/null
@@ -1,52 +0,0 @@
-// comment 0  comment 1 
-PACKAGE a
-
-IMPORTPATH
-	testdata/a
-
-FILENAMES
-	testdata/a0.go
-	testdata/a1.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	bug0
-
-	bug1
-
-
-BUGS
-BUG(uid)	bug0
-
-BUG(uid)	bug1
-
-
-NOTES
-NOTE(uid)	
-
-NOTE(foo)	1 of 4 - this is the first line of note 1
-	- note 1 continues on this 2nd line
-	- note 1 continues on this 3rd line
-
-NOTE(foo)	2 of 4
-
-NOTE(bar)	3 of 4
-
-NOTE(bar)	4 of 4
-	- this is the last line of note 4
-
-NOTE(bam)	This note which contains a (parenthesized) subphrase
-	 must appear in its entirety.
-
-NOTE(xxx)	The ':' after the marker and uid is optional.
-
-
-SECBUGS
-SECBUG(uid)	sec hole 0
-	need to fix asap
-
-
-TODOS
-TODO(uid)	todo0
-
-TODO(uid)	todo1
-
diff --git a/internal/backport/go/doc/testdata/a.1.golden b/internal/backport/go/doc/testdata/a.1.golden
deleted file mode 100644
index 7e680b8..0000000
--- a/internal/backport/go/doc/testdata/a.1.golden
+++ /dev/null
@@ -1,52 +0,0 @@
-// comment 0  comment 1 
-PACKAGE a
-
-IMPORTPATH
-	testdata/a
-
-FILENAMES
-	testdata/a0.go
-	testdata/a1.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	bug0
-
-	bug1
-
-
-BUGS
-BUG(uid)	bug0
-
-BUG(uid)	bug1
-
-
-NOTES
-NOTE(uid)	
-
-NOTE(foo)	1 of 4 - this is the first line of note 1
-	- note 1 continues on this 2nd line
-	- note 1 continues on this 3rd line
-
-NOTE(foo)	2 of 4
-
-NOTE(bar)	3 of 4
-
-NOTE(bar)	4 of 4
-	- this is the last line of note 4
-
-NOTE(bam)	This note which contains a (parenthesized) subphrase
-	 must appear in its entirety.
-
-NOTE(xxx)	The ':' after the marker and uid is optional.
-
-
-SECBUGS
-SECBUG(uid)	sec hole 0
-	need to fix asap
-
-
-TODOS
-TODO(uid)	todo0
-
-TODO(uid)	todo1
-
diff --git a/internal/backport/go/doc/testdata/a.2.golden b/internal/backport/go/doc/testdata/a.2.golden
deleted file mode 100644
index 7e680b8..0000000
--- a/internal/backport/go/doc/testdata/a.2.golden
+++ /dev/null
@@ -1,52 +0,0 @@
-// comment 0  comment 1 
-PACKAGE a
-
-IMPORTPATH
-	testdata/a
-
-FILENAMES
-	testdata/a0.go
-	testdata/a1.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	bug0
-
-	bug1
-
-
-BUGS
-BUG(uid)	bug0
-
-BUG(uid)	bug1
-
-
-NOTES
-NOTE(uid)	
-
-NOTE(foo)	1 of 4 - this is the first line of note 1
-	- note 1 continues on this 2nd line
-	- note 1 continues on this 3rd line
-
-NOTE(foo)	2 of 4
-
-NOTE(bar)	3 of 4
-
-NOTE(bar)	4 of 4
-	- this is the last line of note 4
-
-NOTE(bam)	This note which contains a (parenthesized) subphrase
-	 must appear in its entirety.
-
-NOTE(xxx)	The ':' after the marker and uid is optional.
-
-
-SECBUGS
-SECBUG(uid)	sec hole 0
-	need to fix asap
-
-
-TODOS
-TODO(uid)	todo0
-
-TODO(uid)	todo1
-
diff --git a/internal/backport/go/doc/testdata/a0.go b/internal/backport/go/doc/testdata/a0.go
deleted file mode 100644
index 2420c8a..0000000
--- a/internal/backport/go/doc/testdata/a0.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// comment 0
-package a
-
-//BUG(uid): bug0
-
-//TODO(uid): todo0
-
-// A note with some spaces after it, should be ignored (watch out for
-// emacs modes that remove trailing whitespace).
-//NOTE(uid):
-
-// SECBUG(uid): sec hole 0
-// need to fix asap
-
-// Multiple notes may be in the same comment group and should be
-// recognized individually. Notes may start in the middle of a
-// comment group as long as they start at the beginning of an
-// individual comment.
-//
-// NOTE(foo): 1 of 4 - this is the first line of note 1
-// - note 1 continues on this 2nd line
-// - note 1 continues on this 3rd line
-// NOTE(foo): 2 of 4
-// NOTE(bar): 3 of 4
-/* NOTE(bar): 4 of 4 */
-// - this is the last line of note 4
-//
-//
-
-// NOTE(bam): This note which contains a (parenthesized) subphrase
-//            must appear in its entirety.
-
-// NOTE(xxx) The ':' after the marker and uid is optional.
-
-// NOTE(): NO uid - should not show up.
-// NOTE()  NO uid - should not show up.
diff --git a/internal/backport/go/doc/testdata/a1.go b/internal/backport/go/doc/testdata/a1.go
deleted file mode 100644
index 9fad1e0..0000000
--- a/internal/backport/go/doc/testdata/a1.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// comment 1
-package a
-
-//BUG(uid): bug1
-
-//TODO(uid): todo1
-
-//TODO(): ignored
diff --git a/internal/backport/go/doc/testdata/b.0.golden b/internal/backport/go/doc/testdata/b.0.golden
deleted file mode 100644
index c06246a..0000000
--- a/internal/backport/go/doc/testdata/b.0.golden
+++ /dev/null
@@ -1,74 +0,0 @@
-// 
-PACKAGE b
-
-IMPORTPATH
-	testdata/b
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/b.go
-
-CONSTANTS
-	// 
-	const (
-		C1	notExported	= iota
-		C2
-	
-		C4
-		C5
-	)
-
-	// 
-	const C notExported = 0
-
-	// 
-	const Pi = 3.14	// Pi
-
-
-VARIABLES
-	// 
-	var (
-		U1, U2, U4, U5	notExported
-	
-		U7	notExported	= 7
-	)
-
-	// 
-	var MaxInt int	// MaxInt
-
-	// 
-	var V notExported
-
-	// 
-	var V1, V2, V4, V5 notExported
-
-
-FUNCTIONS
-	// Associated with comparable type if AllDecls is set. 
-	func ComparableFactory() comparable
-
-	// 
-	func F(x int) int
-
-	// 
-	func F1() notExported
-
-	// Always under the package functions list. 
-	func NotAFactory() int
-
-	// Associated with uint type if AllDecls is set. 
-	func UintFactory() uint
-
-
-TYPES
-	// 
-	type T struct{}	// T
-
-	// 
-	var V T	// v
-
-	// 
-	func (x *T) M()
-
diff --git a/internal/backport/go/doc/testdata/b.1.golden b/internal/backport/go/doc/testdata/b.1.golden
deleted file mode 100644
index 2b62c34..0000000
--- a/internal/backport/go/doc/testdata/b.1.golden
+++ /dev/null
@@ -1,89 +0,0 @@
-// 
-PACKAGE b
-
-IMPORTPATH
-	testdata/b
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/b.go
-
-CONSTANTS
-	// 
-	const Pi = 3.14	// Pi
-
-
-VARIABLES
-	// 
-	var MaxInt int	// MaxInt
-
-
-FUNCTIONS
-	// 
-	func F(x int) int
-
-	// Always under the package functions list. 
-	func NotAFactory() int
-
-
-TYPES
-	// 
-	type T struct{}	// T
-
-	// 
-	var V T	// v
-
-	// 
-	func (x *T) M()
-
-	// Should only appear if AllDecls is set. 
-	type comparable struct{}	// overrides a predeclared type comparable
-
-	// Associated with comparable type if AllDecls is set. 
-	func ComparableFactory() comparable
-
-	// 
-	type notExported int
-
-	// 
-	const (
-		C1	notExported	= iota
-		C2
-		c3
-		C4
-		C5
-	)
-
-	// 
-	const C notExported = 0
-
-	// 
-	var (
-		U1, U2, u3, U4, U5	notExported
-		u6			notExported
-		U7			notExported	= 7
-	)
-
-	// 
-	var V notExported
-
-	// 
-	var V1, V2, v3, V4, V5 notExported
-
-	// 
-	func F1() notExported
-
-	// 
-	func f2() notExported
-
-	// Should only appear if AllDecls is set. 
-	type uint struct{}	// overrides a predeclared type uint
-
-	// Associated with uint type if AllDecls is set. 
-	func UintFactory() uint
-
-	// Associated with uint type if AllDecls is set. 
-	func uintFactory() uint
-
diff --git a/internal/backport/go/doc/testdata/b.2.golden b/internal/backport/go/doc/testdata/b.2.golden
deleted file mode 100644
index c06246a..0000000
--- a/internal/backport/go/doc/testdata/b.2.golden
+++ /dev/null
@@ -1,74 +0,0 @@
-// 
-PACKAGE b
-
-IMPORTPATH
-	testdata/b
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/b.go
-
-CONSTANTS
-	// 
-	const (
-		C1	notExported	= iota
-		C2
-	
-		C4
-		C5
-	)
-
-	// 
-	const C notExported = 0
-
-	// 
-	const Pi = 3.14	// Pi
-
-
-VARIABLES
-	// 
-	var (
-		U1, U2, U4, U5	notExported
-	
-		U7	notExported	= 7
-	)
-
-	// 
-	var MaxInt int	// MaxInt
-
-	// 
-	var V notExported
-
-	// 
-	var V1, V2, V4, V5 notExported
-
-
-FUNCTIONS
-	// Associated with comparable type if AllDecls is set. 
-	func ComparableFactory() comparable
-
-	// 
-	func F(x int) int
-
-	// 
-	func F1() notExported
-
-	// Always under the package functions list. 
-	func NotAFactory() int
-
-	// Associated with uint type if AllDecls is set. 
-	func UintFactory() uint
-
-
-TYPES
-	// 
-	type T struct{}	// T
-
-	// 
-	var V T	// v
-
-	// 
-	func (x *T) M()
-
diff --git a/internal/backport/go/doc/testdata/b.go b/internal/backport/go/doc/testdata/b.go
deleted file mode 100644
index 61b512b..0000000
--- a/internal/backport/go/doc/testdata/b.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package b
-
-import "a"
-
-// ----------------------------------------------------------------------------
-// Basic declarations
-
-const Pi = 3.14   // Pi
-var MaxInt int    // MaxInt
-type T struct{}   // T
-var V T           // v
-func F(x int) int {} // F
-func (x *T) M()   {} // M
-
-// Corner cases: association with (presumed) predeclared types
-
-// Always under the package functions list.
-func NotAFactory() int {}
-
-// Associated with uint type if AllDecls is set.
-func UintFactory() uint {}
-
-// Associated with uint type if AllDecls is set.
-func uintFactory() uint {}
-
-// Associated with comparable type if AllDecls is set.
-func ComparableFactory() comparable {}
-
-// Should only appear if AllDecls is set.
-type uint struct{} // overrides a predeclared type uint
-
-// Should only appear if AllDecls is set.
-type comparable struct{} // overrides a predeclared type comparable
-
-// ----------------------------------------------------------------------------
-// Exported declarations associated with non-exported types must always be shown.
-
-type notExported int
-
-const C notExported = 0
-
-const (
-	C1 notExported = iota
-	C2
-	c3
-	C4
-	C5
-)
-
-var V notExported
-var V1, V2, v3, V4, V5 notExported
-
-var (
-	U1, U2, u3, U4, U5 notExported
-	u6                 notExported
-	U7                 notExported = 7
-)
-
-func F1() notExported {}
-func f2() notExported {}
diff --git a/internal/backport/go/doc/testdata/benchmark.go b/internal/backport/go/doc/testdata/benchmark.go
deleted file mode 100644
index 61b5cf1..0000000
--- a/internal/backport/go/doc/testdata/benchmark.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package testing
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"runtime"
-	"time"
-)
-
-var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
-
-// An internal type but exported because it is cross-package; part of the implementation
-// of go test.
-type InternalBenchmark struct {
-	Name string
-	F    func(b *B)
-}
-
-// B is a type passed to Benchmark functions to manage benchmark
-// timing and to specify the number of iterations to run.
-type B struct {
-	common
-	N         int
-	benchmark InternalBenchmark
-	bytes     int64
-	timerOn   bool
-	result    BenchmarkResult
-}
-
-// StartTimer starts timing a test. This function is called automatically
-// before a benchmark starts, but it can also used to resume timing after
-// a call to StopTimer.
-func (b *B) StartTimer() {
-	if !b.timerOn {
-		b.start = time.Now()
-		b.timerOn = true
-	}
-}
-
-// StopTimer stops timing a test. This can be used to pause the timer
-// while performing complex initialization that you don't
-// want to measure.
-func (b *B) StopTimer() {
-	if b.timerOn {
-		b.duration += time.Now().Sub(b.start)
-		b.timerOn = false
-	}
-}
-
-// ResetTimer sets the elapsed benchmark time to zero.
-// It does not affect whether the timer is running.
-func (b *B) ResetTimer() {
-	if b.timerOn {
-		b.start = time.Now()
-	}
-	b.duration = 0
-}
-
-// SetBytes records the number of bytes processed in a single operation.
-// If this is called, the benchmark will report ns/op and MB/s.
-func (b *B) SetBytes(n int64) { b.bytes = n }
-
-func (b *B) nsPerOp() int64 {
-	if b.N <= 0 {
-		return 0
-	}
-	return b.duration.Nanoseconds() / int64(b.N)
-}
-
-// runN runs a single benchmark for the specified number of iterations.
-func (b *B) runN(n int) {
-	// Try to get a comparable environment for each run
-	// by clearing garbage from previous runs.
-	runtime.GC()
-	b.N = n
-	b.ResetTimer()
-	b.StartTimer()
-	b.benchmark.F(b)
-	b.StopTimer()
-}
-
-func min(x, y int) int {
-	if x > y {
-		return y
-	}
-	return x
-}
-
-func max(x, y int) int {
-	if x < y {
-		return y
-	}
-	return x
-}
-
-// roundDown10 rounds a number down to the nearest power of 10.
-func roundDown10(n int) int {
-	var tens = 0
-	// tens = floor(log_10(n))
-	for n > 10 {
-		n = n / 10
-		tens++
-	}
-	// result = 10^tens
-	result := 1
-	for i := 0; i < tens; i++ {
-		result *= 10
-	}
-	return result
-}
-
-// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
-func roundUp(n int) int {
-	base := roundDown10(n)
-	if n < (2 * base) {
-		return 2 * base
-	}
-	if n < (5 * base) {
-		return 5 * base
-	}
-	return 10 * base
-}
-
-// run times the benchmark function in a separate goroutine.
-func (b *B) run() BenchmarkResult {
-	go b.launch()
-	<-b.signal
-	return b.result
-}
-
-// launch launches the benchmark function. It gradually increases the number
-// of benchmark iterations until the benchmark runs for a second in order
-// to get a reasonable measurement. It prints timing information in this form
-//
-//	testing.BenchmarkHello	100000		19 ns/op
-//
-// launch is run by the fun function as a separate goroutine.
-func (b *B) launch() {
-	// Run the benchmark for a single iteration in case it's expensive.
-	n := 1
-
-	// Signal that we're done whether we return normally
-	// or by FailNow's runtime.Goexit.
-	defer func() {
-		b.signal <- b
-	}()
-
-	b.runN(n)
-	// Run the benchmark for at least the specified amount of time.
-	d := *benchTime
-	for !b.failed && b.duration < d && n < 1e9 {
-		last := n
-		// Predict iterations/sec.
-		if b.nsPerOp() == 0 {
-			n = 1e9
-		} else {
-			n = int(d.Nanoseconds() / b.nsPerOp())
-		}
-		// Run more iterations than we think we'll need for a second (1.5x).
-		// Don't grow too fast in case we had timing errors previously.
-		// Be sure to run at least one more than last time.
-		n = max(min(n+n/2, 100*last), last+1)
-		// Round up to something easy to read.
-		n = roundUp(n)
-		b.runN(n)
-	}
-	b.result = BenchmarkResult{b.N, b.duration, b.bytes}
-}
-
-// The results of a benchmark run.
-type BenchmarkResult struct {
-	N     int           // The number of iterations.
-	T     time.Duration // The total time taken.
-	Bytes int64         // Bytes processed in one iteration.
-}
-
-func (r BenchmarkResult) NsPerOp() int64 {
-	if r.N <= 0 {
-		return 0
-	}
-	return r.T.Nanoseconds() / int64(r.N)
-}
-
-func (r BenchmarkResult) mbPerSec() float64 {
-	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
-		return 0
-	}
-	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
-}
-
-func (r BenchmarkResult) String() string {
-	mbs := r.mbPerSec()
-	mb := ""
-	if mbs != 0 {
-		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
-	}
-	nsop := r.NsPerOp()
-	ns := fmt.Sprintf("%10d ns/op", nsop)
-	if r.N > 0 && nsop < 100 {
-		// The format specifiers here make sure that
-		// the ones digits line up for all three possible formats.
-		if nsop < 10 {
-			ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		} else {
-			ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		}
-	}
-	return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
-}
-
-// An internal function but exported because it is cross-package; part of the implementation
-// of go test.
-func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
-	// If no flag was specified, don't run benchmarks.
-	if len(*matchBenchmarks) == 0 {
-		return
-	}
-	for _, Benchmark := range benchmarks {
-		matched, err := matchString(*matchBenchmarks, Benchmark.Name)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
-			os.Exit(1)
-		}
-		if !matched {
-			continue
-		}
-		for _, procs := range cpuList {
-			runtime.GOMAXPROCS(procs)
-			b := &B{
-				common: common{
-					signal: make(chan interface{}),
-				},
-				benchmark: Benchmark,
-			}
-			benchName := Benchmark.Name
-			if procs != 1 {
-				benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
-			}
-			fmt.Printf("%s\t", benchName)
-			r := b.run()
-			if b.failed {
-				// The output could be very long here, but probably isn't.
-				// We print it all, regardless, because we don't want to trim the reason
-				// the benchmark failed.
-				fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
-				continue
-			}
-			fmt.Printf("%v\n", r)
-			// Unlike with tests, we ignore the -chatty flag and always print output for
-			// benchmarks since the output generation time will skew the results.
-			if len(b.output) > 0 {
-				b.trimOutput()
-				fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
-			}
-			if p := runtime.GOMAXPROCS(-1); p != procs {
-				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
-			}
-		}
-	}
-}
-
-// trimOutput shortens the output from a benchmark, which can be very long.
-func (b *B) trimOutput() {
-	// The output is likely to appear multiple times because the benchmark
-	// is run multiple times, but at least it will be seen. This is not a big deal
-	// because benchmarks rarely print, but just in case, we trim it if it's too long.
-	const maxNewlines = 10
-	for nlCount, j := 0, 0; j < len(b.output); j++ {
-		if b.output[j] == '\n' {
-			nlCount++
-			if nlCount >= maxNewlines {
-				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
-				break
-			}
-		}
-	}
-}
-
-// Benchmark benchmarks a single function. Useful for creating
-// custom benchmarks that do not use go test.
-func Benchmark(f func(b *B)) BenchmarkResult {
-	b := &B{
-		common: common{
-			signal: make(chan interface{}),
-		},
-		benchmark: InternalBenchmark{"", f},
-	}
-	return b.run()
-}
diff --git a/internal/backport/go/doc/testdata/blank.0.golden b/internal/backport/go/doc/testdata/blank.0.golden
deleted file mode 100644
index 70f2929..0000000
--- a/internal/backport/go/doc/testdata/blank.0.golden
+++ /dev/null
@@ -1,62 +0,0 @@
-// Package blank is a go/doc test for the handling of _. See issue ...
-PACKAGE blank
-
-IMPORTPATH
-	testdata/blank
-
-IMPORTS
-	os
-
-FILENAMES
-	testdata/blank.go
-
-CONSTANTS
-	// T constants counting from unexported constants. 
-	const (
-		C1	T
-		C2
-	
-		C3
-	
-		C4	int
-	)
-
-	// Constants with a single type that is not propagated. 
-	const (
-		Default		= 0644
-		Useless		= 0312
-		WideOpen	= 0777
-	)
-
-	// Constants with an imported type that is propagated. 
-	const (
-		M1	os.FileMode
-		M2
-		M3
-	)
-
-	// Package constants. 
-	const (
-		I1	int
-		I2
-	)
-
-
-TYPES
-	// S has a padding field. 
-	type S struct {
-		H	uint32
-	
-		A	uint8
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T int
-
-	// T constants counting from a blank constant. 
-	const (
-		T1	T
-		T2
-	)
-
diff --git a/internal/backport/go/doc/testdata/blank.1.golden b/internal/backport/go/doc/testdata/blank.1.golden
deleted file mode 100644
index 8098cb6..0000000
--- a/internal/backport/go/doc/testdata/blank.1.golden
+++ /dev/null
@@ -1,83 +0,0 @@
-// Package blank is a go/doc test for the handling of _. See issue ...
-PACKAGE blank
-
-IMPORTPATH
-	testdata/blank
-
-IMPORTS
-	os
-
-FILENAMES
-	testdata/blank.go
-
-CONSTANTS
-	// T constants counting from unexported constants. 
-	const (
-		tweedledee	T	= iota
-		tweedledum
-		C1
-		C2
-		alice
-		C3
-		redQueen	int	= iota
-		C4
-	)
-
-	// Constants with a single type that is not propagated. 
-	const (
-		zero		os.FileMode	= 0
-		Default				= 0644
-		Useless				= 0312
-		WideOpen			= 0777
-	)
-
-	// Constants with an imported type that is propagated. 
-	const (
-		zero	os.FileMode	= 0
-		M1
-		M2
-		M3
-	)
-
-	// Package constants. 
-	const (
-		_	int	= iota
-		I1
-		I2
-	)
-
-	// Unexported constants counting from blank iota. See issue 9615. 
-	const (
-		_	= iota
-		one	= iota + 1
-	)
-
-
-VARIABLES
-	// 
-	var _ = T(55)
-
-
-FUNCTIONS
-	// 
-	func _()
-
-
-TYPES
-	// S has a padding field. 
-	type S struct {
-		H	uint32
-		_	uint8
-		A	uint8
-	}
-
-	// 
-	type T int
-
-	// T constants counting from a blank constant. 
-	const (
-		_	T	= iota
-		T1
-		T2
-	)
-
diff --git a/internal/backport/go/doc/testdata/blank.2.golden b/internal/backport/go/doc/testdata/blank.2.golden
deleted file mode 100644
index 70f2929..0000000
--- a/internal/backport/go/doc/testdata/blank.2.golden
+++ /dev/null
@@ -1,62 +0,0 @@
-// Package blank is a go/doc test for the handling of _. See issue ...
-PACKAGE blank
-
-IMPORTPATH
-	testdata/blank
-
-IMPORTS
-	os
-
-FILENAMES
-	testdata/blank.go
-
-CONSTANTS
-	// T constants counting from unexported constants. 
-	const (
-		C1	T
-		C2
-	
-		C3
-	
-		C4	int
-	)
-
-	// Constants with a single type that is not propagated. 
-	const (
-		Default		= 0644
-		Useless		= 0312
-		WideOpen	= 0777
-	)
-
-	// Constants with an imported type that is propagated. 
-	const (
-		M1	os.FileMode
-		M2
-		M3
-	)
-
-	// Package constants. 
-	const (
-		I1	int
-		I2
-	)
-
-
-TYPES
-	// S has a padding field. 
-	type S struct {
-		H	uint32
-	
-		A	uint8
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T int
-
-	// T constants counting from a blank constant. 
-	const (
-		T1	T
-		T2
-	)
-
diff --git a/internal/backport/go/doc/testdata/blank.go b/internal/backport/go/doc/testdata/blank.go
deleted file mode 100644
index 5ea6186..0000000
--- a/internal/backport/go/doc/testdata/blank.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package blank is a go/doc test for the handling of _.
-// See issue 5397.
-package blank
-
-import "os"
-
-type T int
-
-// T constants counting from a blank constant.
-const (
-	_ T = iota
-	T1
-	T2
-)
-
-// T constants counting from unexported constants.
-const (
-	tweedledee T = iota
-	tweedledum
-	C1
-	C2
-	alice
-	C3
-	redQueen int = iota
-	C4
-)
-
-// Constants with a single type that is not propagated.
-const (
-	zero     os.FileMode = 0
-	Default              = 0644
-	Useless              = 0312
-	WideOpen             = 0777
-)
-
-// Constants with an imported type that is propagated.
-const (
-	zero os.FileMode = 0
-	M1
-	M2
-	M3
-)
-
-// Package constants.
-const (
-	_ int = iota
-	I1
-	I2
-)
-
-// Unexported constants counting from blank iota.
-// See issue 9615.
-const (
-	_   = iota
-	one = iota + 1
-)
-
-// Blanks not in doc output:
-
-// S has a padding field.
-type S struct {
-	H uint32
-	_ uint8
-	A uint8
-}
-
-func _() {}
-
-type _ T
-
-var _ = T(55)
diff --git a/internal/backport/go/doc/testdata/bugpara.0.golden b/internal/backport/go/doc/testdata/bugpara.0.golden
deleted file mode 100644
index 5804859..0000000
--- a/internal/backport/go/doc/testdata/bugpara.0.golden
+++ /dev/null
@@ -1,20 +0,0 @@
-// 
-PACKAGE bugpara
-
-IMPORTPATH
-	testdata/bugpara
-
-FILENAMES
-	testdata/bugpara.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
-
-BUGS
-BUG(rsc)	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
diff --git a/internal/backport/go/doc/testdata/bugpara.1.golden b/internal/backport/go/doc/testdata/bugpara.1.golden
deleted file mode 100644
index 5804859..0000000
--- a/internal/backport/go/doc/testdata/bugpara.1.golden
+++ /dev/null
@@ -1,20 +0,0 @@
-// 
-PACKAGE bugpara
-
-IMPORTPATH
-	testdata/bugpara
-
-FILENAMES
-	testdata/bugpara.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
-
-BUGS
-BUG(rsc)	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
diff --git a/internal/backport/go/doc/testdata/bugpara.2.golden b/internal/backport/go/doc/testdata/bugpara.2.golden
deleted file mode 100644
index 5804859..0000000
--- a/internal/backport/go/doc/testdata/bugpara.2.golden
+++ /dev/null
@@ -1,20 +0,0 @@
-// 
-PACKAGE bugpara
-
-IMPORTPATH
-	testdata/bugpara
-
-FILENAMES
-	testdata/bugpara.go
-
-BUGS .Bugs is now deprecated, please use .Notes instead
-	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
-
-BUGS
-BUG(rsc)	Sometimes bugs have multiple paragraphs.
-	
-	Like this one.
-
diff --git a/internal/backport/go/doc/testdata/bugpara.go b/internal/backport/go/doc/testdata/bugpara.go
deleted file mode 100644
index 0360a6f..0000000
--- a/internal/backport/go/doc/testdata/bugpara.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bugpara
-
-// BUG(rsc): Sometimes bugs have multiple paragraphs.
-//
-// Like this one.
diff --git a/internal/backport/go/doc/testdata/c.0.golden b/internal/backport/go/doc/testdata/c.0.golden
deleted file mode 100644
index e21959b..0000000
--- a/internal/backport/go/doc/testdata/c.0.golden
+++ /dev/null
@@ -1,48 +0,0 @@
-// 
-PACKAGE c
-
-IMPORTPATH
-	testdata/c
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/c.go
-
-TYPES
-	// A (should see this) 
-	type A struct{}
-
-	// B (should see this) 
-	type B struct{}
-
-	// C (should see this) 
-	type C struct{}
-
-	// D (should see this) 
-	type D struct{}
-
-	// E1 (should see this) 
-	type E1 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E2 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E3 struct{}
-
-	// E4 (should see this) 
-	type E4 struct{}
-
-	// 
-	type T1 struct{}
-
-	// 
-	func (t1 *T1) M()
-
-	// T2 must not show methods of local T1 
-	type T2 struct {
-		a.T1	// not the same as locally declared T1
-	}
-
diff --git a/internal/backport/go/doc/testdata/c.1.golden b/internal/backport/go/doc/testdata/c.1.golden
deleted file mode 100644
index e21959b..0000000
--- a/internal/backport/go/doc/testdata/c.1.golden
+++ /dev/null
@@ -1,48 +0,0 @@
-// 
-PACKAGE c
-
-IMPORTPATH
-	testdata/c
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/c.go
-
-TYPES
-	// A (should see this) 
-	type A struct{}
-
-	// B (should see this) 
-	type B struct{}
-
-	// C (should see this) 
-	type C struct{}
-
-	// D (should see this) 
-	type D struct{}
-
-	// E1 (should see this) 
-	type E1 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E2 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E3 struct{}
-
-	// E4 (should see this) 
-	type E4 struct{}
-
-	// 
-	type T1 struct{}
-
-	// 
-	func (t1 *T1) M()
-
-	// T2 must not show methods of local T1 
-	type T2 struct {
-		a.T1	// not the same as locally declared T1
-	}
-
diff --git a/internal/backport/go/doc/testdata/c.2.golden b/internal/backport/go/doc/testdata/c.2.golden
deleted file mode 100644
index e21959b..0000000
--- a/internal/backport/go/doc/testdata/c.2.golden
+++ /dev/null
@@ -1,48 +0,0 @@
-// 
-PACKAGE c
-
-IMPORTPATH
-	testdata/c
-
-IMPORTS
-	a
-
-FILENAMES
-	testdata/c.go
-
-TYPES
-	// A (should see this) 
-	type A struct{}
-
-	// B (should see this) 
-	type B struct{}
-
-	// C (should see this) 
-	type C struct{}
-
-	// D (should see this) 
-	type D struct{}
-
-	// E1 (should see this) 
-	type E1 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E2 struct{}
-
-	// E (should see this for E2 and E3) 
-	type E3 struct{}
-
-	// E4 (should see this) 
-	type E4 struct{}
-
-	// 
-	type T1 struct{}
-
-	// 
-	func (t1 *T1) M()
-
-	// T2 must not show methods of local T1 
-	type T2 struct {
-		a.T1	// not the same as locally declared T1
-	}
-
diff --git a/internal/backport/go/doc/testdata/c.go b/internal/backport/go/doc/testdata/c.go
deleted file mode 100644
index e0f3919..0000000
--- a/internal/backport/go/doc/testdata/c.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package c
-
-import "a"
-
-// ----------------------------------------------------------------------------
-// Test that empty declarations don't cause problems
-
-const ()
-
-type ()
-
-var ()
-
-// ----------------------------------------------------------------------------
-// Test that types with documentation on both, the Decl and the Spec node
-// are handled correctly.
-
-// A (should see this)
-type A struct{}
-
-// B (should see this)
-type (
-	B struct{}
-)
-
-type (
-	// C (should see this)
-	C struct{}
-)
-
-// D (should not see this)
-type (
-	// D (should see this)
-	D struct{}
-)
-
-// E (should see this for E2 and E3)
-type (
-	// E1 (should see this)
-	E1 struct{}
-	E2 struct{}
-	E3 struct{}
-	// E4 (should see this)
-	E4 struct{}
-)
-
-// ----------------------------------------------------------------------------
-// Test that local and imported types are different when
-// handling anonymous fields.
-
-type T1 struct{}
-
-func (t1 *T1) M() {}
-
-// T2 must not show methods of local T1
-type T2 struct {
-	a.T1 // not the same as locally declared T1
-}
diff --git a/internal/backport/go/doc/testdata/d.0.golden b/internal/backport/go/doc/testdata/d.0.golden
deleted file mode 100644
index c005199..0000000
--- a/internal/backport/go/doc/testdata/d.0.golden
+++ /dev/null
@@ -1,104 +0,0 @@
-// 
-PACKAGE d
-
-IMPORTPATH
-	testdata/d
-
-FILENAMES
-	testdata/d1.go
-	testdata/d2.go
-
-CONSTANTS
-	// CBx constants should appear before CAx constants. 
-	const (
-		CB2	= iota	// before CB1
-		CB1		// before CB0
-		CB0		// at end
-	)
-
-	// CAx constants should appear after CBx constants. 
-	const (
-		CA2	= iota	// before CA1
-		CA1		// before CA0
-		CA0		// at end
-	)
-
-	// C0 should be first. 
-	const C0 = 0
-
-	// C1 should be second. 
-	const C1 = 1
-
-	// C2 should be third. 
-	const C2 = 2
-
-	// 
-	const (
-		// Single const declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Cungrouped = 0
-	)
-
-
-VARIABLES
-	// VBx variables should appear before VAx variables. 
-	var (
-		VB2	int	// before VB1
-		VB1	int	// before VB0
-		VB0	int	// at end
-	)
-
-	// VAx variables should appear after VBx variables. 
-	var (
-		VA2	int	// before VA1
-		VA1	int	// before VA0
-		VA0	int	// at end
-	)
-
-	// V0 should be first. 
-	var V0 uintptr
-
-	// V1 should be second. 
-	var V1 uint
-
-	// V2 should be third. 
-	var V2 int
-
-	// 
-	var (
-		// Single var declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Vungrouped = 0
-	)
-
-
-FUNCTIONS
-	// F0 should be first. 
-	func F0()
-
-	// F1 should be second. 
-	func F1()
-
-	// F2 should be third. 
-	func F2()
-
-
-TYPES
-	// T0 should be first. 
-	type T0 struct{}
-
-	// T1 should be second. 
-	type T1 struct{}
-
-	// T2 should be third. 
-	type T2 struct{}
-
-	// TG0 should be first. 
-	type TG0 struct{}
-
-	// TG1 should be second. 
-	type TG1 struct{}
-
-	// TG2 should be third. 
-	type TG2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/d.1.golden b/internal/backport/go/doc/testdata/d.1.golden
deleted file mode 100644
index c005199..0000000
--- a/internal/backport/go/doc/testdata/d.1.golden
+++ /dev/null
@@ -1,104 +0,0 @@
-// 
-PACKAGE d
-
-IMPORTPATH
-	testdata/d
-
-FILENAMES
-	testdata/d1.go
-	testdata/d2.go
-
-CONSTANTS
-	// CBx constants should appear before CAx constants. 
-	const (
-		CB2	= iota	// before CB1
-		CB1		// before CB0
-		CB0		// at end
-	)
-
-	// CAx constants should appear after CBx constants. 
-	const (
-		CA2	= iota	// before CA1
-		CA1		// before CA0
-		CA0		// at end
-	)
-
-	// C0 should be first. 
-	const C0 = 0
-
-	// C1 should be second. 
-	const C1 = 1
-
-	// C2 should be third. 
-	const C2 = 2
-
-	// 
-	const (
-		// Single const declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Cungrouped = 0
-	)
-
-
-VARIABLES
-	// VBx variables should appear before VAx variables. 
-	var (
-		VB2	int	// before VB1
-		VB1	int	// before VB0
-		VB0	int	// at end
-	)
-
-	// VAx variables should appear after VBx variables. 
-	var (
-		VA2	int	// before VA1
-		VA1	int	// before VA0
-		VA0	int	// at end
-	)
-
-	// V0 should be first. 
-	var V0 uintptr
-
-	// V1 should be second. 
-	var V1 uint
-
-	// V2 should be third. 
-	var V2 int
-
-	// 
-	var (
-		// Single var declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Vungrouped = 0
-	)
-
-
-FUNCTIONS
-	// F0 should be first. 
-	func F0()
-
-	// F1 should be second. 
-	func F1()
-
-	// F2 should be third. 
-	func F2()
-
-
-TYPES
-	// T0 should be first. 
-	type T0 struct{}
-
-	// T1 should be second. 
-	type T1 struct{}
-
-	// T2 should be third. 
-	type T2 struct{}
-
-	// TG0 should be first. 
-	type TG0 struct{}
-
-	// TG1 should be second. 
-	type TG1 struct{}
-
-	// TG2 should be third. 
-	type TG2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/d.2.golden b/internal/backport/go/doc/testdata/d.2.golden
deleted file mode 100644
index c005199..0000000
--- a/internal/backport/go/doc/testdata/d.2.golden
+++ /dev/null
@@ -1,104 +0,0 @@
-// 
-PACKAGE d
-
-IMPORTPATH
-	testdata/d
-
-FILENAMES
-	testdata/d1.go
-	testdata/d2.go
-
-CONSTANTS
-	// CBx constants should appear before CAx constants. 
-	const (
-		CB2	= iota	// before CB1
-		CB1		// before CB0
-		CB0		// at end
-	)
-
-	// CAx constants should appear after CBx constants. 
-	const (
-		CA2	= iota	// before CA1
-		CA1		// before CA0
-		CA0		// at end
-	)
-
-	// C0 should be first. 
-	const C0 = 0
-
-	// C1 should be second. 
-	const C1 = 1
-
-	// C2 should be third. 
-	const C2 = 2
-
-	// 
-	const (
-		// Single const declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Cungrouped = 0
-	)
-
-
-VARIABLES
-	// VBx variables should appear before VAx variables. 
-	var (
-		VB2	int	// before VB1
-		VB1	int	// before VB0
-		VB0	int	// at end
-	)
-
-	// VAx variables should appear after VBx variables. 
-	var (
-		VA2	int	// before VA1
-		VA1	int	// before VA0
-		VA0	int	// at end
-	)
-
-	// V0 should be first. 
-	var V0 uintptr
-
-	// V1 should be second. 
-	var V1 uint
-
-	// V2 should be third. 
-	var V2 int
-
-	// 
-	var (
-		// Single var declarations inside ()'s are considered ungrouped
-		// and show up in sorted order.
-		Vungrouped = 0
-	)
-
-
-FUNCTIONS
-	// F0 should be first. 
-	func F0()
-
-	// F1 should be second. 
-	func F1()
-
-	// F2 should be third. 
-	func F2()
-
-
-TYPES
-	// T0 should be first. 
-	type T0 struct{}
-
-	// T1 should be second. 
-	type T1 struct{}
-
-	// T2 should be third. 
-	type T2 struct{}
-
-	// TG0 should be first. 
-	type TG0 struct{}
-
-	// TG1 should be second. 
-	type TG1 struct{}
-
-	// TG2 should be third. 
-	type TG2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/d1.go b/internal/backport/go/doc/testdata/d1.go
deleted file mode 100644
index ebd6941..0000000
--- a/internal/backport/go/doc/testdata/d1.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test cases for sort order of declarations.
-
-package d
-
-// C2 should be third.
-const C2 = 2
-
-// V2 should be third.
-var V2 int
-
-// CBx constants should appear before CAx constants.
-const (
-	CB2 = iota // before CB1
-	CB1        // before CB0
-	CB0        // at end
-)
-
-// VBx variables should appear before VAx variables.
-var (
-	VB2 int // before VB1
-	VB1 int // before VB0
-	VB0 int // at end
-)
-
-const (
-	// Single const declarations inside ()'s are considered ungrouped
-	// and show up in sorted order.
-	Cungrouped = 0
-)
-
-var (
-	// Single var declarations inside ()'s are considered ungrouped
-	// and show up in sorted order.
-	Vungrouped = 0
-)
-
-// T2 should be third.
-type T2 struct{}
-
-// Grouped types are sorted nevertheless.
-type (
-	// TG2 should be third.
-	TG2 struct{}
-
-	// TG1 should be second.
-	TG1 struct{}
-
-	// TG0 should be first.
-	TG0 struct{}
-)
-
-// F2 should be third.
-func F2() {}
diff --git a/internal/backport/go/doc/testdata/d2.go b/internal/backport/go/doc/testdata/d2.go
deleted file mode 100644
index 2f56f4f..0000000
--- a/internal/backport/go/doc/testdata/d2.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test cases for sort order of declarations.
-
-package d
-
-// C1 should be second.
-const C1 = 1
-
-// C0 should be first.
-const C0 = 0
-
-// V1 should be second.
-var V1 uint
-
-// V0 should be first.
-var V0 uintptr
-
-// CAx constants should appear after CBx constants.
-const (
-	CA2 = iota // before CA1
-	CA1        // before CA0
-	CA0        // at end
-)
-
-// VAx variables should appear after VBx variables.
-var (
-	VA2 int // before VA1
-	VA1 int // before VA0
-	VA0 int // at end
-)
-
-// T1 should be second.
-type T1 struct{}
-
-// T0 should be first.
-type T0 struct{}
-
-// F1 should be second.
-func F1() {}
-
-// F0 should be first.
-func F0() {}
diff --git a/internal/backport/go/doc/testdata/e.0.golden b/internal/backport/go/doc/testdata/e.0.golden
deleted file mode 100644
index 6987e58..0000000
--- a/internal/backport/go/doc/testdata/e.0.golden
+++ /dev/null
@@ -1,109 +0,0 @@
-// The package e is a go/doc test for embedded methods. 
-PACKAGE e
-
-IMPORTPATH
-	testdata/e
-
-FILENAMES
-	testdata/e.go
-
-TYPES
-	// T1 has no embedded (level 1) M method due to conflict. 
-	type T1 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T2 has only M as top-level method. 
-	type T2 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T2.M should appear as method of T2. 
-	func (T2) M()
-
-	// T3 has only M as top-level method. 
-	type T3 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T3.M should appear as method of T3. 
-	func (T3) M()
-
-	// 
-	type T4 struct{}
-
-	// T4.M should appear as method of T5 only if AllMethods is set. 
-	func (*T4) M()
-
-	// 
-	type T5 struct {
-		T4
-	}
-
-	// 
-	type U1 struct {
-		*U1
-	}
-
-	// U1.M should appear as method of U1. 
-	func (*U1) M()
-
-	// 
-	type U2 struct {
-		*U3
-	}
-
-	// U2.M should appear as method of U2 and as method of U3 only if ...
-	func (*U2) M()
-
-	// 
-	type U3 struct {
-		*U2
-	}
-
-	// U3.N should appear as method of U3 and as method of U2 only if ...
-	func (*U3) N()
-
-	// 
-	type U4 struct {
-		// contains filtered or unexported fields
-	}
-
-	// U4.M should appear as method of U4. 
-	func (*U4) M()
-
-	// 
-	type V1 struct {
-		*V2
-		*V5
-	}
-
-	// 
-	type V2 struct {
-		*V3
-	}
-
-	// 
-	type V3 struct {
-		*V4
-	}
-
-	// 
-	type V4 struct {
-		*V5
-	}
-
-	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
-	func (*V4) M()
-
-	// 
-	type V5 struct {
-		*V6
-	}
-
-	// 
-	type V6 struct{}
-
-	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
-	func (*V6) M()
-
diff --git a/internal/backport/go/doc/testdata/e.1.golden b/internal/backport/go/doc/testdata/e.1.golden
deleted file mode 100644
index cbe22e0..0000000
--- a/internal/backport/go/doc/testdata/e.1.golden
+++ /dev/null
@@ -1,144 +0,0 @@
-// The package e is a go/doc test for embedded methods. 
-PACKAGE e
-
-IMPORTPATH
-	testdata/e
-
-FILENAMES
-	testdata/e.go
-
-TYPES
-	// T1 has no embedded (level 1) M method due to conflict. 
-	type T1 struct {
-		t1
-		t2
-	}
-
-	// T2 has only M as top-level method. 
-	type T2 struct {
-		t1
-	}
-
-	// T2.M should appear as method of T2. 
-	func (T2) M()
-
-	// T3 has only M as top-level method. 
-	type T3 struct {
-		t1e
-		t2e
-	}
-
-	// T3.M should appear as method of T3. 
-	func (T3) M()
-
-	// 
-	type T4 struct{}
-
-	// T4.M should appear as method of T5 only if AllMethods is set. 
-	func (*T4) M()
-
-	// 
-	type T5 struct {
-		T4
-	}
-
-	// 
-	type U1 struct {
-		*U1
-	}
-
-	// U1.M should appear as method of U1. 
-	func (*U1) M()
-
-	// 
-	type U2 struct {
-		*U3
-	}
-
-	// U2.M should appear as method of U2 and as method of U3 only if ...
-	func (*U2) M()
-
-	// 
-	type U3 struct {
-		*U2
-	}
-
-	// U3.N should appear as method of U3 and as method of U2 only if ...
-	func (*U3) N()
-
-	// 
-	type U4 struct {
-		*u5
-	}
-
-	// U4.M should appear as method of U4. 
-	func (*U4) M()
-
-	// 
-	type V1 struct {
-		*V2
-		*V5
-	}
-
-	// 
-	type V2 struct {
-		*V3
-	}
-
-	// 
-	type V3 struct {
-		*V4
-	}
-
-	// 
-	type V4 struct {
-		*V5
-	}
-
-	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
-	func (*V4) M()
-
-	// 
-	type V5 struct {
-		*V6
-	}
-
-	// 
-	type V6 struct{}
-
-	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
-	func (*V6) M()
-
-	// 
-	type t1 struct{}
-
-	// t1.M should not appear as method in a Tx type. 
-	func (t1) M()
-
-	// 
-	type t1e struct {
-		t1
-	}
-
-	// t1.M should not appear as method in a Tx type. 
-	func (t1e) M()
-
-	// 
-	type t2 struct{}
-
-	// t2.M should not appear as method in a Tx type. 
-	func (t2) M()
-
-	// 
-	type t2e struct {
-		t2
-	}
-
-	// t2.M should not appear as method in a Tx type. 
-	func (t2e) M()
-
-	// 
-	type u5 struct {
-		*U4
-	}
-
diff --git a/internal/backport/go/doc/testdata/e.2.golden b/internal/backport/go/doc/testdata/e.2.golden
deleted file mode 100644
index e7b05e8..0000000
--- a/internal/backport/go/doc/testdata/e.2.golden
+++ /dev/null
@@ -1,130 +0,0 @@
-// The package e is a go/doc test for embedded methods. 
-PACKAGE e
-
-IMPORTPATH
-	testdata/e
-
-FILENAMES
-	testdata/e.go
-
-TYPES
-	// T1 has no embedded (level 1) M method due to conflict. 
-	type T1 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T2 has only M as top-level method. 
-	type T2 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T2.M should appear as method of T2. 
-	func (T2) M()
-
-	// T3 has only M as top-level method. 
-	type T3 struct {
-		// contains filtered or unexported fields
-	}
-
-	// T3.M should appear as method of T3. 
-	func (T3) M()
-
-	// 
-	type T4 struct{}
-
-	// T4.M should appear as method of T5 only if AllMethods is set. 
-	func (*T4) M()
-
-	// 
-	type T5 struct {
-		T4
-	}
-
-	// T4.M should appear as method of T5 only if AllMethods is set. 
-	func (*T5) M()
-
-	// 
-	type U1 struct {
-		*U1
-	}
-
-	// U1.M should appear as method of U1. 
-	func (*U1) M()
-
-	// 
-	type U2 struct {
-		*U3
-	}
-
-	// U2.M should appear as method of U2 and as method of U3 only if ...
-	func (*U2) M()
-
-	// U3.N should appear as method of U3 and as method of U2 only if ...
-	func (U2) N()
-
-	// 
-	type U3 struct {
-		*U2
-	}
-
-	// U2.M should appear as method of U2 and as method of U3 only if ...
-	func (U3) M()
-
-	// U3.N should appear as method of U3 and as method of U2 only if ...
-	func (*U3) N()
-
-	// 
-	type U4 struct {
-		// contains filtered or unexported fields
-	}
-
-	// U4.M should appear as method of U4. 
-	func (*U4) M()
-
-	// 
-	type V1 struct {
-		*V2
-		*V5
-	}
-
-	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
-	func (V1) M()
-
-	// 
-	type V2 struct {
-		*V3
-	}
-
-	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
-	func (V2) M()
-
-	// 
-	type V3 struct {
-		*V4
-	}
-
-	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
-	func (V3) M()
-
-	// 
-	type V4 struct {
-		*V5
-	}
-
-	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
-	func (*V4) M()
-
-	// 
-	type V5 struct {
-		*V6
-	}
-
-	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
-	func (V5) M()
-
-	// 
-	type V6 struct{}
-
-	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
-	func (*V6) M()
-
diff --git a/internal/backport/go/doc/testdata/e.go b/internal/backport/go/doc/testdata/e.go
deleted file mode 100644
index ec432e3..0000000
--- a/internal/backport/go/doc/testdata/e.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The package e is a go/doc test for embedded methods.
-package e
-
-// ----------------------------------------------------------------------------
-// Conflicting methods M must not show up.
-
-type t1 struct{}
-
-// t1.M should not appear as method in a Tx type.
-func (t1) M() {}
-
-type t2 struct{}
-
-// t2.M should not appear as method in a Tx type.
-func (t2) M() {}
-
-// T1 has no embedded (level 1) M method due to conflict.
-type T1 struct {
-	t1
-	t2
-}
-
-// ----------------------------------------------------------------------------
-// Higher-level method M wins over lower-level method M.
-
-// T2 has only M as top-level method.
-type T2 struct {
-	t1
-}
-
-// T2.M should appear as method of T2.
-func (T2) M() {}
-
-// ----------------------------------------------------------------------------
-// Higher-level method M wins over lower-level conflicting methods M.
-
-type t1e struct {
-	t1
-}
-
-type t2e struct {
-	t2
-}
-
-// T3 has only M as top-level method.
-type T3 struct {
-	t1e
-	t2e
-}
-
-// T3.M should appear as method of T3.
-func (T3) M() {}
-
-// ----------------------------------------------------------------------------
-// Don't show conflicting methods M embedded via an exported and non-exported
-// type.
-
-// T1 has no embedded (level 1) M method due to conflict.
-type T4 struct {
-	t2
-	T2
-}
-
-// ----------------------------------------------------------------------------
-// Don't show embedded methods of exported anonymous fields unless AllMethods
-// is set.
-
-type T4 struct{}
-
-// T4.M should appear as method of T5 only if AllMethods is set.
-func (*T4) M() {}
-
-type T5 struct {
-	T4
-}
-
-// ----------------------------------------------------------------------------
-// Recursive type declarations must not lead to endless recursion.
-
-type U1 struct {
-	*U1
-}
-
-// U1.M should appear as method of U1.
-func (*U1) M() {}
-
-type U2 struct {
-	*U3
-}
-
-// U2.M should appear as method of U2 and as method of U3 only if AllMethods is set.
-func (*U2) M() {}
-
-type U3 struct {
-	*U2
-}
-
-// U3.N should appear as method of U3 and as method of U2 only if AllMethods is set.
-func (*U3) N() {}
-
-type U4 struct {
-	*u5
-}
-
-// U4.M should appear as method of U4.
-func (*U4) M() {}
-
-type u5 struct {
-	*U4
-}
-
-// ----------------------------------------------------------------------------
-// A higher-level embedded type (and its methods) wins over the same type (and
-// its methods) embedded at a lower level.
-
-type V1 struct {
-	*V2
-	*V5
-}
-
-type V2 struct {
-	*V3
-}
-
-type V3 struct {
-	*V4
-}
-
-type V4 struct {
-	*V5
-}
-
-type V5 struct {
-	*V6
-}
-
-type V6 struct{}
-
-// V4.M should appear as method of V2 and V3 if AllMethods is set.
-func (*V4) M() {}
-
-// V6.M should appear as method of V1 and V5 if AllMethods is set.
-func (*V6) M() {}
diff --git a/internal/backport/go/doc/testdata/error1.0.golden b/internal/backport/go/doc/testdata/error1.0.golden
deleted file mode 100644
index 6c6fe5d..0000000
--- a/internal/backport/go/doc/testdata/error1.0.golden
+++ /dev/null
@@ -1,30 +0,0 @@
-// 
-PACKAGE error1
-
-IMPORTPATH
-	testdata/error1
-
-FILENAMES
-	testdata/error1.go
-
-TYPES
-	// 
-	type I0 interface {
-		// When embedded, the predeclared error interface
-		// must remain visible in interface types.
-		error
-	}
-
-	// 
-	type S0 struct {
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// error should be visible
-			error
-		}
-	}
-
diff --git a/internal/backport/go/doc/testdata/error1.1.golden b/internal/backport/go/doc/testdata/error1.1.golden
deleted file mode 100644
index a8dc2e7..0000000
--- a/internal/backport/go/doc/testdata/error1.1.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-// 
-PACKAGE error1
-
-IMPORTPATH
-	testdata/error1
-
-FILENAMES
-	testdata/error1.go
-
-TYPES
-	// 
-	type I0 interface {
-		// When embedded, the predeclared error interface
-		// must remain visible in interface types.
-		error
-	}
-
-	// 
-	type S0 struct {
-		// In struct types, an embedded error must only be visible
-		// if AllDecls is set.
-		error
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// error should be visible
-			error
-		}
-	}
-
diff --git a/internal/backport/go/doc/testdata/error1.2.golden b/internal/backport/go/doc/testdata/error1.2.golden
deleted file mode 100644
index 6c6fe5d..0000000
--- a/internal/backport/go/doc/testdata/error1.2.golden
+++ /dev/null
@@ -1,30 +0,0 @@
-// 
-PACKAGE error1
-
-IMPORTPATH
-	testdata/error1
-
-FILENAMES
-	testdata/error1.go
-
-TYPES
-	// 
-	type I0 interface {
-		// When embedded, the predeclared error interface
-		// must remain visible in interface types.
-		error
-	}
-
-	// 
-	type S0 struct {
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// error should be visible
-			error
-		}
-	}
-
diff --git a/internal/backport/go/doc/testdata/error1.go b/internal/backport/go/doc/testdata/error1.go
deleted file mode 100644
index 3c777a7..0000000
--- a/internal/backport/go/doc/testdata/error1.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package error1
-
-type I0 interface {
-	// When embedded, the predeclared error interface
-	// must remain visible in interface types.
-	error
-}
-
-type T0 struct {
-	ExportedField interface {
-		// error should be visible
-		error
-	}
-}
-
-type S0 struct {
-	// In struct types, an embedded error must only be visible
-	// if AllDecls is set.
-	error
-}
diff --git a/internal/backport/go/doc/testdata/error2.0.golden b/internal/backport/go/doc/testdata/error2.0.golden
deleted file mode 100644
index dedfe41..0000000
--- a/internal/backport/go/doc/testdata/error2.0.golden
+++ /dev/null
@@ -1,27 +0,0 @@
-// 
-PACKAGE error2
-
-IMPORTPATH
-	testdata/error2
-
-FILENAMES
-	testdata/error2.go
-
-TYPES
-	// 
-	type I0 interface {
-		// contains filtered or unexported methods
-	}
-
-	// 
-	type S0 struct {
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// contains filtered or unexported methods
-		}
-	}
-
diff --git a/internal/backport/go/doc/testdata/error2.1.golden b/internal/backport/go/doc/testdata/error2.1.golden
deleted file mode 100644
index dbcc1b0..0000000
--- a/internal/backport/go/doc/testdata/error2.1.golden
+++ /dev/null
@@ -1,37 +0,0 @@
-// 
-PACKAGE error2
-
-IMPORTPATH
-	testdata/error2
-
-FILENAMES
-	testdata/error2.go
-
-TYPES
-	// 
-	type I0 interface {
-		// When embedded, the locally-declared error interface
-		// is only visible if all declarations are shown.
-		error
-	}
-
-	// 
-	type S0 struct {
-		// In struct types, an embedded error must only be visible
-		// if AllDecls is set.
-		error
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// error should not be visible
-			error
-		}
-	}
-
-	// This error declaration shadows the predeclared error type. 
-	type error interface {
-		Error() string
-	}
-
diff --git a/internal/backport/go/doc/testdata/error2.2.golden b/internal/backport/go/doc/testdata/error2.2.golden
deleted file mode 100644
index dedfe41..0000000
--- a/internal/backport/go/doc/testdata/error2.2.golden
+++ /dev/null
@@ -1,27 +0,0 @@
-// 
-PACKAGE error2
-
-IMPORTPATH
-	testdata/error2
-
-FILENAMES
-	testdata/error2.go
-
-TYPES
-	// 
-	type I0 interface {
-		// contains filtered or unexported methods
-	}
-
-	// 
-	type S0 struct {
-		// contains filtered or unexported fields
-	}
-
-	// 
-	type T0 struct {
-		ExportedField interface {
-			// contains filtered or unexported methods
-		}
-	}
-
diff --git a/internal/backport/go/doc/testdata/error2.go b/internal/backport/go/doc/testdata/error2.go
deleted file mode 100644
index 6ee96c2..0000000
--- a/internal/backport/go/doc/testdata/error2.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package error2
-
-type I0 interface {
-	// When embedded, the locally-declared error interface
-	// is only visible if all declarations are shown.
-	error
-}
-
-type T0 struct {
-	ExportedField interface {
-		// error should not be visible
-		error
-	}
-}
-
-type S0 struct {
-	// In struct types, an embedded error must only be visible
-	// if AllDecls is set.
-	error
-}
-
-// This error declaration shadows the predeclared error type.
-type error interface {
-	Error() string
-}
diff --git a/internal/backport/go/doc/testdata/example.go b/internal/backport/go/doc/testdata/example.go
deleted file mode 100644
index fdeda13..0000000
--- a/internal/backport/go/doc/testdata/example.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package testing
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-	"time"
-)
-
-type InternalExample struct {
-	Name   string
-	F      func()
-	Output string
-}
-
-func RunExamples(examples []InternalExample) (ok bool) {
-	ok = true
-
-	var eg InternalExample
-
-	stdout, stderr := os.Stdout, os.Stderr
-	defer func() {
-		os.Stdout, os.Stderr = stdout, stderr
-		if e := recover(); e != nil {
-			fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
-			os.Exit(1)
-		}
-	}()
-
-	for _, eg = range examples {
-		if *chatty {
-			fmt.Printf("=== RUN: %s\n", eg.Name)
-		}
-
-		// capture stdout and stderr
-		r, w, err := os.Pipe()
-		if err != nil {
-			fmt.Fprintln(os.Stderr, err)
-			os.Exit(1)
-		}
-		os.Stdout, os.Stderr = w, w
-		outC := make(chan string)
-		go func() {
-			buf := new(bytes.Buffer)
-			_, err := io.Copy(buf, r)
-			if err != nil {
-				fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
-				os.Exit(1)
-			}
-			outC <- buf.String()
-		}()
-
-		// run example
-		t0 := time.Now()
-		eg.F()
-		dt := time.Now().Sub(t0)
-
-		// close pipe, restore stdout/stderr, get output
-		w.Close()
-		os.Stdout, os.Stderr = stdout, stderr
-		out := <-outC
-
-		// report any errors
-		tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
-		if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
-			fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
-				eg.Name, tstr, g, e)
-			ok = false
-		} else if *chatty {
-			fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
-		}
-	}
-
-	return
-}
diff --git a/internal/backport/go/doc/testdata/examples/README.md b/internal/backport/go/doc/testdata/examples/README.md
deleted file mode 100644
index a1c18e8..0000000
--- a/internal/backport/go/doc/testdata/examples/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-These files are processed by example_test.go:TestExamples.
-
-A .golden file is a txtar file with two sections for each example that should be
-created by doc.Examples from the corresponding .go file.
-
-One section, named EXAMPLE_NAME.Output, contains the example's output,
-the value of the field Example.Output.
-
-The other, named EXAMPLE_NAME.Play, contains the formatted code for a playable
-version of the example, the value of the field Example.Play.
-
-If a section is missing, it is treated as being empty.
diff --git a/internal/backport/go/doc/testdata/examples/empty.go b/internal/backport/go/doc/testdata/examples/empty.go
deleted file mode 100644
index 0b10420..0000000
--- a/internal/backport/go/doc/testdata/examples/empty.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-func Example() {}
-func Example_a()
diff --git a/internal/backport/go/doc/testdata/examples/empty.golden b/internal/backport/go/doc/testdata/examples/empty.golden
deleted file mode 100644
index 2aafd20..0000000
--- a/internal/backport/go/doc/testdata/examples/empty.golden
+++ /dev/null
@@ -1,6 +0,0 @@
--- .Play --
-package main
-
-func main() {}
-func main()
-
diff --git a/internal/backport/go/doc/testdata/examples/generic_constraints.go b/internal/backport/go/doc/testdata/examples/generic_constraints.go
deleted file mode 100644
index ea5d2b3..0000000
--- a/internal/backport/go/doc/testdata/examples/generic_constraints.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p_test
-
-import (
-	"fmt"
-	"time"
-)
-
-type C1 interface {
-	string | int
-}
-
-type C2 interface {
-	M(time.Time)
-}
-
-type G[T C1] int
-
-func g[T C2](x T) {}
-
-type Tm int
-
-func (Tm) M(time.Time) {}
-
-type Foo int
-
-func Example() {
-	fmt.Println("hello")
-}
-
-func ExampleGeneric() {
-	var x G[string]
-	g(Tm(3))
-	fmt.Println(x)
-}
diff --git a/internal/backport/go/doc/testdata/examples/generic_constraints.golden b/internal/backport/go/doc/testdata/examples/generic_constraints.golden
deleted file mode 100644
index 6c7b0ed..0000000
--- a/internal/backport/go/doc/testdata/examples/generic_constraints.golden
+++ /dev/null
@@ -1,39 +0,0 @@
--- .Play --
-package main
-
-import (
-	"fmt"
-)
-
-func main() {
-	fmt.Println("hello")
-}
--- Generic.Play --
-package main
-
-import (
-	"fmt"
-	"time"
-)
-
-type C1 interface {
-	string | int
-}
-
-type C2 interface {
-	M(time.Time)
-}
-
-type G[T C1] int
-
-func g[T C2](x T) {}
-
-type Tm int
-
-func (Tm) M(time.Time) {}
-
-func main() {
-	var x G[string]
-	g(Tm(3))
-	fmt.Println(x)
-}
diff --git a/internal/backport/go/doc/testdata/examples/import_groups.go b/internal/backport/go/doc/testdata/examples/import_groups.go
deleted file mode 100644
index 05f21ca..0000000
--- a/internal/backport/go/doc/testdata/examples/import_groups.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-import (
-	"fmt"
-	"time"
-
-	"golang.org/x/time/rate"
-)
-
-func Example() {
-	fmt.Println("Hello, world!")
-	// Output: Hello, world!
-}
-
-func ExampleLimiter() {
-	// Uses fmt, time and rate.
-	l := rate.NewLimiter(rate.Every(time.Second), 1)
-	fmt.Println(l)
-}
diff --git a/internal/backport/go/doc/testdata/examples/import_groups.golden b/internal/backport/go/doc/testdata/examples/import_groups.golden
deleted file mode 100644
index efe2cc1..0000000
--- a/internal/backport/go/doc/testdata/examples/import_groups.golden
+++ /dev/null
@@ -1,27 +0,0 @@
--- .Play --
-package main
-
-import (
-	"fmt"
-)
-
-func main() {
-	fmt.Println("Hello, world!")
-}
--- .Output --
-Hello, world!
--- Limiter.Play --
-package main
-
-import (
-	"fmt"
-	"time"
-
-	"golang.org/x/time/rate"
-)
-
-func main() {
-	// Uses fmt, time and rate.
-	l := rate.NewLimiter(rate.Every(time.Second), 1)
-	fmt.Println(l)
-}
diff --git a/internal/backport/go/doc/testdata/examples/import_groups_named.go b/internal/backport/go/doc/testdata/examples/import_groups_named.go
deleted file mode 100644
index 377022b..0000000
--- a/internal/backport/go/doc/testdata/examples/import_groups_named.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-import (
-	"fmt"
-	tm "time"
-
-	r "golang.org/x/time/rate"
-)
-
-func Example() {
-	fmt.Println("Hello, world!")
-	// Output: Hello, world!
-}
-
-func ExampleLimiter() {
-	// Uses fmt, time and rate.
-	l := r.NewLimiter(r.Every(tm.Second), 1)
-	fmt.Println(l)
-}
diff --git a/internal/backport/go/doc/testdata/examples/import_groups_named.golden b/internal/backport/go/doc/testdata/examples/import_groups_named.golden
deleted file mode 100644
index 9baf373..0000000
--- a/internal/backport/go/doc/testdata/examples/import_groups_named.golden
+++ /dev/null
@@ -1,27 +0,0 @@
--- .Play --
-package main
-
-import (
-	"fmt"
-)
-
-func main() {
-	fmt.Println("Hello, world!")
-}
--- .Output --
-Hello, world!
--- Limiter.Play --
-package main
-
-import (
-	"fmt"
-	tm "time"
-
-	r "golang.org/x/time/rate"
-)
-
-func main() {
-	// Uses fmt, time and rate.
-	l := r.NewLimiter(r.Every(tm.Second), 1)
-	fmt.Println(l)
-}
diff --git a/internal/backport/go/doc/testdata/examples/inspect_signature.go b/internal/backport/go/doc/testdata/examples/inspect_signature.go
deleted file mode 100644
index c4a36e7..0000000
--- a/internal/backport/go/doc/testdata/examples/inspect_signature.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-import (
-	"bytes"
-	"io"
-)
-
-func getReader() io.Reader { return nil }
-
-func do(b bytes.Reader) {}
-
-func Example() {
-	getReader()
-	do()
-	// Output:
-}
-
-func ExampleIgnored() {
-}
diff --git a/internal/backport/go/doc/testdata/examples/inspect_signature.golden b/internal/backport/go/doc/testdata/examples/inspect_signature.golden
deleted file mode 100644
index c0d9b2e..0000000
--- a/internal/backport/go/doc/testdata/examples/inspect_signature.golden
+++ /dev/null
@@ -1,24 +0,0 @@
--- .Play --
-package main
-
-import (
-	"bytes"
-	"io"
-)
-
-func getReader() io.Reader { return nil }
-
-func do(b bytes.Reader) {}
-
-func main() {
-	getReader()
-	do()
-}
--- Ignored.Play --
-package main
-
-import ()
-
-func main() {
-}
-
diff --git a/internal/backport/go/doc/testdata/examples/iota.go b/internal/backport/go/doc/testdata/examples/iota.go
deleted file mode 100644
index c878b77..0000000
--- a/internal/backport/go/doc/testdata/examples/iota.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-const (
-	a = iota
-	b
-)
-
-const (
-	c = 3
-	d = 4
-)
-
-const (
-	e = iota
-	f
-)
-
-// The example refers to only one of the constants in the iota group, but we
-// must keep all of them because of the iota. The second group of constants can
-// be trimmed. The third has an iota, but is unused, so it can be eliminated.
-
-func Example() {
-	_ = b
-	_ = d
-}
-
-// Need two examples to hit the playExample function.
-
-func Example2() {
-}
diff --git a/internal/backport/go/doc/testdata/examples/iota.golden b/internal/backport/go/doc/testdata/examples/iota.golden
deleted file mode 100644
index 7487702..0000000
--- a/internal/backport/go/doc/testdata/examples/iota.golden
+++ /dev/null
@@ -1,23 +0,0 @@
--- .Play --
-package main
-
-import ()
-
-const (
-	a = iota
-	b
-)
-
-const d = 4
-
-func main() {
-	_ = b
-	_ = d
-}
--- 2.Play --
-package main
-
-import ()
-
-func main() {
-}
diff --git a/internal/backport/go/doc/testdata/examples/issue43658.go b/internal/backport/go/doc/testdata/examples/issue43658.go
deleted file mode 100644
index 385223a..0000000
--- a/internal/backport/go/doc/testdata/examples/issue43658.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright ©2016 The Gonum Authors. All rights reserved.
-// Copyright 2021 The Go Authors. All rights reserved.
-// (above line required for our license-header checker)
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package community_test
-
-import (
-	"fmt"
-	"log"
-	"sort"
-
-	"golang.org/x/exp/rand"
-
-	"gonum.org/v1/gonum/graph/community"
-	"gonum.org/v1/gonum/graph/internal/ordered"
-	"gonum.org/v1/gonum/graph/simple"
-)
-
-func ExampleProfile_simple() {
-	// Profile calls Modularize which implements the Louvain modularization algorithm.
-	// Since this is a randomized algorithm we use a defined random source to ensure
-	// consistency between test runs. In practice, results will not differ greatly
-	// between runs with different PRNG seeds.
-	src := rand.NewSource(1)
-
-	// Create dumbell graph:
-	//
-	//  0       4
-	//  |\     /|
-	//  | 2 - 3 |
-	//  |/     \|
-	//  1       5
-	//
-	g := simple.NewUndirectedGraph()
-	for u, e := range smallDumbell {
-		for v := range e {
-			g.SetEdge(simple.Edge{F: simple.Node(u), T: simple.Node(v)})
-		}
-	}
-
-	// Get the profile of internal node weight for resolutions
-	// between 0.1 and 10 using logarithmic bisection.
-	p, err := community.Profile(
-		community.ModularScore(g, community.Weight, 10, src),
-		true, 1e-3, 0.1, 10,
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Print out each step with communities ordered.
-	for _, d := range p {
-		comm := d.Communities()
-		for _, c := range comm {
-			sort.Sort(ordered.ByID(c))
-		}
-		sort.Sort(ordered.BySliceIDs(comm))
-		fmt.Printf("Low:%.2v High:%.2v Score:%v Communities:%v Q=%.3v\n",
-			d.Low, d.High, d.Score, comm, community.Q(g, comm, d.Low))
-	}
-
-	// Output:
-	// Low:0.1 High:0.29 Score:14 Communities:[[0 1 2 3 4 5]] Q=0.9
-	// Low:0.29 High:2.3 Score:12 Communities:[[0 1 2] [3 4 5]] Q=0.714
-	// Low:2.3 High:3.5 Score:4 Communities:[[0 1] [2] [3] [4 5]] Q=-0.31
-	// Low:3.5 High:10 Score:0 Communities:[[0] [1] [2] [3] [4] [5]] Q=-0.607
-}
-
-// intset is an integer set.
-type intset map[int]struct{}
-
-func linksTo(i ...int) intset {
-	if len(i) == 0 {
-		return nil
-	}
-	s := make(intset)
-	for _, v := range i {
-		s[v] = struct{}{}
-	}
-	return s
-}
-
-var (
-	smallDumbell = []intset{
-		0: linksTo(1, 2),
-		1: linksTo(2),
-		2: linksTo(3),
-		3: linksTo(4, 5),
-		4: linksTo(5),
-		5: nil,
-	}
-
-	// http://www.slate.com/blogs/the_world_/2014/07/17/the_middle_east_friendship_chart.html
-	middleEast = struct{ friends, complicated, enemies []intset }{
-		// green cells
-		friends: []intset{
-			0:  nil,
-			1:  linksTo(5, 7, 9, 12),
-			2:  linksTo(11),
-			3:  linksTo(4, 5, 10),
-			4:  linksTo(3, 5, 10),
-			5:  linksTo(1, 3, 4, 8, 10, 12),
-			6:  nil,
-			7:  linksTo(1, 12),
-			8:  linksTo(5, 9, 11),
-			9:  linksTo(1, 8, 12),
-			10: linksTo(3, 4, 5),
-			11: linksTo(2, 8),
-			12: linksTo(1, 5, 7, 9),
-		},
-
-		// yellow cells
-		complicated: []intset{
-			0:  linksTo(2, 4),
-			1:  linksTo(4, 8),
-			2:  linksTo(0, 3, 4, 5, 8, 9),
-			3:  linksTo(2, 8, 11),
-			4:  linksTo(0, 1, 2, 8),
-			5:  linksTo(2),
-			6:  nil,
-			7:  linksTo(9, 11),
-			8:  linksTo(1, 2, 3, 4, 10, 12),
-			9:  linksTo(2, 7, 11),
-			10: linksTo(8),
-			11: linksTo(3, 7, 9, 12),
-			12: linksTo(8, 11),
-		},
-
-		// red cells
-		enemies: []intset{
-			0:  linksTo(1, 3, 5, 6, 7, 8, 9, 10, 11, 12),
-			1:  linksTo(0, 2, 3, 6, 10, 11),
-			2:  linksTo(1, 6, 7, 10, 12),
-			3:  linksTo(0, 1, 6, 7, 9, 12),
-			4:  linksTo(6, 7, 9, 11, 12),
-			5:  linksTo(0, 6, 7, 9, 11),
-			6:  linksTo(0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12),
-			7:  linksTo(0, 2, 3, 4, 5, 6, 8, 10),
-			8:  linksTo(0, 6, 7),
-			9:  linksTo(0, 3, 4, 5, 6, 10),
-			10: linksTo(0, 1, 2, 6, 7, 9, 11, 12),
-			11: linksTo(0, 1, 4, 5, 6, 10),
-			12: linksTo(0, 2, 3, 4, 6, 10),
-		},
-	}
-)
-
-var friends, enemies *simple.WeightedUndirectedGraph
-
-func init() {
-	friends = simple.NewWeightedUndirectedGraph(0, 0)
-	for u, e := range middleEast.friends {
-		// Ensure unconnected nodes are included.
-		if friends.Node(int64(u)) == nil {
-			friends.AddNode(simple.Node(u))
-		}
-		for v := range e {
-			friends.SetWeightedEdge(simple.WeightedEdge{F: simple.Node(u), T: simple.Node(v), W: 1})
-		}
-	}
-	enemies = simple.NewWeightedUndirectedGraph(0, 0)
-	for u, e := range middleEast.enemies {
-		// Ensure unconnected nodes are included.
-		if enemies.Node(int64(u)) == nil {
-			enemies.AddNode(simple.Node(u))
-		}
-		for v := range e {
-			enemies.SetWeightedEdge(simple.WeightedEdge{F: simple.Node(u), T: simple.Node(v), W: -1})
-		}
-	}
-}
-
-func ExampleProfile_multiplex() {
-	// Profile calls ModularizeMultiplex which implements the Louvain modularization
-	// algorithm. Since this is a randomized algorithm we use a defined random source
-	// to ensure consistency between test runs. In practice, results will not differ
-	// greatly between runs with different PRNG seeds.
-	src := rand.NewSource(1)
-
-	// The undirected graphs, friends and enemies, are the political relationships
-	// in the Middle East as described in the Slate article:
-	// http://www.slate.com/blogs/the_world_/2014/07/17/the_middle_east_friendship_chart.html
-	g, err := community.NewUndirectedLayers(friends, enemies)
-	if err != nil {
-		log.Fatal(err)
-	}
-	weights := []float64{1, -1}
-
-	// Get the profile of internal node weight for resolutions
-	// between 0.1 and 10 using logarithmic bisection.
-	p, err := community.Profile(
-		community.ModularMultiplexScore(g, weights, true, community.WeightMultiplex, 10, src),
-		true, 1e-3, 0.1, 10,
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Print out each step with communities ordered.
-	for _, d := range p {
-		comm := d.Communities()
-		for _, c := range comm {
-			sort.Sort(ordered.ByID(c))
-		}
-		sort.Sort(ordered.BySliceIDs(comm))
-		fmt.Printf("Low:%.2v High:%.2v Score:%v Communities:%v Q=%.3v\n",
-			d.Low, d.High, d.Score, comm, community.QMultiplex(g, comm, weights, []float64{d.Low}))
-	}
-
-	// Output:
-	// Low:0.1 High:0.72 Score:26 Communities:[[0] [1 7 9 12] [2 8 11] [3 4 5 10] [6]] Q=[24.7 1.97]
-	// Low:0.72 High:1.1 Score:24 Communities:[[0 6] [1 7 9 12] [2 8 11] [3 4 5 10]] Q=[16.9 14.1]
-	// Low:1.1 High:1.2 Score:18 Communities:[[0 2 6 11] [1 7 9 12] [3 4 5 8 10]] Q=[9.16 25.1]
-	// Low:1.2 High:1.6 Score:10 Communities:[[0 3 4 5 6 10] [1 7 9 12] [2 8 11]] Q=[10.5 26.7]
-	// Low:1.6 High:1.6 Score:8 Communities:[[0 1 6 7 9 12] [2 8 11] [3 4 5 10]] Q=[5.56 39.8]
-	// Low:1.6 High:1.8 Score:2 Communities:[[0 2 3 4 5 6 10] [1 7 8 9 11 12]] Q=[-1.82 48.6]
-	// Low:1.8 High:2.3 Score:-6 Communities:[[0 2 3 4 5 6 8 10 11] [1 7 9 12]] Q=[-5 57.5]
-	// Low:2.3 High:2.4 Score:-10 Communities:[[0 1 2 6 7 8 9 11 12] [3 4 5 10]] Q=[-11.2 79]
-	// Low:2.4 High:4.3 Score:-52 Communities:[[0 1 2 3 4 5 6 7 8 9 10 11 12]] Q=[-46.1 117]
-	// Low:4.3 High:10 Score:-54 Communities:[[0 1 2 3 4 6 7 8 9 10 11 12] [5]] Q=[-82 254]
-}
diff --git a/internal/backport/go/doc/testdata/examples/issue43658.golden b/internal/backport/go/doc/testdata/examples/issue43658.golden
deleted file mode 100644
index 5200d14..0000000
--- a/internal/backport/go/doc/testdata/examples/issue43658.golden
+++ /dev/null
@@ -1,156 +0,0 @@
--- Profile_simple.Play --
-package main
-
-import (
-	"fmt"
-	"log"
-	"sort"
-
-	"golang.org/x/exp/rand"
-
-	"gonum.org/v1/gonum/graph/community"
-	"gonum.org/v1/gonum/graph/internal/ordered"
-	"gonum.org/v1/gonum/graph/simple"
-)
-
-func main() {
-	// Profile calls Modularize which implements the Louvain modularization algorithm.
-	// Since this is a randomized algorithm we use a defined random source to ensure
-	// consistency between test runs. In practice, results will not differ greatly
-	// between runs with different PRNG seeds.
-	src := rand.NewSource(1)
-
-	// Create dumbell graph:
-	//
-	//  0       4
-	//  |\     /|
-	//  | 2 - 3 |
-	//  |/     \|
-	//  1       5
-	//
-	g := simple.NewUndirectedGraph()
-	for u, e := range smallDumbell {
-		for v := range e {
-			g.SetEdge(simple.Edge{F: simple.Node(u), T: simple.Node(v)})
-		}
-	}
-
-	// Get the profile of internal node weight for resolutions
-	// between 0.1 and 10 using logarithmic bisection.
-	p, err := community.Profile(
-		community.ModularScore(g, community.Weight, 10, src),
-		true, 1e-3, 0.1, 10,
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Print out each step with communities ordered.
-	for _, d := range p {
-		comm := d.Communities()
-		for _, c := range comm {
-			sort.Sort(ordered.ByID(c))
-		}
-		sort.Sort(ordered.BySliceIDs(comm))
-		fmt.Printf("Low:%.2v High:%.2v Score:%v Communities:%v Q=%.3v\n",
-			d.Low, d.High, d.Score, comm, community.Q(g, comm, d.Low))
-	}
-
-}
-
-// intset is an integer set.
-type intset map[int]struct{}
-
-func linksTo(i ...int) intset {
-	if len(i) == 0 {
-		return nil
-	}
-	s := make(intset)
-	for _, v := range i {
-		s[v] = struct{}{}
-	}
-	return s
-}
-
-var smallDumbell = []intset{
-	0: linksTo(1, 2),
-	1: linksTo(2),
-	2: linksTo(3),
-	3: linksTo(4, 5),
-	4: linksTo(5),
-	5: nil,
-}
-
--- Profile_simple.Output --
-Low:0.1 High:0.29 Score:14 Communities:[[0 1 2 3 4 5]] Q=0.9
-Low:0.29 High:2.3 Score:12 Communities:[[0 1 2] [3 4 5]] Q=0.714
-Low:2.3 High:3.5 Score:4 Communities:[[0 1] [2] [3] [4 5]] Q=-0.31
-Low:3.5 High:10 Score:0 Communities:[[0] [1] [2] [3] [4] [5]] Q=-0.607
-
--- Profile_multiplex.Play --
-
-package main
-
-import (
-	"fmt"
-	"log"
-	"sort"
-
-	"golang.org/x/exp/rand"
-
-	"gonum.org/v1/gonum/graph/community"
-	"gonum.org/v1/gonum/graph/internal/ordered"
-	"gonum.org/v1/gonum/graph/simple"
-)
-
-var friends, enemies *simple.WeightedUndirectedGraph
-
-func main() {
-	// Profile calls ModularizeMultiplex which implements the Louvain modularization
-	// algorithm. Since this is a randomized algorithm we use a defined random source
-	// to ensure consistency between test runs. In practice, results will not differ
-	// greatly between runs with different PRNG seeds.
-	src := rand.NewSource(1)
-
-	// The undirected graphs, friends and enemies, are the political relationships
-	// in the Middle East as described in the Slate article:
-	// http://www.slate.com/blogs/the_world_/2014/07/17/the_middle_east_friendship_chart.html
-	g, err := community.NewUndirectedLayers(friends, enemies)
-	if err != nil {
-		log.Fatal(err)
-	}
-	weights := []float64{1, -1}
-
-	// Get the profile of internal node weight for resolutions
-	// between 0.1 and 10 using logarithmic bisection.
-	p, err := community.Profile(
-		community.ModularMultiplexScore(g, weights, true, community.WeightMultiplex, 10, src),
-		true, 1e-3, 0.1, 10,
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Print out each step with communities ordered.
-	for _, d := range p {
-		comm := d.Communities()
-		for _, c := range comm {
-			sort.Sort(ordered.ByID(c))
-		}
-		sort.Sort(ordered.BySliceIDs(comm))
-		fmt.Printf("Low:%.2v High:%.2v Score:%v Communities:%v Q=%.3v\n",
-			d.Low, d.High, d.Score, comm, community.QMultiplex(g, comm, weights, []float64{d.Low}))
-	}
-
-}
--- Profile_multiplex.Output --
-Low:0.1 High:0.72 Score:26 Communities:[[0] [1 7 9 12] [2 8 11] [3 4 5 10] [6]] Q=[24.7 1.97]
-Low:0.72 High:1.1 Score:24 Communities:[[0 6] [1 7 9 12] [2 8 11] [3 4 5 10]] Q=[16.9 14.1]
-Low:1.1 High:1.2 Score:18 Communities:[[0 2 6 11] [1 7 9 12] [3 4 5 8 10]] Q=[9.16 25.1]
-Low:1.2 High:1.6 Score:10 Communities:[[0 3 4 5 6 10] [1 7 9 12] [2 8 11]] Q=[10.5 26.7]
-Low:1.6 High:1.6 Score:8 Communities:[[0 1 6 7 9 12] [2 8 11] [3 4 5 10]] Q=[5.56 39.8]
-Low:1.6 High:1.8 Score:2 Communities:[[0 2 3 4 5 6 10] [1 7 8 9 11 12]] Q=[-1.82 48.6]
-Low:1.8 High:2.3 Score:-6 Communities:[[0 2 3 4 5 6 8 10 11] [1 7 9 12]] Q=[-5 57.5]
-Low:2.3 High:2.4 Score:-10 Communities:[[0 1 2 6 7 8 9 11 12] [3 4 5 10]] Q=[-11.2 79]
-Low:2.4 High:4.3 Score:-52 Communities:[[0 1 2 3 4 5 6 7 8 9 10 11 12]] Q=[-46.1 117]
-Low:4.3 High:10 Score:-54 Communities:[[0 1 2 3 4 6 7 8 9 10 11 12] [5]] Q=[-82 254]
diff --git a/internal/backport/go/doc/testdata/examples/multiple.go b/internal/backport/go/doc/testdata/examples/multiple.go
deleted file mode 100644
index 2728264..0000000
--- a/internal/backport/go/doc/testdata/examples/multiple.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-import (
-	"flag"
-	"fmt"
-	"log"
-	"os/exec"
-	"sort"
-)
-
-func ExampleHello() {
-	fmt.Println("Hello, world!")
-	// Output: Hello, world!
-}
-
-func ExampleImport() {
-	out, err := exec.Command("date").Output()
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("The date is %s\n", out)
-}
-
-func ExampleKeyValue() {
-	v := struct {
-		a string
-		b int
-	}{
-		a: "A",
-		b: 1,
-	}
-	fmt.Print(v)
-	// Output: a: "A", b: 1
-}
-
-func ExampleKeyValueImport() {
-	f := flag.Flag{
-		Name: "play",
-	}
-	fmt.Print(f)
-	// Output: Name: "play"
-}
-
-var keyValueTopDecl = struct {
-	a string
-	b int
-}{
-	a: "B",
-	b: 2,
-}
-
-func ExampleKeyValueTopDecl() {
-	fmt.Print(keyValueTopDecl)
-	// Output: a: "B", b: 2
-}
-
-// Person represents a person by name and age.
-type Person struct {
-	Name string
-	Age  int
-}
-
-// String returns a string representation of the Person.
-func (p Person) String() string {
-	return fmt.Sprintf("%s: %d", p.Name, p.Age)
-}
-
-// ByAge implements sort.Interface for []Person based on
-// the Age field.
-type ByAge []Person
-
-// Len returns the number of elements in ByAge.
-func (a ByAge) Len() int { return len(a) }
-
-// Swap swaps the elements in ByAge.
-func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
-
-// people is the array of Person
-var people = []Person{
-	{"Bob", 31},
-	{"John", 42},
-	{"Michael", 17},
-	{"Jenny", 26},
-}
-
-func ExampleSort() {
-	fmt.Println(people)
-	sort.Sort(ByAge(people))
-	fmt.Println(people)
-	// Output:
-	// [Bob: 31 John: 42 Michael: 17 Jenny: 26]
-	// [Michael: 17 Jenny: 26 Bob: 31 John: 42]
-}
diff --git a/internal/backport/go/doc/testdata/examples/multiple.golden b/internal/backport/go/doc/testdata/examples/multiple.golden
deleted file mode 100644
index d2d791e..0000000
--- a/internal/backport/go/doc/testdata/examples/multiple.golden
+++ /dev/null
@@ -1,129 +0,0 @@
--- Hello.Play --
-package main
-
-import (
-	"fmt"
-)
-
-func main() {
-	fmt.Println("Hello, world!")
-}
--- Hello.Output --
-Hello, world!
--- Import.Play --
-package main
-
-import (
-	"fmt"
-	"log"
-	"os/exec"
-)
-
-func main() {
-	out, err := exec.Command("date").Output()
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("The date is %s\n", out)
-}
--- KeyValue.Play --
-package main
-
-import (
-	"fmt"
-)
-
-func main() {
-	v := struct {
-		a string
-		b int
-	}{
-		a: "A",
-		b: 1,
-	}
-	fmt.Print(v)
-}
--- KeyValue.Output --
-a: "A", b: 1
--- KeyValueImport.Play --
-package main
-
-import (
-	"flag"
-	"fmt"
-)
-
-func main() {
-	f := flag.Flag{
-		Name: "play",
-	}
-	fmt.Print(f)
-}
--- KeyValueImport.Output --
-Name: "play"
--- KeyValueTopDecl.Play --
-package main
-
-import (
-	"fmt"
-)
-
-var keyValueTopDecl = struct {
-	a string
-	b int
-}{
-	a: "B",
-	b: 2,
-}
-
-func main() {
-	fmt.Print(keyValueTopDecl)
-}
--- KeyValueTopDecl.Output --
-a: "B", b: 2
--- Sort.Play --
-package main
-
-import (
-	"fmt"
-	"sort"
-)
-
-// Person represents a person by name and age.
-type Person struct {
-	Name string
-	Age  int
-}
-
-// String returns a string representation of the Person.
-func (p Person) String() string {
-	return fmt.Sprintf("%s: %d", p.Name, p.Age)
-}
-
-// ByAge implements sort.Interface for []Person based on
-// the Age field.
-type ByAge []Person
-
-// Len returns the number of elements in ByAge.
-func (a ByAge) Len() int { return len(a) }
-
-// Swap swaps the elements in ByAge.
-func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
-
-// people is the array of Person
-var people = []Person{
-	{"Bob", 31},
-	{"John", 42},
-	{"Michael", 17},
-	{"Jenny", 26},
-}
-
-func main() {
-	fmt.Println(people)
-	sort.Sort(ByAge(people))
-	fmt.Println(people)
-}
--- Sort.Output --
-[Bob: 31 John: 42 Michael: 17 Jenny: 26]
-[Michael: 17 Jenny: 26 Bob: 31 John: 42]
diff --git a/internal/backport/go/doc/testdata/examples/values.go b/internal/backport/go/doc/testdata/examples/values.go
deleted file mode 100644
index 64b0de4..0000000
--- a/internal/backport/go/doc/testdata/examples/values.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-// Variable declaration with fewer values than names.
-
-func f() (int, int) {
-	return 1, 2
-}
-
-var a, b = f()
-
-// Need two examples to hit playExample.
-
-func ExampleA() {
-	_ = a
-}
-
-func ExampleB() {
-}
diff --git a/internal/backport/go/doc/testdata/examples/values.golden b/internal/backport/go/doc/testdata/examples/values.golden
deleted file mode 100644
index 00c1991..0000000
--- a/internal/backport/go/doc/testdata/examples/values.golden
+++ /dev/null
@@ -1,21 +0,0 @@
--- A.Play --
-package main
-
-import ()
-
-func f() (int, int) {
-	return 1, 2
-}
-
-var a, b = f()
-
-func main() {
-	_ = a
-}
--- B.Play --
-package main
-
-import ()
-
-func main() {
-}
diff --git a/internal/backport/go/doc/testdata/examples/whole_file.go b/internal/backport/go/doc/testdata/examples/whole_file.go
deleted file mode 100644
index 61954ce..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_file.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-import "fmt"
-
-type X int
-
-func (X) Foo() {
-}
-
-func (X) TestBlah() {
-}
-
-func (X) BenchmarkFoo() {
-}
-
-func Example() {
-	fmt.Println("Hello, world!")
-	// Output: Hello, world!
-}
diff --git a/internal/backport/go/doc/testdata/examples/whole_file.golden b/internal/backport/go/doc/testdata/examples/whole_file.golden
deleted file mode 100644
index 74a2291..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_file.golden
+++ /dev/null
@@ -1,21 +0,0 @@
--- .Play --
-package main
-
-import "fmt"
-
-type X int
-
-func (X) Foo() {
-}
-
-func (X) TestBlah() {
-}
-
-func (X) BenchmarkFoo() {
-}
-
-func main() {
-	fmt.Println("Hello, world!")
-}
--- .Output --
-Hello, world!
diff --git a/internal/backport/go/doc/testdata/examples/whole_function.go b/internal/backport/go/doc/testdata/examples/whole_function.go
deleted file mode 100644
index 1754ee3..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_function.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-func Foo(x int) {
-}
-
-func Example() {
-	fmt.Println("Hello, world!")
-	// Output: Hello, world!
-}
diff --git a/internal/backport/go/doc/testdata/examples/whole_function.golden b/internal/backport/go/doc/testdata/examples/whole_function.golden
deleted file mode 100644
index 7d5b5cb..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_function.golden
+++ /dev/null
@@ -1,11 +0,0 @@
--- .Play --
-package main
-
-func Foo(x int) {
-}
-
-func main() {
-	fmt.Println("Hello, world!")
-}
--- .Output --
-Hello, world!
diff --git a/internal/backport/go/doc/testdata/examples/whole_function_external.go b/internal/backport/go/doc/testdata/examples/whole_function_external.go
deleted file mode 100644
index 0e0e2f5..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_function_external.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package foo_test
-
-func foo(int)
-
-func Example() {
-	foo(42)
-	// Output:
-}
diff --git a/internal/backport/go/doc/testdata/examples/whole_function_external.golden b/internal/backport/go/doc/testdata/examples/whole_function_external.golden
deleted file mode 100644
index ec8f114..0000000
--- a/internal/backport/go/doc/testdata/examples/whole_function_external.golden
+++ /dev/null
@@ -1,8 +0,0 @@
--- .Play --
-package main
-
-func foo(int)
-
-func main() {
-	foo(42)
-}
diff --git a/internal/backport/go/doc/testdata/f.0.golden b/internal/backport/go/doc/testdata/f.0.golden
deleted file mode 100644
index 8175901..0000000
--- a/internal/backport/go/doc/testdata/f.0.golden
+++ /dev/null
@@ -1,13 +0,0 @@
-// The package f is a go/doc test for functions and factory ...
-PACKAGE f
-
-IMPORTPATH
-	testdata/f
-
-FILENAMES
-	testdata/f.go
-
-FUNCTIONS
-	// Exported must always be visible. Was issue 2824. 
-	func Exported() private
-
diff --git a/internal/backport/go/doc/testdata/f.1.golden b/internal/backport/go/doc/testdata/f.1.golden
deleted file mode 100644
index ba68e88..0000000
--- a/internal/backport/go/doc/testdata/f.1.golden
+++ /dev/null
@@ -1,16 +0,0 @@
-// The package f is a go/doc test for functions and factory ...
-PACKAGE f
-
-IMPORTPATH
-	testdata/f
-
-FILENAMES
-	testdata/f.go
-
-TYPES
-	// 
-	type private struct{}
-
-	// Exported must always be visible. Was issue 2824. 
-	func Exported() private
-
diff --git a/internal/backport/go/doc/testdata/f.2.golden b/internal/backport/go/doc/testdata/f.2.golden
deleted file mode 100644
index 8175901..0000000
--- a/internal/backport/go/doc/testdata/f.2.golden
+++ /dev/null
@@ -1,13 +0,0 @@
-// The package f is a go/doc test for functions and factory ...
-PACKAGE f
-
-IMPORTPATH
-	testdata/f
-
-FILENAMES
-	testdata/f.go
-
-FUNCTIONS
-	// Exported must always be visible. Was issue 2824. 
-	func Exported() private
-
diff --git a/internal/backport/go/doc/testdata/f.go b/internal/backport/go/doc/testdata/f.go
deleted file mode 100644
index 7e9add9..0000000
--- a/internal/backport/go/doc/testdata/f.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The package f is a go/doc test for functions and factory methods.
-package f
-
-// ----------------------------------------------------------------------------
-// Factory functions for non-exported types must not get lost.
-
-type private struct{}
-
-// Exported must always be visible. Was issue 2824.
-func Exported() private {}
diff --git a/internal/backport/go/doc/testdata/g.0.golden b/internal/backport/go/doc/testdata/g.0.golden
deleted file mode 100644
index 487cf06..0000000
--- a/internal/backport/go/doc/testdata/g.0.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-// The package g is a go/doc test for mixed exported/unexported ...
-PACKAGE g
-
-IMPORTPATH
-	testdata/g
-
-FILENAMES
-	testdata/g.go
-
-CONSTANTS
-	// 
-	const (
-		A, _	= iota, iota
-		_, D
-		E, _
-		G, H
-	)
-
-
-VARIABLES
-	// 
-	var (
-		_, C2, _	= 1, 2, 3
-		C4, _, C6	= 4, 5, 6
-		_, C8, _	= 7, 8, 9
-	)
-
-	// 
-	var (
-		_, X = f()
-	)
-
diff --git a/internal/backport/go/doc/testdata/g.1.golden b/internal/backport/go/doc/testdata/g.1.golden
deleted file mode 100644
index 438441a..0000000
--- a/internal/backport/go/doc/testdata/g.1.golden
+++ /dev/null
@@ -1,34 +0,0 @@
-// The package g is a go/doc test for mixed exported/unexported ...
-PACKAGE g
-
-IMPORTPATH
-	testdata/g
-
-FILENAMES
-	testdata/g.go
-
-CONSTANTS
-	// 
-	const (
-		A, b	= iota, iota
-		c, D
-		E, f
-		G, H
-	)
-
-
-VARIABLES
-	// 
-	var (
-		c1, C2, c3	= 1, 2, 3
-		C4, c5, C6	= 4, 5, 6
-		c7, C8, c9	= 7, 8, 9
-		xx, yy, zz	= 0, 0, 0	// all unexported and hidden
-	)
-
-	// 
-	var (
-		x, X	= f()
-		y, z	= f()
-	)
-
diff --git a/internal/backport/go/doc/testdata/g.2.golden b/internal/backport/go/doc/testdata/g.2.golden
deleted file mode 100644
index 487cf06..0000000
--- a/internal/backport/go/doc/testdata/g.2.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-// The package g is a go/doc test for mixed exported/unexported ...
-PACKAGE g
-
-IMPORTPATH
-	testdata/g
-
-FILENAMES
-	testdata/g.go
-
-CONSTANTS
-	// 
-	const (
-		A, _	= iota, iota
-		_, D
-		E, _
-		G, H
-	)
-
-
-VARIABLES
-	// 
-	var (
-		_, C2, _	= 1, 2, 3
-		C4, _, C6	= 4, 5, 6
-		_, C8, _	= 7, 8, 9
-	)
-
-	// 
-	var (
-		_, X = f()
-	)
-
diff --git a/internal/backport/go/doc/testdata/g.go b/internal/backport/go/doc/testdata/g.go
deleted file mode 100644
index ceeb417..0000000
--- a/internal/backport/go/doc/testdata/g.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The package g is a go/doc test for mixed exported/unexported values.
-package g
-
-const (
-	A, b = iota, iota
-	c, D
-	E, f
-	G, H
-)
-
-var (
-	c1, C2, c3 = 1, 2, 3
-	C4, c5, C6 = 4, 5, 6
-	c7, C8, c9 = 7, 8, 9
-	xx, yy, zz = 0, 0, 0 // all unexported and hidden
-)
-
-var (
-	x, X = f()
-	y, z = f()
-)
diff --git a/internal/backport/go/doc/testdata/generics.0.golden b/internal/backport/go/doc/testdata/generics.0.golden
deleted file mode 100644
index 91c874c..0000000
--- a/internal/backport/go/doc/testdata/generics.0.golden
+++ /dev/null
@@ -1,76 +0,0 @@
-// Package generics contains the new syntax supporting generic ...
-PACKAGE generics
-
-IMPORTPATH
-	testdata/generics
-
-FILENAMES
-	testdata/generics.go
-
-FUNCTIONS
-	// AnotherFunc has an implicit constraint interface.  Neither type ...
-	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
-
-	// Func has an instantiated constraint. 
-	func Func[T Constraint[string, Type[int]]]()
-
-	// Single is not a factory function. 
-	func Single[T any]() *T
-
-	// Slice is not a factory function. 
-	func Slice[T any]() []T
-
-
-TYPES
-	// AFuncType demonstrates filtering of parameters and type ...
-	type AFuncType[T ~struct{ f int }] func(_ struct {
-		// contains filtered or unexported fields
-	})
-
-	// Constraint is a constraint interface with two type parameters. 
-	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
-		~int | ~byte | Type[string]
-		M() P
-	}
-
-	// NewEmbeddings demonstrates how we filter the new embedded ...
-	type NewEmbeddings interface {
-		string	// should not be filtered
-	
-		struct {
-			// contains filtered or unexported fields
-		}
-		~struct {
-			// contains filtered or unexported fields
-		}
-		*struct {
-			// contains filtered or unexported fields
-		}
-		struct {
-			// contains filtered or unexported fields
-		} | ~struct {
-			// contains filtered or unexported fields
-		}
-		// contains filtered or unexported methods
-	}
-
-	// Parameterized types should be shown. 
-	type Type[P any] struct {
-		Field P
-	}
-
-	// Variables with an instantiated type should be shown. 
-	var X Type[int]
-
-	// Constructors for parameterized types should be shown. 
-	func Constructor[lowerCase any]() Type[lowerCase]
-
-	// MethodA uses a different name for its receiver type parameter. 
-	func (t Type[A]) MethodA(p A)
-
-	// MethodB has a blank receiver type parameter. 
-	func (t Type[_]) MethodB()
-
-	// MethodC has a lower-case receiver type parameter. 
-	func (t Type[c]) MethodC()
-
diff --git a/internal/backport/go/doc/testdata/generics.1.golden b/internal/backport/go/doc/testdata/generics.1.golden
deleted file mode 100644
index 923a4ce..0000000
--- a/internal/backport/go/doc/testdata/generics.1.golden
+++ /dev/null
@@ -1,66 +0,0 @@
-// Package generics contains the new syntax supporting generic ...
-PACKAGE generics
-
-IMPORTPATH
-	testdata/generics
-
-FILENAMES
-	testdata/generics.go
-
-FUNCTIONS
-	// AnotherFunc has an implicit constraint interface.  Neither type ...
-	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
-
-	// Func has an instantiated constraint. 
-	func Func[T Constraint[string, Type[int]]]()
-
-	// Single is not a factory function. 
-	func Single[T any]() *T
-
-	// Slice is not a factory function. 
-	func Slice[T any]() []T
-
-
-TYPES
-	// AFuncType demonstrates filtering of parameters and type ...
-	type AFuncType[T ~struct{ f int }] func(_ struct{ f int })
-
-	// Constraint is a constraint interface with two type parameters. 
-	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
-		~int | ~byte | Type[string]
-		M() P
-	}
-
-	// NewEmbeddings demonstrates how we filter the new embedded ...
-	type NewEmbeddings interface {
-		string	// should not be filtered
-		int16
-		struct{ f int }
-		~struct{ f int }
-		*struct{ f int }
-		struct{ f int } | ~struct{ f int }
-	}
-
-	// Parameterized types should be shown. 
-	type Type[P any] struct {
-		Field P
-	}
-
-	// Variables with an instantiated type should be shown. 
-	var X Type[int]
-
-	// Constructors for parameterized types should be shown. 
-	func Constructor[lowerCase any]() Type[lowerCase]
-
-	// MethodA uses a different name for its receiver type parameter. 
-	func (t Type[A]) MethodA(p A)
-
-	// MethodB has a blank receiver type parameter. 
-	func (t Type[_]) MethodB()
-
-	// MethodC has a lower-case receiver type parameter. 
-	func (t Type[c]) MethodC()
-
-	// int16 shadows the predeclared type int16. 
-	type int16 int
-
diff --git a/internal/backport/go/doc/testdata/generics.2.golden b/internal/backport/go/doc/testdata/generics.2.golden
deleted file mode 100644
index 91c874c..0000000
--- a/internal/backport/go/doc/testdata/generics.2.golden
+++ /dev/null
@@ -1,76 +0,0 @@
-// Package generics contains the new syntax supporting generic ...
-PACKAGE generics
-
-IMPORTPATH
-	testdata/generics
-
-FILENAMES
-	testdata/generics.go
-
-FUNCTIONS
-	// AnotherFunc has an implicit constraint interface.  Neither type ...
-	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
-
-	// Func has an instantiated constraint. 
-	func Func[T Constraint[string, Type[int]]]()
-
-	// Single is not a factory function. 
-	func Single[T any]() *T
-
-	// Slice is not a factory function. 
-	func Slice[T any]() []T
-
-
-TYPES
-	// AFuncType demonstrates filtering of parameters and type ...
-	type AFuncType[T ~struct{ f int }] func(_ struct {
-		// contains filtered or unexported fields
-	})
-
-	// Constraint is a constraint interface with two type parameters. 
-	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
-		~int | ~byte | Type[string]
-		M() P
-	}
-
-	// NewEmbeddings demonstrates how we filter the new embedded ...
-	type NewEmbeddings interface {
-		string	// should not be filtered
-	
-		struct {
-			// contains filtered or unexported fields
-		}
-		~struct {
-			// contains filtered or unexported fields
-		}
-		*struct {
-			// contains filtered or unexported fields
-		}
-		struct {
-			// contains filtered or unexported fields
-		} | ~struct {
-			// contains filtered or unexported fields
-		}
-		// contains filtered or unexported methods
-	}
-
-	// Parameterized types should be shown. 
-	type Type[P any] struct {
-		Field P
-	}
-
-	// Variables with an instantiated type should be shown. 
-	var X Type[int]
-
-	// Constructors for parameterized types should be shown. 
-	func Constructor[lowerCase any]() Type[lowerCase]
-
-	// MethodA uses a different name for its receiver type parameter. 
-	func (t Type[A]) MethodA(p A)
-
-	// MethodB has a blank receiver type parameter. 
-	func (t Type[_]) MethodB()
-
-	// MethodC has a lower-case receiver type parameter. 
-	func (t Type[c]) MethodC()
-
diff --git a/internal/backport/go/doc/testdata/generics.go b/internal/backport/go/doc/testdata/generics.go
deleted file mode 100644
index ba7187e..0000000
--- a/internal/backport/go/doc/testdata/generics.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package generics contains the new syntax supporting generic programming in
-// Go.
-package generics
-
-// Variables with an instantiated type should be shown.
-var X Type[int]
-
-// Parameterized types should be shown.
-type Type[P any] struct {
-	Field P
-}
-
-// Constructors for parameterized types should be shown.
-func Constructor[lowerCase any]() Type[lowerCase] {
-	return Type[lowerCase]{}
-}
-
-// MethodA uses a different name for its receiver type parameter.
-func (t Type[A]) MethodA(p A) {}
-
-// MethodB has a blank receiver type parameter.
-func (t Type[_]) MethodB() {}
-
-// MethodC has a lower-case receiver type parameter.
-func (t Type[c]) MethodC() {}
-
-// Constraint is a constraint interface with two type parameters.
-type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
-	~int | ~byte | Type[string]
-	M() P
-}
-
-// int16 shadows the predeclared type int16.
-type int16 int
-
-// NewEmbeddings demonstrates how we filter the new embedded elements.
-type NewEmbeddings interface {
-	string // should not be filtered
-	int16
-	struct{ f int }
-	~struct{ f int }
-	*struct{ f int }
-	struct{ f int } | ~struct{ f int }
-}
-
-// Func has an instantiated constraint.
-func Func[T Constraint[string, Type[int]]]() {}
-
-// AnotherFunc has an implicit constraint interface.
-//
-// Neither type parameters nor regular parameters should be filtered.
-func AnotherFunc[T ~struct{ f int }](_ struct{ f int }) {}
-
-// AFuncType demonstrates filtering of parameters and type parameters. Here we
-// don't filter type parameters (to be consistent with function declarations),
-// but DO filter the RHS.
-type AFuncType[T ~struct{ f int }] func(_ struct{ f int })
-
-// See issue #49477: type parameters should not be interpreted as named types
-// for the purpose of determining whether a function is a factory function.
-
-// Slice is not a factory function.
-func Slice[T any]() []T {
-	return nil
-}
-
-// Single is not a factory function.
-func Single[T any]() *T {
-	return nil
-}
diff --git a/internal/backport/go/doc/testdata/issue12839.0.golden b/internal/backport/go/doc/testdata/issue12839.0.golden
deleted file mode 100644
index 6b59774..0000000
--- a/internal/backport/go/doc/testdata/issue12839.0.golden
+++ /dev/null
@@ -1,51 +0,0 @@
-// Package issue12839 is a go/doc test to test association of a ...
-PACKAGE issue12839
-
-IMPORTPATH
-	testdata/issue12839
-
-IMPORTS
-	p
-
-FILENAMES
-	testdata/issue12839.go
-
-FUNCTIONS
-	// F1 should not be associated with T1 
-	func F1() (*T1, *T2)
-
-	// F10 should not be associated with T1. 
-	func F10() (T1, T2, error)
-
-	// F4 should not be associated with a type (same as F1) 
-	func F4() (a T1, b T2)
-
-	// F9 should not be associated with T1. 
-	func F9() (int, T1, T2)
-
-
-TYPES
-	// 
-	type T1 struct{}
-
-	// F2 should be associated with T1 
-	func F2() (a, b, c T1)
-
-	// F3 should be associated with T1 because b.T3 is from a ...
-	func F3() (a T1, b p.T3)
-
-	// F5 should be associated with T1. 
-	func F5() (T1, error)
-
-	// F6 should be associated with T1. 
-	func F6() (*T1, error)
-
-	// F7 should be associated with T1. 
-	func F7() (T1, string)
-
-	// F8 should be associated with T1. 
-	func F8() (int, T1, string)
-
-	// 
-	type T2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/issue12839.1.golden b/internal/backport/go/doc/testdata/issue12839.1.golden
deleted file mode 100644
index 4b9b9f6..0000000
--- a/internal/backport/go/doc/testdata/issue12839.1.golden
+++ /dev/null
@@ -1,54 +0,0 @@
-// Package issue12839 is a go/doc test to test association of a ...
-PACKAGE issue12839
-
-IMPORTPATH
-	testdata/issue12839
-
-IMPORTS
-	p
-
-FILENAMES
-	testdata/issue12839.go
-
-FUNCTIONS
-	// F1 should not be associated with T1 
-	func F1() (*T1, *T2)
-
-	// F10 should not be associated with T1. 
-	func F10() (T1, T2, error)
-
-	// F4 should not be associated with a type (same as F1) 
-	func F4() (a T1, b T2)
-
-	// F9 should not be associated with T1. 
-	func F9() (int, T1, T2)
-
-
-TYPES
-	// 
-	type T1 struct{}
-
-	// F2 should be associated with T1 
-	func F2() (a, b, c T1)
-
-	// F3 should be associated with T1 because b.T3 is from a ...
-	func F3() (a T1, b p.T3)
-
-	// F5 should be associated with T1. 
-	func F5() (T1, error)
-
-	// F6 should be associated with T1. 
-	func F6() (*T1, error)
-
-	// F7 should be associated with T1. 
-	func F7() (T1, string)
-
-	// F8 should be associated with T1. 
-	func F8() (int, T1, string)
-
-	// 
-	func (t T1) hello() string
-
-	// 
-	type T2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/issue12839.2.golden b/internal/backport/go/doc/testdata/issue12839.2.golden
deleted file mode 100644
index 6b59774..0000000
--- a/internal/backport/go/doc/testdata/issue12839.2.golden
+++ /dev/null
@@ -1,51 +0,0 @@
-// Package issue12839 is a go/doc test to test association of a ...
-PACKAGE issue12839
-
-IMPORTPATH
-	testdata/issue12839
-
-IMPORTS
-	p
-
-FILENAMES
-	testdata/issue12839.go
-
-FUNCTIONS
-	// F1 should not be associated with T1 
-	func F1() (*T1, *T2)
-
-	// F10 should not be associated with T1. 
-	func F10() (T1, T2, error)
-
-	// F4 should not be associated with a type (same as F1) 
-	func F4() (a T1, b T2)
-
-	// F9 should not be associated with T1. 
-	func F9() (int, T1, T2)
-
-
-TYPES
-	// 
-	type T1 struct{}
-
-	// F2 should be associated with T1 
-	func F2() (a, b, c T1)
-
-	// F3 should be associated with T1 because b.T3 is from a ...
-	func F3() (a T1, b p.T3)
-
-	// F5 should be associated with T1. 
-	func F5() (T1, error)
-
-	// F6 should be associated with T1. 
-	func F6() (*T1, error)
-
-	// F7 should be associated with T1. 
-	func F7() (T1, string)
-
-	// F8 should be associated with T1. 
-	func F8() (int, T1, string)
-
-	// 
-	type T2 struct{}
-
diff --git a/internal/backport/go/doc/testdata/issue12839.go b/internal/backport/go/doc/testdata/issue12839.go
deleted file mode 100644
index 51c7ac1..0000000
--- a/internal/backport/go/doc/testdata/issue12839.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package issue12839 is a go/doc test to test association of a function
-// that returns multiple types.
-// See golang.org/issue/12839.
-// (See also golang.org/issue/27928.)
-package issue12839
-
-import "p"
-
-type T1 struct{}
-
-type T2 struct{}
-
-func (t T1) hello() string {
-	return "hello"
-}
-
-// F1 should not be associated with T1
-func F1() (*T1, *T2) {
-	return &T1{}, &T2{}
-}
-
-// F2 should be associated with T1
-func F2() (a, b, c T1) {
-	return T1{}, T1{}, T1{}
-}
-
-// F3 should be associated with T1 because b.T3 is from a different package
-func F3() (a T1, b p.T3) {
-	return T1{}, p.T3{}
-}
-
-// F4 should not be associated with a type (same as F1)
-func F4() (a T1, b T2) {
-	return T1{}, T2{}
-}
-
-// F5 should be associated with T1.
-func F5() (T1, error) {
-	return T1{}, nil
-}
-
-// F6 should be associated with T1.
-func F6() (*T1, error) {
-	return &T1{}, nil
-}
-
-// F7 should be associated with T1.
-func F7() (T1, string) {
-	return T1{}, nil
-}
-
-// F8 should be associated with T1.
-func F8() (int, T1, string) {
-	return 0, T1{}, nil
-}
-
-// F9 should not be associated with T1.
-func F9() (int, T1, T2) {
-	return 0, T1{}, T2{}
-}
-
-// F10 should not be associated with T1.
-func F10() (T1, T2, error) {
-	return T1{}, T2{}, nil
-}
diff --git a/internal/backport/go/doc/testdata/issue13742.0.golden b/internal/backport/go/doc/testdata/issue13742.0.golden
deleted file mode 100644
index 8dee9aa..0000000
--- a/internal/backport/go/doc/testdata/issue13742.0.golden
+++ /dev/null
@@ -1,25 +0,0 @@
-// 
-PACKAGE issue13742
-
-IMPORTPATH
-	testdata/issue13742
-
-IMPORTS
-	go/ast
-
-FILENAMES
-	testdata/issue13742.go
-
-FUNCTIONS
-	// Both F0 and G0 should appear as functions. 
-	func F0(Node)
-
-	// Both F1 and G1 should appear as functions. 
-	func F1(ast.Node)
-
-	// 
-	func G0() Node
-
-	// 
-	func G1() ast.Node
-
diff --git a/internal/backport/go/doc/testdata/issue13742.1.golden b/internal/backport/go/doc/testdata/issue13742.1.golden
deleted file mode 100644
index 8dee9aa..0000000
--- a/internal/backport/go/doc/testdata/issue13742.1.golden
+++ /dev/null
@@ -1,25 +0,0 @@
-// 
-PACKAGE issue13742
-
-IMPORTPATH
-	testdata/issue13742
-
-IMPORTS
-	go/ast
-
-FILENAMES
-	testdata/issue13742.go
-
-FUNCTIONS
-	// Both F0 and G0 should appear as functions. 
-	func F0(Node)
-
-	// Both F1 and G1 should appear as functions. 
-	func F1(ast.Node)
-
-	// 
-	func G0() Node
-
-	// 
-	func G1() ast.Node
-
diff --git a/internal/backport/go/doc/testdata/issue13742.2.golden b/internal/backport/go/doc/testdata/issue13742.2.golden
deleted file mode 100644
index 8dee9aa..0000000
--- a/internal/backport/go/doc/testdata/issue13742.2.golden
+++ /dev/null
@@ -1,25 +0,0 @@
-// 
-PACKAGE issue13742
-
-IMPORTPATH
-	testdata/issue13742
-
-IMPORTS
-	go/ast
-
-FILENAMES
-	testdata/issue13742.go
-
-FUNCTIONS
-	// Both F0 and G0 should appear as functions. 
-	func F0(Node)
-
-	// Both F1 and G1 should appear as functions. 
-	func F1(ast.Node)
-
-	// 
-	func G0() Node
-
-	// 
-	func G1() ast.Node
-
diff --git a/internal/backport/go/doc/testdata/issue13742.go b/internal/backport/go/doc/testdata/issue13742.go
deleted file mode 100644
index dbc1941..0000000
--- a/internal/backport/go/doc/testdata/issue13742.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue13742
-
-import (
-	"go/ast"
-	. "go/ast"
-)
-
-// Both F0 and G0 should appear as functions.
-func F0(Node)  {}
-func G0() Node { return nil }
-
-// Both F1 and G1 should appear as functions.
-func F1(ast.Node)  {}
-func G1() ast.Node { return nil }
diff --git a/internal/backport/go/doc/testdata/issue16153.0.golden b/internal/backport/go/doc/testdata/issue16153.0.golden
deleted file mode 100644
index 189260b..0000000
--- a/internal/backport/go/doc/testdata/issue16153.0.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-// 
-PACKAGE issue16153
-
-IMPORTPATH
-	testdata/issue16153
-
-FILENAMES
-	testdata/issue16153.go
-
-CONSTANTS
-	// 
-	const (
-		X3	int64	= iota
-		Y3		= 1
-	)
-
-	// 
-	const (
-		X4	int64	= iota
-		Y4
-	)
-
-	// original test case 
-	const (
-		Y1 = 256
-	)
-
-	// variations 
-	const (
-		Y2 uint8
-	)
-
diff --git a/internal/backport/go/doc/testdata/issue16153.1.golden b/internal/backport/go/doc/testdata/issue16153.1.golden
deleted file mode 100644
index 803df3e..0000000
--- a/internal/backport/go/doc/testdata/issue16153.1.golden
+++ /dev/null
@@ -1,34 +0,0 @@
-// 
-PACKAGE issue16153
-
-IMPORTPATH
-	testdata/issue16153
-
-FILENAMES
-	testdata/issue16153.go
-
-CONSTANTS
-	// original test case 
-	const (
-		x1	uint8	= 255
-		Y1		= 256
-	)
-
-	// variations 
-	const (
-		x2	uint8	= 255
-		Y2
-	)
-
-	// 
-	const (
-		X3	int64	= iota
-		Y3		= 1
-	)
-
-	// 
-	const (
-		X4	int64	= iota
-		Y4
-	)
-
diff --git a/internal/backport/go/doc/testdata/issue16153.2.golden b/internal/backport/go/doc/testdata/issue16153.2.golden
deleted file mode 100644
index 189260b..0000000
--- a/internal/backport/go/doc/testdata/issue16153.2.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-// 
-PACKAGE issue16153
-
-IMPORTPATH
-	testdata/issue16153
-
-FILENAMES
-	testdata/issue16153.go
-
-CONSTANTS
-	// 
-	const (
-		X3	int64	= iota
-		Y3		= 1
-	)
-
-	// 
-	const (
-		X4	int64	= iota
-		Y4
-	)
-
-	// original test case 
-	const (
-		Y1 = 256
-	)
-
-	// variations 
-	const (
-		Y2 uint8
-	)
-
diff --git a/internal/backport/go/doc/testdata/issue16153.go b/internal/backport/go/doc/testdata/issue16153.go
deleted file mode 100644
index 528be42..0000000
--- a/internal/backport/go/doc/testdata/issue16153.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue16153
-
-// original test case
-const (
-	x1 uint8 = 255
-	Y1       = 256
-)
-
-// variations
-const (
-	x2 uint8 = 255
-	Y2
-)
-
-const (
-	X3 int64 = iota
-	Y3       = 1
-)
-
-const (
-	X4 int64 = iota
-	Y4
-)
diff --git a/internal/backport/go/doc/testdata/issue17788.0.golden b/internal/backport/go/doc/testdata/issue17788.0.golden
deleted file mode 100644
index 42c00da..0000000
--- a/internal/backport/go/doc/testdata/issue17788.0.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-// 
-PACKAGE issue17788
-
-IMPORTPATH
-	testdata/issue17788
-
-FILENAMES
-	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.1.golden b/internal/backport/go/doc/testdata/issue17788.1.golden
deleted file mode 100644
index 42c00da..0000000
--- a/internal/backport/go/doc/testdata/issue17788.1.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-// 
-PACKAGE issue17788
-
-IMPORTPATH
-	testdata/issue17788
-
-FILENAMES
-	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.2.golden b/internal/backport/go/doc/testdata/issue17788.2.golden
deleted file mode 100644
index 42c00da..0000000
--- a/internal/backport/go/doc/testdata/issue17788.2.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-// 
-PACKAGE issue17788
-
-IMPORTPATH
-	testdata/issue17788
-
-FILENAMES
-	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.go b/internal/backport/go/doc/testdata/issue17788.go
deleted file mode 100644
index 883ad5f..0000000
--- a/internal/backport/go/doc/testdata/issue17788.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue17788
-
-func ( /* receiver type */ ) f0() {
-}
diff --git a/internal/backport/go/doc/testdata/issue22856.0.golden b/internal/backport/go/doc/testdata/issue22856.0.golden
deleted file mode 100644
index a88f43f..0000000
--- a/internal/backport/go/doc/testdata/issue22856.0.golden
+++ /dev/null
@@ -1,45 +0,0 @@
-// 
-PACKAGE issue22856
-
-IMPORTPATH
-	testdata/issue22856
-
-FILENAMES
-	testdata/issue22856.go
-
-FUNCTIONS
-	// NewPointerSliceOfSlice is not a factory function because slices ...
-	func NewPointerSliceOfSlice() [][]*T
-
-	// NewSlice3 is not a factory function because 3 nested slices of ...
-	func NewSlice3() [][][]T
-
-	// NewSliceOfSlice is not a factory function because slices of a ...
-	func NewSliceOfSlice() [][]T
-
-
-TYPES
-	// 
-	type T struct{}
-
-	// 
-	func New() T
-
-	// 
-	func NewArray() [1]T
-
-	// 
-	func NewPointer() *T
-
-	// 
-	func NewPointerArray() [1]*T
-
-	// 
-	func NewPointerOfPointer() **T
-
-	// 
-	func NewPointerSlice() []*T
-
-	// 
-	func NewSlice() []T
-
diff --git a/internal/backport/go/doc/testdata/issue22856.1.golden b/internal/backport/go/doc/testdata/issue22856.1.golden
deleted file mode 100644
index a88f43f..0000000
--- a/internal/backport/go/doc/testdata/issue22856.1.golden
+++ /dev/null
@@ -1,45 +0,0 @@
-// 
-PACKAGE issue22856
-
-IMPORTPATH
-	testdata/issue22856
-
-FILENAMES
-	testdata/issue22856.go
-
-FUNCTIONS
-	// NewPointerSliceOfSlice is not a factory function because slices ...
-	func NewPointerSliceOfSlice() [][]*T
-
-	// NewSlice3 is not a factory function because 3 nested slices of ...
-	func NewSlice3() [][][]T
-
-	// NewSliceOfSlice is not a factory function because slices of a ...
-	func NewSliceOfSlice() [][]T
-
-
-TYPES
-	// 
-	type T struct{}
-
-	// 
-	func New() T
-
-	// 
-	func NewArray() [1]T
-
-	// 
-	func NewPointer() *T
-
-	// 
-	func NewPointerArray() [1]*T
-
-	// 
-	func NewPointerOfPointer() **T
-
-	// 
-	func NewPointerSlice() []*T
-
-	// 
-	func NewSlice() []T
-
diff --git a/internal/backport/go/doc/testdata/issue22856.2.golden b/internal/backport/go/doc/testdata/issue22856.2.golden
deleted file mode 100644
index a88f43f..0000000
--- a/internal/backport/go/doc/testdata/issue22856.2.golden
+++ /dev/null
@@ -1,45 +0,0 @@
-// 
-PACKAGE issue22856
-
-IMPORTPATH
-	testdata/issue22856
-
-FILENAMES
-	testdata/issue22856.go
-
-FUNCTIONS
-	// NewPointerSliceOfSlice is not a factory function because slices ...
-	func NewPointerSliceOfSlice() [][]*T
-
-	// NewSlice3 is not a factory function because 3 nested slices of ...
-	func NewSlice3() [][][]T
-
-	// NewSliceOfSlice is not a factory function because slices of a ...
-	func NewSliceOfSlice() [][]T
-
-
-TYPES
-	// 
-	type T struct{}
-
-	// 
-	func New() T
-
-	// 
-	func NewArray() [1]T
-
-	// 
-	func NewPointer() *T
-
-	// 
-	func NewPointerArray() [1]*T
-
-	// 
-	func NewPointerOfPointer() **T
-
-	// 
-	func NewPointerSlice() []*T
-
-	// 
-	func NewSlice() []T
-
diff --git a/internal/backport/go/doc/testdata/issue22856.go b/internal/backport/go/doc/testdata/issue22856.go
deleted file mode 100644
index f456998..0000000
--- a/internal/backport/go/doc/testdata/issue22856.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue22856
-
-type T struct{}
-
-func New() T                   { return T{} }
-func NewPointer() *T           { return &T{} }
-func NewPointerSlice() []*T    { return []*T{&T{}} }
-func NewSlice() []T            { return []T{T{}} }
-func NewPointerOfPointer() **T { x := &T{}; return &x }
-func NewArray() [1]T           { return [1]T{T{}} }
-func NewPointerArray() [1]*T   { return [1]*T{&T{}} }
-
-// NewSliceOfSlice is not a factory function because slices of a slice of
-// type *T are not factory functions of type T.
-func NewSliceOfSlice() [][]T { return []T{[]T{}} }
-
-// NewPointerSliceOfSlice is not a factory function because slices of a
-// slice of type *T are not factory functions of type T.
-func NewPointerSliceOfSlice() [][]*T { return []*T{[]*T{}} }
-
-// NewSlice3 is not a factory function because 3 nested slices of type T
-// are not factory functions of type T.
-func NewSlice3() [][][]T { return []T{[]T{[]T{}}} }
diff --git a/internal/backport/go/doc/testdata/pkgdoc/doc.go b/internal/backport/go/doc/testdata/pkgdoc/doc.go
deleted file mode 100644
index 61bd4e3..0000000
--- a/internal/backport/go/doc/testdata/pkgdoc/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pkgdoc
-
-import (
-	crand "crypto/rand"
-	"math/rand"
-)
-
-type T int
-
-type U int
-
-func (T) M() {}
-
-var _ = rand.Int
-var _ = crand.Reader
diff --git a/internal/backport/go/doc/testdata/predeclared.0.golden b/internal/backport/go/doc/testdata/predeclared.0.golden
deleted file mode 100644
index 9f37b06..0000000
--- a/internal/backport/go/doc/testdata/predeclared.0.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package predeclared is a go/doc test for handling of exported ...
-PACKAGE predeclared
-
-IMPORTPATH
-	testdata/predeclared
-
-FILENAMES
-	testdata/predeclared.go
diff --git a/internal/backport/go/doc/testdata/predeclared.1.golden b/internal/backport/go/doc/testdata/predeclared.1.golden
deleted file mode 100644
index 2ff8ee6..0000000
--- a/internal/backport/go/doc/testdata/predeclared.1.golden
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package predeclared is a go/doc test for handling of exported ...
-PACKAGE predeclared
-
-IMPORTPATH
-	testdata/predeclared
-
-FILENAMES
-	testdata/predeclared.go
-
-TYPES
-	// 
-	type bool int
-
-	// Must not be visible. 
-	func (b bool) String() string
-
-	// 
-	type error struct{}
-
-	// Must not be visible. 
-	func (e error) Error() string
-
diff --git a/internal/backport/go/doc/testdata/predeclared.2.golden b/internal/backport/go/doc/testdata/predeclared.2.golden
deleted file mode 100644
index 9f37b06..0000000
--- a/internal/backport/go/doc/testdata/predeclared.2.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package predeclared is a go/doc test for handling of exported ...
-PACKAGE predeclared
-
-IMPORTPATH
-	testdata/predeclared
-
-FILENAMES
-	testdata/predeclared.go
diff --git a/internal/backport/go/doc/testdata/predeclared.go b/internal/backport/go/doc/testdata/predeclared.go
deleted file mode 100644
index c6dd806..0000000
--- a/internal/backport/go/doc/testdata/predeclared.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package predeclared is a go/doc test for handling of
-// exported methods on locally-defined predeclared types.
-// See issue 9860.
-package predeclared
-
-type error struct{}
-
-// Must not be visible.
-func (e error) Error() string {
-	return ""
-}
-
-type bool int
-
-// Must not be visible.
-func (b bool) String() string {
-	return ""
-}
diff --git a/internal/backport/go/doc/testdata/template.txt b/internal/backport/go/doc/testdata/template.txt
deleted file mode 100644
index 1b07382..0000000
--- a/internal/backport/go/doc/testdata/template.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-{{synopsis .Doc}}
-PACKAGE {{.Name}}
-
-IMPORTPATH
-	{{.ImportPath}}
-
-{{with .Imports}}IMPORTS
-{{range .}}	{{.}}
-{{end}}
-{{end}}{{/*
-
-*/}}FILENAMES
-{{range .Filenames}}	{{.}}
-{{end}}{{/*
-
-*/}}{{with .Consts}}
-CONSTANTS
-{{range .}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{end}}{{/*
-
-*/}}{{with .Vars}}
-VARIABLES
-{{range .}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{end}}{{/*
-
-*/}}{{with .Funcs}}
-FUNCTIONS
-{{range .}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{end}}{{/*
-
-*/}}{{with .Types}}
-TYPES
-{{range .}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{range .Consts}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{/*
-
-*/}}{{range .Vars}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{/*
-
-*/}}{{range .Funcs}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{/*
-
-*/}}{{range .Methods}}	{{synopsis .Doc}}
-	{{node .Decl $.FSet}}
-
-{{end}}{{end}}{{end}}{{/*
-
-*/}}{{with .Bugs}}
-BUGS .Bugs is now deprecated, please use .Notes instead
-{{range .}}{{indent "\t" .}}
-{{end}}{{end}}{{with .Notes}}{{range $marker, $content := .}}
-{{$marker}}S
-{{range $content}}{{$marker}}({{.UID}}){{indent "\t" .Body}}
-{{end}}{{end}}{{end}}
\ No newline at end of file
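[Editorial aside, not part of the patch: the deleted template above renders the *.golden files in this directory; its synopsis, node, and indent helpers are custom functions the go/doc tests register on a text/template. A rough, hypothetical illustration of that mechanism — the "synopsis" below is a deliberately simplified stand-in, not the tests' actual implementation.]

	package main

	import (
		"os"
		"strings"
		"text/template"
	)

	func main() {
		// Register a custom helper the way the golden-file tests do.
		funcs := template.FuncMap{
			"synopsis": func(s string) string {
				if i := strings.Index(s, ". "); i >= 0 {
					return s[:i+1] // first sentence only, simplified
				}
				return strings.TrimSpace(s)
			},
		}
		tmpl := template.Must(template.New("golden").Funcs(funcs).Parse(
			"// {{synopsis .Doc}}\nPACKAGE {{.Name}}\n"))
		data := struct{ Doc, Name string }{"Package demo is a sample. It has more text.", "demo"}
		if err := tmpl.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}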
diff --git a/internal/backport/go/doc/testdata/testing.0.golden b/internal/backport/go/doc/testdata/testing.0.golden
deleted file mode 100644
index 61dac8b..0000000
--- a/internal/backport/go/doc/testdata/testing.0.golden
+++ /dev/null
@@ -1,156 +0,0 @@
-// Package testing provides support for automated testing of Go ...
-PACKAGE testing
-
-IMPORTPATH
-	testdata/testing
-
-IMPORTS
-	bytes
-	flag
-	fmt
-	io
-	os
-	runtime
-	runtime/pprof
-	strconv
-	strings
-	time
-
-FILENAMES
-	testdata/benchmark.go
-	testdata/example.go
-	testdata/testing.go
-
-FUNCTIONS
-	// An internal function but exported because it is cross-package; ...
-	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
-
-	// An internal function but exported because it is cross-package; ...
-	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
-
-	// 
-	func RunExamples(examples []InternalExample) (ok bool)
-
-	// 
-	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
-
-	// Short reports whether the -test.short flag is set. 
-	func Short() bool
-
-
-TYPES
-	// B is a type passed to Benchmark functions to manage benchmark ...
-	type B struct {
-		N int
-		// contains filtered or unexported fields
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *B) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *B) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *B) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *B) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *B) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *B) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *B) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *B) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *B) Logf(format string, args ...any)
-
-	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
-	func (b *B) ResetTimer()
-
-	// SetBytes records the number of bytes processed in a single ...
-	func (b *B) SetBytes(n int64)
-
-	// StartTimer starts timing a test. This function is called ...
-	func (b *B) StartTimer()
-
-	// StopTimer stops timing a test. This can be used to pause the ...
-	func (b *B) StopTimer()
-
-	// The results of a benchmark run. 
-	type BenchmarkResult struct {
-		N	int		// The number of iterations.
-		T	time.Duration	// The total time taken.
-		Bytes	int64		// Bytes processed in one iteration.
-	}
-
-	// Benchmark benchmarks a single function. Useful for creating ...
-	func Benchmark(f func(b *B)) BenchmarkResult
-
-	// 
-	func (r BenchmarkResult) NsPerOp() int64
-
-	// 
-	func (r BenchmarkResult) String() string
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalBenchmark struct {
-		Name	string
-		F	func(b *B)
-	}
-
-	// 
-	type InternalExample struct {
-		Name	string
-		F	func()
-		Output	string
-	}
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalTest struct {
-		Name	string
-		F	func(*T)
-	}
-
-	// T is a type passed to Test functions to manage test state and ...
-	type T struct {
-		// contains filtered or unexported fields
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *T) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *T) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *T) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *T) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *T) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *T) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *T) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *T) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *T) Logf(format string, args ...any)
-
-	// Parallel signals that this test is to be run in parallel with ...
-	func (t *T) Parallel()
-
diff --git a/internal/backport/go/doc/testdata/testing.1.golden b/internal/backport/go/doc/testdata/testing.1.golden
deleted file mode 100644
index 1655af1..0000000
--- a/internal/backport/go/doc/testdata/testing.1.golden
+++ /dev/null
@@ -1,298 +0,0 @@
-// Package testing provides support for automated testing of Go ...
-PACKAGE testing
-
-IMPORTPATH
-	testdata/testing
-
-IMPORTS
-	bytes
-	flag
-	fmt
-	io
-	os
-	runtime
-	runtime/pprof
-	strconv
-	strings
-	time
-
-FILENAMES
-	testdata/benchmark.go
-	testdata/example.go
-	testdata/testing.go
-
-VARIABLES
-	// 
-	var (
-		// The short flag requests that tests run more quickly, but its functionality
-		// is provided by test writers themselves. The testing package is just its
-		// home. The all.bash installation script sets it to make installation more
-		// efficient, but by default the flag is off so a plain "go test" will do a
-		// full test of the package.
-		short	= flag.Bool("test.short", false, "run smaller test suite to save time")
-	
-		// Report as tests are run; default is silent for success.
-		chatty		= flag.Bool("test.v", false, "verbose: print additional output")
-		match		= flag.String("test.run", "", "regular expression to select tests to run")
-		memProfile	= flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
-		memProfileRate	= flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
-		cpuProfile	= flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
-		timeout		= flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
-		cpuListStr	= flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
-		parallel	= flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
-	
-		cpuList	[]int
-	)
-
-	// 
-	var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
-
-	// 
-	var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-
-	// 
-	var timer *time.Timer
-
-
-FUNCTIONS
-	// An internal function but exported because it is cross-package; ...
-	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
-
-	// An internal function but exported because it is cross-package; ...
-	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
-
-	// 
-	func RunExamples(examples []InternalExample) (ok bool)
-
-	// 
-	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
-
-	// Short reports whether the -test.short flag is set. 
-	func Short() bool
-
-	// after runs after all testing. 
-	func after()
-
-	// alarm is called if the timeout expires. 
-	func alarm()
-
-	// before runs before all testing. 
-	func before()
-
-	// decorate inserts the final newline if needed and indentation ...
-	func decorate(s string, addFileLine bool) string
-
-	// 
-	func max(x, y int) int
-
-	// 
-	func min(x, y int) int
-
-	// 
-	func parseCpuList()
-
-	// roundDown10 rounds a number down to the nearest power of 10. 
-	func roundDown10(n int) int
-
-	// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. 
-	func roundUp(n int) int
-
-	// startAlarm starts an alarm if requested. 
-	func startAlarm()
-
-	// stopAlarm turns off the alarm. 
-	func stopAlarm()
-
-	// 
-	func tRunner(t *T, test *InternalTest)
-
-
-TYPES
-	// B is a type passed to Benchmark functions to manage benchmark ...
-	type B struct {
-		common
-		N		int
-		benchmark	InternalBenchmark
-		bytes		int64
-		timerOn		bool
-		result		BenchmarkResult
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *B) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *B) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *B) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *B) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *B) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *B) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *B) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *B) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *B) Logf(format string, args ...any)
-
-	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
-	func (b *B) ResetTimer()
-
-	// SetBytes records the number of bytes processed in a single ...
-	func (b *B) SetBytes(n int64)
-
-	// StartTimer starts timing a test. This function is called ...
-	func (b *B) StartTimer()
-
-	// StopTimer stops timing a test. This can be used to pause the ...
-	func (b *B) StopTimer()
-
-	// launch launches the benchmark function. It gradually increases ...
-	func (b *B) launch()
-
-	// log generates the output. It's always at the same stack depth. 
-	func (c *B) log(s string)
-
-	// 
-	func (b *B) nsPerOp() int64
-
-	// run times the benchmark function in a separate goroutine. 
-	func (b *B) run() BenchmarkResult
-
-	// runN runs a single benchmark for the specified number of ...
-	func (b *B) runN(n int)
-
-	// trimOutput shortens the output from a benchmark, which can be ...
-	func (b *B) trimOutput()
-
-	// The results of a benchmark run. 
-	type BenchmarkResult struct {
-		N	int		// The number of iterations.
-		T	time.Duration	// The total time taken.
-		Bytes	int64		// Bytes processed in one iteration.
-	}
-
-	// Benchmark benchmarks a single function. Useful for creating ...
-	func Benchmark(f func(b *B)) BenchmarkResult
-
-	// 
-	func (r BenchmarkResult) NsPerOp() int64
-
-	// 
-	func (r BenchmarkResult) String() string
-
-	// 
-	func (r BenchmarkResult) mbPerSec() float64
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalBenchmark struct {
-		Name	string
-		F	func(b *B)
-	}
-
-	// 
-	type InternalExample struct {
-		Name	string
-		F	func()
-		Output	string
-	}
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalTest struct {
-		Name	string
-		F	func(*T)
-	}
-
-	// T is a type passed to Test functions to manage test state and ...
-	type T struct {
-		common
-		name		string		// Name of test.
-		startParallel	chan bool	// Parallel tests will wait on this.
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *T) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *T) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *T) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *T) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *T) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *T) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *T) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *T) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *T) Logf(format string, args ...any)
-
-	// Parallel signals that this test is to be run in parallel with ...
-	func (t *T) Parallel()
-
-	// log generates the output. It's always at the same stack depth. 
-	func (c *T) log(s string)
-
-	// 
-	func (t *T) report()
-
-	// common holds the elements common between T and B and captures ...
-	type common struct {
-		output		[]byte		// Output generated by test or benchmark.
-		failed		bool		// Test or benchmark has failed.
-		start		time.Time	// Time test or benchmark started
-		duration	time.Duration
-		self		any		// To be sent on signal channel when done.
-		signal		chan any	// Output for serial tests.
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *common) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *common) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *common) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *common) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *common) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *common) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *common) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *common) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *common) Logf(format string, args ...any)
-
-	// log generates the output. It's always at the same stack depth. 
-	func (c *common) log(s string)
-
diff --git a/internal/backport/go/doc/testdata/testing.2.golden b/internal/backport/go/doc/testdata/testing.2.golden
deleted file mode 100644
index 61dac8b..0000000
--- a/internal/backport/go/doc/testdata/testing.2.golden
+++ /dev/null
@@ -1,156 +0,0 @@
-// Package testing provides support for automated testing of Go ...
-PACKAGE testing
-
-IMPORTPATH
-	testdata/testing
-
-IMPORTS
-	bytes
-	flag
-	fmt
-	io
-	os
-	runtime
-	runtime/pprof
-	strconv
-	strings
-	time
-
-FILENAMES
-	testdata/benchmark.go
-	testdata/example.go
-	testdata/testing.go
-
-FUNCTIONS
-	// An internal function but exported because it is cross-package; ...
-	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
-
-	// An internal function but exported because it is cross-package; ...
-	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
-
-	// 
-	func RunExamples(examples []InternalExample) (ok bool)
-
-	// 
-	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
-
-	// Short reports whether the -test.short flag is set. 
-	func Short() bool
-
-
-TYPES
-	// B is a type passed to Benchmark functions to manage benchmark ...
-	type B struct {
-		N int
-		// contains filtered or unexported fields
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *B) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *B) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *B) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *B) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *B) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *B) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *B) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *B) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *B) Logf(format string, args ...any)
-
-	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
-	func (b *B) ResetTimer()
-
-	// SetBytes records the number of bytes processed in a single ...
-	func (b *B) SetBytes(n int64)
-
-	// StartTimer starts timing a test. This function is called ...
-	func (b *B) StartTimer()
-
-	// StopTimer stops timing a test. This can be used to pause the ...
-	func (b *B) StopTimer()
-
-	// The results of a benchmark run. 
-	type BenchmarkResult struct {
-		N	int		// The number of iterations.
-		T	time.Duration	// The total time taken.
-		Bytes	int64		// Bytes processed in one iteration.
-	}
-
-	// Benchmark benchmarks a single function. Useful for creating ...
-	func Benchmark(f func(b *B)) BenchmarkResult
-
-	// 
-	func (r BenchmarkResult) NsPerOp() int64
-
-	// 
-	func (r BenchmarkResult) String() string
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalBenchmark struct {
-		Name	string
-		F	func(b *B)
-	}
-
-	// 
-	type InternalExample struct {
-		Name	string
-		F	func()
-		Output	string
-	}
-
-	// An internal type but exported because it is cross-package; part ...
-	type InternalTest struct {
-		Name	string
-		F	func(*T)
-	}
-
-	// T is a type passed to Test functions to manage test state and ...
-	type T struct {
-		// contains filtered or unexported fields
-	}
-
-	// Error is equivalent to Log() followed by Fail(). 
-	func (c *T) Error(args ...any)
-
-	// Errorf is equivalent to Logf() followed by Fail(). 
-	func (c *T) Errorf(format string, args ...any)
-
-	// Fail marks the function as having failed but continues ...
-	func (c *T) Fail()
-
-	// FailNow marks the function as having failed and stops its ...
-	func (c *T) FailNow()
-
-	// Failed reports whether the function has failed. 
-	func (c *T) Failed() bool
-
-	// Fatal is equivalent to Log() followed by FailNow(). 
-	func (c *T) Fatal(args ...any)
-
-	// Fatalf is equivalent to Logf() followed by FailNow(). 
-	func (c *T) Fatalf(format string, args ...any)
-
-	// Log formats its arguments using default formatting, analogous ...
-	func (c *T) Log(args ...any)
-
-	// Logf formats its arguments according to the format, analogous ...
-	func (c *T) Logf(format string, args ...any)
-
-	// Parallel signals that this test is to be run in parallel with ...
-	func (t *T) Parallel()
-
diff --git a/internal/backport/go/doc/testdata/testing.go b/internal/backport/go/doc/testdata/testing.go
deleted file mode 100644
index 6365ffc..0000000
--- a/internal/backport/go/doc/testdata/testing.go
+++ /dev/null
@@ -1,404 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package testing provides support for automated testing of Go packages.
-// It is intended to be used in concert with the “go test” utility, which automates
-// execution of any function of the form
-//     func TestXxx(*testing.T)
-// where Xxx can be any alphanumeric string (but the first letter must not be in
-// [a-z]) and serves to identify the test routine.
-// These TestXxx routines should be declared within the package they are testing.
-//
-// Functions of the form
-//     func BenchmarkXxx(*testing.B)
-// are considered benchmarks, and are executed by go test when the -test.bench
-// flag is provided.
-//
-// A sample benchmark function looks like this:
-//     func BenchmarkHello(b *testing.B) {
-//         for i := 0; i < b.N; i++ {
-//             fmt.Sprintf("hello")
-//         }
-//     }
-// The benchmark package will vary b.N until the benchmark function lasts
-// long enough to be timed reliably. The output
-//     testing.BenchmarkHello    10000000    282 ns/op
-// means that the loop ran 10000000 times at a speed of 282 ns per loop.
-//
-// If a benchmark needs some expensive setup before running, the timer
-// may be stopped:
-//     func BenchmarkBigLen(b *testing.B) {
-//         b.StopTimer()
-//         big := NewBig()
-//         b.StartTimer()
-//         for i := 0; i < b.N; i++ {
-//             big.Len()
-//         }
-//     }
-package testing
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"runtime"
-	"runtime/pprof"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	// The short flag requests that tests run more quickly, but its functionality
-	// is provided by test writers themselves. The testing package is just its
-	// home. The all.bash installation script sets it to make installation more
-	// efficient, but by default the flag is off so a plain "go test" will do a
-	// full test of the package.
-	short = flag.Bool("test.short", false, "run smaller test suite to save time")
-
-	// Report as tests are run; default is silent for success.
-	chatty         = flag.Bool("test.v", false, "verbose: print additional output")
-	match          = flag.String("test.run", "", "regular expression to select tests to run")
-	memProfile     = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
-	memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
-	cpuProfile     = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
-	timeout        = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
-	cpuListStr     = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
-	parallel       = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
-
-	cpuList []int
-)
-
-// common holds the elements common between T and B and
-// captures common methods such as Errorf.
-type common struct {
-	output   []byte    // Output generated by test or benchmark.
-	failed   bool      // Test or benchmark has failed.
-	start    time.Time // Time test or benchmark started
-	duration time.Duration
-	self     any      // To be sent on signal channel when done.
-	signal   chan any // Output for serial tests.
-}
-
-// Short reports whether the -test.short flag is set.
-func Short() bool {
-	return *short
-}
-
-// decorate inserts the final newline if needed and indentation tabs for formatting.
-// If addFileLine is true, it also prefixes the string with the file and line of the call site.
-func decorate(s string, addFileLine bool) string {
-	if addFileLine {
-		_, file, line, ok := runtime.Caller(3) // decorate + log + public function.
-		if ok {
-			// Truncate file name at last file name separator.
-			if index := strings.LastIndex(file, "/"); index >= 0 {
-				file = file[index+1:]
-			} else if index = strings.LastIndex(file, "\\"); index >= 0 {
-				file = file[index+1:]
-			}
-		} else {
-			file = "???"
-			line = 1
-		}
-		s = fmt.Sprintf("%s:%d: %s", file, line, s)
-	}
-	s = "\t" + s // Every line is indented at least one tab.
-	n := len(s)
-	if n > 0 && s[n-1] != '\n' {
-		s += "\n"
-		n++
-	}
-	for i := 0; i < n-1; i++ { // -1 to avoid final newline
-		if s[i] == '\n' {
-			// Second and subsequent lines are indented an extra tab.
-			return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
-		}
-	}
-	return s
-}
-
-// T is a type passed to Test functions to manage test state and support formatted test logs.
-// Logs are accumulated during execution and dumped to standard error when done.
-type T struct {
-	common
-	name          string    // Name of test.
-	startParallel chan bool // Parallel tests will wait on this.
-}
-
-// Fail marks the function as having failed but continues execution.
-func (c *common) Fail() { c.failed = true }
-
-// Failed reports whether the function has failed.
-func (c *common) Failed() bool { return c.failed }
-
-// FailNow marks the function as having failed and stops its execution.
-// Execution will continue at the next Test.
-func (c *common) FailNow() {
-	c.Fail()
-
-	// Calling runtime.Goexit will exit the goroutine, which
-	// will run the deferred functions in this goroutine,
-	// which will eventually run the deferred lines in tRunner,
-	// which will signal to the test loop that this test is done.
-	//
-	// A previous version of this code said:
-	//
-	//	c.duration = ...
-	//	c.signal <- c.self
-	//	runtime.Goexit()
-	//
-	// This previous version duplicated code (those lines are in
-	// tRunner no matter what), but worse the goroutine teardown
-	// implicit in runtime.Goexit was not guaranteed to complete
-	// before the test exited. If a test deferred an important cleanup
-	// function (like removing temporary files), there was no guarantee
-	// it would run on a test failure. Because we send on c.signal during
-	// a top-of-stack deferred function now, we know that the send
-	// only happens after any other stacked defers have completed.
-	runtime.Goexit()
-}
-
-// log generates the output. It's always at the same stack depth.
-func (c *common) log(s string) {
-	c.output = append(c.output, decorate(s, true)...)
-}
-
-// Log formats its arguments using default formatting, analogous to Println(),
-// and records the text in the error log.
-func (c *common) Log(args ...any) { c.log(fmt.Sprintln(args...)) }
-
-// Logf formats its arguments according to the format, analogous to Printf(),
-// and records the text in the error log.
-func (c *common) Logf(format string, args ...any) { c.log(fmt.Sprintf(format, args...)) }
-
-// Error is equivalent to Log() followed by Fail().
-func (c *common) Error(args ...any) {
-	c.log(fmt.Sprintln(args...))
-	c.Fail()
-}
-
-// Errorf is equivalent to Logf() followed by Fail().
-func (c *common) Errorf(format string, args ...any) {
-	c.log(fmt.Sprintf(format, args...))
-	c.Fail()
-}
-
-// Fatal is equivalent to Log() followed by FailNow().
-func (c *common) Fatal(args ...any) {
-	c.log(fmt.Sprintln(args...))
-	c.FailNow()
-}
-
-// Fatalf is equivalent to Logf() followed by FailNow().
-func (c *common) Fatalf(format string, args ...any) {
-	c.log(fmt.Sprintf(format, args...))
-	c.FailNow()
-}
-
-// Parallel signals that this test is to be run in parallel with (and only with)
-// other parallel tests in this CPU group.
-func (t *T) Parallel() {
-	t.signal <- (*T)(nil) // Release main testing loop
-	<-t.startParallel     // Wait for serial tests to finish
-}
-
-// An internal type but exported because it is cross-package; part of the implementation
-// of go test.
-type InternalTest struct {
-	Name string
-	F    func(*T)
-}
-
-func tRunner(t *T, test *InternalTest) {
-	t.start = time.Now()
-
-	// When this goroutine is done, either because test.F(t)
-	// returned normally or because a test failure triggered
-	// a call to runtime.Goexit, record the duration and send
-	// a signal saying that the test is done.
-	defer func() {
-		t.duration = time.Now().Sub(t.start)
-		t.signal <- t
-	}()
-
-	test.F(t)
-}
-
-// An internal function but exported because it is cross-package; part of the implementation
-// of go test.
-func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
-	flag.Parse()
-	parseCpuList()
-
-	before()
-	startAlarm()
-	testOk := RunTests(matchString, tests)
-	exampleOk := RunExamples(examples)
-	if !testOk || !exampleOk {
-		fmt.Println("FAIL")
-		os.Exit(1)
-	}
-	fmt.Println("PASS")
-	stopAlarm()
-	RunBenchmarks(matchString, benchmarks)
-	after()
-}
-
-func (t *T) report() {
-	tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
-	format := "--- %s: %s %s\n%s"
-	if t.failed {
-		fmt.Printf(format, "FAIL", t.name, tstr, t.output)
-	} else if *chatty {
-		fmt.Printf(format, "PASS", t.name, tstr, t.output)
-	}
-}
-
-func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
-	ok = true
-	if len(tests) == 0 {
-		fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
-		return
-	}
-	for _, procs := range cpuList {
-		runtime.GOMAXPROCS(procs)
-		// We build a new channel tree for each run of the loop.
-		// collector merges in one channel all the upstream signals from parallel tests.
-		// If all tests pump to the same channel, a bug can occur where a test
-		// kicks off a goroutine that Fails, yet the test still delivers a completion signal,
-		// which skews the counting.
-		var collector = make(chan any)
-
-		numParallel := 0
-		startParallel := make(chan bool)
-
-		for i := 0; i < len(tests); i++ {
-			matched, err := matchString(*match, tests[i].Name)
-			if err != nil {
-				fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
-				os.Exit(1)
-			}
-			if !matched {
-				continue
-			}
-			testName := tests[i].Name
-			if procs != 1 {
-				testName = fmt.Sprintf("%s-%d", tests[i].Name, procs)
-			}
-			t := &T{
-				common: common{
-					signal: make(chan any),
-				},
-				name:          testName,
-				startParallel: startParallel,
-			}
-			t.self = t
-			if *chatty {
-				fmt.Printf("=== RUN %s\n", t.name)
-			}
-			go tRunner(t, &tests[i])
-			out := (<-t.signal).(*T)
-			if out == nil { // Parallel run.
-				go func() {
-					collector <- <-t.signal
-				}()
-				numParallel++
-				continue
-			}
-			t.report()
-			ok = ok && !out.failed
-		}
-
-		running := 0
-		for numParallel+running > 0 {
-			if running < *parallel && numParallel > 0 {
-				startParallel <- true
-				running++
-				numParallel--
-				continue
-			}
-			t := (<-collector).(*T)
-			t.report()
-			ok = ok && !t.failed
-			running--
-		}
-	}
-	return
-}
-
-// before runs before all testing.
-func before() {
-	if *memProfileRate > 0 {
-		runtime.MemProfileRate = *memProfileRate
-	}
-	if *cpuProfile != "" {
-		f, err := os.Create(*cpuProfile)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "testing: %s", err)
-			return
-		}
-		if err := pprof.StartCPUProfile(f); err != nil {
-			fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err)
-			f.Close()
-			return
-		}
-		// Could save f so after can call f.Close; not worth the effort.
-	}
-
-}
-
-// after runs after all testing.
-func after() {
-	if *cpuProfile != "" {
-		pprof.StopCPUProfile() // flushes profile to disk
-	}
-	if *memProfile != "" {
-		f, err := os.Create(*memProfile)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "testing: %s", err)
-			return
-		}
-		if err = pprof.WriteHeapProfile(f); err != nil {
-			fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err)
-		}
-		f.Close()
-	}
-}
-
-var timer *time.Timer
-
-// startAlarm starts an alarm if requested.
-func startAlarm() {
-	if *timeout > 0 {
-		timer = time.AfterFunc(*timeout, alarm)
-	}
-}
-
-// stopAlarm turns off the alarm.
-func stopAlarm() {
-	if *timeout > 0 {
-		timer.Stop()
-	}
-}
-
-// alarm is called if the timeout expires.
-func alarm() {
-	panic("test timed out")
-}
-
-func parseCpuList() {
-	if len(*cpuListStr) == 0 {
-		cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
-	} else {
-		for _, val := range strings.Split(*cpuListStr, ",") {
-			cpu, err := strconv.Atoi(val)
-			if err != nil || cpu <= 0 {
-				fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val)
-				os.Exit(1)
-			}
-			cpuList = append(cpuList, cpu)
-		}
-	}
-}
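[Editorial aside, not part of the patch: the file above is a trimmed snapshot of the testing package kept only as go/doc test input. The benchmark pattern its package comment describes works the same way against the real standard-library testing package; a minimal sketch, assuming it lives in some *_test.go file of a hypothetical package "demo".]

	package demo_test

	import (
		"fmt"
		"testing"
	)

	// BenchmarkHello mirrors the sample from the deleted package comment:
	// the framework grows b.N until the loop runs long enough to time
	// reliably, then reports ns/op.
	func BenchmarkHello(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = fmt.Sprintf("hello")
		}
	}

Run it with "go test -bench=Hello" to see the ns/op report.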
diff --git a/internal/backport/go/format/benchmark_test.go b/internal/backport/go/format/benchmark_test.go
deleted file mode 100644
index e2a6491..0000000
--- a/internal/backport/go/format/benchmark_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides a simple framework to add benchmarks
-// based on generated input (source) files.
-
-package format_test
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/format"
-	"os"
-	"testing"
-)
-
-var debug = flag.Bool("debug", false, "write .src files containing formatting input; for debugging")
-
-// array1 generates an array literal with n elements of the form:
-//
-// var _ = [...]byte{
-//
-//	// 0
-//	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-//	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-//	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-//	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
-//	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
-//	// 40
-//	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
-//	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
-//	...
-func array1(buf *bytes.Buffer, n int) {
-	buf.WriteString("var _ = [...]byte{\n")
-	for i := 0; i < n; {
-		if i%10 == 0 {
-			fmt.Fprintf(buf, "\t// %d\n", i)
-		}
-		buf.WriteByte('\t')
-		for j := 0; j < 8; j++ {
-			fmt.Fprintf(buf, "0x%02x, ", byte(i))
-			i++
-		}
-		buf.WriteString("\n")
-	}
-	buf.WriteString("}\n")
-}
-
-var tests = []struct {
-	name string
-	gen  func(*bytes.Buffer, int)
-	n    int
-}{
-	{"array1", array1, 10000},
-	// add new test cases here as needed
-}
-
-func BenchmarkFormat(b *testing.B) {
-	var src bytes.Buffer
-	for _, t := range tests {
-		src.Reset()
-		src.WriteString("package p\n")
-		t.gen(&src, t.n)
-		data := src.Bytes()
-
-		if *debug {
-			filename := t.name + ".src"
-			err := os.WriteFile(filename, data, 0660)
-			if err != nil {
-				b.Fatalf("couldn't write %s: %v", filename, err)
-			}
-		}
-
-		b.Run(fmt.Sprintf("%s-%d", t.name, t.n), func(b *testing.B) {
-			b.SetBytes(int64(len(data)))
-			b.ReportAllocs()
-			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
-				var err error
-				sink, err = format.Source(data)
-				if err != nil {
-					b.Fatal(err)
-				}
-			}
-		})
-	}
-}
-
-var sink []byte
diff --git a/internal/backport/go/format/example_test.go b/internal/backport/go/format/example_test.go
deleted file mode 100644
index 5e7ceee..0000000
--- a/internal/backport/go/format/example_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package format_test
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-	"log"
-)
-
-func ExampleNode() {
-	const expr = "(6+2*3)/4"
-
-	// parser.ParseExpr parses the argument and returns the
-	// corresponding ast.Node.
-	node, err := parser.ParseExpr(expr)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Create a FileSet for node. Since the node does not come
-	// from a real source file, fset will be empty.
-	fset := token.NewFileSet()
-
-	var buf bytes.Buffer
-	err = format.Node(&buf, fset, node)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	fmt.Println(buf.String())
-
-	// Output: (6 + 2*3) / 4
-}
diff --git a/internal/backport/go/format/format.go b/internal/backport/go/format/format.go
deleted file mode 100644
index 7d1804c..0000000
--- a/internal/backport/go/format/format.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package format implements standard formatting of Go source.
-//
-// Note that formatting of Go source code changes over time, so tools relying on
-// consistent formatting should execute a specific version of the gofmt binary
-// instead of using this package. That way, the formatting will be stable, and
-// the tools won't need to be recompiled each time gofmt changes.
-//
-// For example, pre-submit checks that use this package directly would behave
-// differently depending on what Go version each developer uses, causing the
-// check to be inherently fragile.
-package format
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// Keep these in sync with cmd/gofmt/gofmt.go.
-const (
-	tabWidth    = 8
-	printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers
-
-	// printerNormalizeNumbers means to canonicalize number literal prefixes
-	// and exponents while printing. See https://golang.org/doc/go1.13#gofmt.
-	//
-	// This value is defined in go/printer specifically for go/format and cmd/gofmt.
-	printerNormalizeNumbers = 1 << 30
-)
-
-var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth}
-
-const parserMode = parser.ParseComments | parser.SkipObjectResolution
-
-// Node formats node in canonical gofmt style and writes the result to dst.
-//
-// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl,
-// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec,
-// or ast.Stmt. Node does not modify node. Imports are not sorted for
-// nodes representing partial source files (for instance, if the node is
-// not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File).
-//
-// The function may return early (before the entire result is written)
-// and return a formatting error, for instance due to an incorrect AST.
-func Node(dst io.Writer, fset *token.FileSet, node interface{}) error {
-	// Determine if we have a complete source file (file != nil).
-	var file *ast.File
-	var cnode *printer.CommentedNode
-	switch n := node.(type) {
-	case *ast.File:
-		file = n
-	case *printer.CommentedNode:
-		if f, ok := n.Node.(*ast.File); ok {
-			file = f
-			cnode = n
-		}
-	}
-
-	// Sort imports if necessary.
-	if file != nil && hasUnsortedImports(file) {
-		// Make a copy of the AST because ast.SortImports is destructive.
-		// TODO(gri) Do this more efficiently.
-		var buf bytes.Buffer
-		err := config.Fprint(&buf, fset, file)
-		if err != nil {
-			return err
-		}
-		file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode)
-		if err != nil {
-			// We should never get here. If we do, provide good diagnostic.
-			return fmt.Errorf("format.Node internal error (%s)", err)
-		}
-		ast.SortImports(fset, file)
-
-		// Use new file with sorted imports.
-		node = file
-		if cnode != nil {
-			node = &printer.CommentedNode{Node: file, Comments: cnode.Comments}
-		}
-	}
-
-	return config.Fprint(dst, fset, node)
-}
-
-// Source formats src in canonical gofmt style and returns the result
-// or an (I/O or syntax) error. src is expected to be a syntactically
-// correct Go source file, or a list of Go declarations or statements.
-//
-// If src is a partial source file, the leading and trailing space of src
-// is applied to the result (such that it has the same leading and trailing
-// space as src), and the result is indented by the same amount as the first
-// line of src containing code. Imports are not sorted for partial source files.
-func Source(src []byte) ([]byte, error) {
-	fset := token.NewFileSet()
-	file, sourceAdj, indentAdj, err := parse(fset, "", src, true)
-	if err != nil {
-		return nil, err
-	}
-
-	if sourceAdj == nil {
-		// Complete source file.
-		// TODO(gri) consider doing this always.
-		ast.SortImports(fset, file)
-	}
-
-	return format(fset, file, sourceAdj, indentAdj, src, config)
-}
-
-func hasUnsortedImports(file *ast.File) bool {
-	for _, d := range file.Decls {
-		d, ok := d.(*ast.GenDecl)
-		if !ok || d.Tok != token.IMPORT {
-			// Not an import declaration, so we're done.
-			// Imports are always first.
-			return false
-		}
-		if d.Lparen.IsValid() {
-			// For now assume all grouped imports are unsorted.
-			// TODO(gri) Should check if they are sorted already.
-			return true
-		}
-		// Ungrouped imports are sorted by default.
-	}
-	return false
-}
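[Editorial aside, not part of the patch: with this backported copy deleted, callers switch to the standard library's go/format. A minimal, hypothetical sketch of the Source API as it exists in the standard library.]

	package main

	import (
		"fmt"
		"go/format"
	)

	func main() {
		// Source expects a syntactically valid file (or a list of
		// declarations/statements) and returns it in canonical gofmt style.
		src := []byte("package p\n\nfunc  add(a ,b int)int{ return a+b }\n")
		out, err := format.Source(src)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}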
diff --git a/internal/backport/go/format/format_test.go b/internal/backport/go/format/format_test.go
deleted file mode 100644
index 59730a9..0000000
--- a/internal/backport/go/format/format_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package format
-
-import (
-	"bytes"
-	"os"
-	"strings"
-	"testing"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-const testfile = "format_test.go"
-
-func diff(t *testing.T, dst, src []byte) {
-	line := 1
-	offs := 0 // line offset
-	for i := 0; i < len(dst) && i < len(src); i++ {
-		d := dst[i]
-		s := src[i]
-		if d != s {
-			t.Errorf("dst:%d: %s\n", line, dst[offs:i+1])
-			t.Errorf("src:%d: %s\n", line, src[offs:i+1])
-			return
-		}
-		if s == '\n' {
-			line++
-			offs = i + 1
-		}
-	}
-	if len(dst) != len(src) {
-		t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src)
-	}
-}
-
-func TestNode(t *testing.T) {
-	src, err := os.ReadFile(testfile)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	fset := token.NewFileSet()
-	file, err := parser.ParseFile(fset, testfile, src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var buf bytes.Buffer
-
-	if err = Node(&buf, fset, file); err != nil {
-		t.Fatal("Node failed:", err)
-	}
-
-	diff(t, buf.Bytes(), src)
-}
-
-// Node is documented to not modify the AST.
-// Test that it is so even when numbers are normalized.
-func TestNodeNoModify(t *testing.T) {
-	const (
-		src    = "package p\n\nconst _ = 0000000123i\n"
-		golden = "package p\n\nconst _ = 123i\n"
-	)
-
-	fset := token.NewFileSet()
-	file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Capture original address and value of a BasicLit node
-	// which will undergo formatting changes during printing.
-	wantLit := file.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0].(*ast.BasicLit)
-	wantVal := wantLit.Value
-
-	var buf bytes.Buffer
-	if err = Node(&buf, fset, file); err != nil {
-		t.Fatal("Node failed:", err)
-	}
-	diff(t, buf.Bytes(), []byte(golden))
-
-	// Check if anything changed after Node returned.
-	gotLit := file.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0].(*ast.BasicLit)
-	gotVal := gotLit.Value
-
-	if gotLit != wantLit {
-		t.Errorf("got *ast.BasicLit address %p, want %p", gotLit, wantLit)
-	}
-	if gotVal != wantVal {
-		t.Errorf("got *ast.BasicLit value %q, want %q", gotVal, wantVal)
-	}
-}
-
-func TestSource(t *testing.T) {
-	src, err := os.ReadFile(testfile)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	res, err := Source(src)
-	if err != nil {
-		t.Fatal("Source failed:", err)
-	}
-
-	diff(t, res, src)
-}
-
-// Test cases that are expected to fail are marked by the prefix "ERROR".
-// The formatted result must look the same as the input for successful tests.
-var tests = []string{
-	// declaration lists
-	`import "golang.org/x/website/internal/backport/go/format"`,
-	"var x int",
-	"var x int\n\ntype T struct{}",
-
-	// statement lists
-	"x := 0",
-	"f(a, b, c)\nvar x int = f(1, 2, 3)",
-
-	// indentation, leading and trailing space
-	"\tx := 0\n\tgo f()",
-	"\tx := 0\n\tgo f()\n\n\n",
-	"\n\t\t\n\n\tx := 0\n\tgo f()\n\n\n",
-	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tgo f()\n\n\n",
-	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n",     // no indentation added inside raw strings
-	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\n\t\tfoo\n`\n\n\n", // no indentation removed inside raw strings
-
-	// comments
-	"/* Comment */",
-	"\t/* Comment */ ",
-	"\n/* Comment */ ",
-	"i := 5 /* Comment */",         // issue #5551
-	"\ta()\n//line :1",             // issue #11276
-	"\t//xxx\n\ta()\n//line :2",    // issue #11276
-	"\ta() //line :1\n\tb()\n",     // issue #11276
-	"x := 0\n//line :1\n//line :2", // issue #11276
-
-	// whitespace
-	"",     // issue #11275
-	" ",    // issue #11275
-	"\t",   // issue #11275
-	"\t\t", // issue #11275
-	"\n",   // issue #11275
-	"\n\n", // issue #11275
-	"\t\n", // issue #11275
-
-	// erroneous programs
-	"ERROR1 + 2 +",
-	"ERRORx :=  0",
-
-	// build comments
-	"// copyright\n\n//go:build x\n\npackage p\n",
-	"// copyright\n\n//go:build x\n// +build x\n\npackage p\n",
-}
-
-func String(s string) (string, error) {
-	res, err := Source([]byte(s))
-	if err != nil {
-		return "", err
-	}
-	return string(res), nil
-}
-
-func TestPartial(t *testing.T) {
-	for _, src := range tests {
-		if strings.HasPrefix(src, "ERROR") {
-			// test expected to fail
-			src = src[5:] // remove ERROR prefix
-			res, err := String(src)
-			if err == nil && res == src {
-				t.Errorf("formatting succeeded but was expected to fail:\n%q", src)
-			}
-		} else {
-			// test expected to succeed
-			res, err := String(src)
-			if err != nil {
-				t.Errorf("formatting failed (%s):\n%q", err, src)
-			} else if res != src {
-				t.Errorf("formatting incorrect:\nsource: %q\nresult: %q", src, res)
-			}
-		}
-	}
-}
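
The partial-source behavior exercised by TestPartial above carries over to the standard library; a sketch of what it preserves (assuming stdlib go/format behaves as documented here):

	package main

	import (
		"fmt"
		"go/format"
	)

	func main() {
		// A statement fragment keeps its leading indentation and
		// trailing space; imports are never sorted for fragments.
		out, err := format.Source([]byte("\tx :=0\n\tgo f()"))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", out) // "\tx := 0\n\tgo f()"
	}
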
diff --git a/internal/backport/go/format/internal.go b/internal/backport/go/format/internal.go
deleted file mode 100644
index c83432d..0000000
--- a/internal/backport/go/format/internal.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(gri): This file and the file src/cmd/gofmt/internal.go are
-// the same (but for this comment and the package name). Do not modify
-// one without the other. Determine if we can factor out functionality
-// in a public API. See also #11844 for context.
-
-package format
-
-import (
-	"bytes"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
-	"strings"
-)
-
-// parse parses src, which was read from the named file,
-// as a Go source file, declaration, or statement list.
-func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) (
-	file *ast.File,
-	sourceAdj func(src []byte, indent int) []byte,
-	indentAdj int,
-	err error,
-) {
-	// Try as whole source file.
-	file, err = parser.ParseFile(fset, filename, src, parserMode)
-	// If there's no error, return. If the error is that the source file didn't begin with a
-	// package line and source fragments are ok, fall through to
-	// try as a source fragment. Stop and return on any other error.
-	if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") {
-		return
-	}
-
-	// If this is a declaration list, make it a source file
-	// by inserting a package clause.
-	// Insert using a ';', not a newline, so that the line numbers
-	// in psrc match the ones in src.
-	psrc := append([]byte("package p;"), src...)
-	file, err = parser.ParseFile(fset, filename, psrc, parserMode)
-	if err == nil {
-		sourceAdj = func(src []byte, indent int) []byte {
-			// Remove the package clause.
-			// Gofmt has turned the ';' into a '\n'.
-			src = src[indent+len("package p\n"):]
-			return bytes.TrimSpace(src)
-		}
-		return
-	}
-	// If the error is that the source file didn't begin with a
-	// declaration, fall through to try as a statement list.
-	// Stop and return on any other error.
-	if !strings.Contains(err.Error(), "expected declaration") {
-		return
-	}
-
-	// If this is a statement list, make it a source file
-	// by inserting a package clause and turning the list
-	// into a function body. This handles expressions too.
-	// Insert using a ';', not a newline, so that the line numbers
-	// in fsrc match the ones in src. Add an extra '\n' before the '}'
-	// to make sure comments are flushed before the '}'.
-	fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}')
-	file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
-	if err == nil {
-		sourceAdj = func(src []byte, indent int) []byte {
-			// Cap adjusted indent to zero.
-			if indent < 0 {
-				indent = 0
-			}
-			// Remove the wrapping.
-			// Gofmt has turned the "; " into a "\n\n".
-			// There will be two non-blank lines with indent, hence 2*indent.
-			src = src[2*indent+len("package p\n\nfunc _() {"):]
-			// Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway
-			src = src[:len(src)-len("}\n")]
-			return bytes.TrimSpace(src)
-		}
-		// Gofmt has also indented the function body one level.
-		// Adjust that with indentAdj.
-		indentAdj = -1
-	}
-
-	// Succeeded, or out of options.
-	return
-}
-
-// format formats the given package file originally obtained from src
-// and adjusts the result based on the original source via sourceAdj
-// and indentAdj.
-func format(
-	fset *token.FileSet,
-	file *ast.File,
-	sourceAdj func(src []byte, indent int) []byte,
-	indentAdj int,
-	src []byte,
-	cfg printer.Config,
-) ([]byte, error) {
-	if sourceAdj == nil {
-		// Complete source file.
-		var buf bytes.Buffer
-		err := cfg.Fprint(&buf, fset, file)
-		if err != nil {
-			return nil, err
-		}
-		return buf.Bytes(), nil
-	}
-
-	// Partial source file.
-	// Determine and prepend leading space.
-	i, j := 0, 0
-	for j < len(src) && isSpace(src[j]) {
-		if src[j] == '\n' {
-			i = j + 1 // byte offset of last line in leading space
-		}
-		j++
-	}
-	var res []byte
-	res = append(res, src[:i]...)
-
-	// Determine and prepend indentation of first code line.
-	// Spaces are ignored unless there are no tabs,
-	// in which case spaces count as one tab.
-	indent := 0
-	hasSpace := false
-	for _, b := range src[i:j] {
-		switch b {
-		case ' ':
-			hasSpace = true
-		case '\t':
-			indent++
-		}
-	}
-	if indent == 0 && hasSpace {
-		indent = 1
-	}
-	for i := 0; i < indent; i++ {
-		res = append(res, '\t')
-	}
-
-	// Format the source.
-	// Write it without any leading and trailing space.
-	cfg.Indent = indent + indentAdj
-	var buf bytes.Buffer
-	err := cfg.Fprint(&buf, fset, file)
-	if err != nil {
-		return nil, err
-	}
-	out := sourceAdj(buf.Bytes(), cfg.Indent)
-
-	// If the adjusted output is empty, the source
-	// was empty but (possibly) for white space.
-	// The result is the incoming source.
-	if len(out) == 0 {
-		return src, nil
-	}
-
-	// Otherwise, append output to leading space.
-	res = append(res, out...)
-
-	// Determine and append trailing space.
-	i = len(src)
-	for i > 0 && isSpace(src[i-1]) {
-		i--
-	}
-	return append(res, src[i:]...), nil
-}
-
-// isSpace reports whether the byte is a space character.
-// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'.
-func isSpace(b byte) bool {
-	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
-}
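
The wrapping trick used by parse above (a package clause plus a synthetic function body, joined with ';' so line numbers stay aligned) can be illustrated against the standard go/parser; a hypothetical sketch:

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
	)

	func main() {
		stmts := "x := 0; go f()"
		// Make a bare statement list parseable by wrapping it.
		src := "package p; func _() {" + stmts + "\n}"
		fset := token.NewFileSet()
		file, err := parser.ParseFile(fset, "fragment.go", src, 0)
		if err != nil {
			panic(err)
		}
		fmt.Println("decls parsed:", len(file.Decls)) // decls parsed: 1
	}
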
diff --git a/internal/backport/go/internal/typeparams/common.go b/internal/backport/go/internal/typeparams/common.go
deleted file mode 100644
index 9b82e60..0000000
--- a/internal/backport/go/internal/typeparams/common.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeparams provides functions to work with type parameter data
-// stored in the AST, while these AST changes are guarded by a build
-// constraint.
-package typeparams
-
-// 'Hidden' parser modes to control the parsing of type-parameter related
-// features.
-const (
-	DisallowTypeSets = 1 << 29 // Disallow eliding 'interface' in constraint type sets.
-	DisallowParsing  = 1 << 30 // Disallow type parameters entirely.
-)
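
These hidden bits sit well above the exported parser mode flags, so they can be OR'ed into a Mode value without colliding with them; a small sketch of the bit layout, comparing against the stdlib go/parser flags (constant values mirrored here as assumptions):

	package main

	import (
		"fmt"
		"go/parser"
	)

	const (
		disallowTypeSets = 1 << 29 // mirrors DisallowTypeSets above
		disallowParsing  = 1 << 30 // mirrors DisallowParsing above
	)

	func main() {
		mode := parser.DeclarationErrors | parser.AllErrors | disallowParsing
		fmt.Println(mode&disallowParsing != 0)          // true
		fmt.Println(mode&parser.DeclarationErrors != 0) // true
	}
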
diff --git a/internal/backport/go/internal/typeparams/typeparams.go b/internal/backport/go/internal/typeparams/typeparams.go
deleted file mode 100644
index 2f7a5ce..0000000
--- a/internal/backport/go/internal/typeparams/typeparams.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeparams
-
-import (
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func PackIndexExpr(x ast.Expr, lbrack token.Pos, exprs []ast.Expr, rbrack token.Pos) ast.Expr {
-	switch len(exprs) {
-	case 0:
-		panic("internal error: PackIndexExpr with empty expr slice")
-	case 1:
-		return &ast.IndexExpr{
-			X:      x,
-			Lbrack: lbrack,
-			Index:  exprs[0],
-			Rbrack: rbrack,
-		}
-	default:
-		return &ast.IndexListExpr{
-			X:       x,
-			Lbrack:  lbrack,
-			Indices: exprs,
-			Rbrack:  rbrack,
-		}
-	}
-}
-
-// IndexExpr wraps an ast.IndexExpr or ast.IndexListExpr.
-//
-// Orig holds the original ast.Expr from which this IndexExpr was derived.
-type IndexExpr struct {
-	Orig ast.Expr // the wrapped expr, which may be distinct from the IndexListExpr below.
-	*ast.IndexListExpr
-}
-
-func UnpackIndexExpr(n ast.Node) *IndexExpr {
-	switch e := n.(type) {
-	case *ast.IndexExpr:
-		return &IndexExpr{e, &ast.IndexListExpr{
-			X:       e.X,
-			Lbrack:  e.Lbrack,
-			Indices: []ast.Expr{e.Index},
-			Rbrack:  e.Rbrack,
-		}}
-	case *ast.IndexListExpr:
-		return &IndexExpr{e, e}
-	}
-	return nil
-}
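
UnpackIndexExpr normalizes single and multiple index arguments into one shape; the same idea written against the standard go/ast types (hypothetical helper, not this package's API):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	// indices returns the index expressions of a[X] or a[X1, X2, ...].
	func indices(e ast.Expr) []ast.Expr {
		switch e := e.(type) {
		case *ast.IndexExpr:
			return []ast.Expr{e.Index}
		case *ast.IndexListExpr:
			return e.Indices
		}
		return nil
	}

	func main() {
		for _, src := range []string{"a[T]", "a[T1, T2]"} {
			expr, err := parser.ParseExpr(src)
			if err != nil {
				panic(err)
			}
			fmt.Println(src, "->", len(indices(expr)), "index expr(s)")
		}
	}
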
diff --git a/internal/backport/go/parser/error_test.go b/internal/backport/go/parser/error_test.go
deleted file mode 100644
index 38658c2..0000000
--- a/internal/backport/go/parser/error_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a parser test harness. The files in the testdata
-// directory are parsed and the errors reported are compared against the
-// error messages expected in the test files. The test files must end in
-// .src rather than .go so that they are not disturbed by gofmt runs.
-//
-// Expected errors are indicated in the test files by putting a comment
-// of the form /* ERROR "rx" */ immediately following an offending token.
-// The harness will verify that an error matching the regular expression
-// rx is reported at that source position.
-//
-// For instance, the following test file indicates that a "not declared"
-// error should be reported for the undeclared variable x:
-//
-//	package p
-//	func f() {
-//		_ = x /* ERROR "not declared" */ + 1
-//	}
-
-package parser
-
-import (
-	"flag"
-	"golang.org/x/website/internal/backport/go/internal/typeparams"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-	"testing"
-)
-
-var traceErrs = flag.Bool("trace_errs", false, "whether to enable tracing for error tests")
-
-const testdata = "testdata"
-
-// getFile assumes that each filename occurs at most once
-func getFile(fset *token.FileSet, filename string) (file *token.File) {
-	fset.Iterate(func(f *token.File) bool {
-		if f.Name() == filename {
-			if file != nil {
-				panic(filename + " used multiple times")
-			}
-			file = f
-		}
-		return true
-	})
-	return file
-}
-
-func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
-	if f := getFile(fset, filename); f != nil {
-		return f.Pos(offset)
-	}
-	return token.NoPos
-}
-
-// ERROR comments must be of the form /* ERROR "rx" */ and rx is
-// a regular expression that matches the expected error message.
-// The special form /* ERROR HERE "rx" */ must be used for error
-// messages that appear immediately after a token, rather than at
-// a token's position.
-var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)
-
-// expectedErrors collects the regular expressions of ERROR comments found
-// in files and returns them as a map of error positions to error messages.
-func expectedErrors(fset *token.FileSet, filename string, src []byte) map[token.Pos]string {
-	errors := make(map[token.Pos]string)
-
-	var s scanner.Scanner
-	// file was parsed already - do not add it again to the file
-	// set otherwise the position information returned here will
-	// not match the position information collected by the parser
-	s.Init(getFile(fset, filename), src, nil, scanner.ScanComments)
-	var prev token.Pos // position of last non-comment, non-semicolon token
-	var here token.Pos // position immediately after the token at position prev
-
-	for {
-		pos, tok, lit := s.Scan()
-		switch tok {
-		case token.EOF:
-			return errors
-		case token.COMMENT:
-			s := errRx.FindStringSubmatch(lit)
-			if len(s) == 3 {
-				pos := prev
-				if s[1] == "HERE" {
-					pos = here
-				}
-				errors[pos] = s[2]
-			}
-		case token.SEMICOLON:
-			// don't use the position of auto-inserted (invisible) semicolons
-			if lit != ";" {
-				break
-			}
-			fallthrough
-		default:
-			prev = pos
-			var l int // token length
-			if tok.IsLiteral() {
-				l = len(lit)
-			} else {
-				l = len(tok.String())
-			}
-			here = prev + token.Pos(l)
-		}
-	}
-}
-
-// compareErrors compares the map of expected error messages with the list
-// of found errors and reports discrepancies.
-func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found scanner.ErrorList) {
-	t.Helper()
-	for _, error := range found {
-		// error.Pos is a token.Position, but we want
-		// a token.Pos so we can do a map lookup
-		pos := getPos(fset, error.Pos.Filename, error.Pos.Offset)
-		if msg, found := expected[pos]; found {
-			// we expect a message at pos; check if it matches
-			rx, err := regexp.Compile(msg)
-			if err != nil {
-				t.Errorf("%s: %v", error.Pos, err)
-				continue
-			}
-			if match := rx.MatchString(error.Msg); !match {
-				t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
-				continue
-			}
-			// we have a match - eliminate this error
-			delete(expected, pos)
-		} else {
-			// To keep in mind when analyzing failed test output:
-			// If the same error position occurs multiple times in errors,
-			// this message will be triggered (because the first error at
-			// the position removes this position from the expected errors).
-			t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
-		}
-	}
-
-	// there should be no expected errors left
-	if len(expected) > 0 {
-		t.Errorf("%d errors not reported:", len(expected))
-		for pos, msg := range expected {
-			t.Errorf("%s: %s\n", fset.Position(pos), msg)
-		}
-	}
-}
-
-func checkErrors(t *testing.T, filename string, input interface{}, mode Mode, expectErrors bool) {
-	t.Helper()
-	src, err := readSource(filename, input)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-
-	fset := token.NewFileSet()
-	_, err = ParseFile(fset, filename, src, mode)
-	found, ok := err.(scanner.ErrorList)
-	if err != nil && !ok {
-		t.Error(err)
-		return
-	}
-	found.RemoveMultiples()
-
-	expected := map[token.Pos]string{}
-	if expectErrors {
-		// we are expecting the following errors
-		// (collect these after parsing a file so that it is found in the file set)
-		expected = expectedErrors(fset, filename, src)
-	}
-
-	// verify errors returned by the parser
-	compareErrors(t, fset, expected, found)
-}
-
-func TestErrors(t *testing.T) {
-	list, err := os.ReadDir(testdata)
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, d := range list {
-		name := d.Name()
-		t.Run(name, func(t *testing.T) {
-			if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
-				mode := DeclarationErrors | AllErrors
-				if !strings.HasSuffix(name, ".go2") {
-					mode |= typeparams.DisallowParsing
-				}
-				if *traceErrs {
-					mode |= Trace
-				}
-				checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
-			}
-		})
-	}
-}
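
The /* ERROR "rx" */ convention described at the top of this test file can be checked with the same regular expression; a standalone sketch (hypothetical input):

	package main

	import (
		"fmt"
		"regexp"
	)

	var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)

	func main() {
		comment := `/* ERROR "not declared" */`
		if m := errRx.FindStringSubmatch(comment); m != nil {
			// m[2] is the expected error pattern; m[1] marks HERE placement.
			fmt.Printf("expects error matching %q (HERE=%v)\n", m[2], m[1] == "HERE")
		}
	}
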
diff --git a/internal/backport/go/parser/example_test.go b/internal/backport/go/parser/example_test.go
deleted file mode 100644
index 35fdd33..0000000
--- a/internal/backport/go/parser/example_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parser_test
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func ExampleParseFile() {
-	fset := token.NewFileSet() // positions are relative to fset
-
-	src := `package foo
-
-import (
-	"fmt"
-	"time"
-)
-
-func bar() {
-	fmt.Println(time.Now())
-}`
-
-	// Parse src but stop after processing the imports.
-	f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	// Print the imports from the file's AST.
-	for _, s := range f.Imports {
-		fmt.Println(s.Path.Value)
-	}
-
-	// output:
-	//
-	// "fmt"
-	// "time"
-}
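
The ImportsOnly example above stops early; with parser.ParseComments, the lead comments described in the parser code deleted further below become doc comments. A stdlib sketch (hypothetical source):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p\n\n// f does nothing.\nfunc f() {}\n"
		fset := token.NewFileSet()
		file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		doc := file.Decls[0].(*ast.FuncDecl).Doc
		fmt.Print(doc.Text()) // f does nothing.
	}
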
diff --git a/internal/backport/go/parser/interface.go b/internal/backport/go/parser/interface.go
deleted file mode 100644
index e253539..0000000
--- a/internal/backport/go/parser/interface.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains the exported entry points for invoking the parser.
-
-package parser
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// If src != nil, readSource converts src to a []byte if possible;
-// otherwise it returns an error. If src == nil, readSource returns
-// the result of reading the file specified by filename.
-func readSource(filename string, src interface{}) ([]byte, error) {
-	if src != nil {
-		switch s := src.(type) {
-		case string:
-			return []byte(s), nil
-		case []byte:
-			return s, nil
-		case *bytes.Buffer:
-			// is io.Reader, but src is already available in []byte form
-			if s != nil {
-				return s.Bytes(), nil
-			}
-		case io.Reader:
-			return io.ReadAll(s)
-		}
-		return nil, errors.New("invalid source")
-	}
-	return os.ReadFile(filename)
-}
-
-// A Mode value is a set of flags (or 0).
-// They control the amount of source code parsed and other optional
-// parser functionality.
-type Mode uint
-
-const (
-	PackageClauseOnly    Mode             = 1 << iota // stop parsing after package clause
-	ImportsOnly                                       // stop parsing after import declarations
-	ParseComments                                     // parse comments and add them to AST
-	Trace                                             // print a trace of parsed productions
-	DeclarationErrors                                 // report declaration errors
-	SpuriousErrors                                    // same as AllErrors, for backward-compatibility
-	SkipObjectResolution                              // don't resolve identifiers to objects - see ParseFile
-	AllErrors            = SpuriousErrors             // report all errors (not just the first 10 on different lines)
-)
-
-// ParseFile parses the source code of a single Go source file and returns
-// the corresponding ast.File node. The source code may be provided via
-// the filename of the source file, or via the src parameter.
-//
-// If src != nil, ParseFile parses the source from src and the filename is
-// only used when recording position information. The type of the argument
-// for the src parameter must be string, []byte, or io.Reader.
-// If src == nil, ParseFile parses the file specified by filename.
-//
-// The mode parameter controls the amount of source text parsed and other
-// optional parser functionality. If the SkipObjectResolution mode bit is set,
-// the object resolution phase of parsing will be skipped, causing File.Scope,
-// File.Unresolved, and all Ident.Obj fields to be nil.
-//
-// Position information is recorded in the file set fset, which must not be
-// nil.
-//
-// If the source couldn't be read, the returned AST is nil and the error
-// indicates the specific failure. If the source was read but syntax
-// errors were found, the result is a partial AST (with ast.Bad* nodes
-// representing the fragments of erroneous source code). Multiple errors
-// are returned via a scanner.ErrorList which is sorted by source position.
-func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (f *ast.File, err error) {
-	if fset == nil {
-		panic("parser.ParseFile: no token.FileSet provided (fset == nil)")
-	}
-
-	// get source
-	text, err := readSource(filename, src)
-	if err != nil {
-		return nil, err
-	}
-
-	var p parser
-	defer func() {
-		if e := recover(); e != nil {
-			// resume same panic if it's not a bailout
-			bail, ok := e.(bailout)
-			if !ok {
-				panic(e)
-			} else if bail.msg != "" {
-				p.errors.Add(p.file.Position(bail.pos), bail.msg)
-			}
-		}
-
-		// set result values
-		if f == nil {
-			// source is not a valid Go source file - satisfy
-			// ParseFile API and return a valid (but) empty
-			// *ast.File
-			f = &ast.File{
-				Name:  new(ast.Ident),
-				Scope: ast.NewScope(nil),
-			}
-		}
-
-		p.errors.Sort()
-		err = p.errors.Err()
-	}()
-
-	// parse source
-	p.init(fset, filename, text, mode)
-	f = p.parseFile()
-
-	return
-}
-
-// ParseDir calls ParseFile for all files with names ending in ".go" in the
-// directory specified by path and returns a map of package name -> package
-// AST with all the packages found.
-//
-// If filter != nil, only the files with fs.FileInfo entries passing through
-// the filter (and ending in ".go") are considered. The mode bits are passed
-// to ParseFile unchanged. Position information is recorded in fset, which
-// must not be nil.
-//
-// If the directory couldn't be read, a nil map and the respective error are
-// returned. If a parse error occurred, a non-nil but incomplete map and the
-// first error encountered are returned.
-func ParseDir(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
-	list, err := os.ReadDir(path)
-	if err != nil {
-		return nil, err
-	}
-
-	pkgs = make(map[string]*ast.Package)
-	for _, d := range list {
-		if d.IsDir() || !strings.HasSuffix(d.Name(), ".go") {
-			continue
-		}
-		if filter != nil {
-			info, err := d.Info()
-			if err != nil {
-				return nil, err
-			}
-			if !filter(info) {
-				continue
-			}
-		}
-		filename := filepath.Join(path, d.Name())
-		if src, err := ParseFile(fset, filename, nil, mode); err == nil {
-			name := src.Name.Name
-			pkg, found := pkgs[name]
-			if !found {
-				pkg = &ast.Package{
-					Name:  name,
-					Files: make(map[string]*ast.File),
-				}
-				pkgs[name] = pkg
-			}
-			pkg.Files[filename] = src
-		} else if first == nil {
-			first = err
-		}
-	}
-
-	return
-}
-
-// ParseExprFrom is a convenience function for parsing an expression.
-// The arguments have the same meaning as for ParseFile, but the source must
-// be a valid Go (type or value) expression. Specifically, fset must not
-// be nil.
-//
-// If the source couldn't be read, the returned AST is nil and the error
-// indicates the specific failure. If the source was read but syntax
-// errors were found, the result is a partial AST (with ast.Bad* nodes
-// representing the fragments of erroneous source code). Multiple errors
-// are returned via a scanner.ErrorList which is sorted by source position.
-func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode Mode) (expr ast.Expr, err error) {
-	if fset == nil {
-		panic("parser.ParseExprFrom: no token.FileSet provided (fset == nil)")
-	}
-
-	// get source
-	text, err := readSource(filename, src)
-	if err != nil {
-		return nil, err
-	}
-
-	var p parser
-	defer func() {
-		if e := recover(); e != nil {
-			// resume same panic if it's not a bailout
-			bail, ok := e.(bailout)
-			if !ok {
-				panic(e)
-			} else if bail.msg != "" {
-				p.errors.Add(p.file.Position(bail.pos), bail.msg)
-			}
-		}
-		p.errors.Sort()
-		err = p.errors.Err()
-	}()
-
-	// parse expr
-	p.init(fset, filename, text, mode)
-	expr = p.parseRhsOrType()
-
-	// If a semicolon was inserted, consume it;
-	// report an error if there's more tokens.
-	if p.tok == token.SEMICOLON && p.lit == "\n" {
-		p.next()
-	}
-	p.expect(token.EOF)
-
-	return
-}
-
-// ParseExpr is a convenience function for obtaining the AST of an expression x.
-// The position information recorded in the AST is undefined. The filename used
-// in error messages is the empty string.
-//
-// If syntax errors were found, the result is a partial AST (with ast.Bad* nodes
-// representing the fragments of erroneous source code). Multiple errors are
-// returned via a scanner.ErrorList which is sorted by source position.
-func ParseExpr(x string) (ast.Expr, error) {
-	return ParseExprFrom(token.NewFileSet(), "", []byte(x), 0)
-}
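
ParseExpr is the smallest entry point of the set documented above; a sketch against the standard go/parser, which this CL switches callers to:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		expr, err := parser.ParseExpr("f(1, 2*x)")
		if err != nil {
			panic(err)
		}
		call := expr.(*ast.CallExpr)
		fmt.Println("call with", len(call.Args), "arguments") // call with 2 arguments
	}
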
diff --git a/internal/backport/go/parser/parser.go b/internal/backport/go/parser/parser.go
deleted file mode 100644
index ea33adf..0000000
--- a/internal/backport/go/parser/parser.go
+++ /dev/null
@@ -1,2936 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parser implements a parser for Go source files. Input may be
-// provided in a variety of forms (see the various Parse* functions); the
-// output is an abstract syntax tree (AST) representing the Go source. The
-// parser is invoked through one of the Parse* functions.
-//
-// The parser accepts a larger language than is syntactically permitted by
-// the Go spec, for simplicity, and for improved robustness in the presence
-// of syntax errors. For instance, in method declarations, the receiver is
-// treated like an ordinary parameter list and thus may contain multiple
-// entries where the spec permits exactly one. Consequently, the corresponding
-// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
-package parser
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"unicode"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/internal/typeparams"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// The parser structure holds the parser's internal state.
-type parser struct {
-	file    *token.File
-	errors  scanner.ErrorList
-	scanner scanner.Scanner
-
-	// Tracing/debugging
-	mode   Mode // parsing mode
-	trace  bool // == (mode&Trace != 0)
-	indent int  // indentation used for tracing output
-
-	// Comments
-	comments    []*ast.CommentGroup
-	leadComment *ast.CommentGroup // last lead comment
-	lineComment *ast.CommentGroup // last line comment
-
-	// Next token
-	pos token.Pos   // token position
-	tok token.Token // one token look-ahead
-	lit string      // token literal
-
-	// Error recovery
-	// (used to limit the number of calls to parser.advance
-	// w/o making scanning progress - avoids potential endless
-	// loops across multiple parser functions during error recovery)
-	syncPos token.Pos // last synchronization position
-	syncCnt int       // number of parser.advance calls without progress
-
-	// Non-syntactic parser control
-	exprLev int  // < 0: in control clause, >= 0: in expression
-	inRhs   bool // if set, the parser is parsing a rhs expression
-
-	imports []*ast.ImportSpec // list of imports
-
-	// nestLev is used to track and limit the recursion depth
-	// during parsing.
-	nestLev int
-}
-
-func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
-	p.file = fset.AddFile(filename, -1, len(src))
-	var m scanner.Mode
-	if mode&ParseComments != 0 {
-		m = scanner.ScanComments
-	}
-	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
-	p.scanner.Init(p.file, src, eh, m)
-
-	p.mode = mode
-	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
-	p.next()
-}
-
-func (p *parser) allowGenerics() bool { return p.mode&typeparams.DisallowParsing == 0 }
-func (p *parser) allowTypeSets() bool { return p.mode&typeparams.DisallowTypeSets == 0 }
-
-// ----------------------------------------------------------------------------
-// Parsing support
-
-func (p *parser) printTrace(a ...interface{}) {
-	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
-	const n = len(dots)
-	pos := p.file.Position(p.pos)
-	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
-	i := 2 * p.indent
-	for i > n {
-		fmt.Print(dots)
-		i -= n
-	}
-	// i <= n
-	fmt.Print(dots[0:i])
-	fmt.Println(a...)
-}
-
-func trace(p *parser, msg string) *parser {
-	p.printTrace(msg, "(")
-	p.indent++
-	return p
-}
-
-// Usage pattern: defer un(trace(p, "..."))
-func un(p *parser) {
-	p.indent--
-	p.printTrace(")")
-}
-
-// maxNestLev is the deepest we're willing to recurse during parsing
-const maxNestLev int = 1e5
-
-func incNestLev(p *parser) *parser {
-	p.nestLev++
-	if p.nestLev > maxNestLev {
-		p.error(p.pos, "exceeded max nesting depth")
-		panic(bailout{})
-	}
-	return p
-}
-
-// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
-// It is used along with incNestLev in a similar fashion to how un and trace are used.
-func decNestLev(p *parser) {
-	p.nestLev--
-}
-
-// Advance to the next token.
-func (p *parser) next0() {
-	// Because of one-token look-ahead, print the previous token
-	// when tracing as it provides a more readable output. The
-	// very first token (!p.pos.IsValid()) is not initialized
-	// (it is token.ILLEGAL), so don't print it.
-	if p.trace && p.pos.IsValid() {
-		s := p.tok.String()
-		switch {
-		case p.tok.IsLiteral():
-			p.printTrace(s, p.lit)
-		case p.tok.IsOperator(), p.tok.IsKeyword():
-			p.printTrace("\"" + s + "\"")
-		default:
-			p.printTrace(s)
-		}
-	}
-
-	p.pos, p.tok, p.lit = p.scanner.Scan()
-}
-
-// Consume a comment and return it and the line on which it ends.
-func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
-	// /*-style comments may end on a different line than where they start.
-	// Scan the comment for '\n' chars and adjust endline accordingly.
-	endline = p.file.Line(p.pos)
-	if p.lit[1] == '*' {
-		// don't use range here - no need to decode Unicode code points
-		for i := 0; i < len(p.lit); i++ {
-			if p.lit[i] == '\n' {
-				endline++
-			}
-		}
-	}
-
-	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
-	p.next0()
-
-	return
-}
-
-// Consume a group of adjacent comments, add it to the parser's
-// comments list, and return it together with the line at which
-// the last comment in the group ends. A non-comment token or n
-// empty lines terminate a comment group.
-func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
-	var list []*ast.Comment
-	endline = p.file.Line(p.pos)
-	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
-		var comment *ast.Comment
-		comment, endline = p.consumeComment()
-		list = append(list, comment)
-	}
-
-	// add comment group to the comments list
-	comments = &ast.CommentGroup{List: list}
-	p.comments = append(p.comments, comments)
-
-	return
-}
-
-// Advance to the next non-comment token. In the process, collect
-// any comment groups encountered, and remember the last lead and
-// line comments.
-//
-// A lead comment is a comment group that starts and ends in a
-// line without any other tokens and that is followed by a non-comment
-// token on the line immediately after the comment group.
-//
-// A line comment is a comment group that follows a non-comment
-// token on the same line, and that has no tokens after it on the line
-// where it ends.
-//
-// Lead and line comments may be considered documentation that is
-// stored in the AST.
-func (p *parser) next() {
-	p.leadComment = nil
-	p.lineComment = nil
-	prev := p.pos
-	p.next0()
-
-	if p.tok == token.COMMENT {
-		var comment *ast.CommentGroup
-		var endline int
-
-		if p.file.Line(p.pos) == p.file.Line(prev) {
-			// The comment is on same line as the previous token; it
-			// cannot be a lead comment but may be a line comment.
-			comment, endline = p.consumeCommentGroup(0)
-			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
-				// The next token is on a different line, thus
-				// the last comment group is a line comment.
-				p.lineComment = comment
-			}
-		}
-
-		// consume successor comments, if any
-		endline = -1
-		for p.tok == token.COMMENT {
-			comment, endline = p.consumeCommentGroup(1)
-		}
-
-		if endline+1 == p.file.Line(p.pos) {
-			// The next token is following on the line immediately after the
-			// comment group, thus the last comment group is a lead comment.
-			p.leadComment = comment
-		}
-	}
-}
-
-// A bailout panic is raised to indicate early termination. pos and msg are
-// only populated when bailing out of object resolution.
-type bailout struct {
-	pos token.Pos
-	msg string
-}
-
-func (p *parser) error(pos token.Pos, msg string) {
-	if p.trace {
-		defer un(trace(p, "error: "+msg))
-	}
-
-	epos := p.file.Position(pos)
-
-	// If AllErrors is not set, discard errors reported on the same line
-	// as the last recorded error and stop parsing if there are more than
-	// 10 errors.
-	if p.mode&AllErrors == 0 {
-		n := len(p.errors)
-		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
-			return // discard - likely a spurious error
-		}
-		if n > 10 {
-			panic(bailout{})
-		}
-	}
-
-	p.errors.Add(epos, msg)
-}
-
-func (p *parser) errorExpected(pos token.Pos, msg string) {
-	msg = "expected " + msg
-	if pos == p.pos {
-		// the error happened at the current position;
-		// make the error message more specific
-		switch {
-		case p.tok == token.SEMICOLON && p.lit == "\n":
-			msg += ", found newline"
-		case p.tok.IsLiteral():
-			// print 123 rather than 'INT', etc.
-			msg += ", found " + p.lit
-		default:
-			msg += ", found '" + p.tok.String() + "'"
-		}
-	}
-	p.error(pos, msg)
-}
-
-func (p *parser) expect(tok token.Token) token.Pos {
-	pos := p.pos
-	if p.tok != tok {
-		p.errorExpected(pos, "'"+tok.String()+"'")
-	}
-	p.next() // make progress
-	return pos
-}
-
-// expect2 is like expect, but it returns an invalid position
-// if the expected token is not found.
-func (p *parser) expect2(tok token.Token) (pos token.Pos) {
-	if p.tok == tok {
-		pos = p.pos
-	} else {
-		p.errorExpected(p.pos, "'"+tok.String()+"'")
-	}
-	p.next() // make progress
-	return
-}
-
-// expectClosing is like expect but provides a better error message
-// for the common case of a missing comma before a newline.
-func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
-	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
-		p.error(p.pos, "missing ',' before newline in "+context)
-		p.next()
-	}
-	return p.expect(tok)
-}
-
-func (p *parser) expectSemi() {
-	// semicolon is optional before a closing ')' or '}'
-	if p.tok != token.RPAREN && p.tok != token.RBRACE {
-		switch p.tok {
-		case token.COMMA:
-			// permit a ',' instead of a ';' but complain
-			p.errorExpected(p.pos, "';'")
-			fallthrough
-		case token.SEMICOLON:
-			p.next()
-		default:
-			p.errorExpected(p.pos, "';'")
-			p.advance(stmtStart)
-		}
-	}
-}
-
-func (p *parser) atComma(context string, follow token.Token) bool {
-	if p.tok == token.COMMA {
-		return true
-	}
-	if p.tok != follow {
-		msg := "missing ','"
-		if p.tok == token.SEMICOLON && p.lit == "\n" {
-			msg += " before newline"
-		}
-		p.error(p.pos, msg+" in "+context)
-		return true // "insert" comma and continue
-	}
-	return false
-}
-
-func assert(cond bool, msg string) {
-	if !cond {
-		panic("go/parser internal error: " + msg)
-	}
-}
-
-// advance consumes tokens until the current token p.tok
-// is in the 'to' set, or token.EOF. For error recovery.
-func (p *parser) advance(to map[token.Token]bool) {
-	for ; p.tok != token.EOF; p.next() {
-		if to[p.tok] {
-			// Return only if parser made some progress since last
-			// sync or if it has not reached 10 advance calls without
-			// progress. Otherwise consume at least one token to
-			// avoid an endless parser loop (it is possible that
-			// both parseOperand and parseStmt call advance and
-			// correctly do not advance, thus the need for the
-			// invocation limit p.syncCnt).
-			if p.pos == p.syncPos && p.syncCnt < 10 {
-				p.syncCnt++
-				return
-			}
-			if p.pos > p.syncPos {
-				p.syncPos = p.pos
-				p.syncCnt = 0
-				return
-			}
-			// Reaching here indicates a parser bug, likely an
-			// incorrect token list in this function, but it only
-			// leads to skipping of possibly correct code if a
-			// previous error is present, and thus is preferred
-			// over a non-terminating parse.
-		}
-	}
-}
-
-var stmtStart = map[token.Token]bool{
-	token.BREAK:       true,
-	token.CONST:       true,
-	token.CONTINUE:    true,
-	token.DEFER:       true,
-	token.FALLTHROUGH: true,
-	token.FOR:         true,
-	token.GO:          true,
-	token.GOTO:        true,
-	token.IF:          true,
-	token.RETURN:      true,
-	token.SELECT:      true,
-	token.SWITCH:      true,
-	token.TYPE:        true,
-	token.VAR:         true,
-}
-
-var declStart = map[token.Token]bool{
-	token.CONST: true,
-	token.TYPE:  true,
-	token.VAR:   true,
-}
-
-var exprEnd = map[token.Token]bool{
-	token.COMMA:     true,
-	token.COLON:     true,
-	token.SEMICOLON: true,
-	token.RPAREN:    true,
-	token.RBRACK:    true,
-	token.RBRACE:    true,
-}
-
-// safePos returns a valid file position for a given position: If pos
-// is valid to begin with, safePos returns pos. If pos is out-of-range,
-// safePos returns the EOF position.
-//
-// This is a hack to work around "artificial" end positions in the AST which
-// are computed by adding 1 to (presumably valid) token positions. If the
-// token positions are invalid due to parse errors, the resulting end position
-// may be past the file's EOF position, which would lead to panics if used
-// later on.
-func (p *parser) safePos(pos token.Pos) (res token.Pos) {
-	defer func() {
-		if recover() != nil {
-			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
-		}
-	}()
-	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
-	return pos
-}
-
-// ----------------------------------------------------------------------------
-// Identifiers
-
-func (p *parser) parseIdent() *ast.Ident {
-	pos := p.pos
-	name := "_"
-	if p.tok == token.IDENT {
-		name = p.lit
-		p.next()
-	} else {
-		p.expect(token.IDENT) // use expect() error handling
-	}
-	return &ast.Ident{NamePos: pos, Name: name}
-}
-
-func (p *parser) parseIdentList() (list []*ast.Ident) {
-	if p.trace {
-		defer un(trace(p, "IdentList"))
-	}
-
-	list = append(list, p.parseIdent())
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.parseIdent())
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Common productions
-
-// If lhs is set, result list elements which are identifiers are not resolved.
-func (p *parser) parseExprList() (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "ExpressionList"))
-	}
-
-	list = append(list, p.checkExpr(p.parseExpr()))
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.checkExpr(p.parseExpr()))
-	}
-
-	return
-}
-
-func (p *parser) parseList(inRhs bool) []ast.Expr {
-	old := p.inRhs
-	p.inRhs = inRhs
-	list := p.parseExprList()
-	p.inRhs = old
-	return list
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-func (p *parser) parseType() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Type"))
-	}
-
-	typ := p.tryIdentOrType()
-
-	if typ == nil {
-		pos := p.pos
-		p.errorExpected(pos, "type")
-		p.advance(exprEnd)
-		return &ast.BadExpr{From: pos, To: p.pos}
-	}
-
-	return typ
-}
-
-func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "QualifiedIdent"))
-	}
-
-	typ := p.parseTypeName(ident)
-	if p.tok == token.LBRACK && p.allowGenerics() {
-		typ = p.parseTypeInstance(typ)
-	}
-
-	return typ
-}
-
-// If the result is an identifier, it is not resolved.
-func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "TypeName"))
-	}
-
-	if ident == nil {
-		ident = p.parseIdent()
-	}
-
-	if p.tok == token.PERIOD {
-		// ident is a package name
-		p.next()
-		sel := p.parseIdent()
-		return &ast.SelectorExpr{X: ident, Sel: sel}
-	}
-
-	return ident
-}
-
-// "[" has already been consumed, and lbrack is its position.
-// If len != nil it is the already consumed array length.
-func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
-	if p.trace {
-		defer un(trace(p, "ArrayType"))
-	}
-
-	if len == nil {
-		p.exprLev++
-		// always permit ellipsis for more fault-tolerant parsing
-		if p.tok == token.ELLIPSIS {
-			len = &ast.Ellipsis{Ellipsis: p.pos}
-			p.next()
-		} else if p.tok != token.RBRACK {
-			len = p.parseRhs()
-		}
-		p.exprLev--
-	}
-	if p.tok == token.COMMA {
-		// Trailing commas are accepted in type parameter
-		// lists but not in array type declarations.
-		// Accept for better error handling but complain.
-		p.error(p.pos, "unexpected comma; expecting ]")
-		p.next()
-	}
-	p.expect(token.RBRACK)
-	elt := p.parseType()
-	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
-}
-
-func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "ArrayFieldOrTypeInstance"))
-	}
-
-	// TODO(gri) Should we allow a trailing comma in a type argument
-	//           list such as T[P,]? (We do in parseTypeInstance).
-	lbrack := p.expect(token.LBRACK)
-	var args []ast.Expr
-	var firstComma token.Pos
-	// TODO(rfindley): consider changing parseRhsOrType so that this function variable
-	// is not needed.
-	argparser := p.parseRhsOrType
-	if !p.allowGenerics() {
-		argparser = p.parseRhs
-	}
-	if p.tok != token.RBRACK {
-		p.exprLev++
-		args = append(args, argparser())
-		for p.tok == token.COMMA {
-			if !firstComma.IsValid() {
-				firstComma = p.pos
-			}
-			p.next()
-			args = append(args, argparser())
-		}
-		p.exprLev--
-	}
-	rbrack := p.expect(token.RBRACK)
-
-	if len(args) == 0 {
-		// x []E
-		elt := p.parseType()
-		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
-	}
-
-	// x [P]E or x[P]
-	if len(args) == 1 {
-		elt := p.tryIdentOrType()
-		if elt != nil {
-			// x [P]E
-			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
-		}
-		if !p.allowGenerics() {
-			p.error(rbrack, "missing element type in array type expression")
-			return nil, &ast.BadExpr{From: args[0].Pos(), To: args[0].End()}
-		}
-	}
-
-	if !p.allowGenerics() {
-		p.error(firstComma, "expected ']', found ','")
-		return x, &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
-	}
-
-	// x[P], x[P1, P2], ...
-	return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
-}
-
-func (p *parser) parseFieldDecl() *ast.Field {
-	if p.trace {
-		defer un(trace(p, "FieldDecl"))
-	}
-
-	doc := p.leadComment
-
-	var names []*ast.Ident
-	var typ ast.Expr
-	if p.tok == token.IDENT {
-		name := p.parseIdent()
-		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
-			// embedded type
-			typ = name
-			if p.tok == token.PERIOD {
-				typ = p.parseQualifiedIdent(name)
-			}
-		} else {
-			// name1, name2, ... T
-			names = []*ast.Ident{name}
-			for p.tok == token.COMMA {
-				p.next()
-				names = append(names, p.parseIdent())
-			}
-			// Careful dance: We don't know if we have an embedded instantiated
-			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
-			if len(names) == 1 && p.tok == token.LBRACK {
-				name, typ = p.parseArrayFieldOrTypeInstance(name)
-				if name == nil {
-					names = nil
-				}
-			} else {
-				// T P
-				typ = p.parseType()
-			}
-		}
-	} else {
-		// embedded, possibly generic type
-		// (using the enclosing parentheses to distinguish it from a named field declaration)
-		// TODO(rFindley) confirm that this doesn't allow parenthesized embedded type
-		typ = p.parseType()
-	}
-
-	var tag *ast.BasicLit
-	if p.tok == token.STRING {
-		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
-		p.next()
-	}
-
-	p.expectSemi() // call before accessing p.linecomment
-
-	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: p.lineComment}
-	return field
-}
-
-func (p *parser) parseStructType() *ast.StructType {
-	if p.trace {
-		defer un(trace(p, "StructType"))
-	}
-
-	pos := p.expect(token.STRUCT)
-	lbrace := p.expect(token.LBRACE)
-	var list []*ast.Field
-	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
-		// a field declaration cannot start with a '(' but we accept
-		// it here for more robust parsing and better error messages
-		// (parseFieldDecl will check and complain if necessary)
-		list = append(list, p.parseFieldDecl())
-	}
-	rbrace := p.expect(token.RBRACE)
-
-	return &ast.StructType{
-		Struct: pos,
-		Fields: &ast.FieldList{
-			Opening: lbrace,
-			List:    list,
-			Closing: rbrace,
-		},
-	}
-}
-
-func (p *parser) parsePointerType() *ast.StarExpr {
-	if p.trace {
-		defer un(trace(p, "PointerType"))
-	}
-
-	star := p.expect(token.MUL)
-	base := p.parseType()
-
-	return &ast.StarExpr{Star: star, X: base}
-}
-
-func (p *parser) parseDotsType() *ast.Ellipsis {
-	if p.trace {
-		defer un(trace(p, "DotsType"))
-	}
-
-	pos := p.expect(token.ELLIPSIS)
-	elt := p.parseType()
-
-	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
-}
-
-type field struct {
-	name *ast.Ident
-	typ  ast.Expr
-}
-
-func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
-	// TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
-	// package
-	if p.trace {
-		defer un(trace(p, "ParamDeclOrNil"))
-	}
-
-	ptok := p.tok
-	if name != nil {
-		p.tok = token.IDENT // force token.IDENT case in switch below
-	} else if typeSetsOK && p.tok == token.TILDE {
-		// "~" ...
-		return field{nil, p.embeddedElem(nil)}
-	}
-
-	switch p.tok {
-	case token.IDENT:
-		// name
-		if name != nil {
-			f.name = name
-			p.tok = ptok
-		} else {
-			f.name = p.parseIdent()
-		}
-		switch p.tok {
-		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
-			// name type
-			f.typ = p.parseType()
-
-		case token.LBRACK:
-			// name "[" type1, ..., typeN "]" or name "[" n "]" type
-			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)
-
-		case token.ELLIPSIS:
-			// name "..." type
-			f.typ = p.parseDotsType()
-			return // don't allow ...type "|" ...
-
-		case token.PERIOD:
-			// name "." ...
-			f.typ = p.parseQualifiedIdent(f.name)
-			f.name = nil
-
-		case token.TILDE:
-			if typeSetsOK {
-				f.typ = p.embeddedElem(nil)
-				return
-			}
-
-		case token.OR:
-			if typeSetsOK {
-				// name "|" typeset
-				f.typ = p.embeddedElem(f.name)
-				f.name = nil
-				return
-			}
-		}
-
-	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
-		// type
-		f.typ = p.parseType()
-
-	case token.ELLIPSIS:
-		// "..." type
-		// (always accepted)
-		f.typ = p.parseDotsType()
-		return // don't allow ...type "|" ...
-
-	default:
-		// TODO(rfindley): this is incorrect in the case of type parameter lists
-		//                 (should be "']'" in that case)
-		p.errorExpected(p.pos, "')'")
-		p.advance(exprEnd)
-	}
-
-	// [name] type "|"
-	if typeSetsOK && p.tok == token.OR && f.typ != nil {
-		f.typ = p.embeddedElem(f.typ)
-	}
-
-	return
-}
-
-func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
-	if p.trace {
-		defer un(trace(p, "ParameterList"))
-	}
-
-	// Type parameters are the only parameter list closed by ']'.
-	tparams := closing == token.RBRACK
-	// Type set notation is ok in type parameter lists.
-	typeSetsOK := tparams && p.allowTypeSets()
-
-	pos := p.pos
-	if name0 != nil {
-		pos = name0.Pos()
-	}
-
-	var list []field
-	var named int // number of parameters that have an explicit name and type
-
-	for name0 != nil || p.tok != closing && p.tok != token.EOF {
-		var par field
-		if typ0 != nil {
-			if typeSetsOK {
-				typ0 = p.embeddedElem(typ0)
-			}
-			par = field{name0, typ0}
-		} else {
-			par = p.parseParamDecl(name0, typeSetsOK)
-		}
-		name0 = nil // 1st name was consumed if present
-		typ0 = nil  // 1st typ was consumed if present
-		if par.name != nil || par.typ != nil {
-			list = append(list, par)
-			if par.name != nil && par.typ != nil {
-				named++
-			}
-		}
-		if !p.atComma("parameter list", closing) {
-			break
-		}
-		p.next()
-	}
-
-	if len(list) == 0 {
-		return // not uncommon
-	}
-
-	// TODO(gri) parameter distribution and conversion to []*ast.Field
-	//           can be combined and made more efficient
-
-	// distribute parameter types
-	if named == 0 {
-		// all unnamed => found names are type names
-		for i := 0; i < len(list); i++ {
-			par := &list[i]
-			if typ := par.name; typ != nil {
-				par.typ = typ
-				par.name = nil
-			}
-		}
-		if tparams {
-			p.error(pos, "all type parameters must be named")
-		}
-	} else if named != len(list) {
-		// some named => all must be named
-		ok := true
-		var typ ast.Expr
-		missingName := pos
-		for i := len(list) - 1; i >= 0; i-- {
-			if par := &list[i]; par.typ != nil {
-				typ = par.typ
-				if par.name == nil {
-					ok = false
-					missingName = par.typ.Pos()
-					n := ast.NewIdent("_")
-					n.NamePos = typ.Pos() // correct position
-					par.name = n
-				}
-			} else if typ != nil {
-				par.typ = typ
-			} else {
-				// par.typ == nil && typ == nil => we only have a par.name
-				ok = false
-				missingName = par.name.Pos()
-				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
-			}
-		}
-		if !ok {
-			if tparams {
-				p.error(missingName, "all type parameters must be named")
-			} else {
-				p.error(pos, "mixed named and unnamed parameters")
-			}
-		}
-	}
-
-	// convert list to []*ast.Field
-	if named == 0 {
-		// parameter list consists of types only
-		for _, par := range list {
-			assert(par.typ != nil, "nil type in unnamed parameter list")
-			params = append(params, &ast.Field{Type: par.typ})
-		}
-		return
-	}
-
-	// parameter list consists of named parameters with types
-	var names []*ast.Ident
-	var typ ast.Expr
-	addParams := func() {
-		assert(typ != nil, "nil type in named parameter list")
-		field := &ast.Field{Names: names, Type: typ}
-		params = append(params, field)
-		names = nil
-	}
-	for _, par := range list {
-		if par.typ != typ {
-			if len(names) > 0 {
-				addParams()
-			}
-			typ = par.typ
-		}
-		names = append(names, par.name)
-	}
-	if len(names) > 0 {
-		addParams()
-	}
-	return
-}
-
-func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
-	if p.trace {
-		defer un(trace(p, "Parameters"))
-	}
-
-	if p.allowGenerics() && acceptTParams && p.tok == token.LBRACK {
-		opening := p.pos
-		p.next()
-		// [T any](params) syntax
-		list := p.parseParameterList(nil, nil, token.RBRACK)
-		rbrack := p.expect(token.RBRACK)
-		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
-		// Type parameter lists must not be empty.
-		if tparams.NumFields() == 0 {
-			p.error(tparams.Closing, "empty type parameter list")
-			tparams = nil // avoid follow-on errors
-		}
-	}
-
-	opening := p.expect(token.LPAREN)
-
-	var fields []*ast.Field
-	if p.tok != token.RPAREN {
-		fields = p.parseParameterList(nil, nil, token.RPAREN)
-	}
-
-	rparen := p.expect(token.RPAREN)
-	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}
-
-	return
-}
-
-func (p *parser) parseResult() *ast.FieldList {
-	if p.trace {
-		defer un(trace(p, "Result"))
-	}
-
-	if p.tok == token.LPAREN {
-		_, results := p.parseParameters(false)
-		return results
-	}
-
-	typ := p.tryIdentOrType()
-	if typ != nil {
-		list := make([]*ast.Field, 1)
-		list[0] = &ast.Field{Type: typ}
-		return &ast.FieldList{List: list}
-	}
-
-	return nil
-}
-
-func (p *parser) parseFuncType() *ast.FuncType {
-	if p.trace {
-		defer un(trace(p, "FuncType"))
-	}
-
-	pos := p.expect(token.FUNC)
-	tparams, params := p.parseParameters(true)
-	if tparams != nil {
-		p.error(tparams.Pos(), "function type must have no type parameters")
-	}
-	results := p.parseResult()
-
-	return &ast.FuncType{Func: pos, Params: params, Results: results}
-}
-
-func (p *parser) parseMethodSpec() *ast.Field {
-	if p.trace {
-		defer un(trace(p, "MethodSpec"))
-	}
-
-	doc := p.leadComment
-	var idents []*ast.Ident
-	var typ ast.Expr
-	x := p.parseTypeName(nil)
-	if ident, _ := x.(*ast.Ident); ident != nil {
-		switch {
-		case p.tok == token.LBRACK && p.allowGenerics():
-			// generic method or embedded instantiated type
-			lbrack := p.pos
-			p.next()
-			p.exprLev++
-			x := p.parseExpr()
-			p.exprLev--
-			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
-				// generic method m[T any]
-				//
-				// Interface methods do not have type parameters. We parse them for a
-				// better error message and improved error recovery.
-				_ = p.parseParameterList(name0, nil, token.RBRACK)
-				_ = p.expect(token.RBRACK)
-				p.error(lbrack, "interface method must have no type parameters")
-
-				// TODO(rfindley) refactor to share code with parseFuncType.
-				_, params := p.parseParameters(false)
-				results := p.parseResult()
-				idents = []*ast.Ident{ident}
-				typ = &ast.FuncType{
-					Func:    token.NoPos,
-					Params:  params,
-					Results: results,
-				}
-			} else {
-				// embedded instantiated type
-				// TODO(rfindley) should resolve all identifiers in x.
-				list := []ast.Expr{x}
-				if p.atComma("type argument list", token.RBRACK) {
-					p.exprLev++
-					p.next()
-					for p.tok != token.RBRACK && p.tok != token.EOF {
-						list = append(list, p.parseType())
-						if !p.atComma("type argument list", token.RBRACK) {
-							break
-						}
-						p.next()
-					}
-					p.exprLev--
-				}
-				rbrack := p.expectClosing(token.RBRACK, "type argument list")
-				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
-			}
-		case p.tok == token.LPAREN:
-			// ordinary method
-			// TODO(rfindley) refactor to share code with parseFuncType.
-			_, params := p.parseParameters(false)
-			results := p.parseResult()
-			idents = []*ast.Ident{ident}
-			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
-		default:
-			// embedded type
-			typ = x
-		}
-	} else {
-		// embedded, possibly instantiated type
-		typ = x
-		if p.tok == token.LBRACK && p.allowGenerics() {
-			// embedded instantiated interface
-			typ = p.parseTypeInstance(typ)
-		}
-	}
-
-	// Comment is added at the callsite: the field below may be joined with
-	// additional type specs using '|'.
-	// TODO(rfindley) this should be refactored.
-	// TODO(rfindley) add more tests for comment handling.
-	return &ast.Field{Doc: doc, Names: idents, Type: typ}
-}
-
-func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "EmbeddedElem"))
-	}
-	if x == nil {
-		x = p.embeddedTerm()
-	}
-	for p.tok == token.OR {
-		t := new(ast.BinaryExpr)
-		t.OpPos = p.pos
-		t.Op = token.OR
-		p.next()
-		t.X = x
-		t.Y = p.embeddedTerm()
-		x = t
-	}
-	return x
-}
-
-func (p *parser) embeddedTerm() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "EmbeddedTerm"))
-	}
-	if p.tok == token.TILDE {
-		t := new(ast.UnaryExpr)
-		t.OpPos = p.pos
-		t.Op = token.TILDE
-		p.next()
-		t.X = p.parseType()
-		return t
-	}
-
-	t := p.tryIdentOrType()
-	if t == nil {
-		pos := p.pos
-		p.errorExpected(pos, "~ term or type")
-		p.advance(exprEnd)
-		return &ast.BadExpr{From: pos, To: p.pos}
-	}
-
-	return t
-}
-
-func (p *parser) parseInterfaceType() *ast.InterfaceType {
-	if p.trace {
-		defer un(trace(p, "InterfaceType"))
-	}
-
-	pos := p.expect(token.INTERFACE)
-	lbrace := p.expect(token.LBRACE)
-
-	var list []*ast.Field
-
-parseElements:
-	for {
-		switch {
-		case p.tok == token.IDENT:
-			f := p.parseMethodSpec()
-			if f.Names == nil && p.allowGenerics() {
-				f.Type = p.embeddedElem(f.Type)
-			}
-			p.expectSemi()
-			f.Comment = p.lineComment
-			list = append(list, f)
-		case p.tok == token.TILDE && p.allowGenerics():
-			typ := p.embeddedElem(nil)
-			p.expectSemi()
-			comment := p.lineComment
-			list = append(list, &ast.Field{Type: typ, Comment: comment})
-		case p.allowGenerics():
-			if t := p.tryIdentOrType(); t != nil {
-				typ := p.embeddedElem(t)
-				p.expectSemi()
-				comment := p.lineComment
-				list = append(list, &ast.Field{Type: typ, Comment: comment})
-			} else {
-				break parseElements
-			}
-		default:
-			break parseElements
-		}
-	}
-
-	// TODO(rfindley): the error produced here could be improved, since we could
-	// accept an identifier, 'type', or a '}' at this point.
-	rbrace := p.expect(token.RBRACE)
-
-	return &ast.InterfaceType{
-		Interface: pos,
-		Methods: &ast.FieldList{
-			Opening: lbrace,
-			List:    list,
-			Closing: rbrace,
-		},
-	}
-}
-
-func (p *parser) parseMapType() *ast.MapType {
-	if p.trace {
-		defer un(trace(p, "MapType"))
-	}
-
-	pos := p.expect(token.MAP)
-	p.expect(token.LBRACK)
-	key := p.parseType()
-	p.expect(token.RBRACK)
-	value := p.parseType()
-
-	return &ast.MapType{Map: pos, Key: key, Value: value}
-}
-
-func (p *parser) parseChanType() *ast.ChanType {
-	if p.trace {
-		defer un(trace(p, "ChanType"))
-	}
-
-	pos := p.pos
-	dir := ast.SEND | ast.RECV
-	var arrow token.Pos
-	if p.tok == token.CHAN {
-		p.next()
-		if p.tok == token.ARROW {
-			arrow = p.pos
-			p.next()
-			dir = ast.SEND
-		}
-	} else {
-		arrow = p.expect(token.ARROW)
-		p.expect(token.CHAN)
-		dir = ast.RECV
-	}
-	value := p.parseType()
-
-	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
-}
-
-func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
-	assert(p.allowGenerics(), "parseTypeInstance while not parsing type params")
-	if p.trace {
-		defer un(trace(p, "TypeInstance"))
-	}
-
-	opening := p.expect(token.LBRACK)
-	p.exprLev++
-	var list []ast.Expr
-	for p.tok != token.RBRACK && p.tok != token.EOF {
-		list = append(list, p.parseType())
-		if !p.atComma("type argument list", token.RBRACK) {
-			break
-		}
-		p.next()
-	}
-	p.exprLev--
-
-	closing := p.expectClosing(token.RBRACK, "type argument list")
-
-	if len(list) == 0 {
-		p.errorExpected(closing, "type argument list")
-		return &ast.IndexExpr{
-			X:      typ,
-			Lbrack: opening,
-			Index:  &ast.BadExpr{From: opening + 1, To: closing},
-			Rbrack: closing,
-		}
-	}
-
-	return typeparams.PackIndexExpr(typ, opening, list, closing)
-}
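
For illustration, the effect of typeparams.PackIndexExpr surfaces in the resulting AST: one type argument becomes an *ast.IndexExpr, two or more become an *ast.IndexListExpr. A minimal sketch using the standard go/parser (which this deleted copy tracked), assuming Go 1.18+; Pair is a hypothetical identifier:

	package main

	import (
		"fmt"
		"go/parser"
	)

	func main() {
		one, _ := parser.ParseExpr("Pair[int]")
		two, _ := parser.ParseExpr("Pair[int, string]")
		fmt.Printf("%T\n%T\n", one, two) // *ast.IndexExpr, then *ast.IndexListExpr
	}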
-
-func (p *parser) tryIdentOrType() ast.Expr {
-	defer decNestLev(incNestLev(p))
-
-	switch p.tok {
-	case token.IDENT:
-		typ := p.parseTypeName(nil)
-		if p.tok == token.LBRACK && p.allowGenerics() {
-			typ = p.parseTypeInstance(typ)
-		}
-		return typ
-	case token.LBRACK:
-		lbrack := p.expect(token.LBRACK)
-		return p.parseArrayType(lbrack, nil)
-	case token.STRUCT:
-		return p.parseStructType()
-	case token.MUL:
-		return p.parsePointerType()
-	case token.FUNC:
-		typ := p.parseFuncType()
-		return typ
-	case token.INTERFACE:
-		return p.parseInterfaceType()
-	case token.MAP:
-		return p.parseMapType()
-	case token.CHAN, token.ARROW:
-		return p.parseChanType()
-	case token.LPAREN:
-		lparen := p.pos
-		p.next()
-		typ := p.parseType()
-		rparen := p.expect(token.RPAREN)
-		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
-	}
-
-	// no type found
-	return nil
-}
-
-// ----------------------------------------------------------------------------
-// Blocks
-
-func (p *parser) parseStmtList() (list []ast.Stmt) {
-	if p.trace {
-		defer un(trace(p, "StatementList"))
-	}
-
-	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
-		list = append(list, p.parseStmt())
-	}
-
-	return
-}
-
-func (p *parser) parseBody() *ast.BlockStmt {
-	if p.trace {
-		defer un(trace(p, "Body"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	list := p.parseStmtList()
-	rbrace := p.expect2(token.RBRACE)
-
-	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
-}
-
-func (p *parser) parseBlockStmt() *ast.BlockStmt {
-	if p.trace {
-		defer un(trace(p, "BlockStmt"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	list := p.parseStmtList()
-	rbrace := p.expect2(token.RBRACE)
-
-	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
-}
-
-// ----------------------------------------------------------------------------
-// Expressions
-
-func (p *parser) parseFuncTypeOrLit() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "FuncTypeOrLit"))
-	}
-
-	typ := p.parseFuncType()
-	if p.tok != token.LBRACE {
-		// function type only
-		return typ
-	}
-
-	p.exprLev++
-	body := p.parseBody()
-	p.exprLev--
-
-	return &ast.FuncLit{Type: typ, Body: body}
-}
-
-// parseOperand may return an expression or a raw type (incl. array
-// types of the form [...]T). Callers must verify the result.
-func (p *parser) parseOperand() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Operand"))
-	}
-
-	switch p.tok {
-	case token.IDENT:
-		x := p.parseIdent()
-		return x
-
-	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
-		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
-		p.next()
-		return x
-
-	case token.LPAREN:
-		lparen := p.pos
-		p.next()
-		p.exprLev++
-		x := p.parseRhsOrType() // types may be parenthesized: (some type)
-		p.exprLev--
-		rparen := p.expect(token.RPAREN)
-		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
-
-	case token.FUNC:
-		return p.parseFuncTypeOrLit()
-	}
-
-	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
-		// could be type for composite literal or conversion
-		_, isIdent := typ.(*ast.Ident)
-		assert(!isIdent, "type cannot be identifier")
-		return typ
-	}
-
-	// we have an error
-	pos := p.pos
-	p.errorExpected(pos, "operand")
-	p.advance(stmtStart)
-	return &ast.BadExpr{From: pos, To: p.pos}
-}
-
-func (p *parser) parseSelector(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Selector"))
-	}
-
-	sel := p.parseIdent()
-
-	return &ast.SelectorExpr{X: x, Sel: sel}
-}
-
-func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "TypeAssertion"))
-	}
-
-	lparen := p.expect(token.LPAREN)
-	var typ ast.Expr
-	if p.tok == token.TYPE {
-		// type switch: typ == nil
-		p.next()
-	} else {
-		typ = p.parseType()
-	}
-	rparen := p.expect(token.RPAREN)
-
-	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
-}
-
-func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "parseIndexOrSliceOrInstance"))
-	}
-
-	lbrack := p.expect(token.LBRACK)
-	if p.tok == token.RBRACK {
-		// empty index, slice or instance expressions are not permitted;
-		// accept them for parsing tolerance, but complain
-		p.errorExpected(p.pos, "operand")
-		rbrack := p.pos
-		p.next()
-		return &ast.IndexExpr{
-			X:      x,
-			Lbrack: lbrack,
-			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
-			Rbrack: rbrack,
-		}
-	}
-	p.exprLev++
-
-	const N = 3 // change the 3 to 2 to disable 3-index slices
-	var args []ast.Expr
-	var index [N]ast.Expr
-	var colons [N - 1]token.Pos
-	var firstComma token.Pos
-	if p.tok != token.COLON {
-		// We can't know if we have an index expression or a type instantiation;
-		// so even if we see a (named) type we are not going to be in type context.
-		index[0] = p.parseRhsOrType()
-	}
-	ncolons := 0
-	switch p.tok {
-	case token.COLON:
-		// slice expression
-		for p.tok == token.COLON && ncolons < len(colons) {
-			colons[ncolons] = p.pos
-			ncolons++
-			p.next()
-			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
-				index[ncolons] = p.parseRhs()
-			}
-		}
-	case token.COMMA:
-		firstComma = p.pos
-		// instance expression
-		args = append(args, index[0])
-		for p.tok == token.COMMA {
-			p.next()
-			if p.tok != token.RBRACK && p.tok != token.EOF {
-				args = append(args, p.parseType())
-			}
-		}
-	}
-
-	p.exprLev--
-	rbrack := p.expect(token.RBRACK)
-
-	if ncolons > 0 {
-		// slice expression
-		slice3 := false
-		if ncolons == 2 {
-			slice3 = true
-			// Check presence of 2nd and 3rd index here rather than during type-checking
-			// to prevent erroneous programs from passing through gofmt (was issue 7305).
-			if index[1] == nil {
-				p.error(colons[0], "2nd index required in 3-index slice")
-				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
-			}
-			if index[2] == nil {
-				p.error(colons[1], "3rd index required in 3-index slice")
-				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
-			}
-		}
-		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
-	}
-
-	if len(args) == 0 {
-		// index expression
-		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
-	}
-
-	if !p.allowGenerics() {
-		p.error(firstComma, "expected ']' or ':', found ','")
-		return &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
-	}
-
-	// instance expression
-	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
-}
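
For illustration, the 3-index slice form handled above can be seen via the standard go/parser, which this deleted copy tracked; buf is a hypothetical name:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		x, err := parser.ParseExpr("buf[1:4:8]")
		if err != nil {
			panic(err)
		}
		s := x.(*ast.SliceExpr)
		// Low, High, and Max are all present; Slice3 records the second colon.
		fmt.Println(s.Slice3, s.Max != nil) // true true
	}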
-
-func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
-	if p.trace {
-		defer un(trace(p, "CallOrConversion"))
-	}
-
-	lparen := p.expect(token.LPAREN)
-	p.exprLev++
-	var list []ast.Expr
-	var ellipsis token.Pos
-	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
-		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
-		if p.tok == token.ELLIPSIS {
-			ellipsis = p.pos
-			p.next()
-		}
-		if !p.atComma("argument list", token.RPAREN) {
-			break
-		}
-		p.next()
-	}
-	p.exprLev--
-	rparen := p.expectClosing(token.RPAREN, "argument list")
-
-	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
-}
-
-func (p *parser) parseValue() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Element"))
-	}
-
-	if p.tok == token.LBRACE {
-		return p.parseLiteralValue(nil)
-	}
-
-	x := p.checkExpr(p.parseExpr())
-
-	return x
-}
-
-func (p *parser) parseElement() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Element"))
-	}
-
-	x := p.parseValue()
-	if p.tok == token.COLON {
-		colon := p.pos
-		p.next()
-		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
-	}
-
-	return x
-}
-
-func (p *parser) parseElementList() (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "ElementList"))
-	}
-
-	for p.tok != token.RBRACE && p.tok != token.EOF {
-		list = append(list, p.parseElement())
-		if !p.atComma("composite literal", token.RBRACE) {
-			break
-		}
-		p.next()
-	}
-
-	return
-}
-
-func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "LiteralValue"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	var elts []ast.Expr
-	p.exprLev++
-	if p.tok != token.RBRACE {
-		elts = p.parseElementList()
-	}
-	p.exprLev--
-	rbrace := p.expectClosing(token.RBRACE, "composite literal")
-	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
-}
-
-// checkExpr checks that x is an expression (and not a type).
-func (p *parser) checkExpr(x ast.Expr) ast.Expr {
-	switch unparen(x).(type) {
-	case *ast.BadExpr:
-	case *ast.Ident:
-	case *ast.BasicLit:
-	case *ast.FuncLit:
-	case *ast.CompositeLit:
-	case *ast.ParenExpr:
-		panic("unreachable")
-	case *ast.SelectorExpr:
-	case *ast.IndexExpr:
-	case *ast.IndexListExpr:
-	case *ast.SliceExpr:
-	case *ast.TypeAssertExpr:
-		// If t.Type == nil we have a type assertion of the form
-		// y.(type), which is only allowed in type switch expressions.
-		// It's hard to exclude those except when we are in a type
-		// switch. Instead, be lenient and test this in the type
-		// checker.
-	case *ast.CallExpr:
-	case *ast.StarExpr:
-	case *ast.UnaryExpr:
-	case *ast.BinaryExpr:
-	default:
-		// all other nodes are not proper expressions
-		p.errorExpected(x.Pos(), "expression")
-		x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
-	}
-	return x
-}
-
-// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
-func unparen(x ast.Expr) ast.Expr {
-	if p, isParen := x.(*ast.ParenExpr); isParen {
-		x = unparen(p.X)
-	}
-	return x
-}
-
-// checkExprOrType checks that x is an expression or a type
-// (and not a raw type such as [...]T).
-func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
-	switch t := unparen(x).(type) {
-	case *ast.ParenExpr:
-		panic("unreachable")
-	case *ast.ArrayType:
-		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
-			p.error(len.Pos(), "expected array length, found '...'")
-			x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
-		}
-	}
-
-	// all other nodes are expressions or types
-	return x
-}
-
-func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "PrimaryExpr"))
-	}
-
-	if x == nil {
-		x = p.parseOperand()
-	}
-	// We track the nesting here rather than at the entry for the function,
-	// since it can iteratively produce a nested output, and we want to
-	// limit how deep a structure we generate.
-	var n int
-	defer func() { p.nestLev -= n }()
-	for n = 1; ; n++ {
-		incNestLev(p)
-		switch p.tok {
-		case token.PERIOD:
-			p.next()
-			switch p.tok {
-			case token.IDENT:
-				x = p.parseSelector(p.checkExprOrType(x))
-			case token.LPAREN:
-				x = p.parseTypeAssertion(p.checkExpr(x))
-			default:
-				pos := p.pos
-				p.errorExpected(pos, "selector or type assertion")
-				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
-				//                to error recovery sufficient to make the x/tools tests
-				//                pass with the new parsing logic introduced for type
-				//                parameters. Remove this once error recovery has been
-				//                more generally reconsidered.
-				if p.tok != token.RBRACE {
-					p.next() // make progress
-				}
-				sel := &ast.Ident{NamePos: pos, Name: "_"}
-				x = &ast.SelectorExpr{X: x, Sel: sel}
-			}
-		case token.LBRACK:
-			x = p.parseIndexOrSliceOrInstance(p.checkExpr(x))
-		case token.LPAREN:
-			x = p.parseCallOrConversion(p.checkExprOrType(x))
-		case token.LBRACE:
-			// operand may have returned a parenthesized complit
-			// type; accept it but complain if we have a complit
-			t := unparen(x)
-			// determine if '{' belongs to a composite literal or a block statement
-			switch t.(type) {
-			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
-				if p.exprLev < 0 {
-					return x
-				}
-				// x is possibly a composite literal type
-			case *ast.IndexExpr, *ast.IndexListExpr:
-				if p.exprLev < 0 {
-					return x
-				}
-				// x is possibly a composite literal type
-			case *ast.ArrayType, *ast.StructType, *ast.MapType:
-				// x is a composite literal type
-			default:
-				return x
-			}
-			if t != x {
-				p.error(t.Pos(), "cannot parenthesize type in composite literal")
-				// already progressed, no need to advance
-			}
-			x = p.parseLiteralValue(x)
-		default:
-			return x
-		}
-	}
-}
-
-func (p *parser) parseUnaryExpr() ast.Expr {
-	defer decNestLev(incNestLev(p))
-
-	if p.trace {
-		defer un(trace(p, "UnaryExpr"))
-	}
-
-	switch p.tok {
-	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
-		pos, op := p.pos, p.tok
-		p.next()
-		x := p.parseUnaryExpr()
-		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
-
-	case token.ARROW:
-		// channel type or receive expression
-		arrow := p.pos
-		p.next()
-
-		// If the next token is token.CHAN we still don't know if it
-		// is a channel type or a receive operation - we only know
-		// once we have found the end of the unary expression. There
-		// are two cases:
-		//
-		//   <- type  => (<-type) must be channel type
-		//   <- expr  => <-(expr) is a receive from an expression
-		//
-		// In the first case, the arrow must be re-associated with
-		// the channel type parsed already:
-		//
-		//   <- (chan type)    =>  (<-chan type)
-		//   <- (chan<- type)  =>  (<-chan (<-type))
-
-		x := p.parseUnaryExpr()
-
-		// determine which case we have
-		if typ, ok := x.(*ast.ChanType); ok {
-			// (<-type)
-
-			// re-associate position info and <-
-			dir := ast.SEND
-			for ok && dir == ast.SEND {
-				if typ.Dir == ast.RECV {
-					// error: (<-type) is (<-(<-chan T))
-					p.errorExpected(typ.Arrow, "'chan'")
-				}
-				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
-				dir, typ.Dir = typ.Dir, ast.RECV
-				typ, ok = typ.Value.(*ast.ChanType)
-			}
-			if dir == ast.SEND {
-				p.errorExpected(arrow, "channel type")
-			}
-
-			return x
-		}
-
-		// <-(expr)
-		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
-
-	case token.MUL:
-		// pointer type or unary "*" expression
-		pos := p.pos
-		p.next()
-		x := p.parseUnaryExpr()
-		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
-	}
-
-	return p.parsePrimaryExpr(nil)
-}
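
For illustration, the two cases described in the comment above come out of the standard go/parser (which this deleted copy tracked) as distinct node kinds; c is a hypothetical channel name:

	package main

	import (
		"fmt"
		"go/parser"
	)

	func main() {
		recvType, _ := parser.ParseExpr("<-chan int") // receive-only channel type
		recvOp, _ := parser.ParseExpr("<-c")          // receive operation
		fmt.Printf("%T\n%T\n", recvType, recvOp) // *ast.ChanType, then *ast.UnaryExpr
	}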
-
-func (p *parser) tokPrec() (token.Token, int) {
-	tok := p.tok
-	if p.inRhs && tok == token.ASSIGN {
-		tok = token.EQL
-	}
-	return tok, tok.Precedence()
-}
-
-// parseBinaryExpr parses a (possibly) binary expression.
-// If x is non-nil, it is used as the left operand.
-// If check is true, operands are checked to be valid expressions.
-//
-// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
-func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int, check bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "BinaryExpr"))
-	}
-
-	if x == nil {
-		x = p.parseUnaryExpr()
-	}
-	// We track the nesting here rather than at the entry for the function,
-	// since it can iteratively produce a nested output, and we want to
-	// limit how deep a structure we generate.
-	var n int
-	defer func() { p.nestLev -= n }()
-	for n = 1; ; n++ {
-		incNestLev(p)
-		op, oprec := p.tokPrec()
-		if oprec < prec1 {
-			return x
-		}
-		pos := p.expect(op)
-		y := p.parseBinaryExpr(nil, oprec+1, check)
-		if check {
-			x = p.checkExpr(x)
-			y = p.checkExpr(y)
-		}
-		x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
-	}
-}
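
For illustration, the precedence climbing in parseBinaryExpr makes multiplication bind tighter than addition. A minimal sketch against the standard go/parser, which this deleted copy tracked (a, b, c are hypothetical names):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		x, err := parser.ParseExpr("a + b*c")
		if err != nil {
			panic(err)
		}
		top := x.(*ast.BinaryExpr)
		// The root operator is '+'; its right operand is the b*c subtree.
		fmt.Println(top.Op)       // +
		fmt.Printf("%T\n", top.Y) // *ast.BinaryExpr
	}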
-
-// The result may be a type or even a raw type ([...]int). Callers must
-// check the result (using checkExpr or checkExprOrType), depending on
-// context.
-func (p *parser) parseExpr() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Expression"))
-	}
-
-	return p.parseBinaryExpr(nil, token.LowestPrec+1, true)
-}
-
-func (p *parser) parseRhs() ast.Expr {
-	old := p.inRhs
-	p.inRhs = true
-	x := p.checkExpr(p.parseExpr())
-	p.inRhs = old
-	return x
-}
-
-func (p *parser) parseRhsOrType() ast.Expr {
-	old := p.inRhs
-	p.inRhs = true
-	x := p.checkExprOrType(p.parseExpr())
-	p.inRhs = old
-	return x
-}
-
-// ----------------------------------------------------------------------------
-// Statements
-
-// Parsing modes for parseSimpleStmt.
-const (
-	basic = iota
-	labelOk
-	rangeOk
-)
-
-// parseSimpleStmt returns true as 2nd result if it parsed the assignment
-// of a range clause (with mode == rangeOk). The returned statement is an
-// assignment with a right-hand side that is a single unary expression of
-// the form "range x". No guarantees are given for the left-hand side.
-func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
-	if p.trace {
-		defer un(trace(p, "SimpleStmt"))
-	}
-
-	x := p.parseList(false)
-
-	switch p.tok {
-	case
-		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
-		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
-		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
-		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
-		// assignment statement, possibly part of a range clause
-		pos, tok := p.pos, p.tok
-		p.next()
-		var y []ast.Expr
-		isRange := false
-		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
-			pos := p.pos
-			p.next()
-			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
-			isRange = true
-		} else {
-			y = p.parseList(true)
-		}
-		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
-		if tok == token.DEFINE {
-			p.checkAssignStmt(as)
-		}
-		return as, isRange
-	}
-
-	if len(x) > 1 {
-		p.errorExpected(x[0].Pos(), "1 expression")
-		// continue with first expression
-	}
-
-	switch p.tok {
-	case token.COLON:
-		// labeled statement
-		colon := p.pos
-		p.next()
-		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
-			// Go spec: The scope of a label is the body of the function
-			// in which it is declared and excludes the body of any nested
-			// function.
-			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
-			return stmt, false
-		}
-		// The label declaration typically starts at x[0].Pos(), but the label
-		// declaration may be erroneous due to a token after that position (and
-		// before the ':'). If SpuriousErrors is not set, the (only) error
-		// reported for the line is the illegal label error instead of the token
-		// before the ':' that caused the problem. Thus, use the (latest) colon
-		// position for error reporting.
-		p.error(colon, "illegal label declaration")
-		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
-
-	case token.ARROW:
-		// send statement
-		arrow := p.pos
-		p.next()
-		y := p.parseRhs()
-		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
-
-	case token.INC, token.DEC:
-		// increment or decrement
-		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
-		p.next()
-		return s, false
-	}
-
-	// expression
-	return &ast.ExprStmt{X: x[0]}, false
-}
-
-func (p *parser) checkAssignStmt(as *ast.AssignStmt) {
-	for _, x := range as.Lhs {
-		if _, isIdent := x.(*ast.Ident); !isIdent {
-			p.errorExpected(x.Pos(), "identifier on left side of :=")
-		}
-	}
-}
-
-func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
-	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
-	if call, isCall := x.(*ast.CallExpr); isCall {
-		return call
-	}
-	if _, isBad := x.(*ast.BadExpr); !isBad {
-		// only report error if it's a new one
-		p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
-	}
-	return nil
-}
-
-func (p *parser) parseGoStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "GoStmt"))
-	}
-
-	pos := p.expect(token.GO)
-	call := p.parseCallExpr("go")
-	p.expectSemi()
-	if call == nil {
-		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
-	}
-
-	return &ast.GoStmt{Go: pos, Call: call}
-}
-
-func (p *parser) parseDeferStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "DeferStmt"))
-	}
-
-	pos := p.expect(token.DEFER)
-	call := p.parseCallExpr("defer")
-	p.expectSemi()
-	if call == nil {
-		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
-	}
-
-	return &ast.DeferStmt{Defer: pos, Call: call}
-}
-
-func (p *parser) parseReturnStmt() *ast.ReturnStmt {
-	if p.trace {
-		defer un(trace(p, "ReturnStmt"))
-	}
-
-	pos := p.pos
-	p.expect(token.RETURN)
-	var x []ast.Expr
-	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
-		x = p.parseList(true)
-	}
-	p.expectSemi()
-
-	return &ast.ReturnStmt{Return: pos, Results: x}
-}
-
-func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
-	if p.trace {
-		defer un(trace(p, "BranchStmt"))
-	}
-
-	pos := p.expect(tok)
-	var label *ast.Ident
-	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
-		label = p.parseIdent()
-	}
-	p.expectSemi()
-
-	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
-}
-
-func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
-	if s == nil {
-		return nil
-	}
-	if es, isExpr := s.(*ast.ExprStmt); isExpr {
-		return p.checkExpr(es.X)
-	}
-	found := "simple statement"
-	if _, isAss := s.(*ast.AssignStmt); isAss {
-		found = "assignment"
-	}
-	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
-	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
-}
-
-// parseIfHeader is an adjusted version of parser.header
-// in cmd/compile/internal/syntax/parser.go, which has
-// been tuned for better error handling.
-func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
-	if p.tok == token.LBRACE {
-		p.error(p.pos, "missing condition in if statement")
-		cond = &ast.BadExpr{From: p.pos, To: p.pos}
-		return
-	}
-	// p.tok != token.LBRACE
-
-	prevLev := p.exprLev
-	p.exprLev = -1
-
-	if p.tok != token.SEMICOLON {
-		// accept potential variable declaration but complain
-		if p.tok == token.VAR {
-			p.next()
-			p.error(p.pos, "var declaration not allowed in 'IF' initializer")
-		}
-		init, _ = p.parseSimpleStmt(basic)
-	}
-
-	var condStmt ast.Stmt
-	var semi struct {
-		pos token.Pos
-		lit string // ";" or "\n"; valid if pos.IsValid()
-	}
-	if p.tok != token.LBRACE {
-		if p.tok == token.SEMICOLON {
-			semi.pos = p.pos
-			semi.lit = p.lit
-			p.next()
-		} else {
-			p.expect(token.SEMICOLON)
-		}
-		if p.tok != token.LBRACE {
-			condStmt, _ = p.parseSimpleStmt(basic)
-		}
-	} else {
-		condStmt = init
-		init = nil
-	}
-
-	if condStmt != nil {
-		cond = p.makeExpr(condStmt, "boolean expression")
-	} else if semi.pos.IsValid() {
-		if semi.lit == "\n" {
-			p.error(semi.pos, "unexpected newline, expecting { after if clause")
-		} else {
-			p.error(semi.pos, "missing condition in if statement")
-		}
-	}
-
-	// make sure we have a valid AST
-	if cond == nil {
-		cond = &ast.BadExpr{From: p.pos, To: p.pos}
-	}
-
-	p.exprLev = prevLev
-	return
-}
-
-func (p *parser) parseIfStmt() *ast.IfStmt {
-	defer decNestLev(incNestLev(p))
-
-	if p.trace {
-		defer un(trace(p, "IfStmt"))
-	}
-
-	pos := p.expect(token.IF)
-
-	init, cond := p.parseIfHeader()
-	body := p.parseBlockStmt()
-
-	var else_ ast.Stmt
-	if p.tok == token.ELSE {
-		p.next()
-		switch p.tok {
-		case token.IF:
-			else_ = p.parseIfStmt()
-		case token.LBRACE:
-			else_ = p.parseBlockStmt()
-			p.expectSemi()
-		default:
-			p.errorExpected(p.pos, "if statement or block")
-			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
-		}
-	} else {
-		p.expectSemi()
-	}
-
-	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
-}
-
-func (p *parser) parseTypeList() (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "TypeList"))
-	}
-
-	list = append(list, p.parseType())
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.parseType())
-	}
-
-	return
-}
-
-func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
-	if p.trace {
-		defer un(trace(p, "CaseClause"))
-	}
-
-	pos := p.pos
-	var list []ast.Expr
-	if p.tok == token.CASE {
-		p.next()
-		if typeSwitch {
-			list = p.parseTypeList()
-		} else {
-			list = p.parseList(true)
-		}
-	} else {
-		p.expect(token.DEFAULT)
-	}
-
-	colon := p.expect(token.COLON)
-	body := p.parseStmtList()
-
-	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
-}
-
-func isTypeSwitchAssert(x ast.Expr) bool {
-	a, ok := x.(*ast.TypeAssertExpr)
-	return ok && a.Type == nil
-}
-
-func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
-	switch t := s.(type) {
-	case *ast.ExprStmt:
-		// x.(type)
-		return isTypeSwitchAssert(t.X)
-	case *ast.AssignStmt:
-		// v := x.(type)
-		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
-			switch t.Tok {
-			case token.ASSIGN:
-				// permit v = x.(type) but complain
-				p.error(t.TokPos, "expected ':=', found '='")
-				fallthrough
-			case token.DEFINE:
-				return true
-			}
-		}
-	}
-	return false
-}
-
-func (p *parser) parseSwitchStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "SwitchStmt"))
-	}
-
-	pos := p.expect(token.SWITCH)
-
-	var s1, s2 ast.Stmt
-	if p.tok != token.LBRACE {
-		prevLev := p.exprLev
-		p.exprLev = -1
-		if p.tok != token.SEMICOLON {
-			s2, _ = p.parseSimpleStmt(basic)
-		}
-		if p.tok == token.SEMICOLON {
-			p.next()
-			s1 = s2
-			s2 = nil
-			if p.tok != token.LBRACE {
-				// A TypeSwitchGuard may declare a variable in addition
-				// to the variable declared in the initial SimpleStmt.
-				// Introduce extra scope to avoid redeclaration errors:
-				//
-				//	switch t := 0; t := x.(T) { ... }
-				//
-				// (this code is not valid Go because the first t
-				// cannot be accessed and thus is never used; the extra
-				// scope is needed for the correct error message).
-				//
-				// If we don't have a type switch, s2 must be an expression.
-				// Having the extra nested but empty scope won't affect it.
-				s2, _ = p.parseSimpleStmt(basic)
-			}
-		}
-		p.exprLev = prevLev
-	}
-
-	typeSwitch := p.isTypeSwitchGuard(s2)
-	lbrace := p.expect(token.LBRACE)
-	var list []ast.Stmt
-	for p.tok == token.CASE || p.tok == token.DEFAULT {
-		list = append(list, p.parseCaseClause(typeSwitch))
-	}
-	rbrace := p.expect(token.RBRACE)
-	p.expectSemi()
-	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
-
-	if typeSwitch {
-		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
-	}
-
-	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
-}
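
For illustration, the guard detection above is what selects between the two statement kinds. A minimal sketch using the standard go/parser, which this deleted copy tracked:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p\nfunc f(x any) {\n\tswitch v := x.(type) {\n\tcase int:\n\t\t_ = v\n\t}\n\tswitch x {\n\t}\n}\n"
		f, err := parser.ParseFile(token.NewFileSet(), "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		stmts := f.Decls[0].(*ast.FuncDecl).Body.List
		fmt.Printf("%T\n%T\n", stmts[0], stmts[1]) // *ast.TypeSwitchStmt, then *ast.SwitchStmt
	}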
-
-func (p *parser) parseCommClause() *ast.CommClause {
-	if p.trace {
-		defer un(trace(p, "CommClause"))
-	}
-
-	pos := p.pos
-	var comm ast.Stmt
-	if p.tok == token.CASE {
-		p.next()
-		lhs := p.parseList(false)
-		if p.tok == token.ARROW {
-			// SendStmt
-			if len(lhs) > 1 {
-				p.errorExpected(lhs[0].Pos(), "1 expression")
-				// continue with first expression
-			}
-			arrow := p.pos
-			p.next()
-			rhs := p.parseRhs()
-			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
-		} else {
-			// RecvStmt
-			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
-				// RecvStmt with assignment
-				if len(lhs) > 2 {
-					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
-					// continue with first two expressions
-					lhs = lhs[0:2]
-				}
-				pos := p.pos
-				p.next()
-				rhs := p.parseRhs()
-				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
-				if tok == token.DEFINE {
-					p.checkAssignStmt(as)
-				}
-				comm = as
-			} else {
-				// lhs must be single receive operation
-				if len(lhs) > 1 {
-					p.errorExpected(lhs[0].Pos(), "1 expression")
-					// continue with first expression
-				}
-				comm = &ast.ExprStmt{X: lhs[0]}
-			}
-		}
-	} else {
-		p.expect(token.DEFAULT)
-	}
-
-	colon := p.expect(token.COLON)
-	body := p.parseStmtList()
-
-	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
-}
-
-func (p *parser) parseSelectStmt() *ast.SelectStmt {
-	if p.trace {
-		defer un(trace(p, "SelectStmt"))
-	}
-
-	pos := p.expect(token.SELECT)
-	lbrace := p.expect(token.LBRACE)
-	var list []ast.Stmt
-	for p.tok == token.CASE || p.tok == token.DEFAULT {
-		list = append(list, p.parseCommClause())
-	}
-	rbrace := p.expect(token.RBRACE)
-	p.expectSemi()
-	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
-
-	return &ast.SelectStmt{Select: pos, Body: body}
-}
-
-func (p *parser) parseForStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "ForStmt"))
-	}
-
-	pos := p.expect(token.FOR)
-
-	var s1, s2, s3 ast.Stmt
-	var isRange bool
-	if p.tok != token.LBRACE {
-		prevLev := p.exprLev
-		p.exprLev = -1
-		if p.tok != token.SEMICOLON {
-			if p.tok == token.RANGE {
-				// "for range x" (nil lhs in assignment)
-				pos := p.pos
-				p.next()
-				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
-				s2 = &ast.AssignStmt{Rhs: y}
-				isRange = true
-			} else {
-				s2, isRange = p.parseSimpleStmt(rangeOk)
-			}
-		}
-		if !isRange && p.tok == token.SEMICOLON {
-			p.next()
-			s1 = s2
-			s2 = nil
-			if p.tok != token.SEMICOLON {
-				s2, _ = p.parseSimpleStmt(basic)
-			}
-			p.expectSemi()
-			if p.tok != token.LBRACE {
-				s3, _ = p.parseSimpleStmt(basic)
-			}
-		}
-		p.exprLev = prevLev
-	}
-
-	body := p.parseBlockStmt()
-	p.expectSemi()
-
-	if isRange {
-		as := s2.(*ast.AssignStmt)
-		// check lhs
-		var key, value ast.Expr
-		switch len(as.Lhs) {
-		case 0:
-			// nothing to do
-		case 1:
-			key = as.Lhs[0]
-		case 2:
-			key, value = as.Lhs[0], as.Lhs[1]
-		default:
-			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
-			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
-		}
-		// parseSimpleStmt returned a right-hand side that
-		// is a single unary expression of the form "range x"
-		x := as.Rhs[0].(*ast.UnaryExpr).X
-		return &ast.RangeStmt{
-			For:    pos,
-			Key:    key,
-			Value:  value,
-			TokPos: as.TokPos,
-			Tok:    as.Tok,
-			X:      x,
-			Body:   body,
-		}
-	}
-
-	// regular for statement
-	return &ast.ForStmt{
-		For:  pos,
-		Init: s1,
-		Cond: p.makeExpr(s2, "boolean or range expression"),
-		Post: s3,
-		Body: body,
-	}
-}
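
For illustration, the "range x" rewrite described above means a range loop surfaces as an *ast.RangeStmt while a three-clause loop stays an *ast.ForStmt. A minimal sketch using the standard go/parser, which this deleted copy tracked:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p\nfunc f(m map[string]int) {\n\tfor k, v := range m {\n\t\t_, _ = k, v\n\t}\n\tfor i := 0; i < 3; i++ {\n\t}\n}\n"
		f, err := parser.ParseFile(token.NewFileSet(), "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		stmts := f.Decls[0].(*ast.FuncDecl).Body.List
		fmt.Printf("%T\n%T\n", stmts[0], stmts[1]) // *ast.RangeStmt, then *ast.ForStmt
	}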
-
-func (p *parser) parseStmt() (s ast.Stmt) {
-	defer decNestLev(incNestLev(p))
-
-	if p.trace {
-		defer un(trace(p, "Statement"))
-	}
-
-	switch p.tok {
-	case token.CONST, token.TYPE, token.VAR:
-		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
-	case
-		// tokens that may start an expression
-		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
-		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
-		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
-		s, _ = p.parseSimpleStmt(labelOk)
-		// because of the required look-ahead, labeled statements are
-		// parsed by parseSimpleStmt - don't expect a semicolon after
-		// them
-		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
-			p.expectSemi()
-		}
-	case token.GO:
-		s = p.parseGoStmt()
-	case token.DEFER:
-		s = p.parseDeferStmt()
-	case token.RETURN:
-		s = p.parseReturnStmt()
-	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
-		s = p.parseBranchStmt(p.tok)
-	case token.LBRACE:
-		s = p.parseBlockStmt()
-		p.expectSemi()
-	case token.IF:
-		s = p.parseIfStmt()
-	case token.SWITCH:
-		s = p.parseSwitchStmt()
-	case token.SELECT:
-		s = p.parseSelectStmt()
-	case token.FOR:
-		s = p.parseForStmt()
-	case token.SEMICOLON:
-		// Is it ever possible to have an implicit semicolon
-		// producing an empty statement in a valid program?
-		// (handle correctly anyway)
-		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
-		p.next()
-	case token.RBRACE:
-		// a semicolon may be omitted before a closing "}"
-		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
-	default:
-		// no statement found
-		pos := p.pos
-		p.errorExpected(pos, "statement")
-		p.advance(stmtStart)
-		s = &ast.BadStmt{From: pos, To: p.pos}
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-type parseSpecFunction func(doc *ast.CommentGroup, pos token.Pos, keyword token.Token, iota int) ast.Spec
-
-func isValidImport(lit string) bool {
-	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
-	s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
-	for _, r := range s {
-		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
-			return false
-		}
-	}
-	return s != ""
-}
-
-func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "ImportSpec"))
-	}
-
-	var ident *ast.Ident
-	switch p.tok {
-	case token.PERIOD:
-		ident = &ast.Ident{NamePos: p.pos, Name: "."}
-		p.next()
-	case token.IDENT:
-		ident = p.parseIdent()
-	}
-
-	pos := p.pos
-	var path string
-	if p.tok == token.STRING {
-		path = p.lit
-		if !isValidImport(path) {
-			p.error(pos, "invalid import path: "+path)
-		}
-		p.next()
-	} else {
-		p.expect(token.STRING) // use expect() error handling
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	// collect imports
-	spec := &ast.ImportSpec{
-		Doc:     doc,
-		Name:    ident,
-		Path:    &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
-		Comment: p.lineComment,
-	}
-	p.imports = append(p.imports, spec)
-
-	return spec
-}
-
-func (p *parser) parseValueSpec(doc *ast.CommentGroup, _ token.Pos, keyword token.Token, iota int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, keyword.String()+"Spec"))
-	}
-
-	pos := p.pos
-	idents := p.parseIdentList()
-	typ := p.tryIdentOrType()
-	var values []ast.Expr
-	// always permit optional initialization for more tolerant parsing
-	if p.tok == token.ASSIGN {
-		p.next()
-		values = p.parseList(true)
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	switch keyword {
-	case token.VAR:
-		if typ == nil && values == nil {
-			p.error(pos, "missing variable type or initialization")
-		}
-	case token.CONST:
-		if values == nil && (iota == 0 || typ != nil) {
-			p.error(pos, "missing constant value")
-		}
-	}
-
-	spec := &ast.ValueSpec{
-		Doc:     doc,
-		Names:   idents,
-		Type:    typ,
-		Values:  values,
-		Comment: p.lineComment,
-	}
-	return spec
-}
-
-func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "parseGenericType"))
-	}
-
-	list := p.parseParameterList(name0, typ0, token.RBRACK)
-	closePos := p.expect(token.RBRACK)
-	spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
-	// Let the type checker decide whether to accept type parameters on aliases:
-	// see issue #46477.
-	if p.tok == token.ASSIGN {
-		// type alias
-		spec.Assign = p.pos
-		p.next()
-	}
-	spec.Type = p.parseType()
-}
-
-func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "TypeSpec"))
-	}
-
-	name := p.parseIdent()
-	spec := &ast.TypeSpec{Doc: doc, Name: name}
-
-	if p.tok == token.LBRACK && p.allowGenerics() {
-		// spec.Name "[" ...
-		// array/slice type or type parameter list
-		lbrack := p.pos
-		p.next()
-		if p.tok == token.IDENT {
-			// We may have an array type or a type parameter list.
-			// In either case we expect an expression x (which may
-			// just be a name, or a more complex expression) which
-			// we can analyze further.
-			//
-			// A type parameter list may have a type bound starting
-			// with a "[" as in: P []E. In that case, simply parsing
-			// an expression would lead to an error: P[] is invalid.
-			// But since index or slice expressions are never constant
-			// and thus invalid array length expressions, if the name
-			// is followed by "[" it must be the start of an array or
-			// slice constraint. Only if we don't see a "[" do we
-			// need to parse a full expression. Notably, name <- x
-			// is not a concern because it is a statement and
-			// not an expression.
-			var x ast.Expr = p.parseIdent()
-			if p.tok != token.LBRACK {
-				// To parse the expression starting with name, expand
-				// the call sequence we would get by passing in name
-				// to parser.expr, and pass in name to parsePrimaryExpr.
-				p.exprLev++
-				lhs := p.parsePrimaryExpr(x)
-				x = p.parseBinaryExpr(lhs, token.LowestPrec+1, false)
-				p.exprLev--
-			}
-			// Analyze expression x. If we can split x into a type parameter
-			// name, possibly followed by a type parameter type, we consider
-			// this the start of a type parameter list, with some caveats:
-			// a single name followed by "]" tilts the decision towards an
-			// array declaration; a type parameter type that could also be
-			// an ordinary expression but which is followed by a comma tilts
-			// the decision towards a type parameter list.
-			if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) {
-				// spec.Name "[" pname ...
-				// spec.Name "[" pname ptype ...
-				// spec.Name "[" pname ptype "," ...
-				p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil
-			} else {
-				// spec.Name "[" pname "]" ...
-				// spec.Name "[" x ...
-				spec.Type = p.parseArrayType(lbrack, x)
-			}
-		} else {
-			// array type
-			spec.Type = p.parseArrayType(lbrack, nil)
-		}
-	} else {
-		// no type parameters
-		if p.tok == token.ASSIGN {
-			// type alias
-			spec.Assign = p.pos
-			p.next()
-		}
-		spec.Type = p.parseType()
-	}
-
-	p.expectSemi() // call before accessing p.linecomment
-	spec.Comment = p.lineComment
-
-	return spec
-}
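
For illustration, the "[" disambiguation above decides whether a type declaration carries an array length or a type parameter list. A minimal sketch using the standard go/parser (which this deleted copy tracked), assuming a Go 1.18+ toolchain:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p\nconst N = 4\ntype A [N]int\ntype B[T any] []T\n"
		f, err := parser.ParseFile(token.NewFileSet(), "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		a := f.Decls[1].(*ast.GenDecl).Specs[0].(*ast.TypeSpec)
		b := f.Decls[2].(*ast.GenDecl).Specs[0].(*ast.TypeSpec)
		// A is an array type; B declares a type parameter list.
		fmt.Println(a.TypeParams == nil, b.TypeParams != nil) // true true
	}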
-
-// extractName splits the expression x into (name, expr) if syntactically
-// x can be written as name expr. The split only happens if expr is a type
-// element (per the isTypeElem predicate) or if force is set.
-// If x is just a name, the result is (name, nil). If the split succeeds,
-// the result is (name, expr). Otherwise the result is (nil, x).
-// Examples:
-//
-//	x           force    name    expr
-//	------------------------------------
-//	P*[]int     T/F      P       *[]int
-//	P*E         T        P       *E
-//	P*E         F        nil     P*E
-//	P([]int)    T/F      P       []int
-//	P(E)        T        P       E
-//	P(E)        F        nil     P(E)
-//	P*E|F|~G    T/F      P       *E|F|~G
-//	P*E|F|G     T        P       *E|F|G
-//	P*E|F|G     F        nil     P*E|F|G
-func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) {
-	switch x := x.(type) {
-	case *ast.Ident:
-		return x, nil
-	case *ast.BinaryExpr:
-		switch x.Op {
-		case token.MUL:
-			if name, _ := x.X.(*ast.Ident); name != nil && (force || isTypeElem(x.Y)) {
-				// x = name *x.Y
-				return name, &ast.StarExpr{Star: x.OpPos, X: x.Y}
-			}
-		case token.OR:
-			if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil {
-				// x = name lhs|x.Y
-				op := *x
-				op.X = lhs
-				return name, &op
-			}
-		}
-	case *ast.CallExpr:
-		if name, _ := x.Fun.(*ast.Ident); name != nil {
-			if len(x.Args) == 1 && x.Ellipsis == token.NoPos && (force || isTypeElem(x.Args[0])) {
-				// x = name "(" x.ArgList[0] ")"
-				return name, x.Args[0]
-			}
-		}
-	}
-	return nil, x
-}
-
-// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
-// The result is false if x could be a type element OR an ordinary (value) expression.
-func isTypeElem(x ast.Expr) bool {
-	switch x := x.(type) {
-	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
-		return true
-	case *ast.BinaryExpr:
-		return isTypeElem(x.X) || isTypeElem(x.Y)
-	case *ast.UnaryExpr:
-		return x.Op == token.TILDE
-	case *ast.ParenExpr:
-		return isTypeElem(x.X)
-	}
-	return false
-}
-
-func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
-	if p.trace {
-		defer un(trace(p, "GenDecl("+keyword.String()+")"))
-	}
-
-	doc := p.leadComment
-	pos := p.expect(keyword)
-	var lparen, rparen token.Pos
-	var list []ast.Spec
-	if p.tok == token.LPAREN {
-		lparen = p.pos
-		p.next()
-		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
-			list = append(list, f(p.leadComment, pos, keyword, iota))
-		}
-		rparen = p.expect(token.RPAREN)
-		p.expectSemi()
-	} else {
-		list = append(list, f(nil, pos, keyword, 0))
-	}
-
-	return &ast.GenDecl{
-		Doc:    doc,
-		TokPos: pos,
-		Tok:    keyword,
-		Lparen: lparen,
-		Specs:  list,
-		Rparen: rparen,
-	}
-}
-
-func (p *parser) parseFuncDecl() *ast.FuncDecl {
-	if p.trace {
-		defer un(trace(p, "FunctionDecl"))
-	}
-
-	doc := p.leadComment
-	pos := p.expect(token.FUNC)
-
-	var recv *ast.FieldList
-	if p.tok == token.LPAREN {
-		_, recv = p.parseParameters(false)
-	}
-
-	ident := p.parseIdent()
-
-	tparams, params := p.parseParameters(true)
-	if recv != nil && tparams != nil {
-		// Method declarations do not have type parameters. We parse them for a
-		// better error message and improved error recovery.
-		p.error(tparams.Opening, "method must have no type parameters")
-		tparams = nil
-	}
-	results := p.parseResult()
-
-	var body *ast.BlockStmt
-	switch p.tok {
-	case token.LBRACE:
-		body = p.parseBody()
-		p.expectSemi()
-	case token.SEMICOLON:
-		p.next()
-		if p.tok == token.LBRACE {
-			// opening { of function declaration on next line
-			p.error(p.pos, "unexpected semicolon or newline before {")
-			body = p.parseBody()
-			p.expectSemi()
-		}
-	default:
-		p.expectSemi()
-	}
-
-	decl := &ast.FuncDecl{
-		Doc:  doc,
-		Recv: recv,
-		Name: ident,
-		Type: &ast.FuncType{
-			Func:       pos,
-			TypeParams: tparams,
-			Params:     params,
-			Results:    results,
-		},
-		Body: body,
-	}
-	return decl
-}
-
-func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
-	if p.trace {
-		defer un(trace(p, "Declaration"))
-	}
-
-	var f parseSpecFunction
-	switch p.tok {
-	case token.CONST, token.VAR:
-		f = p.parseValueSpec
-
-	case token.TYPE:
-		f = p.parseTypeSpec
-
-	case token.FUNC:
-		return p.parseFuncDecl()
-
-	default:
-		pos := p.pos
-		p.errorExpected(pos, "declaration")
-		p.advance(sync)
-		return &ast.BadDecl{From: pos, To: p.pos}
-	}
-
-	return p.parseGenDecl(p.tok, f)
-}
-
-// ----------------------------------------------------------------------------
-// Source files
-
-func (p *parser) parseFile() *ast.File {
-	if p.trace {
-		defer un(trace(p, "File"))
-	}
-
-	// Don't bother parsing the rest if we had errors scanning the first token.
-	// Likely not a Go source file at all.
-	if p.errors.Len() != 0 {
-		return nil
-	}
-
-	// package clause
-	doc := p.leadComment
-	pos := p.expect(token.PACKAGE)
-	// Go spec: The package clause is not a declaration;
-	// the package name does not appear in any scope.
-	ident := p.parseIdent()
-	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
-		p.error(p.pos, "invalid package name _")
-	}
-	p.expectSemi()
-
-	// Don't bother parsing the rest if we had errors parsing the package clause.
-	// Likely not a Go source file at all.
-	if p.errors.Len() != 0 {
-		return nil
-	}
-
-	var decls []ast.Decl
-	if p.mode&PackageClauseOnly == 0 {
-		// import decls
-		for p.tok == token.IMPORT {
-			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
-		}
-
-		if p.mode&ImportsOnly == 0 {
-			// rest of package body
-			for p.tok != token.EOF {
-				decls = append(decls, p.parseDecl(declStart))
-			}
-		}
-	}
-
-	f := &ast.File{
-		Doc:      doc,
-		Package:  pos,
-		Name:     ident,
-		Decls:    decls,
-		Imports:  p.imports,
-		Comments: p.comments,
-	}
-	var declErr func(token.Pos, string)
-	if p.mode&DeclarationErrors != 0 {
-		declErr = p.error
-	}
-	if p.mode&SkipObjectResolution == 0 {
-		resolveFile(f, p.file, declErr)
-	}
-
-	return f
-}
diff --git a/internal/backport/go/parser/parser_test.go b/internal/backport/go/parser/parser_test.go
deleted file mode 100644
index feb05ea..0000000
--- a/internal/backport/go/parser/parser_test.go
+++ /dev/null
@@ -1,579 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parser
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-	"io/fs"
-	"strings"
-	"testing"
-)
-
-var validFiles = []string{
-	"parser.go",
-	"parser_test.go",
-	"error_test.go",
-	"short_test.go",
-}
-
-func TestParse(t *testing.T) {
-	for _, filename := range validFiles {
-		_, err := ParseFile(token.NewFileSet(), filename, nil, DeclarationErrors)
-		if err != nil {
-			t.Fatalf("ParseFile(%s): %v", filename, err)
-		}
-	}
-}
-
-func nameFilter(filename string) bool {
-	switch filename {
-	case "parser.go", "interface.go", "parser_test.go":
-		return true
-	case "parser.go.orig":
-		return true // permit but should be ignored by ParseDir
-	}
-	return false
-}
-
-func dirFilter(f fs.FileInfo) bool { return nameFilter(f.Name()) }
-
-func TestParseFile(t *testing.T) {
-	src := "package p\nvar _=s[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]"
-	_, err := ParseFile(token.NewFileSet(), "", src, 0)
-	if err == nil {
-		t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
-	}
-}
-
-func TestParseExprFrom(t *testing.T) {
-	src := "s[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]"
-	_, err := ParseExprFrom(token.NewFileSet(), "", src, 0)
-	if err == nil {
-		t.Errorf("ParseExprFrom(%s) succeeded unexpectedly", src)
-	}
-}
-
-func TestParseDir(t *testing.T) {
-	path := "."
-	pkgs, err := ParseDir(token.NewFileSet(), path, dirFilter, 0)
-	if err != nil {
-		t.Fatalf("ParseDir(%s): %v", path, err)
-	}
-	if n := len(pkgs); n != 1 {
-		t.Errorf("got %d packages; want 1", n)
-	}
-	pkg := pkgs["parser"]
-	if pkg == nil {
-		t.Errorf(`package "parser" not found`)
-		return
-	}
-	if n := len(pkg.Files); n != 3 {
-		t.Errorf("got %d package files; want 3", n)
-	}
-	for filename := range pkg.Files {
-		if !nameFilter(filename) {
-			t.Errorf("unexpected package file: %s", filename)
-		}
-	}
-}
-
-func TestIssue42951(t *testing.T) {
-	path := "./testdata/issue42951"
-	_, err := ParseDir(token.NewFileSet(), path, nil, 0)
-	if err != nil {
-		t.Errorf("ParseDir(%s): %v", path, err)
-	}
-}
-
-func TestParseExpr(t *testing.T) {
-	// just kicking the tires:
-	// a valid arithmetic expression
-	src := "a + b"
-	x, err := ParseExpr(src)
-	if err != nil {
-		t.Errorf("ParseExpr(%q): %v", src, err)
-	}
-	// sanity check
-	if _, ok := x.(*ast.BinaryExpr); !ok {
-		t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
-	}
-
-	// a valid type expression
-	src = "struct{x *int}"
-	x, err = ParseExpr(src)
-	if err != nil {
-		t.Errorf("ParseExpr(%q): %v", src, err)
-	}
-	// sanity check
-	if _, ok := x.(*ast.StructType); !ok {
-		t.Errorf("ParseExpr(%q): got %T, want *ast.StructType", src, x)
-	}
-
-	// an invalid expression
-	src = "a + *"
-	x, err = ParseExpr(src)
-	if err == nil {
-		t.Errorf("ParseExpr(%q): got no error", src)
-	}
-	if x == nil {
-		t.Errorf("ParseExpr(%q): got no (partial) result", src)
-	}
-	if _, ok := x.(*ast.BinaryExpr); !ok {
-		t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
-	}
-
-	// a valid expression followed by extra tokens is invalid
-	src = "a[i] := x"
-	if _, err := ParseExpr(src); err == nil {
-		t.Errorf("ParseExpr(%q): got no error", src)
-	}
-
-	// a semicolon is not permitted unless automatically inserted
-	src = "a + b\n"
-	if _, err := ParseExpr(src); err != nil {
-		t.Errorf("ParseExpr(%q): got error %s", src, err)
-	}
-	src = "a + b;"
-	if _, err := ParseExpr(src); err == nil {
-		t.Errorf("ParseExpr(%q): got no error", src)
-	}
-
-	// various other stuff following a valid expression
-	const validExpr = "a + b"
-	const anything = "dh3*#D)#_"
-	for _, c := range "!)]};," {
-		src := validExpr + string(c) + anything
-		if _, err := ParseExpr(src); err == nil {
-			t.Errorf("ParseExpr(%q): got no error", src)
-		}
-	}
-
-	// ParseExpr must not crash
-	for _, src := range valids {
-		ParseExpr(src)
-	}
-}
-
-func TestColonEqualsScope(t *testing.T) {
-	f, err := ParseFile(token.NewFileSet(), "", `package p; func f() { x, y, z := x, y, z }`, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// RHS refers to undefined globals; LHS does not.
-	as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
-	for _, v := range as.Rhs {
-		id := v.(*ast.Ident)
-		if id.Obj != nil {
-			t.Errorf("rhs %s has Obj, should not", id.Name)
-		}
-	}
-	for _, v := range as.Lhs {
-		id := v.(*ast.Ident)
-		if id.Obj == nil {
-			t.Errorf("lhs %s does not have Obj, should", id.Name)
-		}
-	}
-}
-
-func TestVarScope(t *testing.T) {
-	f, err := ParseFile(token.NewFileSet(), "", `package p; func f() { var x, y, z = x, y, z }`, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// RHS refers to undefined globals; LHS does not.
-	as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
-	for _, v := range as.Values {
-		id := v.(*ast.Ident)
-		if id.Obj != nil {
-			t.Errorf("rhs %s has Obj, should not", id.Name)
-		}
-	}
-	for _, id := range as.Names {
-		if id.Obj == nil {
-			t.Errorf("lhs %s does not have Obj, should", id.Name)
-		}
-	}
-}
-
-func TestObjects(t *testing.T) {
-	const src = `
-package p
-import fmt "fmt"
-const pi = 3.14
-type T struct{}
-var x int
-func f() { L: }
-`
-
-	f, err := ParseFile(token.NewFileSet(), "", src, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	objects := map[string]ast.ObjKind{
-		"p":   ast.Bad, // not in a scope
-		"fmt": ast.Bad, // not resolved yet
-		"pi":  ast.Con,
-		"T":   ast.Typ,
-		"x":   ast.Var,
-		"int": ast.Bad, // not resolved yet
-		"f":   ast.Fun,
-		"L":   ast.Lbl,
-	}
-
-	ast.Inspect(f, func(n ast.Node) bool {
-		if ident, ok := n.(*ast.Ident); ok {
-			obj := ident.Obj
-			if obj == nil {
-				if objects[ident.Name] != ast.Bad {
-					t.Errorf("no object for %s", ident.Name)
-				}
-				return true
-			}
-			if obj.Name != ident.Name {
-				t.Errorf("names don't match: obj.Name = %s, ident.Name = %s", obj.Name, ident.Name)
-			}
-			kind := objects[ident.Name]
-			if obj.Kind != kind {
-				t.Errorf("%s: obj.Kind = %s; want %s", ident.Name, obj.Kind, kind)
-			}
-		}
-		return true
-	})
-}
-
-func TestUnresolved(t *testing.T) {
-	f, err := ParseFile(token.NewFileSet(), "", `
-package p
-//
-func f1a(int)
-func f2a(byte, int, float)
-func f3a(a, b int, c float)
-func f4a(...complex)
-func f5a(a s1a, b ...complex)
-//
-func f1b(*int)
-func f2b([]byte, (int), *float)
-func f3b(a, b *int, c []float)
-func f4b(...*complex)
-func f5b(a s1a, b ...[]complex)
-//
-type s1a struct { int }
-type s2a struct { byte; int; s1a }
-type s3a struct { a, b int; c float }
-//
-type s1b struct { *int }
-type s2b struct { byte; int; *float }
-type s3b struct { a, b *s3b; c []float }
-`, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	want := "int " + // f1a
-		"byte int float " + // f2a
-		"int float " + // f3a
-		"complex " + // f4a
-		"complex " + // f5a
-		//
-		"int " + // f1b
-		"byte int float " + // f2b
-		"int float " + // f3b
-		"complex " + // f4b
-		"complex " + // f5b
-		//
-		"int " + // s1a
-		"byte int " + // s2a
-		"int float " + // s3a
-		//
-		"int " + // s1a
-		"byte int float " + // s2a
-		"float " // s3a
-
-	// collect unresolved identifiers
-	var buf bytes.Buffer
-	for _, u := range f.Unresolved {
-		buf.WriteString(u.Name)
-		buf.WriteByte(' ')
-	}
-	got := buf.String()
-
-	if got != want {
-		t.Errorf("\ngot:  %s\nwant: %s", got, want)
-	}
-}
-
-var imports = map[string]bool{
-	`"a"`:        true,
-	"`a`":        true,
-	`"a/b"`:      true,
-	`"a.b"`:      true,
-	`"m\x61th"`:  true,
-	`"greek/αβ"`: true,
-	`""`:         false,
-
-	// Each of these pairs tests both `` vs "" strings
-	// and also use of invalid characters spelled out as
-	// escape sequences and written directly.
-	// For example `"\x00"` tests import "\x00"
-	// while "`\x00`" tests import `<actual-NUL-byte>`.
-	`"\x00"`:     false,
-	"`\x00`":     false,
-	`"\x7f"`:     false,
-	"`\x7f`":     false,
-	`"a!"`:       false,
-	"`a!`":       false,
-	`"a b"`:      false,
-	"`a b`":      false,
-	`"a\\b"`:     false,
-	"`a\\b`":     false,
-	"\"`a`\"":    false,
-	"`\"a\"`":    false,
-	`"\x80\x80"`: false,
-	"`\x80\x80`": false,
-	`"\xFFFD"`:   false,
-	"`\xFFFD`":   false,
-}
-
-func TestImports(t *testing.T) {
-	for path, isValid := range imports {
-		src := fmt.Sprintf("package p; import %s", path)
-		_, err := ParseFile(token.NewFileSet(), "", src, 0)
-		switch {
-		case err != nil && isValid:
-			t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
-		case err == nil && !isValid:
-			t.Errorf("ParseFile(%s): got no error; expected one", src)
-		}
-	}
-}
-
-func TestCommentGroups(t *testing.T) {
-	f, err := ParseFile(token.NewFileSet(), "", `
-package p /* 1a */ /* 1b */      /* 1c */ // 1d
-/* 2a
-*/
-// 2b
-const pi = 3.1415
-/* 3a */ // 3b
-/* 3c */ const e = 2.7182
-
-// Example from issue 3139
-func ExampleCount() {
-	fmt.Println(strings.Count("cheese", "e"))
-	fmt.Println(strings.Count("five", "")) // before & after each rune
-	// Output:
-	// 3
-	// 5
-}
-`, ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expected := [][]string{
-		{"/* 1a */", "/* 1b */", "/* 1c */", "// 1d"},
-		{"/* 2a\n*/", "// 2b"},
-		{"/* 3a */", "// 3b", "/* 3c */"},
-		{"// Example from issue 3139"},
-		{"// before & after each rune"},
-		{"// Output:", "// 3", "// 5"},
-	}
-	if len(f.Comments) != len(expected) {
-		t.Fatalf("got %d comment groups; expected %d", len(f.Comments), len(expected))
-	}
-	for i, exp := range expected {
-		got := f.Comments[i].List
-		if len(got) != len(exp) {
-			t.Errorf("got %d comments in group %d; expected %d", len(got), i, len(exp))
-			continue
-		}
-		for j, exp := range exp {
-			got := got[j].Text
-			if got != exp {
-				t.Errorf("got %q in group %d; expected %q", got, i, exp)
-			}
-		}
-	}
-}
-
-func getField(file *ast.File, fieldname string) *ast.Field {
-	parts := strings.Split(fieldname, ".")
-	for _, d := range file.Decls {
-		if d, ok := d.(*ast.GenDecl); ok && d.Tok == token.TYPE {
-			for _, s := range d.Specs {
-				if s, ok := s.(*ast.TypeSpec); ok && s.Name.Name == parts[0] {
-					if s, ok := s.Type.(*ast.StructType); ok {
-						for _, f := range s.Fields.List {
-							for _, name := range f.Names {
-								if name.Name == parts[1] {
-									return f
-								}
-							}
-						}
-					}
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// Don't use ast.CommentGroup.Text() - we want to see exact comment text.
-func commentText(c *ast.CommentGroup) string {
-	var buf bytes.Buffer
-	if c != nil {
-		for _, c := range c.List {
-			buf.WriteString(c.Text)
-		}
-	}
-	return buf.String()
-}
-
-func checkFieldComments(t *testing.T, file *ast.File, fieldname, lead, line string) {
-	f := getField(file, fieldname)
-	if f == nil {
-		t.Fatalf("field not found: %s", fieldname)
-	}
-	if got := commentText(f.Doc); got != lead {
-		t.Errorf("got lead comment %q; expected %q", got, lead)
-	}
-	if got := commentText(f.Comment); got != line {
-		t.Errorf("got line comment %q; expected %q", got, line)
-	}
-}
-
-func TestLeadAndLineComments(t *testing.T) {
-	f, err := ParseFile(token.NewFileSet(), "", `
-package p
-type T struct {
-	/* F1 lead comment */
-	//
-	F1 int  /* F1 */ // line comment
-	// F2 lead
-	// comment
-	F2 int  // F2 line comment
-	// f3 lead comment
-	f3 int  // f3 line comment
-}
-`, ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
-	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
-	checkFieldComments(t, f, "T.f3", "// f3 lead comment", "// f3 line comment")
-	ast.FileExports(f)
-	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
-	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
-	if getField(f, "T.f3") != nil {
-		t.Error("not expected to find T.f3")
-	}
-}
-
-// TestIssue9979 verifies that empty statements are contained within their enclosing blocks.
-func TestIssue9979(t *testing.T) {
-	for _, src := range []string{
-		"package p; func f() {;}",
-		"package p; func f() {L:}",
-		"package p; func f() {L:;}",
-		"package p; func f() {L:\n}",
-		"package p; func f() {L:\n;}",
-		"package p; func f() { ; }",
-		"package p; func f() { L: }",
-		"package p; func f() { L: ; }",
-		"package p; func f() { L: \n}",
-		"package p; func f() { L: \n; }",
-	} {
-		fset := token.NewFileSet()
-		f, err := ParseFile(fset, "", src, 0)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		var pos, end token.Pos
-		ast.Inspect(f, func(x ast.Node) bool {
-			switch s := x.(type) {
-			case *ast.BlockStmt:
-				pos, end = s.Pos()+1, s.End()-1 // exclude "{", "}"
-			case *ast.LabeledStmt:
-				pos, end = s.Pos()+2, s.End() // exclude "L:"
-			case *ast.EmptyStmt:
-				// check containment
-				if s.Pos() < pos || s.End() > end {
-					t.Errorf("%s: %T[%d, %d] not inside [%d, %d]", src, s, s.Pos(), s.End(), pos, end)
-				}
-				// check semicolon
-				offs := fset.Position(s.Pos()).Offset
-				if ch := src[offs]; ch != ';' != s.Implicit {
-					want := "want ';'"
-					if s.Implicit {
-						want = "but ';' is implicit"
-					}
-					t.Errorf("%s: found %q at offset %d; %s", src, ch, offs, want)
-				}
-			}
-			return true
-		})
-	}
-}
-
-// TestIncompleteSelection ensures that an incomplete selector
-// expression is parsed as a (blank) *ast.SelectorExpr, not a
-// *ast.BadExpr.
-func TestIncompleteSelection(t *testing.T) {
-	for _, src := range []string{
-		"package p; var _ = fmt.",             // at EOF
-		"package p; var _ = fmt.\ntype X int", // not at EOF
-	} {
-		fset := token.NewFileSet()
-		f, err := ParseFile(fset, "", src, 0)
-		if err == nil {
-			t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
-			continue
-		}
-
-		const wantErr = "expected selector or type assertion"
-		if !strings.Contains(err.Error(), wantErr) {
-			t.Errorf("ParseFile returned wrong error %q, want %q", err, wantErr)
-		}
-
-		var sel *ast.SelectorExpr
-		ast.Inspect(f, func(n ast.Node) bool {
-			if n, ok := n.(*ast.SelectorExpr); ok {
-				sel = n
-			}
-			return true
-		})
-		if sel == nil {
-			t.Error("found no *ast.SelectorExpr")
-			continue
-		}
-		const wantSel = "&{fmt _}"
-		if fmt.Sprint(sel) != wantSel {
-			t.Errorf("found selector %s, want %s", sel, wantSel)
-			continue
-		}
-	}
-}
-
-func TestLastLineComment(t *testing.T) {
-	const src = `package main
-type x int // comment
-`
-	fset := token.NewFileSet()
-	f, err := ParseFile(fset, "", src, ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-	comment := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.TypeSpec).Comment.List[0].Text
-	if comment != "// comment" {
-		t.Errorf("got %q, want %q", comment, "// comment")
-	}
-}
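
The tests above exercised comment handling in the backported parser. As a rough, self-contained sketch (not part of this change), the same behavior is available from the standard library go/parser that the site now uses directly:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	const src = `package p
/* a */ // b
const k = 1
`
	// ParseComments keeps comments in the AST; adjacent comments form one group.
	f, err := parser.ParseFile(token.NewFileSet(), "", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for i, g := range f.Comments {
		fmt.Printf("group %d: %q\n", i, g.Text())
	}
}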
diff --git a/internal/backport/go/parser/performance_test.go b/internal/backport/go/parser/performance_test.go
deleted file mode 100644
index 8b219f1..0000000
--- a/internal/backport/go/parser/performance_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parser
-
-import (
-	"os"
-	"testing"
-
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// TODO(rfindley): use a testdata file or file from another package here, to
-// avoid a moving target.
-var src = readFile("parser.go")
-
-func readFile(filename string) []byte {
-	data, err := os.ReadFile(filename)
-	if err != nil {
-		panic(err)
-	}
-	return data
-}
-
-func BenchmarkParse(b *testing.B) {
-	b.SetBytes(int64(len(src)))
-	for i := 0; i < b.N; i++ {
-		if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments); err != nil {
-			b.Fatalf("benchmark failed due to parse error: %s", err)
-		}
-	}
-}
-
-func BenchmarkParseOnly(b *testing.B) {
-	b.SetBytes(int64(len(src)))
-	for i := 0; i < b.N; i++ {
-		if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments|SkipObjectResolution); err != nil {
-			b.Fatalf("benchmark failed due to parse error: %s", err)
-		}
-	}
-}
-
-func BenchmarkResolve(b *testing.B) {
-	b.SetBytes(int64(len(src)))
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		fset := token.NewFileSet()
-		file, err := ParseFile(fset, "", src, SkipObjectResolution)
-		if err != nil {
-			b.Fatalf("benchmark failed due to parse error: %s", err)
-		}
-		b.StartTimer()
-		handle := fset.File(file.Package)
-		resolveFile(file, handle, nil)
-	}
-}
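
The deleted benchmarks measured the backported parser. An equivalent benchmark written against the standard library parser looks essentially the same; this is a sketch only, with "parser.go" as a placeholder input file:

package parser_test

import (
	"go/parser"
	"go/token"
	"os"
	"testing"
)

func BenchmarkStdlibParse(b *testing.B) {
	// Placeholder input: any reasonably large Go source file works.
	src, err := os.ReadFile("parser.go")
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(int64(len(src)))
	for i := 0; i < b.N; i++ {
		if _, err := parser.ParseFile(token.NewFileSet(), "", src, parser.ParseComments); err != nil {
			b.Fatalf("parse error: %s", err)
		}
	}
}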
diff --git a/internal/backport/go/parser/resolver.go b/internal/backport/go/parser/resolver.go
deleted file mode 100644
index b648836..0000000
--- a/internal/backport/go/parser/resolver.go
+++ /dev/null
@@ -1,607 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parser
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-	"strings"
-)
-
-const debugResolve = false
-
-// resolveFile walks the given file to resolve identifiers within the file
-// scope, updating ast.Ident.Obj fields with declaration information.
-//
-// If declErr is non-nil, it is used to report declaration errors during
-// resolution. tok is used to format position in error messages.
-func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, string)) {
-	pkgScope := ast.NewScope(nil)
-	r := &resolver{
-		handle:   handle,
-		declErr:  declErr,
-		topScope: pkgScope,
-		pkgScope: pkgScope,
-		depth:    1,
-	}
-
-	for _, decl := range file.Decls {
-		ast.Walk(r, decl)
-	}
-
-	r.closeScope()
-	assert(r.topScope == nil, "unbalanced scopes")
-	assert(r.labelScope == nil, "unbalanced label scopes")
-
-	// resolve global identifiers within the same file
-	i := 0
-	for _, ident := range r.unresolved {
-		// i <= index for current ident
-		assert(ident.Obj == unresolved, "object already resolved")
-		ident.Obj = r.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
-		if ident.Obj == nil {
-			r.unresolved[i] = ident
-			i++
-		} else if debugResolve {
-			pos := ident.Obj.Decl.(interface{ Pos() token.Pos }).Pos()
-			r.trace("resolved %s@%v to package object %v", ident.Name, ident.Pos(), pos)
-		}
-	}
-	file.Scope = r.pkgScope
-	file.Unresolved = r.unresolved[0:i]
-}
-
-type resolver struct {
-	handle  *token.File
-	declErr func(token.Pos, string)
-
-	// Ordinary identifier scopes
-	pkgScope   *ast.Scope   // pkgScope.Outer == nil
-	topScope   *ast.Scope   // top-most scope; may be pkgScope
-	unresolved []*ast.Ident // unresolved identifiers
-	depth      int          // scope depth
-
-	// Label scopes
-	// (maintained by open/close LabelScope)
-	labelScope  *ast.Scope     // label scope for current function
-	targetStack [][]*ast.Ident // stack of unresolved labels
-}
-
-func (r *resolver) trace(format string, args ...interface{}) {
-	fmt.Println(strings.Repeat(". ", r.depth) + r.sprintf(format, args...))
-}
-
-func (r *resolver) sprintf(format string, args ...interface{}) string {
-	for i, arg := range args {
-		switch arg := arg.(type) {
-		case token.Pos:
-			args[i] = r.handle.Position(arg)
-		}
-	}
-	return fmt.Sprintf(format, args...)
-}
-
-func (r *resolver) openScope(pos token.Pos) {
-	if debugResolve {
-		r.trace("opening scope @%v", pos)
-		r.depth++
-	}
-	r.topScope = ast.NewScope(r.topScope)
-}
-
-func (r *resolver) closeScope() {
-	if debugResolve {
-		r.depth--
-		r.trace("closing scope")
-	}
-	r.topScope = r.topScope.Outer
-}
-
-func (r *resolver) openLabelScope() {
-	r.labelScope = ast.NewScope(r.labelScope)
-	r.targetStack = append(r.targetStack, nil)
-}
-
-func (r *resolver) closeLabelScope() {
-	// resolve labels
-	n := len(r.targetStack) - 1
-	scope := r.labelScope
-	for _, ident := range r.targetStack[n] {
-		ident.Obj = scope.Lookup(ident.Name)
-		if ident.Obj == nil && r.declErr != nil {
-			r.declErr(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
-		}
-	}
-	// pop label scope
-	r.targetStack = r.targetStack[0:n]
-	r.labelScope = r.labelScope.Outer
-}
-
-func (r *resolver) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
-	for _, ident := range idents {
-		if ident.Obj != nil {
-			panic(fmt.Sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
-		}
-		obj := ast.NewObj(kind, ident.Name)
-		// remember the corresponding declaration for redeclaration
-		// errors and global variable resolution/typechecking phase
-		obj.Decl = decl
-		obj.Data = data
-		// Identifiers (for receiver type parameters) are written to the scope, but
-		// never set as the resolved object. See issue #50956.
-		if _, ok := decl.(*ast.Ident); !ok {
-			ident.Obj = obj
-		}
-		if ident.Name != "_" {
-			if debugResolve {
-				r.trace("declaring %s@%v", ident.Name, ident.Pos())
-			}
-			if alt := scope.Insert(obj); alt != nil && r.declErr != nil {
-				prevDecl := ""
-				if pos := alt.Pos(); pos.IsValid() {
-					prevDecl = r.sprintf("\n\tprevious declaration at %v", pos)
-				}
-				r.declErr(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
-			}
-		}
-	}
-}
-
-func (r *resolver) shortVarDecl(decl *ast.AssignStmt) {
-	// Go spec: A short variable declaration may redeclare variables
-	// provided they were originally declared in the same block with
-	// the same type, and at least one of the non-blank variables is new.
-	n := 0 // number of new variables
-	for _, x := range decl.Lhs {
-		if ident, isIdent := x.(*ast.Ident); isIdent {
-			assert(ident.Obj == nil, "identifier already declared or resolved")
-			obj := ast.NewObj(ast.Var, ident.Name)
-			// remember corresponding assignment for other tools
-			obj.Decl = decl
-			ident.Obj = obj
-			if ident.Name != "_" {
-				if debugResolve {
-					r.trace("declaring %s@%v", ident.Name, ident.Pos())
-				}
-				if alt := r.topScope.Insert(obj); alt != nil {
-					ident.Obj = alt // redeclaration
-				} else {
-					n++ // new declaration
-				}
-			}
-		}
-	}
-	if n == 0 && r.declErr != nil {
-		r.declErr(decl.Lhs[0].Pos(), "no new variables on left side of :=")
-	}
-}
-
-// The unresolved object is a sentinel to mark identifiers that have been added
-// to the list of unresolved identifiers. The sentinel is only used for verifying
-// internal consistency.
-var unresolved = new(ast.Object)
-
-// If x is an identifier, resolve attempts to resolve x by looking up
-// the object it denotes. If no object is found and collectUnresolved is
-// set, x is marked as unresolved and collected in the list of unresolved
-// identifiers.
-func (r *resolver) resolve(ident *ast.Ident, collectUnresolved bool) {
-	if ident.Obj != nil {
-		panic(r.sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
-	}
-	// '_' should never refer to existing declarations, because it has special
-	// handling in the spec.
-	if ident.Name == "_" {
-		return
-	}
-	for s := r.topScope; s != nil; s = s.Outer {
-		if obj := s.Lookup(ident.Name); obj != nil {
-			if debugResolve {
-				r.trace("resolved %v:%s to %v", ident.Pos(), ident.Name, obj)
-			}
-			assert(obj.Name != "", "obj with no name")
-			// Identifiers (for receiver type parameters) are written to the scope,
-			// but never set as the resolved object. See issue #50956.
-			if _, ok := obj.Decl.(*ast.Ident); !ok {
-				ident.Obj = obj
-			}
-			return
-		}
-	}
-	// all local scopes are known, so any unresolved identifier
-	// must be found either in the file scope, package scope
-	// (perhaps in another file), or universe scope --- collect
-	// them so that they can be resolved later
-	if collectUnresolved {
-		ident.Obj = unresolved
-		r.unresolved = append(r.unresolved, ident)
-	}
-}
-
-func (r *resolver) walkExprs(list []ast.Expr) {
-	for _, node := range list {
-		ast.Walk(r, node)
-	}
-}
-
-func (r *resolver) walkLHS(list []ast.Expr) {
-	for _, expr := range list {
-		expr := unparen(expr)
-		if _, ok := expr.(*ast.Ident); !ok && expr != nil {
-			ast.Walk(r, expr)
-		}
-	}
-}
-
-func (r *resolver) walkStmts(list []ast.Stmt) {
-	for _, stmt := range list {
-		ast.Walk(r, stmt)
-	}
-}
-
-func (r *resolver) Visit(node ast.Node) ast.Visitor {
-	if debugResolve && node != nil {
-		r.trace("node %T@%v", node, node.Pos())
-	}
-
-	switch n := node.(type) {
-
-	// Expressions.
-	case *ast.Ident:
-		r.resolve(n, true)
-
-	case *ast.FuncLit:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkFuncType(n.Type)
-		r.walkBody(n.Body)
-
-	case *ast.SelectorExpr:
-		ast.Walk(r, n.X)
-		// Note: don't try to resolve n.Sel, as we don't support qualified
-		// resolution.
-
-	case *ast.StructType:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkFieldList(n.Fields, ast.Var)
-
-	case *ast.FuncType:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkFuncType(n)
-
-	case *ast.CompositeLit:
-		if n.Type != nil {
-			ast.Walk(r, n.Type)
-		}
-		for _, e := range n.Elts {
-			if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
-				// See issue #45160: try to resolve composite lit keys, but don't
-				// collect them as unresolved if resolution failed. This replicates
-				// existing behavior when resolving during parsing.
-				if ident, _ := kv.Key.(*ast.Ident); ident != nil {
-					r.resolve(ident, false)
-				} else {
-					ast.Walk(r, kv.Key)
-				}
-				ast.Walk(r, kv.Value)
-			} else {
-				ast.Walk(r, e)
-			}
-		}
-
-	case *ast.InterfaceType:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkFieldList(n.Methods, ast.Fun)
-
-	// Statements
-	case *ast.LabeledStmt:
-		r.declare(n, nil, r.labelScope, ast.Lbl, n.Label)
-		ast.Walk(r, n.Stmt)
-
-	case *ast.AssignStmt:
-		r.walkExprs(n.Rhs)
-		if n.Tok == token.DEFINE {
-			r.shortVarDecl(n)
-		} else {
-			r.walkExprs(n.Lhs)
-		}
-
-	case *ast.BranchStmt:
-		// add to list of unresolved targets
-		if n.Tok != token.FALLTHROUGH && n.Label != nil {
-			depth := len(r.targetStack) - 1
-			r.targetStack[depth] = append(r.targetStack[depth], n.Label)
-		}
-
-	case *ast.BlockStmt:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkStmts(n.List)
-
-	case *ast.IfStmt:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		if n.Init != nil {
-			ast.Walk(r, n.Init)
-		}
-		ast.Walk(r, n.Cond)
-		ast.Walk(r, n.Body)
-		if n.Else != nil {
-			ast.Walk(r, n.Else)
-		}
-
-	case *ast.CaseClause:
-		r.walkExprs(n.List)
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		r.walkStmts(n.Body)
-
-	case *ast.SwitchStmt:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		if n.Init != nil {
-			ast.Walk(r, n.Init)
-		}
-		if n.Tag != nil {
-			// The scope below reproduces some unnecessary behavior of the parser,
-			// opening an extra scope in case this is a type switch. It's not needed
-			// for expression switches.
-			// TODO: remove this once we've matched the parser resolution exactly.
-			if n.Init != nil {
-				r.openScope(n.Tag.Pos())
-				defer r.closeScope()
-			}
-			ast.Walk(r, n.Tag)
-		}
-		if n.Body != nil {
-			r.walkStmts(n.Body.List)
-		}
-
-	case *ast.TypeSwitchStmt:
-		if n.Init != nil {
-			r.openScope(n.Pos())
-			defer r.closeScope()
-			ast.Walk(r, n.Init)
-		}
-		r.openScope(n.Assign.Pos())
-		defer r.closeScope()
-		ast.Walk(r, n.Assign)
-		// s.Body consists only of case clauses, so does not get its own
-		// scope.
-		if n.Body != nil {
-			r.walkStmts(n.Body.List)
-		}
-
-	case *ast.CommClause:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		if n.Comm != nil {
-			ast.Walk(r, n.Comm)
-		}
-		r.walkStmts(n.Body)
-
-	case *ast.SelectStmt:
-		// as for switch statements, select statement bodies don't get their own
-		// scope.
-		if n.Body != nil {
-			r.walkStmts(n.Body.List)
-		}
-
-	case *ast.ForStmt:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		if n.Init != nil {
-			ast.Walk(r, n.Init)
-		}
-		if n.Cond != nil {
-			ast.Walk(r, n.Cond)
-		}
-		if n.Post != nil {
-			ast.Walk(r, n.Post)
-		}
-		ast.Walk(r, n.Body)
-
-	case *ast.RangeStmt:
-		r.openScope(n.Pos())
-		defer r.closeScope()
-		ast.Walk(r, n.X)
-		var lhs []ast.Expr
-		if n.Key != nil {
-			lhs = append(lhs, n.Key)
-		}
-		if n.Value != nil {
-			lhs = append(lhs, n.Value)
-		}
-		if len(lhs) > 0 {
-			if n.Tok == token.DEFINE {
-				// Note: we can't exactly match the behavior of object resolution
-				// during the parsing pass here, as it uses the position of the RANGE
-				// token for the RHS OpPos. That information is not contained within
-				// the AST.
-				as := &ast.AssignStmt{
-					Lhs:    lhs,
-					Tok:    token.DEFINE,
-					TokPos: n.TokPos,
-					Rhs:    []ast.Expr{&ast.UnaryExpr{Op: token.RANGE, X: n.X}},
-				}
-				// TODO(rFindley): this walkLHS reproduced the parser resolution, but
-				// is it necessary? By comparison, for a normal AssignStmt we don't
-				// walk the LHS in case there is an invalid identifier list.
-				r.walkLHS(lhs)
-				r.shortVarDecl(as)
-			} else {
-				r.walkExprs(lhs)
-			}
-		}
-		ast.Walk(r, n.Body)
-
-	// Declarations
-	case *ast.GenDecl:
-		switch n.Tok {
-		case token.CONST, token.VAR:
-			for i, spec := range n.Specs {
-				spec := spec.(*ast.ValueSpec)
-				kind := ast.Con
-				if n.Tok == token.VAR {
-					kind = ast.Var
-				}
-				r.walkExprs(spec.Values)
-				if spec.Type != nil {
-					ast.Walk(r, spec.Type)
-				}
-				r.declare(spec, i, r.topScope, kind, spec.Names...)
-			}
-		case token.TYPE:
-			for _, spec := range n.Specs {
-				spec := spec.(*ast.TypeSpec)
-				// Go spec: The scope of a type identifier declared inside a function begins
-				// at the identifier in the TypeSpec and ends at the end of the innermost
-				// containing block.
-				r.declare(spec, nil, r.topScope, ast.Typ, spec.Name)
-				if spec.TypeParams != nil {
-					r.openScope(spec.Pos())
-					defer r.closeScope()
-					r.walkTParams(spec.TypeParams)
-				}
-				ast.Walk(r, spec.Type)
-			}
-		}
-
-	case *ast.FuncDecl:
-		// Open the function scope.
-		r.openScope(n.Pos())
-		defer r.closeScope()
-
-		r.walkRecv(n.Recv)
-
-		// Type parameters are walked normally: they can reference each other, and
-		// can be referenced by normal parameters.
-		if n.Type.TypeParams != nil {
-			r.walkTParams(n.Type.TypeParams)
-			// TODO(rFindley): need to address receiver type parameters.
-		}
-
-		// Resolve and declare parameters in a specific order to get duplicate
-		// declaration errors in the correct location.
-		r.resolveList(n.Type.Params)
-		r.resolveList(n.Type.Results)
-		r.declareList(n.Recv, ast.Var)
-		r.declareList(n.Type.Params, ast.Var)
-		r.declareList(n.Type.Results, ast.Var)
-
-		r.walkBody(n.Body)
-		if n.Recv == nil && n.Name.Name != "init" {
-			r.declare(n, nil, r.pkgScope, ast.Fun, n.Name)
-		}
-
-	default:
-		return r
-	}
-
-	return nil
-}
-
-func (r *resolver) walkFuncType(typ *ast.FuncType) {
-	// typ.TypeParams must be walked separately for FuncDecls.
-	r.resolveList(typ.Params)
-	r.resolveList(typ.Results)
-	r.declareList(typ.Params, ast.Var)
-	r.declareList(typ.Results, ast.Var)
-}
-
-func (r *resolver) resolveList(list *ast.FieldList) {
-	if list == nil {
-		return
-	}
-	for _, f := range list.List {
-		if f.Type != nil {
-			ast.Walk(r, f.Type)
-		}
-	}
-}
-
-func (r *resolver) declareList(list *ast.FieldList, kind ast.ObjKind) {
-	if list == nil {
-		return
-	}
-	for _, f := range list.List {
-		r.declare(f, nil, r.topScope, kind, f.Names...)
-	}
-}
-
-func (r *resolver) walkRecv(recv *ast.FieldList) {
-	// If our receiver has receiver type parameters, we must declare them before
-	// trying to resolve the rest of the receiver, and avoid re-resolving the
-	// type parameter identifiers.
-	if recv == nil || len(recv.List) == 0 {
-		return // nothing to do
-	}
-	typ := recv.List[0].Type
-	if ptr, ok := typ.(*ast.StarExpr); ok {
-		typ = ptr.X
-	}
-
-	var declareExprs []ast.Expr // exprs to declare
-	var resolveExprs []ast.Expr // exprs to resolve
-	switch typ := typ.(type) {
-	case *ast.IndexExpr:
-		declareExprs = []ast.Expr{typ.Index}
-		resolveExprs = append(resolveExprs, typ.X)
-	case *ast.IndexListExpr:
-		declareExprs = typ.Indices
-		resolveExprs = append(resolveExprs, typ.X)
-	default:
-		resolveExprs = append(resolveExprs, typ)
-	}
-	for _, expr := range declareExprs {
-		if id, _ := expr.(*ast.Ident); id != nil {
-			r.declare(expr, nil, r.topScope, ast.Typ, id)
-		} else {
-			// The receiver type parameter expression is invalid, but try to resolve
-			// it anyway for consistency.
-			resolveExprs = append(resolveExprs, expr)
-		}
-	}
-	for _, expr := range resolveExprs {
-		if expr != nil {
-			ast.Walk(r, expr)
-		}
-	}
-	// The receiver is invalid, but try to resolve it anyway for consistency.
-	for _, f := range recv.List[1:] {
-		if f.Type != nil {
-			ast.Walk(r, f.Type)
-		}
-	}
-}
-
-func (r *resolver) walkFieldList(list *ast.FieldList, kind ast.ObjKind) {
-	if list == nil {
-		return
-	}
-	r.resolveList(list)
-	r.declareList(list, kind)
-}
-
-// walkTParams is like walkFieldList, but declares type parameters eagerly so
-// that they may be resolved in the constraint expressions held in the field
-// Type.
-func (r *resolver) walkTParams(list *ast.FieldList) {
-	r.declareList(list, ast.Typ)
-	r.resolveList(list)
-}
-
-func (r *resolver) walkBody(body *ast.BlockStmt) {
-	if body == nil {
-		return
-	}
-	r.openLabelScope()
-	defer r.closeLabelScope()
-	r.walkStmts(body.List)
-}
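
The resolver above is what populated ast.Ident.Obj during parsing. As a small standalone illustration (standard library only, not code from this repository), the stdlib parser performs the same resolution by default and skips it when parser.SkipObjectResolution is passed:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	const src = `package p; var x = 1; var y = x`
	// Parse with default mode: Ident.Obj fields are filled in.
	f, err := parser.ParseFile(token.NewFileSet(), "", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok && id.Obj != nil {
			fmt.Printf("%s resolves to a %s declaration\n", id.Name, id.Obj.Kind)
		}
		return true
	})
}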
diff --git a/internal/backport/go/parser/resolver_test.go b/internal/backport/go/parser/resolver_test.go
deleted file mode 100644
index 8dd9172..0000000
--- a/internal/backport/go/parser/resolver_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parser
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/internal/typeparams"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-	"os"
-	"path/filepath"
-	"strings"
-	"testing"
-)
-
-// TestResolution checks that identifiers are resolved to the declarations
-// annotated in the source, by comparing the positions of the resulting
-// Ident.Obj.Decl to positions marked in the source via special comments.
-//
-// In the test source, any comment prefixed with '=' or '@' (or both) marks the
-// previous token position as the declaration ('=') or a use ('@') of an
-// identifier. The text following '=' and '@' in the comment string is the
-// label to use for the location.  Declaration labels must be unique within the
-// file, and use labels must refer to an existing declaration label. It's OK
-// for a comment to denote both the declaration and use of a label (e.g.
-// '=@foo'). Leading and trailing whitespace is ignored. Any comment not
-// beginning with '=' or '@' is ignored.
-func TestResolution(t *testing.T) {
-	dir := filepath.Join("testdata", "resolution")
-	fis, err := os.ReadDir(dir)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	for _, fi := range fis {
-		t.Run(fi.Name(), func(t *testing.T) {
-			fset := token.NewFileSet()
-			path := filepath.Join(dir, fi.Name())
-			src := readFile(path) // panics on failure
-			var mode Mode
-			if !strings.HasSuffix(path, ".go2") {
-				mode |= typeparams.DisallowParsing
-			}
-			file, err := ParseFile(fset, path, src, mode)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			// Compare the positions of objects resolved during parsing (fromParser)
-			// to those annotated in source comments (fromComments).
-
-			handle := fset.File(file.Package)
-			fromParser := declsFromParser(file)
-			fromComments := declsFromComments(handle, src)
-
-			pos := func(pos token.Pos) token.Position {
-				p := handle.Position(pos)
-				// The file name is implied by the subtest, so remove it to avoid
-				// clutter in error messages.
-				p.Filename = ""
-				return p
-			}
-			for k, want := range fromComments {
-				if got := fromParser[k]; got != want {
-					t.Errorf("%s resolved to %s, want %s", pos(k), pos(got), pos(want))
-				}
-				delete(fromParser, k)
-			}
-			// What remains in fromParser are unexpected resolutions.
-			for k, got := range fromParser {
-				t.Errorf("%s resolved to %s, want no object", pos(k), pos(got))
-			}
-		})
-	}
-}
-
-// declsFromParser walks the file and collects the map associating an
-// identifier position with its declaration position.
-func declsFromParser(file *ast.File) map[token.Pos]token.Pos {
-	objmap := map[token.Pos]token.Pos{}
-	ast.Inspect(file, func(node ast.Node) bool {
-		// Ignore blank identifiers to reduce noise.
-		if ident, _ := node.(*ast.Ident); ident != nil && ident.Obj != nil && ident.Name != "_" {
-			objmap[ident.Pos()] = ident.Obj.Pos()
-		}
-		return true
-	})
-	return objmap
-}
-
-// declsFromComments looks at comments annotating uses and declarations, and
-// maps each identifier use to its corresponding declaration. See the
-// description of these annotations in the documentation for TestResolution.
-func declsFromComments(handle *token.File, src []byte) map[token.Pos]token.Pos {
-	decls, uses := positionMarkers(handle, src)
-
-	objmap := make(map[token.Pos]token.Pos)
-	// Join decls and uses on name, to build the map of use->decl.
-	for name, posns := range uses {
-		declpos, ok := decls[name]
-		if !ok {
-			panic(fmt.Sprintf("missing declaration for %s", name))
-		}
-		for _, pos := range posns {
-			objmap[pos] = declpos
-		}
-	}
-	return objmap
-}
-
-// positionMarkers extracts named positions from the source denoted by comments
-// prefixed with '=' (declarations) and '@' (uses): for example '@foo' or
-// '=@bar'. It returns a map of name->position for declarations, and
-// name->position(s) for uses.
-func positionMarkers(handle *token.File, src []byte) (decls map[string]token.Pos, uses map[string][]token.Pos) {
-	var s scanner.Scanner
-	s.Init(handle, src, nil, scanner.ScanComments)
-	decls = make(map[string]token.Pos)
-	uses = make(map[string][]token.Pos)
-	var prev token.Pos // position of last non-comment, non-semicolon token
-
-scanFile:
-	for {
-		pos, tok, lit := s.Scan()
-		switch tok {
-		case token.EOF:
-			break scanFile
-		case token.COMMENT:
-			name, decl, use := annotatedObj(lit)
-			if len(name) > 0 {
-				if decl {
-					if _, ok := decls[name]; ok {
-						panic(fmt.Sprintf("duplicate declaration markers for %s", name))
-					}
-					decls[name] = prev
-				}
-				if use {
-					uses[name] = append(uses[name], prev)
-				}
-			}
-		case token.SEMICOLON:
-			// ignore automatically inserted semicolon
-			if lit == "\n" {
-				continue scanFile
-			}
-			fallthrough
-		default:
-			prev = pos
-		}
-	}
-	return decls, uses
-}
-
-func annotatedObj(lit string) (name string, decl, use bool) {
-	if lit[1] == '*' {
-		lit = lit[:len(lit)-2] // strip trailing */
-	}
-	lit = strings.TrimSpace(lit[2:])
-
-scanLit:
-	for idx, r := range lit {
-		switch r {
-		case '=':
-			decl = true
-		case '@':
-			use = true
-		default:
-			name = lit[idx:]
-			break scanLit
-		}
-	}
-	return
-}
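
For concreteness, a hypothetical testdata snippet in the annotation format described by TestResolution above ('=' marks a declaration, '@' marks a use, '=@' marks both on the same token; the label name "x" here is made up) might look like:

package p

func f() {
	x /* =@x */ := 1
	_ = x // @x
	// the use on the line above must resolve to the declaration marked =@x
}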
diff --git a/internal/backport/go/parser/short_test.go b/internal/backport/go/parser/short_test.go
deleted file mode 100644
index ada2caa..0000000
--- a/internal/backport/go/parser/short_test.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains test cases for short valid and invalid programs.
-
-package parser
-
-import (
-	"golang.org/x/website/internal/backport/go/internal/typeparams"
-	"testing"
-)
-
-var valids = []string{
-	"package p\n",
-	`package p;`,
-	`package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
-	`package p; func f() { if f(T{}) {} };`,
-	`package p; func f() { _ = <-chan int(nil) };`,
-	`package p; func f() { _ = (<-chan int)(nil) };`,
-	`package p; func f() { _ = (<-chan <-chan int)(nil) };`,
-	`package p; func f() { _ = <-chan <-chan <-chan <-chan <-int(nil) };`,
-	`package p; func f(func() func() func());`,
-	`package p; func f(...T);`,
-	`package p; func f(float, ...int);`,
-	`package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
-	`package p; func f(int,) {};`,
-	`package p; func f(...int,) {};`,
-	`package p; func f(x ...int,) {};`,
-	`package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
-	`package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
-	`package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
-	`package p; var a = T{{1, 2}, {3, 4}}`,
-	`package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
-	`package p; func f() { select { case x := (<-c): } };`,
-	`package p; func f() { if ; true {} };`,
-	`package p; func f() { switch ; {} };`,
-	`package p; func f() { for _ = range "foo" + "bar" {} };`,
-	`package p; func f() { var s []int; g(s[:], s[i:], s[:j], s[i:j], s[i:j:k], s[:j:k]) };`,
-	`package p; var ( _ = (struct {*T}).m; _ = (interface {T}).m )`,
-	`package p; func ((T),) m() {}`,
-	`package p; func ((*T),) m() {}`,
-	`package p; func (*(T),) m() {}`,
-	`package p; func _(x []int) { for range x {} }`,
-	`package p; func _() { if [T{}.n]int{} {} }`,
-	`package p; func _() { map[int]int{}[0]++; map[int]int{}[0] += 1 }`,
-	`package p; func _(x interface{f()}) { interface{f()}(x).f() }`,
-	`package p; func _(x chan int) { chan int(x) <- 0 }`,
-	`package p; const (x = 0; y; z)`, // issue 9639
-	`package p; var _ = map[P]int{P{}:0, {}:1}`,
-	`package p; var _ = map[*P]int{&P{}:0, {}:1}`,
-	`package p; type T = int`,
-	`package p; type (T = p.T; _ = struct{}; x = *T)`,
-	`package p; type T (*int)`,
-	`package p; type _ struct{ ((int)) }`,
-	`package p; type _ struct{ (*(int)) }`,
-	`package p; type _ struct{ ([]byte) }`, // disallowed by type-checker
-	`package p; var _ = func()T(nil)`,
-	`package p; func _(T (P))`,
-	`package p; func _(T []E)`,
-	`package p; func _(T [P]E)`,
-	`package p; type _ [A+B]struct{}`,
-	`package p; func (R) _()`,
-	`package p; type _ struct{ f [n]E }`,
-	`package p; type _ struct{ f [a+b+c+d]E }`,
-	`package p; type I1 interface{}; type I2 interface{ I1 }`,
-}
-
-// validWithTParamsOnly holds source code examples that are valid if
-// parseTypeParams is set, but invalid otherwise. When checking with
-// parseTypeParams set, errors are ignored.
-var validWithTParamsOnly = []string{
-	`package p; type _ []T[ /* ERROR "expected ';', found '\['" */ int]`,
-	`package p; type T[P any /* ERROR "expected ']', found any" */ ] struct { P }`,
-	`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ ] struct { P }`,
-	`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ [P]] struct { P }`,
-	`package p; type T[P1, /* ERROR "unexpected comma" */ P2 any] struct { P1; f []P2 }`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any]()()`,
-	`package p; func _(T (P))`,
-	`package p; func f[ /* ERROR "expected '\(', found '\['" */ A, B any](); func _() { _ = f[int, int] }`,
-	`package p; func _(x /* ERROR "mixed named and unnamed parameters" */ T[P1, P2, P3])`,
-	`package p; func _(x /* ERROR "mixed named and unnamed parameters" */ p.T[Q])`,
-	`package p; func _(p.T[ /* ERROR "missing ',' in parameter list" */ Q])`,
-	`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {},] struct{}`,
-	`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {}] struct{}`,
-	`package p; type _[A, /* ERROR "unexpected comma" */  B any,] struct{}`,
-	`package p; type _[A, /* ERROR "unexpected comma" */ B any] struct{}`,
-	`package p; type _[A any /* ERROR "expected ']', found any" */,] struct{}`,
-	`package p; type _[A any /* ERROR "expected ']', found any" */ ]struct{}`,
-	`package p; type _[A any /* ERROR "expected ']', found any" */ ] struct{ A }`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any]()`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any](x T)`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1, T2 any](x T)`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B any](a A) B`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C](a A) B`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C[A, B]](a A) B`,
-
-	`package p; type _[A, /* ERROR "unexpected comma" */ B any] interface { _(a A) B }`,
-	`package p; type _[A, /* ERROR "unexpected comma" */ B C[A, B]] interface { _(a A) B }`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1, T2 interface{}](x T1) T2`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1 interface{ m() }, T2, T3 interface{}](x T1, y T3) T2`,
-	`package p; var _ = [ /* ERROR "expected expression" */ ]T[int]{}`,
-	`package p; var _ = [ /* ERROR "expected expression" */ 10]T[int]{}`,
-	`package p; var _ = func /* ERROR "expected expression" */ ()T[int]{}`,
-	`package p; var _ = map /* ERROR "expected expression" */ [T[int]]T[int]{}`,
-	`package p; var _ = chan /* ERROR "expected expression" */ T[int](x)`,
-	`package p; func _(_ T[ /* ERROR "missing ',' in parameter list" */ P], T P) T[P]`,
-	`package p; var _ T[ /* ERROR "expected ';', found '\['" */ chan int]`,
-
-	// TODO(rfindley) this error message could be improved.
-	`package p; func (_ /* ERROR "mixed named and unnamed parameters" */ R[P]) _(x T)`,
-	`package p; func (_ /* ERROR "mixed named and unnamed parameters" */ R[ P, Q]) _(x T)`,
-
-	`package p; func (R[P] /* ERROR "missing element type" */ ) _()`,
-	`package p; func _(T[P] /* ERROR "missing element type" */ )`,
-	`package p; func _(T[P1, /* ERROR "expected ']', found ','" */ P2, P3 ])`,
-	`package p; func _(T[P] /* ERROR "missing element type" */ ) T[P]`,
-	`package p; type _ struct{ T[P] /* ERROR "missing element type" */ }`,
-	`package p; type _ struct{ T[struct /* ERROR "expected expression" */ {a, b, c int}] }`,
-	`package p; type _ interface{int| /* ERROR "expected ';'" */ float32; bool; m(); string;}`,
-	`package p; type I1[T any /* ERROR "expected ']', found any" */ ] interface{}; type I2 interface{ I1[int] }`,
-	`package p; type I1[T any /* ERROR "expected ']', found any" */ ] interface{}; type I2[T any] interface{ I1[T] }`,
-	`package p; type _ interface { N[ /* ERROR "expected ';', found '\['" */ T] }`,
-	`package p; type T[P any /* ERROR "expected ']'" */ ] = T0`,
-}
-
-func TestValid(t *testing.T) {
-	t.Run("no tparams", func(t *testing.T) {
-		for _, src := range valids {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
-		}
-	})
-	t.Run("tparams", func(t *testing.T) {
-		for _, src := range valids {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
-		}
-		for _, src := range validWithTParamsOnly {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
-		}
-	})
-}
-
-// TestSingle is useful to track down a problem with a single short test program.
-func TestSingle(t *testing.T) {
-	const src = `package p; var _ = T{}`
-	checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
-}
-
-var invalids = []string{
-	`foo /* ERROR "expected 'package'" */ !`,
-	`package p; func f() { if { /* ERROR "missing condition" */ } };`,
-	`package p; func f() { if ; /* ERROR "missing condition" */ {} };`,
-	`package p; func f() { if f(); /* ERROR "missing condition" */ {} };`,
-	`package p; func f() { if _ = range /* ERROR "expected operand" */ x; true {} };`,
-	`package p; func f() { switch _ /* ERROR "expected switch expression" */ = range x; true {} };`,
-	`package p; func f() { for _ = range x ; /* ERROR "expected '{'" */ ; {} };`,
-	`package p; func f() { for ; ; _ = range /* ERROR "expected operand" */ x {} };`,
-	`package p; func f() { for ; _ /* ERROR "expected boolean or range expression" */ = range x ; {} };`,
-	`package p; func f() { switch t = /* ERROR "expected ':=', found '='" */ t.(type) {} };`,
-	`package p; func f() { switch t /* ERROR "expected switch expression" */ , t = t.(type) {} };`,
-	`package p; func f() { switch t /* ERROR "expected switch expression" */ = t.(type), t {} };`,
-	`package p; var a = [ /* ERROR "expected expression" */ 1]int;`,
-	`package p; var a = [ /* ERROR "expected expression" */ ...]int;`,
-	`package p; var a = struct /* ERROR "expected expression" */ {}`,
-	`package p; var a = func /* ERROR "expected expression" */ ();`,
-	`package p; var a = interface /* ERROR "expected expression" */ {}`,
-	`package p; var a = [ /* ERROR "expected expression" */ ]int`,
-	`package p; var a = map /* ERROR "expected expression" */ [int]int`,
-	`package p; var a = chan /* ERROR "expected expression" */ int;`,
-	`package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
-	`package p; var a = ( /* ERROR "expected expression" */ []int);`,
-	`package p; var a = <- /* ERROR "expected expression" */ chan int;`,
-	`package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
-	`package p; func f() { _ = (<-<- /* ERROR "expected 'chan'" */ chan int)(nil) };`,
-	`package p; func f() { _ = (<-chan<-chan<-chan<-chan<-chan<- /* ERROR "expected channel type" */ int)(nil) };`,
-	`package p; func f() { var t []int; t /* ERROR "expected identifier on left side of :=" */ [0] := 0 };`,
-	`package p; func f() { if x := g(); x /* ERROR "expected boolean expression" */ = 0 {}};`,
-	`package p; func f() { _ = x = /* ERROR "expected '=='" */ 0 {}};`,
-	`package p; func f() { _ = 1 == func()int { var x bool; x = x = /* ERROR "expected '=='" */ true; return x }() };`,
-	`package p; func f() { var s []int; _ = s[] /* ERROR "expected operand" */ };`,
-	`package p; func f() { var s []int; _ = s[i:j: /* ERROR "3rd index required" */ ] };`,
-	`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :k] };`,
-	`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :] };`,
-	`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ :] };`,
-	`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ ::] };`,
-	`package p; func f() { var s []int; _ = s[i:j:k: /* ERROR "expected ']'" */ l] };`,
-	`package p; func f() { for x /* ERROR "boolean or range expression" */ = []string {} }`,
-	`package p; func f() { for x /* ERROR "boolean or range expression" */ := []string {} }`,
-	`package p; func f() { for i /* ERROR "boolean or range expression" */ , x = []string {} }`,
-	`package p; func f() { for i /* ERROR "boolean or range expression" */ , x := []string {} }`,
-	`package p; func f() { go f /* ERROR HERE "function must be invoked" */ }`,
-	`package p; func f() { defer func() {} /* ERROR HERE "function must be invoked" */ }`,
-	`package p; func f() { go func() { func() { f(x func /* ERROR "missing ','" */ (){}) } } }`,
-	`package p; func _() (type /* ERROR "found 'type'" */ T)(T)`,
-	`package p; func (type /* ERROR "found 'type'" */ T)(T) _()`,
-	`package p; type _[A+B, /* ERROR "unexpected comma" */ ] int`,
-
-	// TODO(rfindley): this error should be positioned on the ':'
-	`package p; var a = a[[]int:[ /* ERROR "expected expression" */ ]int];`,
-
-	// TODO(rfindley): the compiler error is better here: "cannot parenthesize embedded type"
-	// TODO(rfindley): confirm that parenthesized types should now be accepted.
-	// `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
-
-	// issue 8656
-	`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
-
-	// issue 9639
-	`package p; var x /* ERROR "missing variable type or initialization" */ , y, z;`,
-	`package p; const x /* ERROR "missing constant value" */ ;`,
-	`package p; const x /* ERROR "missing constant value" */ int;`,
-	`package p; const (x = 0; y; z /* ERROR "missing constant value" */ int);`,
-
-	// issue 12437
-	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ }{};`,
-	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ y float }{};`,
-
-	// issue 11611
-	`package p; type _ struct { int, } /* ERROR "expected 'IDENT', found '}'" */ ;`,
-	`package p; type _ struct { int, float } /* ERROR "expected type, found '}'" */ ;`,
-
-	// issue 13475
-	`package p; func f() { if true {} else ; /* ERROR "expected if statement or block" */ }`,
-	`package p; func f() { if true {} else defer /* ERROR "expected if statement or block" */ f() }`,
-}
-
-// invalidNoTParamErrs holds invalid source code examples annotated with the
-// error messages produced when ParseTypeParams is not set.
-var invalidNoTParamErrs = []string{
-	`package p; type _[_ any /* ERROR "expected ']', found any" */ ] int; var _ = T[]{}`,
-	`package p; type T[P any /* ERROR "expected ']', found any" */ ] = T0`,
-	`package p; var _ func[ /* ERROR "expected '\(', found '\['" */ T any](T)`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ ]()`,
-	`package p; type _[A, /* ERROR "unexpected comma" */] struct{ A }`,
-	`package p; func _[ /* ERROR "expected '\(', found '\['" */ type P, *Q interface{}]()`,
-
-	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B any](a A) B`,
-	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B C](a A) B`,
-	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B C[A, B]](a A) B`,
-
-	`package p; func(*T[ /* ERROR "missing ',' in parameter list" */ e, e]) _()`,
-}
-
-// invalidTParamErrs holds invalid source code examples annotated with the
-// error messages produced when ParseTypeParams is set.
-var invalidTParamErrs = []string{
-	`package p; type _[_ any] int; var _ = T[] /* ERROR "expected operand" */ {}`,
-	`package p; var _ func[ /* ERROR "must have no type parameters" */ T any](T)`,
-	`package p; func _[]/* ERROR "empty type parameter list" */()`,
-
-	// TODO(rfindley) a better location would be after the ']'
-	`package p; type _[A /* ERROR "all type parameters must be named" */ ,] struct{ A }`,
-
-	// TODO(rfindley) this error is confusing.
-	`package p; func _[type /* ERROR "all type parameters must be named" */ P, *Q interface{}]()`,
-
-	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B any](a A) B`,
-	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B C](a A) B`,
-	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B C[A, B]](a A) B`,
-
-	`package p; func(*T[e, e /* ERROR "e redeclared" */ ]) _()`,
-}
-
-func TestInvalid(t *testing.T) {
-	t.Run("no tparams", func(t *testing.T) {
-		for _, src := range invalids {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
-		}
-		for _, src := range validWithTParamsOnly {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
-		}
-		for _, src := range invalidNoTParamErrs {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
-		}
-	})
-	t.Run("tparams", func(t *testing.T) {
-		for _, src := range invalids {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
-		}
-		for _, src := range invalidTParamErrs {
-			checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
-		}
-	})
-}
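
The /* ERROR "..." */ markers in the tables above are matched against the parser's reported errors by a helper in the also-deleted error test file. A hedged sketch of the underlying mechanism using only the standard library, where syntax errors arrive as a scanner.ErrorList:

package main

import (
	"fmt"
	"go/parser"
	"go/scanner"
	"go/token"
)

func main() {
	const src = `package p; func f() { if { } }` // "missing condition" expected here
	_, err := parser.ParseFile(token.NewFileSet(), "", src, parser.DeclarationErrors|parser.AllErrors)
	// With syntax errors present, err is a scanner.ErrorList holding every error.
	if list, ok := err.(scanner.ErrorList); ok {
		for _, e := range list {
			fmt.Println(e)
		}
	}
}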
diff --git a/internal/backport/go/parser/testdata/chans.go2 b/internal/backport/go/parser/testdata/chans.go2
deleted file mode 100644
index fad2bce..0000000
--- a/internal/backport/go/parser/testdata/chans.go2
+++ /dev/null
@@ -1,62 +0,0 @@
-package chans
-
-import "runtime"
-
-// Ranger returns a Sender and a Receiver. The Receiver provides a
-// Next method to retrieve values. The Sender provides a Send method
-// to send values and a Close method to stop sending values. The Next
-// method indicates when the Sender has been closed, and the Send
-// method indicates when the Receiver has been freed.
-//
-// This is a convenient way to exit a goroutine sending values when
-// the receiver stops reading them.
-func Ranger[T any]() (*Sender[T], *Receiver[T]) {
-	c := make(chan T)
-	d := make(chan bool)
-	s := &Sender[T]{values: c, done: d}
-	r := &Receiver[T]{values: c, done: d}
-	runtime.SetFinalizer(r, r.finalize)
-	return s, r
-}
-
-// A Sender is used to send values to a Receiver.
-type Sender[T any] struct {
-	values chan<- T
-	done <-chan bool
-}
-
-// Send sends a value to the receiver. It returns whether any more
-// values may be sent; if it returns false the value was not sent.
-func (s *Sender[T]) Send(v T) bool {
-	select {
-	case s.values <- v:
-		return true
-	case <-s.done:
-		return false
-	}
-}
-
-// Close tells the receiver that no more values will arrive.
-// After Close is called, the Sender may no longer be used.
-func (s *Sender[T]) Close() {
-	close(s.values)
-}
-
-// A Receiver receives values from a Sender.
-type Receiver[T any] struct {
-	values <-chan T
-	done chan<- bool
-}
-
-// Next returns the next value from the channel. The bool result
-// indicates whether the value is valid, or whether the Sender has
-// been closed and no more values will be received.
-func (r *Receiver[T]) Next() (T, bool) {
-	v, ok := <-r.values
-	return v, ok
-}
-
-// finalize is a finalizer for the receiver.
-func (r *Receiver[T]) finalize() {
-	close(r.done)
-}
diff --git a/internal/backport/go/parser/testdata/commas.src b/internal/backport/go/parser/testdata/commas.src
deleted file mode 100644
index e0603cf..0000000
--- a/internal/backport/go/parser/testdata/commas.src
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for error messages/parser synchronization
-// after missing commas.
-
-package p
-
-var _ = []int{
-	0/* ERROR HERE "missing ','" */
-}
-
-var _ = []int{
-	0,
-	1,
-	2,
-	3/* ERROR HERE "missing ','" */
-}
diff --git a/internal/backport/go/parser/testdata/interface.go2 b/internal/backport/go/parser/testdata/interface.go2
deleted file mode 100644
index 2ed9339..0000000
--- a/internal/backport/go/parser/testdata/interface.go2
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains test cases for interfaces containing
-// constraint elements.
-
-package p
-
-type _ interface {
-	m()
-	~int
-	~int|string
-	E
-}
-
-type _ interface {
-	m()
-	~int
-	int | string
-	int | ~string
-	~int | ~string
-}
-
-type _ interface {
-	m()
-	~int
-	T[int, string] | string
-	int | ~T[string, struct{}]
-	~int | ~string
-}
-
-type _ interface {
-	int
-	[]byte
-	[10]int
-	struct{}
-	*int
-	func()
-	interface{}
-	map[string]int
-	chan T
-	chan<- T
-	<-chan T
-	T[int]
-}
-
-type _ interface {
-	int | string
-	[]byte | string
-	[10]int | string
-	struct{} | string
-	*int | string
-	func() | string
-	interface{} | string
-	map[string]int | string
-	chan T | string
-	chan<- T | string
-	<-chan T | string
-	T[int] | string
-}
-
-type _ interface {
-	~int | string
-	~[]byte | string
-	~[10]int | string
-	~struct{} | string
-	~*int | string
-	~func() | string
-	~interface{} | string
-	~map[string]int | string
-	~chan T | string
-	~chan<- T | string
-	~<-chan T | string
-	~T[int] | string
-}
diff --git a/internal/backport/go/parser/testdata/issue11377.src b/internal/backport/go/parser/testdata/issue11377.src
deleted file mode 100644
index 1c43800..0000000
--- a/internal/backport/go/parser/testdata/issue11377.src
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for issue 11377: Better synchronization of
-// parser after certain syntax errors.
-
-package p
-
-func bad1() {
-    if f()) /* ERROR "expected ';', found '\)'" */ {
-        return
-    }
-}
-
-// There shouldn't be any errors down below.
-
-func F1() {}
-func F2() {}
-func F3() {}
-func F4() {}
-func F5() {}
-func F6() {}
-func F7() {}
-func F8() {}
-func F9() {}
-func F10() {}
diff --git a/internal/backport/go/parser/testdata/issue23434.src b/internal/backport/go/parser/testdata/issue23434.src
deleted file mode 100644
index 24a0832..0000000
--- a/internal/backport/go/parser/testdata/issue23434.src
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for issue 23434: Better synchronization of
-// parser after missing type. There should be exactly
-// one error each time, with no follow-on errors.
-
-package p
-
-func g() {
-	m := make(map[string]! /* ERROR "expected type, found '!'" */ )
-	for {
-		x := 1
-		print(x)
-	}
-}
-
-func f() {
-	m := make(map[string]) /* ERROR "expected type, found '\)'" */
-	for {
-		x := 1
-		print(x)
-	}
-}
diff --git a/internal/backport/go/parser/testdata/issue3106.src b/internal/backport/go/parser/testdata/issue3106.src
deleted file mode 100644
index 2db10be..0000000
--- a/internal/backport/go/parser/testdata/issue3106.src
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for issue 3106: Better synchronization of
-// parser after certain syntax errors.
-
-package main
-
-func f() {
-	var m Mutex
-	c := MakeCond(&m)
-	percent := 0
-	const step = 10
-	for i := 0; i < 5; i++ {
-		go func() {
-			for {
-				// Emulates some useful work.
-				time.Sleep(1e8)
-				m.Lock()
-				defer
-				if /* ERROR "expected ';', found 'if'" */ percent == 100 {
-					m.Unlock()
-					break
-				}
-				percent++
-				if percent % step == 0 {
-					//c.Signal()
-				}
-				m.Unlock()
-			}
-		}()
-	}
-	for {
-		m.Lock()
-		if percent == 0 || percent % step != 0 {
-			c.Wait()
-		}
-		fmt.Print(",")
-		if percent == 100 {
-			m.Unlock()
-			break
-		}
-		m.Unlock()
-	}
-}
diff --git a/internal/backport/go/parser/testdata/issue34946.src b/internal/backport/go/parser/testdata/issue34946.src
deleted file mode 100644
index 6bb15e1..0000000
--- a/internal/backport/go/parser/testdata/issue34946.src
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for issue 34946: Better synchronization of
-// parser for function declarations that start their
-// body's opening { on a new line.
-
-package p
-
-// accept Allman/BSD-style declaration but complain
-// (implicit semicolon between signature and body)
-func _() int
-{ /* ERROR "unexpected semicolon or newline before {" */
-	{ return 0 }
-}
-
-func _() {}
-
-func _(); { /* ERROR "unexpected semicolon or newline before {" */ }
-
-func _() {}
diff --git a/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go b/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go
deleted file mode 100644
index bb698be..0000000
--- a/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go
+++ /dev/null
@@ -1 +0,0 @@
-This file should not be parsed by ParseDir.
diff --git a/internal/backport/go/parser/testdata/issue44504.src b/internal/backport/go/parser/testdata/issue44504.src
deleted file mode 100644
index 7791f4a..0000000
--- a/internal/backport/go/parser/testdata/issue44504.src
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test case for issue 44504: panic due to duplicate resolution of slice/index
-// operands. We should not try to resolve a LHS expression with invalid syntax.
-
-package p
-
-func _() {
-  var items []bool
-  items[] /* ERROR "operand" */ = false
-}
diff --git a/internal/backport/go/parser/testdata/issue49174.go2 b/internal/backport/go/parser/testdata/issue49174.go2
deleted file mode 100644
index 77c1950..0000000
--- a/internal/backport/go/parser/testdata/issue49174.go2
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-func _[_ []int | int]() {}
-func _[_ int | []int]() {}
diff --git a/internal/backport/go/parser/testdata/issue49175.go2 b/internal/backport/go/parser/testdata/issue49175.go2
deleted file mode 100644
index a5ad30f..0000000
--- a/internal/backport/go/parser/testdata/issue49175.go2
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-type _[_ []t]t
-type _[_ [1]t]t
-
-func _[_ []t]() {}
-func _[_ [1]t]() {}
-
-type t [t /* ERROR "all type parameters must be named" */ [0]]t
diff --git a/internal/backport/go/parser/testdata/issue49482.go2 b/internal/backport/go/parser/testdata/issue49482.go2
deleted file mode 100644
index d8385be..0000000
--- a/internal/backport/go/parser/testdata/issue49482.go2
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-type (
-        // these need a comma to disambiguate
-        _[P *T,] struct{}
-        _[P *T, _ any] struct{}
-        _[P (*T),] struct{}
-        _[P (*T), _ any] struct{}
-        _[P (T),] struct{}
-        _[P (T), _ any] struct{}
-
-        // these parse as name followed by type
-        _[P *struct{}] struct{}
-        _[P (*struct{})] struct{}
-        _[P ([]int)] struct{}
-
-        // array declarations
-        _ [P(T)]struct{}
-        _ [P((T))]struct{}
-        _ [P * *T]struct{}
-        _ [P * T]struct{}
-        _ [P(*T)]struct{}
-        _ [P(**T)]struct{}
-        _ [P * T - T]struct{}
-        _ [P*T-T, /* ERROR "unexpected comma" */ ]struct{}
-        _ [10, /* ERROR "unexpected comma" */ ]struct{}
-
-        _[P *struct{}|int] struct{}
-        _[P *struct{}|int|string] struct{}
-)
diff --git a/internal/backport/go/parser/testdata/issue50427.go2 b/internal/backport/go/parser/testdata/issue50427.go2
deleted file mode 100644
index 1521459..0000000
--- a/internal/backport/go/parser/testdata/issue50427.go2
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-type T interface{ m[ /* ERROR "must have no type parameters" */ P any]() }
-
-func _(t T) {
-	var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = t
-}
-
-type S struct{}
-
-func (S) m[ /* ERROR "must have no type parameters" */ P any]() {}
-
-func _(s S) {
-	var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = s
-}
diff --git a/internal/backport/go/parser/testdata/linalg.go2 b/internal/backport/go/parser/testdata/linalg.go2
deleted file mode 100644
index 7ccb19c..0000000
--- a/internal/backport/go/parser/testdata/linalg.go2
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package linalg
-
-import "math"
-
-// Numeric is type bound that matches any numeric type.
-// It would likely be in a constraints package in the standard library.
-type Numeric interface {
-	~int|~int8|~int16|~int32|~int64|
-		~uint|~uint8|~uint16|~uint32|~uint64|~uintptr|
-		~float32|~float64|
-		~complex64|~complex128
-}
-
-func DotProduct[T Numeric](s1, s2 []T) T {
-	if len(s1) != len(s2) {
-		panic("DotProduct: slices of unequal length")
-	}
-	var r T
-	for i := range s1 {
-		r += s1[i] * s2[i]
-	}
-	return r
-}
-
-// NumericAbs matches numeric types with an Abs method.
-type NumericAbs[T any] interface {
-	Numeric
-
-	Abs() T
-}
-
-// AbsDifference computes the absolute value of the difference of
-// a and b, where the absolute value is determined by the Abs method.
-func AbsDifference[T NumericAbs](a, b T) T {
-	d := a - b
-	return d.Abs()
-}
-
-// OrderedNumeric is a type bound that matches numeric types that support the < operator.
-type OrderedNumeric interface {
-	~int|~int8|~int16|~int32|~int64|
-		~uint|~uint8|~uint16|~uint32|~uint64|~uintptr|
-		~float32|~float64
-}
-
-// Complex is a type bound that matches the two complex types, which do not have a < operator.
-type Complex interface {
-	~complex64|~complex128
-}
-
-// OrderedAbs is a helper type that defines an Abs method for
-// ordered numeric types.
-type OrderedAbs[T OrderedNumeric] T
-
-func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
-	if a < 0 {
-		return -a
-	}
-	return a
-}
-
-// ComplexAbs is a helper type that defines an Abs method for
-// complex types.
-type ComplexAbs[T Complex] T
-
-func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
-	r := float64(real(a))
-	i := float64(imag(a))
-	d := math.Sqrt(r * r + i * i)
-	return ComplexAbs[T](complex(d, 0))
-}
-
-func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
-	return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
-}
-
-func ComplexAbsDifference[T Complex](a, b T) T {
-	return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
-}
diff --git a/internal/backport/go/parser/testdata/map.go2 b/internal/backport/go/parser/testdata/map.go2
deleted file mode 100644
index 74c79ae..0000000
--- a/internal/backport/go/parser/testdata/map.go2
+++ /dev/null
@@ -1,109 +0,0 @@
-// Package orderedmap provides an ordered map, implemented as a binary tree.
-package orderedmap
-
-import "chans"
-
-// Map is an ordered map.
-type Map[K, V any] struct {
-	root    *node[K, V]
-	compare func(K, K) int
-}
-
-// node is the type of a node in the binary tree.
-type node[K, V any] struct {
-	key         K
-	val         V
-	left, right *node[K, V]
-}
-
-// New returns a new map.
-func New[K, V any](compare func(K, K) int) *Map[K, V] {
-        return &Map[K, V]{compare: compare}
-}
-
-// find looks up key in the map, and returns either a pointer
-// to the node holding key, or a pointer to the location where
-// such a node would go.
-func (m *Map[K, V]) find(key K) **node[K, V] {
-	pn := &m.root
-	for *pn != nil {
-		switch cmp := m.compare(key, (*pn).key); {
-		case cmp < 0:
-			pn = &(*pn).left
-		case cmp > 0:
-			pn = &(*pn).right
-		default:
-			return pn
-		}
-	}
-	return pn
-}
-
-// Insert inserts a new key/value into the map.
-// If the key is already present, the value is replaced.
-// Returns true if this is a new key, false if already present.
-func (m *Map[K, V]) Insert(key K, val V) bool {
-	pn := m.find(key)
-	if *pn != nil {
-		(*pn).val = val
-		return false
-	}
-        *pn = &node[K, V]{key: key, val: val}
-	return true
-}
-
-// Find returns the value associated with a key, or zero if not present.
-// The found result reports whether the key was found.
-func (m *Map[K, V]) Find(key K) (V, bool) {
-	pn := m.find(key)
-	if *pn == nil {
-		var zero V // see the discussion of zero values, above
-		return zero, false
-	}
-	return (*pn).val, true
-}
-
-// keyValue is a pair of key and value used when iterating.
-type keyValue[K, V any] struct {
-	key K
-	val V
-}
-
-// InOrder returns an iterator that does an in-order traversal of the map.
-func (m *Map[K, V]) InOrder() *Iterator[K, V] {
-	sender, receiver := chans.Ranger[keyValue[K, V]]()
-	var f func(*node[K, V]) bool
-	f = func(n *node[K, V]) bool {
-		if n == nil {
-			return true
-		}
-		// Stop sending values if sender.Send returns false,
-		// meaning that nothing is listening at the receiver end.
-		return f(n.left) &&
-                        // TODO
-			// sender.Send(keyValue[K, V]{n.key, n.val}) &&
-			f(n.right)
-	}
-	go func() {
-		f(m.root)
-		sender.Close()
-	}()
-	return &Iterator{receiver}
-}
-
-// Iterator is used to iterate over the map.
-type Iterator[K, V any] struct {
-	r *chans.Receiver[keyValue[K, V]]
-}
-
-// Next returns the next key and value pair, and a boolean indicating
-// whether they are valid or whether we have reached the end.
-func (it *Iterator[K, V]) Next() (K, V, bool) {
-	keyval, ok := it.r.Next()
-	if !ok {
-		var zerok K
-		var zerov V
-		return zerok, zerov, false
-	}
-	return keyval.key, keyval.val, true
-}
diff --git a/internal/backport/go/parser/testdata/metrics.go2 b/internal/backport/go/parser/testdata/metrics.go2
deleted file mode 100644
index ef1c66b..0000000
--- a/internal/backport/go/parser/testdata/metrics.go2
+++ /dev/null
@@ -1,58 +0,0 @@
-package metrics
-
-import "sync"
-
-type Metric1[T comparable] struct {
-	mu sync.Mutex
-	m  map[T]int
-}
-
-func (m *Metric1[T]) Add(v T) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	if m.m == nil {
-		m.m = make(map[T]int)
-	}
-	m[v]++
-}
-
-type key2[T1, T2 comparable] struct {
-	f1 T1
-	f2 T2
-}
-
-type Metric2[T1, T2 cmp2] struct {
-	mu sync.Mutex
-	m  map[key2[T1, T2]]int
-}
-
-func (m *Metric2[T1, T2]) Add(v1 T1, v2 T2) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	if m.m == nil {
-		m.m = make(map[key2[T1, T2]]int)
-	}
-	m[key[T1, T2]{v1, v2}]++
-}
-
-type key3[T1, T2, T3 comparable] struct {
-	f1 T1
-	f2 T2
-	f3 T3
-}
-
-type Metric3[T1, T2, T3 comparable] struct {
-	mu sync.Mutex
-	m  map[key3[T1, T2, T3]]int
-}
-
-func (m *Metric3[T1, T2, T3]) Add(v1 T1, v2 T2, v3 T3) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	if m.m == nil {
-		m.m = make(map[key3]int)
-	}
-	m[key[T1, T2, T3]{v1, v2, v3}]++
-}
-
-// Repeat for the maximum number of permitted arguments.
diff --git a/internal/backport/go/parser/testdata/resolution/issue45136.src b/internal/backport/go/parser/testdata/resolution/issue45136.src
deleted file mode 100644
index e1d63d8..0000000
--- a/internal/backport/go/parser/testdata/resolution/issue45136.src
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue45136
-
-type obj /* =@obj */ struct {
-	name /*=@name */ string
-}
-
-func _() {
-	var foo /* =@foo */ = "foo"
-	obj /* @obj */ ["foo"]
-	obj /* @obj */ .run()
-	obj /* @obj */ {
-		name: foo /* @foo */,
-	}
-	obj /* @obj */ {
-		name: "bar",
-	}.run()
-
-	var _ = File{key: obj /* @obj */ {}}
-	var _ = File{obj /* @obj */ {}}
-
-	[]obj /* @obj */ {foo /* @foo */}
-	x /* =@x1 */ := obj /* @obj */{}
-}
diff --git a/internal/backport/go/parser/testdata/resolution/issue45160.src b/internal/backport/go/parser/testdata/resolution/issue45160.src
deleted file mode 100644
index 6be933b..0000000
--- a/internal/backport/go/parser/testdata/resolution/issue45160.src
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package issue45160
-
-func mklink1 /* =@mklink1func */() {}
-
-func _() {
-	var tests /* =@tests */ = []dirLinkTest /* @dirLinkTest */ {
-		{
-			mklink1 /* @mklink1func */: func() {},
-			mklink2: func(link /* =@link */, target /* =@target */ string) error {
-				return nil
-			},
-		},
-	}
-}
-
-type dirLinkTest /* =@dirLinkTest */ struct {
-	mklink1 /* =@mklink1field */ func(string, string) error
-	mklink2 /* =@mklink2field */ func(string, string) error
-}
-
-func mklink2 /* =@mklink2func */() {}
diff --git a/internal/backport/go/parser/testdata/resolution/resolution.src b/internal/backport/go/parser/testdata/resolution/resolution.src
deleted file mode 100644
index a880dd1..0000000
--- a/internal/backport/go/parser/testdata/resolution/resolution.src
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package resolution
-
-func f /* =@fdecl */(n /* =@narg */ ast.Node) bool {
-		if n /* =@ninit */, ok /* =@ok */ := n /* @narg */ .(*ast.SelectorExpr); ok /* @ok */ {
-			sel = n /* @ninit */
-	}
-}
-
-type c /* =@cdecl */ map[token.Pos]resolvedObj
-
-func (v /* =@vdecl */ c /* @cdecl */) Visit(node /* =@nodearg */ ast.Node) (w /* =@w */ ast.Visitor) {}
-
-const (
-	basic /* =@basic */ = iota
-	labelOk // =@labelOk
-)
-
-type T /* =@T */ int
-
-func _(count /* =@count */ T /* @T */) {
-	x /* =@x1 */ := c /* @cdecl */{}
-	switch x /* =@x2 */ := x /* @x1 */; x /* =@x3 */ := x /* @x2 */.(type) {
-	case c /* @cdecl */:
-	default:
-	}
-loop /* =@loop */:
-	for {
-		if true {
-			break loop /* @loop */
-		}
-	}
-	select {
-	case err /* =@err1 */ := <-_:
-		return err /* @err1 */
-	case err /* =@err2 */ := <-_:
-		return err /* @err2 */
-	}
-
-	_ = func(p1 /* =@p1 */ int, p2 /* =@p2 */ p1) {
-		closed /* =@closed */ := p1 // @p1
-		shadowed /* =@shadowed1 */ := p2 // @p2
-		_ = func(shadowed /* =@shadowed2 */ p2 /* @p2 */) {
-			closed /* @closed */ = 1
-			shadowed /* @shadowed2 */ = 2
-		}
-	}
-}
-
-func (r /* =@r */ c /* @cdecl */) m(_ r) c /* @cdecl */ { return r /* @r */ }
-
-var cycle /* =@cycle */ = cycle /* @cycle */ + 1
-
-type chain /* =@chain */ struct {
-	next /* =@next */ *chain /* @chain */
-}
-
-func recursive /* =@recursive */() {
-	recursive /* @recursive */ ()
-}
diff --git a/internal/backport/go/parser/testdata/resolution/typeparams.go2 b/internal/backport/go/parser/testdata/resolution/typeparams.go2
deleted file mode 100644
index 7395ca2..0000000
--- a/internal/backport/go/parser/testdata/resolution/typeparams.go2
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package resolution
-
-type List /* =@List */ [E /* =@E */ any] []E // @E
-
-type Pair /* =@Pair */ [L /* =@L */, R /* =@R */ any] struct {
-	Left /* =@Left */ L // @L
-	Right /* =@Right */ R // @R
-	L /* =@Lfield */ int
-}
-
-var _ = Pair /* @Pair */ [int, string]{}
-
-type Addable /* =@Addable */ interface {
-	~int64|~float64
-}
-
-func Add /* =@AddDecl */[T /* =@T */ Addable /* @Addable */](l /* =@l */, r /* =@r */ T /* @T */) T /* @T */ {
-	var t /* =@t */ T /* @T */
-	return l /* @l */ + r /* @r */ + t /* @t */
-}
-
-type Receiver /* =@Receiver */[P /* =@P */ any] struct {}
-
-type RP /* =@RP1 */ struct{}
-
-// TODO(rFindley): make a decision on how/whether to resolve identifiers that
-// refer to receiver type parameters, as is the case for the 'P' result
-// parameter below.
-//
-// For now, we ensure that types are not incorrectly resolved when receiver
-// type parameters are in scope.
-func (r /* =@recv */ Receiver /* @Receiver */ [RP]) m(RP) RP {}
-
-func f /* =@f */[T1 /* =@T1 */ interface{~[]T2 /* @T2 */}, T2 /* =@T2 */ any](
-  x /* =@x */ T1 /* @T1 */, T1 /* =@T1_duplicate */ y,  // Note that this is a bug:
-                                                        // the duplicate T1 should
-							// not be allowed.
-  ){
-  // Note that duplicate short var declarations resolve to their alt declaration.
-  x /* @x */ := 0
-  y /* =@y */ := 0
-  T1 /* @T1 */ := 0
-  var t1var /* =@t1var */ T1 /* @T1 */
-}
-
-// From issue #39634
-func(*ph1[e, e])h(d)
diff --git a/internal/backport/go/parser/testdata/set.go2 b/internal/backport/go/parser/testdata/set.go2
deleted file mode 100644
index 0da6377..0000000
--- a/internal/backport/go/parser/testdata/set.go2
+++ /dev/null
@@ -1,31 +0,0 @@
-// Package set implements sets of any type.
-package set
-
-type Set[Elem comparable] map[Elem]struct{}
-
-func Make[Elem comparable]() Set[Elem] {
-	return make(Set(Elem))
-}
-
-func (s Set[Elem]) Add(v Elem) {
-	s[v] = struct{}{}
-}
-
-func (s Set[Elem]) Delete(v Elem) {
-	delete(s, v)
-}
-
-func (s Set[Elem]) Contains(v Elem) bool {
-	_, ok := s[v]
-	return ok
-}
-
-func (s Set[Elem]) Len() int {
-	return len(s)
-}
-
-func (s Set[Elem]) Iterate(f func(Elem)) {
-	for v := range s {
-		f(v)
-	}
-}
diff --git a/internal/backport/go/parser/testdata/slices.go2 b/internal/backport/go/parser/testdata/slices.go2
deleted file mode 100644
index e060212..0000000
--- a/internal/backport/go/parser/testdata/slices.go2
+++ /dev/null
@@ -1,31 +0,0 @@
-// Package slices implements various slice algorithms.
-package slices
-
-// Map turns a []T1 to a []T2 using a mapping function.
-func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
-	r := make([]T2, len(s))
-	for i, v := range s {
-		r[i] = f(v)
-	}
-	return r
-}
-
-// Reduce reduces a []T1 to a single value using a reduction function.
-func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
-	r := initializer
-	for _, v := range s {
-		r = f(r, v)
-	}
-	return r
-}
-
-// Filter filters values from a slice using a filter function.
-func Filter[T any](s []T, f func(T) bool) []T {
-	var r []T
-	for _, v := range s {
-		if f(v) {
-			r = append(r, v)
-		}
-	}
-	return r
-}
diff --git a/internal/backport/go/parser/testdata/sort.go2 b/internal/backport/go/parser/testdata/sort.go2
deleted file mode 100644
index 88be79f..0000000
--- a/internal/backport/go/parser/testdata/sort.go2
+++ /dev/null
@@ -1,27 +0,0 @@
-package sort
-
-type orderedSlice[Elem comparable] []Elem
-
-func (s orderedSlice[Elem]) Len() int           { return len(s) }
-func (s orderedSlice[Elem]) Less(i, j int) bool { return s[i] < s[j] }
-func (s orderedSlice[Elem]) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-// OrderedSlice sorts the slice s in ascending order.
-// The elements of s must be ordered using the < operator.
-func OrderedSlice[Elem comparable](s []Elem) {
-	sort.Sort(orderedSlice[Elem](s))
-}
-
-type sliceFn[Elem any] struct {
-	s []Elem
-	f func(Elem, Elem) bool
-}
-
-func (s sliceFn[Elem]) Len() int           { return len(s.s) }
-func (s sliceFn[Elem]) Less(i, j int) bool { return s.f(s.s[i], s.s[j]) }
-func (s sliceFn[Elem]) Swap(i, j int)      { s.s[i], s.s[j] = s.s[j], s.s[i] }
-
-// SliceFn sorts the slice s according to the function f.
-func SliceFn[Elem any](s []Elem, f func(Elem, Elem) bool) {
-	Sort(sliceFn[Elem]{s, f})
-}
diff --git a/internal/backport/go/parser/testdata/tparams.go2 b/internal/backport/go/parser/testdata/tparams.go2
deleted file mode 100644
index 28fd132..0000000
--- a/internal/backport/go/parser/testdata/tparams.go2
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package p
-
-type _[a /* ERROR "all type parameters must be named" */, b] struct{}
-type _[a t, b t, c /* ERROR "all type parameters must be named" */ ] struct{}
-type _ struct {
-	t [n]byte
-	t[a]
-	t[a, b]
-}
-type _ interface {
-	t[a]
-	m[ /* ERROR "method must have no type parameters" */ _ _, /* ERROR mixed */ _]()
-	t[a, b]
-}
-
-func _[] /* ERROR "empty type parameter list" */ ()
-func _[a /* ERROR "all type parameters must be named" */, b ]()
-func _[a t, b t, c /* ERROR "all type parameters must be named" */ ]()
-
-// TODO(rfindley) incorrect error message (see existing TODO in parser)
-func f[a b, 0 /* ERROR "expected '\)', found 0" */ ] ()
-
-// issue #49482
-type (
-	_[a *[]int] struct{}
-	_[a *t,] struct{}
-	_[a *t|[]int] struct{}
-	_[a *t|t,] struct{}
-	_[a *t|~t,] struct{}
-	_[a *struct{}|t] struct{}
-	_[a *t|struct{}] struct{}
-	_[a *struct{}|~t] struct{}
-)
-
-// issue #51488
-type (
-	_[a *t|t,] struct{}
-	_[a *t|t, b t] struct{}
-	_[a *t|t] struct{}
-	_[a *[]t|t] struct{}
-	_[a ([]t)] struct{}
-	_[a ([]t)|t] struct{}
-)
diff --git a/internal/backport/go/parser/testdata/typeparams.src b/internal/backport/go/parser/testdata/typeparams.src
deleted file mode 100644
index 479cb96..0000000
--- a/internal/backport/go/parser/testdata/typeparams.src
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test cases for error messages produced while parsing code that uses type
-// parameters, without ParseTypeParams being enabled.
-
-package p
-
-type List[E any /* ERROR "expected ']', found any" */ ] []E
-
-type Pair[L, /* ERROR "unexpected comma" */ R any] struct {
-	Left L
-	Right R
-}
-
-var _ = Pair[int, /* ERROR "expected ']' or ':', found ','" */ string]{}
diff --git a/internal/backport/go/parser/testdata/typeset.go2 b/internal/backport/go/parser/testdata/typeset.go2
deleted file mode 100644
index 7844c22..0000000
--- a/internal/backport/go/parser/testdata/typeset.go2
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains test cases for typeset-only constraint elements.
-// TODO(gri) gofmt once/if gofmt supports this notation.
-
-package p
-
-type (
-        _[_ t] t
-        _[_ ~t] t
-        _[_ t|t] t
-        _[_ ~t|t] t
-        _[_ t|~t] t
-        _[_ ~t|~t] t
-
-        _[_ t, _, _ t|t] t
-        _[_ t, _, _ ~t|t] t
-        _[_ t, _, _ t|~t] t
-        _[_ t, _, _ ~t|~t] t
-
-        _[_ t.t] t
-        _[_ ~t.t] t
-        _[_ t.t|t.t] t
-        _[_ ~t.t|t.t] t
-        _[_ t.t|~t.t] t
-        _[_ ~t.t|~t.t] t
-
-        _[_ t, _, _ t.t|t.t] t
-        _[_ t, _, _ ~t.t|t.t] t
-        _[_ t, _, _ t.t|~t.t] t
-        _[_ t, _, _ ~t.t|~t.t] t
-
-        _[_ struct{}] t
-        _[_ ~struct{}] t
-
-        _[_ struct{}|t] t
-        _[_ ~struct{}|t] t
-        _[_ struct{}|~t] t
-        _[_ ~struct{}|~t] t
-
-        _[_ t|struct{}] t
-        _[_ ~t|struct{}] t
-        _[_ t|~struct{}] t
-        _[_ ~t|~struct{}] t
-)
-
-// Single-expression type parameter lists and those that don't start
-// with a (type parameter) name are considered array sizes.
-// The term must be a valid expression (it could be a type incl. a
-// tilde term) but the type-checker will complain.
-type (
-        _[t] t
-        _[t|t] t
-
-        // These are invalid and the type-checker will complain.
-        _[~t] t
-        _[~t|t] t
-        _[t|~t] t
-        _[~t|~t] t
-)
-
-type _[_ t, t /* ERROR "type parameters must be named" */ ] t
-type _[_ ~t, t /* ERROR "type parameters must be named" */ ] t
-type _[_ t, ~ /* ERROR "type parameters must be named" */ t] t
-type _[_ ~t, ~ /* ERROR "type parameters must be named" */ t] t
-
-type _[_ t|t, t /* ERROR "type parameters must be named" */ |t] t
-type _[_ ~t|t, t /* ERROR "type parameters must be named" */ |t] t
-type _[_ t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
-type _[_ ~t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
diff --git a/internal/backport/go/printer/comment.go b/internal/backport/go/printer/comment.go
deleted file mode 100644
index 663c528..0000000
--- a/internal/backport/go/printer/comment.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package printer
-
-import (
-	"strings"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc/comment"
-)
-
-// formatDocComment reformats the doc comment list,
-// returning the canonical formatting.
-func formatDocComment(list []*ast.Comment) []*ast.Comment {
-	// Extract comment text (removing comment markers).
-	var kind, text string
-	var directives []*ast.Comment
-	if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") {
-		kind = "/*"
-		text = list[0].Text
-		if !strings.Contains(text, "\n") || allStars(text) {
-			// Single-line /* .. */ comment in doc comment position,
-			// or multiline old-style comment like
-			//	/*
-			//	 * Comment
-			//	 * text here.
-			//	 */
-			// Should not happen, since it will not work well as a
-			// doc comment, but if it does, just ignore:
-			// reformatting it will only make the situation worse.
-			return list
-		}
-		text = text[2 : len(text)-2] // cut /* and */
-	} else if strings.HasPrefix(list[0].Text, "//") {
-		kind = "//"
-		var b strings.Builder
-		for _, c := range list {
-			if !strings.HasPrefix(c.Text, "//") {
-				return list
-			}
-			// Accumulate //go:build etc lines separately.
-			if isDirective(c.Text[2:]) {
-				directives = append(directives, c)
-				continue
-			}
-			b.WriteString(strings.TrimPrefix(c.Text[2:], " "))
-			b.WriteString("\n")
-		}
-		text = b.String()
-	} else {
-		// Not sure what this is, so leave alone.
-		return list
-	}
-
-	if text == "" {
-		return list
-	}
-
-	// Parse comment and reformat as text.
-	var p comment.Parser
-	d := p.Parse(text)
-
-	var pr comment.Printer
-	text = string(pr.Comment(d))
-
-	// For /* */ comment, return one big comment with text inside.
-	slash := list[0].Slash
-	if kind == "/*" {
-		c := &ast.Comment{
-			Slash: slash,
-			Text:  "/*\n" + text + "*/",
-		}
-		return []*ast.Comment{c}
-	}
-
-	// For // comment, return sequence of // lines.
-	var out []*ast.Comment
-	for text != "" {
-		var line string
-		line, text, _ = stringsCut(text, "\n")
-		if line == "" {
-			line = "//"
-		} else if strings.HasPrefix(line, "\t") {
-			line = "//" + line
-		} else {
-			line = "// " + line
-		}
-		out = append(out, &ast.Comment{
-			Slash: slash,
-			Text:  line,
-		})
-	}
-	if len(directives) > 0 {
-		out = append(out, &ast.Comment{
-			Slash: slash,
-			Text:  "//",
-		})
-		for _, c := range directives {
-			out = append(out, &ast.Comment{
-				Slash: slash,
-				Text:  c.Text,
-			})
-		}
-	}
-	return out
-}
-
-// isDirective reports whether c is a comment directive.
-// See go.dev/issue/37974.
-// This code is also in go/ast.
-func isDirective(c string) bool {
-	// "//line " is a line directive.
-	// "//extern " is for gccgo.
-	// "//export " is for cgo.
-	// (The // has been removed.)
-	if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") {
-		return true
-	}
-
-	// "//[a-z0-9]+:[a-z0-9]"
-	// (The // has been removed.)
-	colon := strings.Index(c, ":")
-	if colon <= 0 || colon+1 >= len(c) {
-		return false
-	}
-	for i := 0; i <= colon+1; i++ {
-		if i == colon {
-			continue
-		}
-		b := c[i]
-		if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
-			return false
-		}
-	}
-	return true
-}
-
-// allStars reports whether text is the interior of an
-// old-style /* */ comment with a star at the start of each line.
-func allStars(text string) bool {
-	for i := 0; i < len(text); i++ {
-		if text[i] == '\n' {
-			j := i + 1
-			for j < len(text) && (text[j] == ' ' || text[j] == '\t') {
-				j++
-			}
-			if j < len(text) && text[j] != '*' {
-				return false
-			}
-		}
-	}
-	return true
-}
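
The deleted comment.go wrapped the doc-comment parser and printer; on Go 1.19 the standard go/doc/comment package provides the same Parser/Printer API directly. A minimal sketch, with hypothetical input text:

package main

import (
	"fmt"
	"go/doc/comment"
)

func main() {
	var p comment.Parser
	// Parse takes the comment text with the // or /* */ markers removed.
	d := p.Parse("Hello returns a greeting.\n\nDeprecated: use Greet instead.\n")

	var pr comment.Printer
	// Comment renders the parsed doc back out in canonical gofmt form.
	fmt.Print(string(pr.Comment(d)))
}
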
diff --git a/internal/backport/go/printer/example_test.go b/internal/backport/go/printer/example_test.go
deleted file mode 100644
index 0df644d..0000000
--- a/internal/backport/go/printer/example_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package printer_test
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
-	"strings"
-	"testing"
-)
-
-// Dummy test function so that godoc does not use the entire file as example.
-func Test(*testing.T) {}
-
-func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
-	fset = token.NewFileSet()
-	if file, err := parser.ParseFile(fset, filename, nil, 0); err == nil {
-		for _, d := range file.Decls {
-			if f, ok := d.(*ast.FuncDecl); ok && f.Name.Name == functionname {
-				fun = f
-				return
-			}
-		}
-	}
-	panic("function not found")
-}
-
-func ExampleFprint() {
-	// Parse source file and extract the AST without comments for
-	// this function, with position information referring to the
-	// file set fset.
-	funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
-
-	// Print the function body into buffer buf.
-	// The file set is provided to the printer so that it knows
-	// about the original source formatting and can add additional
-	// line breaks where they were present in the source.
-	var buf bytes.Buffer
-	printer.Fprint(&buf, fset, funcAST.Body)
-
-	// Remove braces {} enclosing the function body, unindent,
-	// and trim leading and trailing white space.
-	s := buf.String()
-	s = s[1 : len(s)-1]
-	s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
-
-	// Print the cleaned-up body text to stdout.
-	fmt.Println(s)
-
-	// output:
-	// funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
-	//
-	// var buf bytes.Buffer
-	// printer.Fprint(&buf, fset, funcAST.Body)
-	//
-	// s := buf.String()
-	// s = s[1 : len(s)-1]
-	// s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
-	//
-	// fmt.Println(s)
-}
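
The ExampleFprint above needs only its import paths changed to build against the standard library. A compact sketch of the equivalent, with the source supplied inline (a hypothetical add function) rather than re-parsed from the example file:

package main

import (
	"bytes"
	"fmt"
	"go/parser"
	"go/printer"
	"go/token"
)

func main() {
	const src = `package p

func add(a, b int) int { return a + b }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	// Fprint writes the AST node back out, using fset for original positions.
	if err := printer.Fprint(&buf, fset, f.Decls[0]); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
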
diff --git a/internal/backport/go/printer/gobuild.go b/internal/backport/go/printer/gobuild.go
deleted file mode 100644
index f00492d..0000000
--- a/internal/backport/go/printer/gobuild.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package printer
-
-import (
-	"go/build/constraint"
-	"sort"
-	"text/tabwriter"
-)
-
-func (p *printer) fixGoBuildLines() {
-	if len(p.goBuild)+len(p.plusBuild) == 0 {
-		return
-	}
-
-	// Find latest possible placement of //go:build and // +build comments.
-	// That's just after the last blank line before we find a non-comment.
-	// (We'll add another blank line after our comment block.)
-	// When we start dropping // +build comments, we can skip over /* */ comments too.
-	// Note that we are processing tabwriter input, so every comment
-	// begins and ends with a tabwriter.Escape byte.
-	// And some newlines have turned into \f bytes.
-	insert := 0
-	for pos := 0; ; {
-		// Skip leading space at beginning of line.
-		blank := true
-		for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') {
-			pos++
-		}
-		// Skip over // comment if any.
-		if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' {
-			blank = false
-			for pos < len(p.output) && !isNL(p.output[pos]) {
-				pos++
-			}
-		}
-		// Skip over \n at end of line.
-		if pos >= len(p.output) || !isNL(p.output[pos]) {
-			break
-		}
-		pos++
-
-		if blank {
-			insert = pos
-		}
-	}
-
-	// If there is a //go:build comment before the place we identified,
-	// use that point instead. (Earlier in the file is always fine.)
-	if len(p.goBuild) > 0 && p.goBuild[0] < insert {
-		insert = p.goBuild[0]
-	} else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert {
-		insert = p.plusBuild[0]
-	}
-
-	var x constraint.Expr
-	switch len(p.goBuild) {
-	case 0:
-		// Synthesize //go:build expression from // +build lines.
-		for _, pos := range p.plusBuild {
-			y, err := constraint.Parse(p.commentTextAt(pos))
-			if err != nil {
-				x = nil
-				break
-			}
-			if x == nil {
-				x = y
-			} else {
-				x = &constraint.AndExpr{X: x, Y: y}
-			}
-		}
-	case 1:
-		// Parse //go:build expression.
-		x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0]))
-	}
-
-	var block []byte
-	if x == nil {
-		// Don't have a valid //go:build expression to treat as truth.
-		// Bring all the lines together but leave them alone.
-		// Note that these are already tabwriter-escaped.
-		for _, pos := range p.goBuild {
-			block = append(block, p.lineAt(pos)...)
-		}
-		for _, pos := range p.plusBuild {
-			block = append(block, p.lineAt(pos)...)
-		}
-	} else {
-		block = append(block, tabwriter.Escape)
-		block = append(block, "//go:build "...)
-		block = append(block, x.String()...)
-		block = append(block, tabwriter.Escape, '\n')
-		if len(p.plusBuild) > 0 {
-			lines, err := constraint.PlusBuildLines(x)
-			if err != nil {
-				lines = []string{"// +build error: " + err.Error()}
-			}
-			for _, line := range lines {
-				block = append(block, tabwriter.Escape)
-				block = append(block, line...)
-				block = append(block, tabwriter.Escape, '\n')
-			}
-		}
-	}
-	block = append(block, '\n')
-
-	// Build sorted list of lines to delete from remainder of output.
-	toDelete := append(p.goBuild, p.plusBuild...)
-	sort.Ints(toDelete)
-
-	// Collect output after insertion point, with lines deleted, into after.
-	var after []byte
-	start := insert
-	for _, end := range toDelete {
-		if end < start {
-			continue
-		}
-		after = appendLines(after, p.output[start:end])
-		start = end + len(p.lineAt(end))
-	}
-	after = appendLines(after, p.output[start:])
-	if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) {
-		after = after[:n-1]
-	}
-
-	p.output = p.output[:insert]
-	p.output = append(p.output, block...)
-	p.output = append(p.output, after...)
-}
-
-// appendLines is like append(x, y...)
-// but it avoids creating doubled blank lines,
-// which would not be gofmt-standard output.
-// It assumes that only whole blocks of lines are being appended,
-// not line fragments.
-func appendLines(x, y []byte) []byte {
-	if len(y) > 0 && isNL(y[0]) && // y starts in blank line
-		(len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line
-		y = y[1:] // delete y's leading blank line
-	}
-	return append(x, y...)
-}
-
-func (p *printer) lineAt(start int) []byte {
-	pos := start
-	for pos < len(p.output) && !isNL(p.output[pos]) {
-		pos++
-	}
-	if pos < len(p.output) {
-		pos++
-	}
-	return p.output[start:pos]
-}
-
-func (p *printer) commentTextAt(start int) string {
-	if start < len(p.output) && p.output[start] == tabwriter.Escape {
-		start++
-	}
-	pos := start
-	for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) {
-		pos++
-	}
-	return string(p.output[start:pos])
-}
-
-func isNL(b byte) bool {
-	return b == '\n' || b == '\f'
-}
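
fixGoBuildLines already leaned on the standard go/build/constraint package (in the standard library since Go 1.16), so that part never needed backporting. A minimal sketch of the Parse and PlusBuildLines calls it makes, using a hypothetical constraint:

package main

import (
	"fmt"
	"go/build/constraint"
)

func main() {
	// Parse accepts both forms: "//go:build ..." and "// +build ...".
	x, err := constraint.Parse("//go:build linux && (amd64 || arm64)")
	if err != nil {
		panic(err)
	}
	fmt.Println("//go:build " + x.String())

	// PlusBuildLines derives the legacy // +build lines for the same expression.
	lines, err := constraint.PlusBuildLines(x)
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Println(line)
	}
}
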
diff --git a/internal/backport/go/printer/nodes.go b/internal/backport/go/printer/nodes.go
deleted file mode 100644
index 8358c91..0000000
--- a/internal/backport/go/printer/nodes.go
+++ /dev/null
@@ -1,1921 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements printing of AST nodes; specifically
-// expressions, statements, declarations, and files. It uses
-// the print functionality implemented in printer.go.
-
-package printer
-
-import (
-	"bytes"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-	"math"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Formatting issues:
-// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
-//   when the comment spans multiple lines; if such a comment is just two lines, formatting is
-//   not idempotent
-// - formatting of expression lists
-// - should use blank instead of tab to separate one-line function bodies from
-//   the function header unless there is a group of consecutive one-liners
-
-// ----------------------------------------------------------------------------
-// Common AST nodes.
-
-// Print as many newlines as necessary (but at least min newlines) to get to
-// the current line. ws is printed before the first line break. If newSection
-// is set, the first line break is printed as formfeed. Returns 0 if no line
-// breaks were printed, returns 1 if there was exactly one newline printed,
-// and returns a value > 1 if there was a formfeed or more than one newline
-// printed.
-//
-// TODO(gri): linebreak may add too many lines if the next statement at "line"
-// is preceded by comments because the computation of n assumes
-// the current position before the comment and the target position
-// after the comment. Thus, after interspersing such comments, the
-// space taken up by them is not considered to reduce the number of
-// linebreaks. At the moment there is no easy way to know about
-// future (not yet interspersed) comments in this function.
-func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) {
-	n := nlimit(line - p.pos.Line)
-	if n < min {
-		n = min
-	}
-	if n > 0 {
-		p.print(ws)
-		if newSection {
-			p.print(formfeed)
-			n--
-			nbreaks = 2
-		}
-		nbreaks += n
-		for ; n > 0; n-- {
-			p.print(newline)
-		}
-	}
-	return
-}
-
-// setComment sets g as the next comment if g != nil and if node comments
-// are enabled - this mode is used when printing source code fragments such
-// as exports only. It assumes that there is no pending comment in p.comments
-// and at most one pending comment in the p.comment cache.
-func (p *printer) setComment(g *ast.CommentGroup) {
-	if g == nil || !p.useNodeComments {
-		return
-	}
-	if p.comments == nil {
-		// initialize p.comments lazily
-		p.comments = make([]*ast.CommentGroup, 1)
-	} else if p.cindex < len(p.comments) {
-		// for some reason there are pending comments; this
-		// should never happen - handle gracefully and flush
-		// all comments up to g, ignore anything after that
-		p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL)
-		p.comments = p.comments[0:1]
-		// in debug mode, report error
-		p.internalError("setComment found pending comments")
-	}
-	p.comments[0] = g
-	p.cindex = 0
-	// don't overwrite any pending comment in the p.comment cache
-	// (there may be a pending comment when a line comment is
-	// immediately followed by a lead comment with no other
-	// tokens between)
-	if p.commentOffset == infinity {
-		p.nextComment() // get comment ready for use
-	}
-}
-
-type exprListMode uint
-
-const (
-	commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
-	noIndent                           // no extra indentation in multi-line lists
-)
-
-// If indent is set, a multi-line identifier list is indented after the
-// first linebreak encountered.
-func (p *printer) identList(list []*ast.Ident, indent bool) {
-	// convert into an expression list so we can re-use exprList formatting
-	xlist := make([]ast.Expr, len(list))
-	for i, x := range list {
-		xlist[i] = x
-	}
-	var mode exprListMode
-	if !indent {
-		mode = noIndent
-	}
-	p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false)
-}
-
-const filteredMsg = "contains filtered or unexported fields"
-
-// Print a list of expressions. If the list spans multiple
-// source lines, the original line breaks are respected between
-// expressions.
-//
-// TODO(gri) Consider rewriting this to be independent of []ast.Expr
-// so that we can use the algorithm for any kind of list
-//
-//	(e.g., pass list via a channel over which to range).
-func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) {
-	if len(list) == 0 {
-		if isIncomplete {
-			prev := p.posFor(prev0)
-			next := p.posFor(next0)
-			if prev.IsValid() && prev.Line == next.Line {
-				p.print("/* " + filteredMsg + " */")
-			} else {
-				p.print(newline)
-				p.print(indent, "// "+filteredMsg, unindent, newline)
-			}
-		}
-		return
-	}
-
-	prev := p.posFor(prev0)
-	next := p.posFor(next0)
-	line := p.lineFor(list[0].Pos())
-	endLine := p.lineFor(list[len(list)-1].End())
-
-	if prev.IsValid() && prev.Line == line && line == endLine {
-		// all list entries on a single line
-		for i, x := range list {
-			if i > 0 {
-				// use position of expression following the comma as
-				// comma position for correct comment placement
-				p.print(x.Pos(), token.COMMA, blank)
-			}
-			p.expr0(x, depth)
-		}
-		if isIncomplete {
-			p.print(token.COMMA, blank, "/* "+filteredMsg+" */")
-		}
-		return
-	}
-
-	// list entries span multiple lines;
-	// use source code positions to guide line breaks
-
-	// Don't add extra indentation if noIndent is set;
-	// i.e., pretend that the first line is already indented.
-	ws := ignore
-	if mode&noIndent == 0 {
-		ws = indent
-	}
-
-	// The first linebreak is always a formfeed since this section must not
-	// depend on any previous formatting.
-	prevBreak := -1 // index of last expression that was followed by a linebreak
-	if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 {
-		ws = ignore
-		prevBreak = 0
-	}
-
-	// initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line
-	size := 0
-
-	// We use the ratio between the geometric mean of the previous key sizes and
-	// the current size to determine if there should be a break in the alignment.
-	// To compute the geometric mean we accumulate the ln(size) values (lnsum)
-	// and the number of sizes included (count).
-	lnsum := 0.0
-	count := 0
-
-	// print all list elements
-	prevLine := prev.Line
-	for i, x := range list {
-		line = p.lineFor(x.Pos())
-
-		// Determine if the next linebreak, if any, needs to use formfeed:
-		// in general, use the entire node size to make the decision; for
-		// key:value expressions, use the key size.
-		// TODO(gri) for a better result, should probably incorporate both
-		//           the key and the node size into the decision process
-		useFF := true
-
-		// Determine element size: All bets are off if we don't have
-		// position information for the previous and next token (likely
-		// generated code - simply ignore the size in this case by setting
-		// it to 0).
-		prevSize := size
-		const infinity = 1e6 // larger than any source line
-		size = p.nodeSize(x, infinity)
-		pair, isPair := x.(*ast.KeyValueExpr)
-		if size <= infinity && prev.IsValid() && next.IsValid() {
-			// x fits on a single line
-			if isPair {
-				size = p.nodeSize(pair.Key, infinity) // size <= infinity
-			}
-		} else {
-			// size too large or we don't have good layout information
-			size = 0
-		}
-
-		// If the previous line and the current line had single-
-		// line-expressions and the key sizes are small or the
-		// ratio between the current key and the geometric mean
-		// if the previous key sizes does not exceed a threshold,
-		// align columns and do not use formfeed.
-		if prevSize > 0 && size > 0 {
-			const smallSize = 40
-			if count == 0 || prevSize <= smallSize && size <= smallSize {
-				useFF = false
-			} else {
-				const r = 2.5                               // threshold
-				geomean := math.Exp(lnsum / float64(count)) // count > 0
-				ratio := float64(size) / geomean
-				useFF = r*ratio <= 1 || r <= ratio
-			}
-		}
-
-		needsLinebreak := 0 < prevLine && prevLine < line
-		if i > 0 {
-			// Use position of expression following the comma as
-			// comma position for correct comment placement, but
-			// only if the expression is on the same line.
-			if !needsLinebreak {
-				p.print(x.Pos())
-			}
-			p.print(token.COMMA)
-			needsBlank := true
-			if needsLinebreak {
-				// Lines are broken using newlines so comments remain aligned
-				// unless useFF is set or there are multiple expressions on
-				// the same line in which case formfeed is used.
-				nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i)
-				if nbreaks > 0 {
-					ws = ignore
-					prevBreak = i
-					needsBlank = false // we got a line break instead
-				}
-				// If there was a new section or more than one new line
-				// (which means that the tabwriter will implicitly break
-				// the section), reset the geomean variables since we are
-				// starting a new group of elements with the next element.
-				if nbreaks > 1 {
-					lnsum = 0
-					count = 0
-				}
-			}
-			if needsBlank {
-				p.print(blank)
-			}
-		}
-
-		if len(list) > 1 && isPair && size > 0 && needsLinebreak {
-			// We have a key:value expression that fits onto one line
-			// and it's not on the same line as the prior expression:
-			// Use a column for the key such that consecutive entries
-			// can align if possible.
-			// (needsLinebreak is set if we started a new line before)
-			p.expr(pair.Key)
-			p.print(pair.Colon, token.COLON, vtab)
-			p.expr(pair.Value)
-		} else {
-			p.expr0(x, depth)
-		}
-
-		if size > 0 {
-			lnsum += math.Log(float64(size))
-			count++
-		}
-
-		prevLine = line
-	}
-
-	if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
-		// Print a terminating comma if the next token is on a new line.
-		p.print(token.COMMA)
-		if isIncomplete {
-			p.print(newline)
-			p.print("// " + filteredMsg)
-		}
-		if ws == ignore && mode&noIndent == 0 {
-			// unindent if we indented
-			p.print(unindent)
-		}
-		p.print(formfeed) // terminating comma needs a line break to look good
-		return
-	}
-
-	if isIncomplete {
-		p.print(token.COMMA, newline)
-		p.print("// "+filteredMsg, newline)
-	}
-
-	if ws == ignore && mode&noIndent == 0 {
-		// unindent if we indented
-		p.print(unindent)
-	}
-}
-
-type paramMode int
-
-const (
-	funcParam paramMode = iota
-	funcTParam
-	typeTParam
-)
-
-func (p *printer) parameters(fields *ast.FieldList, mode paramMode) {
-	openTok, closeTok := token.LPAREN, token.RPAREN
-	if mode != funcParam {
-		openTok, closeTok = token.LBRACK, token.RBRACK
-	}
-	p.print(fields.Opening, openTok)
-	if len(fields.List) > 0 {
-		prevLine := p.lineFor(fields.Opening)
-		ws := indent
-		for i, par := range fields.List {
-			// determine par begin and end line (may be different
-			// if there are multiple parameter names for this par
-			// or the type is on a separate line)
-			parLineBeg := p.lineFor(par.Pos())
-			parLineEnd := p.lineFor(par.End())
-			// separating "," if needed
-			needsLinebreak := 0 < prevLine && prevLine < parLineBeg
-			if i > 0 {
-				// use position of parameter following the comma as
-				// comma position for correct comma placement, but
-				// only if the next parameter is on the same line
-				if !needsLinebreak {
-					p.print(par.Pos())
-				}
-				p.print(token.COMMA)
-			}
-			// separator if needed (linebreak or blank)
-			if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 {
-				// break line if the opening "(" or previous parameter ended on a different line
-				ws = ignore
-			} else if i > 0 {
-				p.print(blank)
-			}
-			// parameter names
-			if len(par.Names) > 0 {
-				// Very subtle: If we indented before (ws == ignore), identList
-				// won't indent again. If we didn't (ws == indent), identList will
-				// indent if the identList spans multiple lines, and it will outdent
-				// again at the end (and still ws == indent). Thus, a subsequent indent
-				// by a linebreak call after a type, or in the next multi-line identList
-				// will do the right thing.
-				p.identList(par.Names, ws == indent)
-				p.print(blank)
-			}
-			// parameter type
-			p.expr(stripParensAlways(par.Type))
-			prevLine = parLineEnd
-		}
-
-		// if the closing ")" is on a separate line from the last parameter,
-		// print an additional "," and line break
-		if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
-			p.print(token.COMMA)
-			p.linebreak(closing, 0, ignore, true)
-		} else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) {
-			// A type parameter list [P T] where the name P and the type expression T syntactically
-			// combine to another valid (value) expression requires a trailing comma, as in [P *T,]
-			// (or an enclosing interface as in [P interface(*T)]), so that the type parameter list
-			// is not parsed as an array length [P*T].
-			p.print(token.COMMA)
-		}
-
-		// unindent if we indented
-		if ws == ignore {
-			p.print(unindent)
-		}
-	}
-
-	p.print(fields.Closing, closeTok)
-}
-
-// combinesWithName reports whether a name followed by the expression x
-// syntactically combines to another valid (value) expression. For instance
-// using *T for x, "name *T" syntactically appears as the expression x*T.
-// On the other hand, using  P|Q or *P|~Q for x, "name P|Q" or name *P|~Q"
-// cannot be combined into a valid (value) expression.
-func combinesWithName(x ast.Expr) bool {
-	switch x := x.(type) {
-	case *ast.StarExpr:
-		// name *x.X combines to name*x.X if x.X is not a type element
-		return !isTypeElem(x.X)
-	case *ast.BinaryExpr:
-		return combinesWithName(x.X) && !isTypeElem(x.Y)
-	case *ast.ParenExpr:
-		// name(x) combines but we are making sure at
-		// the call site that x is never parenthesized.
-		panic("unexpected parenthesized expression")
-	}
-	return false
-}
-
-// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
-// The result is false if x could be a type element OR an ordinary (value) expression.
-func isTypeElem(x ast.Expr) bool {
-	switch x := x.(type) {
-	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
-		return true
-	case *ast.UnaryExpr:
-		return x.Op == token.TILDE
-	case *ast.BinaryExpr:
-		return isTypeElem(x.X) || isTypeElem(x.Y)
-	case *ast.ParenExpr:
-		return isTypeElem(x.X)
-	}
-	return false
-}
-
-func (p *printer) signature(sig *ast.FuncType) {
-	if sig.TypeParams != nil {
-		p.parameters(sig.TypeParams, funcTParam)
-	}
-	if sig.Params != nil {
-		p.parameters(sig.Params, funcParam)
-	} else {
-		p.print(token.LPAREN, token.RPAREN)
-	}
-	res := sig.Results
-	n := res.NumFields()
-	if n > 0 {
-		// res != nil
-		p.print(blank)
-		if n == 1 && res.List[0].Names == nil {
-			// single anonymous res; no ()'s
-			p.expr(stripParensAlways(res.List[0].Type))
-			return
-		}
-		p.parameters(res, funcParam)
-	}
-}
-
-func identListSize(list []*ast.Ident, maxSize int) (size int) {
-	for i, x := range list {
-		if i > 0 {
-			size += len(", ")
-		}
-		size += utf8.RuneCountInString(x.Name)
-		if size >= maxSize {
-			break
-		}
-	}
-	return
-}
-
-func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
-	if len(list) != 1 {
-		return false // allow only one field
-	}
-	f := list[0]
-	if f.Tag != nil || f.Comment != nil {
-		return false // don't allow tags or comments
-	}
-	// only name(s) and type
-	const maxSize = 30 // adjust as appropriate, this is an approximate value
-	namesSize := identListSize(f.Names, maxSize)
-	if namesSize > 0 {
-		namesSize = 1 // blank between names and types
-	}
-	typeSize := p.nodeSize(f.Type, maxSize)
-	return namesSize+typeSize <= maxSize
-}
-
-func (p *printer) setLineComment(text string) {
-	p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
-}
-
-func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
-	lbrace := fields.Opening
-	list := fields.List
-	rbrace := fields.Closing
-	hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace))
-	srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace)
-
-	if !hasComments && srcIsOneLine {
-		// possibly a one-line struct/interface
-		if len(list) == 0 {
-			// no blank between keyword and {} in this case
-			p.print(lbrace, token.LBRACE, rbrace, token.RBRACE)
-			return
-		} else if p.isOneLineFieldList(list) {
-			// small enough - print on one line
-			// (don't use identList and ignore source line breaks)
-			p.print(lbrace, token.LBRACE, blank)
-			f := list[0]
-			if isStruct {
-				for i, x := range f.Names {
-					if i > 0 {
-						// no comments so no need for comma position
-						p.print(token.COMMA, blank)
-					}
-					p.expr(x)
-				}
-				if len(f.Names) > 0 {
-					p.print(blank)
-				}
-				p.expr(f.Type)
-			} else { // interface
-				if len(f.Names) > 0 {
-					name := f.Names[0] // method name
-					p.expr(name)
-					p.signature(f.Type.(*ast.FuncType)) // don't print "func"
-				} else {
-					// embedded interface
-					p.expr(f.Type)
-				}
-			}
-			p.print(blank, rbrace, token.RBRACE)
-			return
-		}
-	}
-	// hasComments || !srcIsOneLine
-
-	p.print(blank, lbrace, token.LBRACE, indent)
-	if hasComments || len(list) > 0 {
-		p.print(formfeed)
-	}
-
-	if isStruct {
-
-		sep := vtab
-		if len(list) == 1 {
-			sep = blank
-		}
-		var line int
-		for i, f := range list {
-			if i > 0 {
-				p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0)
-			}
-			extraTabs := 0
-			p.setComment(f.Doc)
-			p.recordLine(&line)
-			if len(f.Names) > 0 {
-				// named fields
-				p.identList(f.Names, false)
-				p.print(sep)
-				p.expr(f.Type)
-				extraTabs = 1
-			} else {
-				// anonymous field
-				p.expr(f.Type)
-				extraTabs = 2
-			}
-			if f.Tag != nil {
-				if len(f.Names) > 0 && sep == vtab {
-					p.print(sep)
-				}
-				p.print(sep)
-				p.expr(f.Tag)
-				extraTabs = 0
-			}
-			if f.Comment != nil {
-				for ; extraTabs > 0; extraTabs-- {
-					p.print(sep)
-				}
-				p.setComment(f.Comment)
-			}
-		}
-		if isIncomplete {
-			if len(list) > 0 {
-				p.print(formfeed)
-			}
-			p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
-			p.setLineComment("// " + filteredMsg)
-		}
-
-	} else { // interface
-
-		var line int
-		var prev *ast.Ident // previous "type" identifier
-		for i, f := range list {
-			var name *ast.Ident // first name, or nil
-			if len(f.Names) > 0 {
-				name = f.Names[0]
-			}
-			if i > 0 {
-				// don't do a line break (min == 0) if we are printing a list of types
-				// TODO(gri) this doesn't work quite right if the list of types is
-				//           spread across multiple lines
-				min := 1
-				if prev != nil && name == prev {
-					min = 0
-				}
-				p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0)
-			}
-			p.setComment(f.Doc)
-			p.recordLine(&line)
-			if name != nil {
-				// method
-				p.expr(name)
-				p.signature(f.Type.(*ast.FuncType)) // don't print "func"
-				prev = nil
-			} else {
-				// embedded interface
-				p.expr(f.Type)
-				prev = nil
-			}
-			p.setComment(f.Comment)
-		}
-		if isIncomplete {
-			if len(list) > 0 {
-				p.print(formfeed)
-			}
-			p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
-			p.setLineComment("// contains filtered or unexported methods")
-		}
-
-	}
-	p.print(unindent, formfeed, rbrace, token.RBRACE)
-}
-
-// ----------------------------------------------------------------------------
-// Expressions
-
-func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
-	switch e.Op.Precedence() {
-	case 4:
-		has4 = true
-	case 5:
-		has5 = true
-	}
-
-	switch l := e.X.(type) {
-	case *ast.BinaryExpr:
-		if l.Op.Precedence() < e.Op.Precedence() {
-			// parens will be inserted.
-			// pretend this is an *ast.ParenExpr and do nothing.
-			break
-		}
-		h4, h5, mp := walkBinary(l)
-		has4 = has4 || h4
-		has5 = has5 || h5
-		if maxProblem < mp {
-			maxProblem = mp
-		}
-	}
-
-	switch r := e.Y.(type) {
-	case *ast.BinaryExpr:
-		if r.Op.Precedence() <= e.Op.Precedence() {
-			// parens will be inserted.
-			// pretend this is an *ast.ParenExpr and do nothing.
-			break
-		}
-		h4, h5, mp := walkBinary(r)
-		has4 = has4 || h4
-		has5 = has5 || h5
-		if maxProblem < mp {
-			maxProblem = mp
-		}
-
-	case *ast.StarExpr:
-		if e.Op == token.QUO { // `*/`
-			maxProblem = 5
-		}
-
-	case *ast.UnaryExpr:
-		switch e.Op.String() + r.Op.String() {
-		case "/*", "&&", "&^":
-			maxProblem = 5
-		case "++", "--":
-			if maxProblem < 4 {
-				maxProblem = 4
-			}
-		}
-	}
-	return
-}
-
-func cutoff(e *ast.BinaryExpr, depth int) int {
-	has4, has5, maxProblem := walkBinary(e)
-	if maxProblem > 0 {
-		return maxProblem + 1
-	}
-	if has4 && has5 {
-		if depth == 1 {
-			return 5
-		}
-		return 4
-	}
-	if depth == 1 {
-		return 6
-	}
-	return 4
-}
-
-func diffPrec(expr ast.Expr, prec int) int {
-	x, ok := expr.(*ast.BinaryExpr)
-	if !ok || prec != x.Op.Precedence() {
-		return 1
-	}
-	return 0
-}
-
-func reduceDepth(depth int) int {
-	depth--
-	if depth < 1 {
-		depth = 1
-	}
-	return depth
-}
-
-// Format the binary expression: decide the cutoff and then format.
-// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
-// (Algorithm suggestion by Russ Cox.)
-//
-// The precedences are:
-//
-//	5             *  /  %  <<  >>  &  &^
-//	4             +  -  |  ^
-//	3             ==  !=  <  <=  >  >=
-//	2             &&
-//	1             ||
-//
-// The only decision is whether there will be spaces around levels 4 and 5.
-// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
-//
-// To choose the cutoff, look at the whole expression but excluding primary
-// expressions (function calls, parenthesized exprs), and apply these rules:
-//
-//  1. If there is a binary operator with a right side unary operand
-//     that would clash without a space, the cutoff must be (in order):
-//
-//     /*	6
-//     &&	6
-//     &^	6
-//     ++	5
-//     --	5
-//
-//     (Comparison operators always have spaces around them.)
-//
-//  2. If there is a mix of level 5 and level 4 operators, then the cutoff
-//     is 5 (use spaces to distinguish precedence) in Normal mode
-//     and 4 (never use spaces) in Compact mode.
-//
-//  3. If there are no level 4 operators or no level 5 operators, then the
-//     cutoff is 6 (always use spaces) in Normal mode
-//     and 4 (never use spaces) in Compact mode.
-func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
-	prec := x.Op.Precedence()
-	if prec < prec1 {
-		// parenthesis needed
-		// Note: The parser inserts an ast.ParenExpr node; thus this case
-		//       can only occur if the AST is created in a different way.
-		p.print(token.LPAREN)
-		p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
-		p.print(token.RPAREN)
-		return
-	}
-
-	printBlank := prec < cutoff
-
-	ws := indent
-	p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
-	if printBlank {
-		p.print(blank)
-	}
-	xline := p.pos.Line // before the operator (it may be on the next line!)
-	yline := p.lineFor(x.Y.Pos())
-	p.print(x.OpPos, x.Op)
-	if xline != yline && xline > 0 && yline > 0 {
-		// at least one line break, but respect an extra empty line
-		// in the source
-		if p.linebreak(yline, 1, ws, true) > 0 {
-			ws = ignore
-			printBlank = false // no blank after line break
-		}
-	}
-	if printBlank {
-		p.print(blank)
-	}
-	p.expr1(x.Y, prec+1, depth+1)
-	if ws == ignore {
-		p.print(unindent)
-	}
-}
-
-func isBinary(expr ast.Expr) bool {
-	_, ok := expr.(*ast.BinaryExpr)
-	return ok
-}
-
-func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
-	p.print(expr.Pos())
-
-	switch x := expr.(type) {
-	case *ast.BadExpr:
-		p.print("BadExpr")
-
-	case *ast.Ident:
-		p.print(x)
-
-	case *ast.BinaryExpr:
-		if depth < 1 {
-			p.internalError("depth < 1:", depth)
-			depth = 1
-		}
-		p.binaryExpr(x, prec1, cutoff(x, depth), depth)
-
-	case *ast.KeyValueExpr:
-		p.expr(x.Key)
-		p.print(x.Colon, token.COLON, blank)
-		p.expr(x.Value)
-
-	case *ast.StarExpr:
-		const prec = token.UnaryPrec
-		if prec < prec1 {
-			// parenthesis needed
-			p.print(token.LPAREN)
-			p.print(token.MUL)
-			p.expr(x.X)
-			p.print(token.RPAREN)
-		} else {
-			// no parenthesis needed
-			p.print(token.MUL)
-			p.expr(x.X)
-		}
-
-	case *ast.UnaryExpr:
-		const prec = token.UnaryPrec
-		if prec < prec1 {
-			// parenthesis needed
-			p.print(token.LPAREN)
-			p.expr(x)
-			p.print(token.RPAREN)
-		} else {
-			// no parenthesis needed
-			p.print(x.Op)
-			if x.Op == token.RANGE {
-				// TODO(gri) Remove this code if it cannot be reached.
-				p.print(blank)
-			}
-			p.expr1(x.X, prec, depth)
-		}
-
-	case *ast.BasicLit:
-		if p.Config.Mode&normalizeNumbers != 0 {
-			x = normalizedNumber(x)
-		}
-		p.print(x)
-
-	case *ast.FuncLit:
-		p.print(x.Type.Pos(), token.FUNC)
-		// See the comment in funcDecl about how the header size is computed.
-		startCol := p.out.Column - len("func")
-		p.signature(x.Type)
-		p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body)
-
-	case *ast.ParenExpr:
-		if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
-			// don't print parentheses around an already parenthesized expression
-			// TODO(gri) consider making this more general and incorporate precedence levels
-			p.expr0(x.X, depth)
-		} else {
-			p.print(token.LPAREN)
-			p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
-			p.print(x.Rparen, token.RPAREN)
-		}
-
-	case *ast.SelectorExpr:
-		p.selectorExpr(x, depth, false)
-
-	case *ast.TypeAssertExpr:
-		p.expr1(x.X, token.HighestPrec, depth)
-		p.print(token.PERIOD, x.Lparen, token.LPAREN)
-		if x.Type != nil {
-			p.expr(x.Type)
-		} else {
-			p.print(token.TYPE)
-		}
-		p.print(x.Rparen, token.RPAREN)
-
-	case *ast.IndexExpr:
-	// TODO(gri): should treat [] like parentheses and undo one level of depth
-		p.expr1(x.X, token.HighestPrec, 1)
-		p.print(x.Lbrack, token.LBRACK)
-		p.expr0(x.Index, depth+1)
-		p.print(x.Rbrack, token.RBRACK)
-
-	case *ast.IndexListExpr:
-		// TODO(gri): as for IndexExpr, should treat [] like parentheses and undo
-		// one level of depth
-		p.expr1(x.X, token.HighestPrec, 1)
-		p.print(x.Lbrack, token.LBRACK)
-		p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false)
-		p.print(x.Rbrack, token.RBRACK)
-
-	case *ast.SliceExpr:
-	// TODO(gri): should treat [] like parentheses and undo one level of depth
-		p.expr1(x.X, token.HighestPrec, 1)
-		p.print(x.Lbrack, token.LBRACK)
-		indices := []ast.Expr{x.Low, x.High}
-		if x.Max != nil {
-			indices = append(indices, x.Max)
-		}
-		// determine if we need extra blanks around ':'
-		var needsBlanks bool
-		if depth <= 1 {
-			var indexCount int
-			var hasBinaries bool
-			for _, x := range indices {
-				if x != nil {
-					indexCount++
-					if isBinary(x) {
-						hasBinaries = true
-					}
-				}
-			}
-			if indexCount > 1 && hasBinaries {
-				needsBlanks = true
-			}
-		}
-		for i, x := range indices {
-			if i > 0 {
-				if indices[i-1] != nil && needsBlanks {
-					p.print(blank)
-				}
-				p.print(token.COLON)
-				if x != nil && needsBlanks {
-					p.print(blank)
-				}
-			}
-			if x != nil {
-				p.expr0(x, depth+1)
-			}
-		}
-		p.print(x.Rbrack, token.RBRACK)
-
-	case *ast.CallExpr:
-		if len(x.Args) > 1 {
-			depth++
-		}
-		var wasIndented bool
-		if _, ok := x.Fun.(*ast.FuncType); ok {
-			// conversions to literal function types require parentheses around the type
-			p.print(token.LPAREN)
-			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
-			p.print(token.RPAREN)
-		} else {
-			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
-		}
-		p.print(x.Lparen, token.LPAREN)
-		if x.Ellipsis.IsValid() {
-			p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false)
-			p.print(x.Ellipsis, token.ELLIPSIS)
-			if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
-				p.print(token.COMMA, formfeed)
-			}
-		} else {
-			p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false)
-		}
-		p.print(x.Rparen, token.RPAREN)
-		if wasIndented {
-			p.print(unindent)
-		}
-
-	case *ast.CompositeLit:
-		// composite literal elements that are composite literals themselves may have the type omitted
-		if x.Type != nil {
-			p.expr1(x.Type, token.HighestPrec, depth)
-		}
-		p.level++
-		p.print(x.Lbrace, token.LBRACE)
-		p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete)
-		// do not insert extra line break following a /*-style comment
-		// before the closing '}' as it might break the code if there
-		// is no trailing ','
-		mode := noExtraLinebreak
-		// do not insert extra blank following a /*-style comment
-		// before the closing '}' unless the literal is empty
-		if len(x.Elts) > 0 {
-			mode |= noExtraBlank
-		}
-		// need the initial indent to print lone comments with
-		// the proper level of indentation
-		p.print(indent, unindent, mode, x.Rbrace, token.RBRACE, mode)
-		p.level--
-
-	case *ast.Ellipsis:
-		p.print(token.ELLIPSIS)
-		if x.Elt != nil {
-			p.expr(x.Elt)
-		}
-
-	case *ast.ArrayType:
-		p.print(token.LBRACK)
-		if x.Len != nil {
-			p.expr(x.Len)
-		}
-		p.print(token.RBRACK)
-		p.expr(x.Elt)
-
-	case *ast.StructType:
-		p.print(token.STRUCT)
-		p.fieldList(x.Fields, true, x.Incomplete)
-
-	case *ast.FuncType:
-		p.print(token.FUNC)
-		p.signature(x)
-
-	case *ast.InterfaceType:
-		p.print(token.INTERFACE)
-		p.fieldList(x.Methods, false, x.Incomplete)
-
-	case *ast.MapType:
-		p.print(token.MAP, token.LBRACK)
-		p.expr(x.Key)
-		p.print(token.RBRACK)
-		p.expr(x.Value)
-
-	case *ast.ChanType:
-		switch x.Dir {
-		case ast.SEND | ast.RECV:
-			p.print(token.CHAN)
-		case ast.RECV:
-			p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same
-		case ast.SEND:
-			p.print(token.CHAN, x.Arrow, token.ARROW)
-		}
-		p.print(blank)
-		p.expr(x.Value)
-
-	default:
-		panic("unreachable")
-	}
-}
-
-// normalizedNumber rewrites base prefixes and exponents
-// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3),
-// and removes leading 0's from integer imaginary literals (0765i to 765i).
-// It leaves hexadecimal digits alone.
-//
-// normalizedNumber doesn't modify the ast.BasicLit value lit points to.
-// If lit is not a number or a number in canonical format already,
-// lit is returned as is. Otherwise a new ast.BasicLit is created.
-func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit {
-	if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG {
-		return lit // not a number - nothing to do
-	}
-	if len(lit.Value) < 2 {
-		return lit // only one digit (common case) - nothing to do
-	}
-	// len(lit.Value) >= 2
-
-	// We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer
-	// or floating-point value, decimal or not. Instead, just consider the literal pattern.
-	x := lit.Value
-	switch x[:2] {
-	default:
-		// 0-prefix octal, decimal int, or float (possibly with 'i' suffix)
-		if i := strings.LastIndexByte(x, 'E'); i >= 0 {
-			x = x[:i] + "e" + x[i+1:]
-			break
-		}
-		// remove leading 0's from integer (but not floating-point) imaginary literals
-		if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") {
-			x = strings.TrimLeft(x, "0_")
-			if x == "i" {
-				x = "0i"
-			}
-		}
-	case "0X":
-		x = "0x" + x[2:]
-		// possibly a hexadecimal float
-		if i := strings.LastIndexByte(x, 'P'); i >= 0 {
-			x = x[:i] + "p" + x[i+1:]
-		}
-	case "0x":
-		// possibly a hexadecimal float
-		i := strings.LastIndexByte(x, 'P')
-		if i == -1 {
-			return lit // nothing to do
-		}
-		x = x[:i] + "p" + x[i+1:]
-	case "0O":
-		x = "0o" + x[2:]
-	case "0o":
-		return lit // nothing to do
-	case "0B":
-		x = "0b" + x[2:]
-	case "0b":
-		return lit // nothing to do
-	}
-
-	return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x}
-}
-
-func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool {
-	if x, ok := expr.(*ast.SelectorExpr); ok {
-		return p.selectorExpr(x, depth, true)
-	}
-	p.expr1(expr, prec1, depth)
-	return false
-}
-
-// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans
-// multiple lines.
-func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool {
-	p.expr1(x.X, token.HighestPrec, depth)
-	p.print(token.PERIOD)
-	if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
-		p.print(indent, newline, x.Sel.Pos(), x.Sel)
-		if !isMethod {
-			p.print(unindent)
-		}
-		return true
-	}
-	p.print(x.Sel.Pos(), x.Sel)
-	return false
-}
-
-func (p *printer) expr0(x ast.Expr, depth int) {
-	p.expr1(x, token.LowestPrec, depth)
-}
-
-func (p *printer) expr(x ast.Expr) {
-	const depth = 1
-	p.expr1(x, token.LowestPrec, depth)
-}
-
-// ----------------------------------------------------------------------------
-// Statements
-
-// Print the statement list indented, but without a newline after the last statement.
-// Extra line breaks between statements in the source are respected but at most one
-// empty line is printed between statements.
-func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
-	if nindent > 0 {
-		p.print(indent)
-	}
-	var line int
-	i := 0
-	for _, s := range list {
-		// ignore empty statements (was issue 3466)
-		if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
-			// nindent == 0 only for lists of switch/select case clauses;
-			// in those cases each clause is a new section
-			if len(p.output) > 0 {
-				// only print line break if we are not at the beginning of the output
-				// (i.e., we are not printing only a partial program)
-				p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0)
-			}
-			p.recordLine(&line)
-			p.stmt(s, nextIsRBrace && i == len(list)-1)
-			// labeled statements put labels on a separate line, but here
-			// we only care about the start line of the actual statement
-			// without label - correct line for each label
-			for t := s; ; {
-				lt, _ := t.(*ast.LabeledStmt)
-				if lt == nil {
-					break
-				}
-				line++
-				t = lt.Stmt
-			}
-			i++
-		}
-	}
-	if nindent > 0 {
-		p.print(unindent)
-	}
-}
-
-// block prints an *ast.BlockStmt; it always spans at least two lines.
-func (p *printer) block(b *ast.BlockStmt, nindent int) {
-	p.print(b.Lbrace, token.LBRACE)
-	p.stmtList(b.List, nindent, true)
-	p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
-	p.print(b.Rbrace, token.RBRACE)
-}
-
-func isTypeName(x ast.Expr) bool {
-	switch t := x.(type) {
-	case *ast.Ident:
-		return true
-	case *ast.SelectorExpr:
-		return isTypeName(t.X)
-	}
-	return false
-}
-
-func stripParens(x ast.Expr) ast.Expr {
-	if px, strip := x.(*ast.ParenExpr); strip {
-		// parentheses must not be stripped if there are any
-		// unparenthesized composite literals starting with
-		// a type name
-		ast.Inspect(px.X, func(node ast.Node) bool {
-			switch x := node.(type) {
-			case *ast.ParenExpr:
-				// parentheses protect enclosed composite literals
-				return false
-			case *ast.CompositeLit:
-				if isTypeName(x.Type) {
-					strip = false // do not strip parentheses
-				}
-				return false
-			}
-			// in all other cases, keep inspecting
-			return true
-		})
-		if strip {
-			return stripParens(px.X)
-		}
-	}
-	return x
-}
-
-func stripParensAlways(x ast.Expr) ast.Expr {
-	if x, ok := x.(*ast.ParenExpr); ok {
-		return stripParensAlways(x.X)
-	}
-	return x
-}
-
-func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
-	p.print(blank)
-	needsBlank := false
-	if init == nil && post == nil {
-		// no semicolons required
-		if expr != nil {
-			p.expr(stripParens(expr))
-			needsBlank = true
-		}
-	} else {
-		// all semicolons required
-		// (they are not separators, print them explicitly)
-		if init != nil {
-			p.stmt(init, false)
-		}
-		p.print(token.SEMICOLON, blank)
-		if expr != nil {
-			p.expr(stripParens(expr))
-			needsBlank = true
-		}
-		if isForStmt {
-			p.print(token.SEMICOLON, blank)
-			needsBlank = false
-			if post != nil {
-				p.stmt(post, false)
-				needsBlank = true
-			}
-		}
-	}
-	if needsBlank {
-		p.print(blank)
-	}
-}
-
-// indentList reports whether an expression list would look better if it
-// were indented wholesale (starting with the very first element, rather
-// than starting at the first line break).
-func (p *printer) indentList(list []ast.Expr) bool {
-	// Heuristic: indentList reports whether there is more than one multi-
-	// line element in the list, or if there is any element that is not
-	// starting on the same line as the previous one ends.
-	if len(list) >= 2 {
-		var b = p.lineFor(list[0].Pos())
-		var e = p.lineFor(list[len(list)-1].End())
-		if 0 < b && b < e {
-			// list spans multiple lines
-			n := 0 // multi-line element count
-			line := b
-			for _, x := range list {
-				xb := p.lineFor(x.Pos())
-				xe := p.lineFor(x.End())
-				if line < xb {
-					// x is not starting on the same
-					// line as the previous one ended
-					return true
-				}
-				if xb < xe {
-					// x is a multi-line element
-					n++
-				}
-				line = xe
-			}
-			return n > 1
-		}
-	}
-	return false
-}
-
-func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
-	p.print(stmt.Pos())
-
-	switch s := stmt.(type) {
-	case *ast.BadStmt:
-		p.print("BadStmt")
-
-	case *ast.DeclStmt:
-		p.decl(s.Decl)
-
-	case *ast.EmptyStmt:
-		// nothing to do
-
-	case *ast.LabeledStmt:
-		// a "correcting" unindent immediately following a line break
-		// is applied before the line break if there is no comment
-		// between (see writeWhitespace)
-		p.print(unindent)
-		p.expr(s.Label)
-		p.print(s.Colon, token.COLON, indent)
-		if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
-			if !nextIsRBrace {
-				p.print(newline, e.Pos(), token.SEMICOLON)
-				break
-			}
-		} else {
-			p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
-		}
-		p.stmt(s.Stmt, nextIsRBrace)
-
-	case *ast.ExprStmt:
-		const depth = 1
-		p.expr0(s.X, depth)
-
-	case *ast.SendStmt:
-		const depth = 1
-		p.expr0(s.Chan, depth)
-		p.print(blank, s.Arrow, token.ARROW, blank)
-		p.expr0(s.Value, depth)
-
-	case *ast.IncDecStmt:
-		const depth = 1
-		p.expr0(s.X, depth+1)
-		p.print(s.TokPos, s.Tok)
-
-	case *ast.AssignStmt:
-		var depth = 1
-		if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
-			depth++
-		}
-		p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false)
-		p.print(blank, s.TokPos, s.Tok, blank)
-		p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false)
-
-	case *ast.GoStmt:
-		p.print(token.GO, blank)
-		p.expr(s.Call)
-
-	case *ast.DeferStmt:
-		p.print(token.DEFER, blank)
-		p.expr(s.Call)
-
-	case *ast.ReturnStmt:
-		p.print(token.RETURN)
-		if s.Results != nil {
-			p.print(blank)
-			// Use indentList heuristic to make corner cases look
-			// better (issue 1207). A more systematic approach would
-			// always indent, but this would cause significant
-			// reformatting of the code base and not necessarily
-			// lead to more nicely formatted code in general.
-			if p.indentList(s.Results) {
-				p.print(indent)
-				// Use NoPos so that a newline never goes before
-				// the results (see issue #32854).
-				p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false)
-				p.print(unindent)
-			} else {
-				p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false)
-			}
-		}
-
-	case *ast.BranchStmt:
-		p.print(s.Tok)
-		if s.Label != nil {
-			p.print(blank)
-			p.expr(s.Label)
-		}
-
-	case *ast.BlockStmt:
-		p.block(s, 1)
-
-	case *ast.IfStmt:
-		p.print(token.IF)
-		p.controlClause(false, s.Init, s.Cond, nil)
-		p.block(s.Body, 1)
-		if s.Else != nil {
-			p.print(blank, token.ELSE, blank)
-			switch s.Else.(type) {
-			case *ast.BlockStmt, *ast.IfStmt:
-				p.stmt(s.Else, nextIsRBrace)
-			default:
-				// This can only happen with an incorrectly
-				// constructed AST. Permit it but print so
-				// that it can be parsed without errors.
-				p.print(token.LBRACE, indent, formfeed)
-				p.stmt(s.Else, true)
-				p.print(unindent, formfeed, token.RBRACE)
-			}
-		}
-
-	case *ast.CaseClause:
-		if s.List != nil {
-			p.print(token.CASE, blank)
-			p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false)
-		} else {
-			p.print(token.DEFAULT)
-		}
-		p.print(s.Colon, token.COLON)
-		p.stmtList(s.Body, 1, nextIsRBrace)
-
-	case *ast.SwitchStmt:
-		p.print(token.SWITCH)
-		p.controlClause(false, s.Init, s.Tag, nil)
-		p.block(s.Body, 0)
-
-	case *ast.TypeSwitchStmt:
-		p.print(token.SWITCH)
-		if s.Init != nil {
-			p.print(blank)
-			p.stmt(s.Init, false)
-			p.print(token.SEMICOLON)
-		}
-		p.print(blank)
-		p.stmt(s.Assign, false)
-		p.print(blank)
-		p.block(s.Body, 0)
-
-	case *ast.CommClause:
-		if s.Comm != nil {
-			p.print(token.CASE, blank)
-			p.stmt(s.Comm, false)
-		} else {
-			p.print(token.DEFAULT)
-		}
-		p.print(s.Colon, token.COLON)
-		p.stmtList(s.Body, 1, nextIsRBrace)
-
-	case *ast.SelectStmt:
-		p.print(token.SELECT, blank)
-		body := s.Body
-		if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) {
-			// print empty select statement w/o comments on one line
-			p.print(body.Lbrace, token.LBRACE, body.Rbrace, token.RBRACE)
-		} else {
-			p.block(body, 0)
-		}
-
-	case *ast.ForStmt:
-		p.print(token.FOR)
-		p.controlClause(true, s.Init, s.Cond, s.Post)
-		p.block(s.Body, 1)
-
-	case *ast.RangeStmt:
-		p.print(token.FOR, blank)
-		if s.Key != nil {
-			p.expr(s.Key)
-			if s.Value != nil {
-				// use position of value following the comma as
-				// comma position for correct comment placement
-				p.print(s.Value.Pos(), token.COMMA, blank)
-				p.expr(s.Value)
-			}
-			p.print(blank, s.TokPos, s.Tok, blank)
-		}
-		p.print(token.RANGE, blank)
-		p.expr(stripParens(s.X))
-		p.print(blank)
-		p.block(s.Body, 1)
-
-	default:
-		panic("unreachable")
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// The keepTypeColumn function determines if the type column of a series of
-// consecutive const or var declarations must be kept, or if initialization
-// values (V) can be placed in the type column (T) instead. The i'th entry
-// in the result slice is true if the type column in spec[i] must be kept.
-//
-// For example, the declaration:
-//
-//		const (
-//			foobar int = 42 // comment
-//			x          = 7  // comment
-//			foo
-//	             bar = 991
-//		)
-//
-// leads to the type/values matrix below. A run of value columns (V) can
-// be moved into the type column if there is no type for any of the values
-// in that column (we only move entire columns so that they align properly).
-//
-//		matrix        formatted     result
-//	                   matrix
-//		T  V    ->    T  V     ->   true      there is a T and so the type
-//		-  V          -  V          true      column must be kept
-//		-  -          -  -          false
-//		-  V          V  -          false     V is moved into T column
-func keepTypeColumn(specs []ast.Spec) []bool {
-	m := make([]bool, len(specs))
-
-	populate := func(i, j int, keepType bool) {
-		if keepType {
-			for ; i < j; i++ {
-				m[i] = true
-			}
-		}
-	}
-
-	i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run
-	var keepType bool
-	for i, s := range specs {
-		t := s.(*ast.ValueSpec)
-		if t.Values != nil {
-			if i0 < 0 {
-				// start of a run of ValueSpecs with non-nil Values
-				i0 = i
-				keepType = false
-			}
-		} else {
-			if i0 >= 0 {
-				// end of a run
-				populate(i0, i, keepType)
-				i0 = -1
-			}
-		}
-		if t.Type != nil {
-			keepType = true
-		}
-	}
-	if i0 >= 0 {
-		// end of a run
-		populate(i0, len(specs), keepType)
-	}
-
-	return m
-}
-
-func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
-	p.setComment(s.Doc)
-	p.identList(s.Names, false) // always present
-	extraTabs := 3
-	if s.Type != nil || keepType {
-		p.print(vtab)
-		extraTabs--
-	}
-	if s.Type != nil {
-		p.expr(s.Type)
-	}
-	if s.Values != nil {
-		p.print(vtab, token.ASSIGN, blank)
-		p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
-		extraTabs--
-	}
-	if s.Comment != nil {
-		for ; extraTabs > 0; extraTabs-- {
-			p.print(vtab)
-		}
-		p.setComment(s.Comment)
-	}
-}
-
-func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit {
-	// Note: An unmodified AST generated by go/parser will already
-	// contain a backward- or double-quoted path string that does
-	// not contain any invalid characters, and most of the work
-	// here is not needed. However, a modified or generated AST
-	// may possibly contain non-canonical paths. Do the work in
-	// all cases since it's not too hard and not speed-critical.
-
-	// if we don't have a proper string, be conservative and return whatever we have
-	if lit.Kind != token.STRING {
-		return lit
-	}
-	s, err := strconv.Unquote(lit.Value)
-	if err != nil {
-		return lit
-	}
-
-	// if the string is an invalid path, return whatever we have
-	//
-	// spec: "Implementation restriction: A compiler may restrict
-	// ImportPaths to non-empty strings using only characters belonging
-	// to Unicode's L, M, N, P, and S general categories (the Graphic
-	// characters without spaces) and may also exclude the characters
-	// !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character
-	// U+FFFD."
-	if s == "" {
-		return lit
-	}
-	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
-	for _, r := range s {
-		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
-			return lit
-		}
-	}
-
-	// otherwise, return the double-quoted path
-	s = strconv.Quote(s)
-	if s == lit.Value {
-		return lit // nothing wrong with lit
-	}
-	return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s}
-}
-
-// The parameter n is the number of specs in the group. If doIndent is set,
-// multi-line identifier lists in the spec are indented when the first
-// linebreak is encountered.
-func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
-	switch s := spec.(type) {
-	case *ast.ImportSpec:
-		p.setComment(s.Doc)
-		if s.Name != nil {
-			p.expr(s.Name)
-			p.print(blank)
-		}
-		p.expr(sanitizeImportPath(s.Path))
-		p.setComment(s.Comment)
-		p.print(s.EndPos)
-
-	case *ast.ValueSpec:
-		if n != 1 {
-			p.internalError("expected n = 1; got", n)
-		}
-		p.setComment(s.Doc)
-		p.identList(s.Names, doIndent) // always present
-		if s.Type != nil {
-			p.print(blank)
-			p.expr(s.Type)
-		}
-		if s.Values != nil {
-			p.print(blank, token.ASSIGN, blank)
-			p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
-		}
-		p.setComment(s.Comment)
-
-	case *ast.TypeSpec:
-		p.setComment(s.Doc)
-		p.expr(s.Name)
-		if s.TypeParams != nil {
-			p.parameters(s.TypeParams, typeTParam)
-		}
-		if n == 1 {
-			p.print(blank)
-		} else {
-			p.print(vtab)
-		}
-		if s.Assign.IsValid() {
-			p.print(token.ASSIGN, blank)
-		}
-		p.expr(s.Type)
-		p.setComment(s.Comment)
-
-	default:
-		panic("unreachable")
-	}
-}
-
-func (p *printer) genDecl(d *ast.GenDecl) {
-	p.setComment(d.Doc)
-	p.print(d.Pos(), d.Tok, blank)
-
-	if d.Lparen.IsValid() || len(d.Specs) > 1 {
-		// group of parenthesized declarations
-		p.print(d.Lparen, token.LPAREN)
-		if n := len(d.Specs); n > 0 {
-			p.print(indent, formfeed)
-			if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
-				// two or more grouped const/var declarations:
-				// determine if the type column must be kept
-				keepType := keepTypeColumn(d.Specs)
-				var line int
-				for i, s := range d.Specs {
-					if i > 0 {
-						p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
-					}
-					p.recordLine(&line)
-					p.valueSpec(s.(*ast.ValueSpec), keepType[i])
-				}
-			} else {
-				var line int
-				for i, s := range d.Specs {
-					if i > 0 {
-						p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
-					}
-					p.recordLine(&line)
-					p.spec(s, n, false)
-				}
-			}
-			p.print(unindent, formfeed)
-		}
-		p.print(d.Rparen, token.RPAREN)
-
-	} else if len(d.Specs) > 0 {
-		// single declaration
-		p.spec(d.Specs[0], 1, true)
-	}
-}
-
-// nodeSize determines the size of n in chars after formatting.
-// The result is <= maxSize if the node fits on one line with at
-// most maxSize chars and the formatted output doesn't contain
-// any control chars. Otherwise, the result is > maxSize.
-func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
-	// nodeSize invokes the printer, which may invoke nodeSize
-	// recursively. For deep composite literal nests, this can
-	// lead to an exponential algorithm. Remember previous
-	// results to prune the recursion (was issue 1628).
-	if size, found := p.nodeSizes[n]; found {
-		return size
-	}
-
-	size = maxSize + 1 // assume n doesn't fit
-	p.nodeSizes[n] = size
-
-	// nodeSize computation must be independent of particular
-	// style so that we always get the same decision; print
-	// in RawFormat
-	cfg := Config{Mode: RawFormat}
-	var buf bytes.Buffer
-	if err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
-		return
-	}
-	if buf.Len() <= maxSize {
-		for _, ch := range buf.Bytes() {
-			switch ch {
-			case '\n', '\f':
-				return // does not fit in a single line
-			}
-		}
-		size = buf.Len() // n fits
-		p.nodeSizes[n] = size
-	}
-	return
-}
-
-// numLines returns the number of lines spanned by node n in the original source.
-func (p *printer) numLines(n ast.Node) int {
-	if from := n.Pos(); from.IsValid() {
-		if to := n.End(); to.IsValid() {
-			return p.lineFor(to) - p.lineFor(from) + 1
-		}
-	}
-	return infinity
-}
-
-// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
-func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
-	pos1 := b.Pos()
-	pos2 := b.Rbrace
-	if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
-		// opening and closing brace are on different lines - don't make it a one-liner
-		return maxSize + 1
-	}
-	if len(b.List) > 5 {
-		// too many statements - don't make it a one-liner
-		return maxSize + 1
-	}
-	// otherwise, estimate body size
-	bodySize := p.commentSizeBefore(p.posFor(pos2))
-	for i, s := range b.List {
-		if bodySize > maxSize {
-			break // no need to continue
-		}
-		if i > 0 {
-			bodySize += 2 // space for a semicolon and blank
-		}
-		bodySize += p.nodeSize(s, maxSize)
-	}
-	return bodySize
-}
-
-// funcBody prints a function body following a function header of given headerSize.
-// If the header's and block's size are "small enough" and the block is "simple enough",
-// the block is printed on the current line, without line breaks, spaced from the header
-// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
-// lines for the block's statements and its closing "}".
-func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
-	if b == nil {
-		return
-	}
-
-	// save/restore composite literal nesting level
-	defer func(level int) {
-		p.level = level
-	}(p.level)
-	p.level = 0
-
-	const maxSize = 100
-	if headerSize+p.bodySize(b, maxSize) <= maxSize {
-		p.print(sep, b.Lbrace, token.LBRACE)
-		if len(b.List) > 0 {
-			p.print(blank)
-			for i, s := range b.List {
-				if i > 0 {
-					p.print(token.SEMICOLON, blank)
-				}
-				p.stmt(s, i == len(b.List)-1)
-			}
-			p.print(blank)
-		}
-		p.print(noExtraLinebreak, b.Rbrace, token.RBRACE, noExtraLinebreak)
-		return
-	}
-
-	if sep != ignore {
-		p.print(blank) // always use blank
-	}
-	p.block(b, 1)
-}
-
-// distanceFrom returns the column difference between p.out (the current output
-// position) and startOutCol. If the start position is on a different line from
-// the current position (or either is unknown), the result is infinity.
-func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int {
-	if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line {
-		return p.out.Column - startOutCol
-	}
-	return infinity
-}
-
-func (p *printer) funcDecl(d *ast.FuncDecl) {
-	p.setComment(d.Doc)
-	p.print(d.Pos(), token.FUNC, blank)
-	// We have to save startCol only after emitting FUNC; otherwise it can be on a
-	// different line (all whitespace preceding the FUNC is emitted only when the
-	// FUNC is emitted).
-	startCol := p.out.Column - len("func ")
-	if d.Recv != nil {
-		p.parameters(d.Recv, funcParam) // method: print receiver
-		p.print(blank)
-	}
-	p.expr(d.Name)
-	p.signature(d.Type)
-	p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body)
-}
-
-func (p *printer) decl(decl ast.Decl) {
-	switch d := decl.(type) {
-	case *ast.BadDecl:
-		p.print(d.Pos(), "BadDecl")
-	case *ast.GenDecl:
-		p.genDecl(d)
-	case *ast.FuncDecl:
-		p.funcDecl(d)
-	default:
-		panic("unreachable")
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Files
-
-func declToken(decl ast.Decl) (tok token.Token) {
-	tok = token.ILLEGAL
-	switch d := decl.(type) {
-	case *ast.GenDecl:
-		tok = d.Tok
-	case *ast.FuncDecl:
-		tok = token.FUNC
-	}
-	return
-}
-
-func (p *printer) declList(list []ast.Decl) {
-	tok := token.ILLEGAL
-	for _, d := range list {
-		prev := tok
-		tok = declToken(d)
-		// If the declaration token changed (e.g., from CONST to TYPE)
-		// or the next declaration has documentation associated with it,
-		// print an empty line between top-level declarations.
-		// (because p.linebreak is called with the position of d, which
-		// is past any documentation, the minimum requirement is satisfied
-		// even w/o the extra getDoc(d) nil-check - leave it in case the
-		// linebreak logic improves - there's already a TODO).
-		if len(p.output) > 0 {
-			// only print line break if we are not at the beginning of the output
-			// (i.e., we are not printing only a partial program)
-			min := 1
-			if prev != tok || getDoc(d) != nil {
-				min = 2
-			}
-			// start a new section if the next declaration is a function
-			// that spans multiple lines (see also issue #19544)
-			p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1)
-		}
-		p.decl(d)
-	}
-}
-
-func (p *printer) file(src *ast.File) {
-	p.setComment(src.Doc)
-	p.print(src.Pos(), token.PACKAGE, blank)
-	p.expr(src.Name)
-	p.declList(src.Decls)
-	p.print(newline)
-}
diff --git a/internal/backport/go/printer/performance_test.go b/internal/backport/go/printer/performance_test.go
deleted file mode 100644
index bc8b17b..0000000
--- a/internal/backport/go/printer/performance_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a simple printer performance benchmark:
-// go test -bench=BenchmarkPrint
-
-package printer
-
-import (
-	"bytes"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"io"
-	"log"
-	"os"
-	"testing"
-)
-
-var (
-	testfile *ast.File
-	testsize int64
-)
-
-func testprint(out io.Writer, file *ast.File) {
-	if err := (&Config{TabIndent | UseSpaces | normalizeNumbers, 8, 0}).Fprint(out, fset, file); err != nil {
-		log.Fatalf("print error: %s", err)
-	}
-}
-
-// cannot initialize in init because (printer) Fprint launches goroutines.
-func initialize() {
-	const filename = "testdata/parser.go"
-
-	src, err := os.ReadFile(filename)
-	if err != nil {
-		log.Fatalf("%s", err)
-	}
-
-	file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
-	if err != nil {
-		log.Fatalf("%s", err)
-	}
-
-	var buf bytes.Buffer
-	testprint(&buf, file)
-	if !bytes.Equal(buf.Bytes(), src) {
-		log.Fatalf("print error: %s not idempotent", filename)
-	}
-
-	testfile = file
-	testsize = int64(len(src))
-}
-
-func BenchmarkPrint(b *testing.B) {
-	if testfile == nil {
-		initialize()
-	}
-	b.ReportAllocs()
-	b.SetBytes(testsize)
-	for i := 0; i < b.N; i++ {
-		testprint(io.Discard, testfile)
-	}
-}
diff --git a/internal/backport/go/printer/printer.go b/internal/backport/go/printer/printer.go
deleted file mode 100644
index 0ab4b96..0000000
--- a/internal/backport/go/printer/printer.go
+++ /dev/null
@@ -1,1409 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package printer implements printing of AST nodes.
-package printer
-
-import (
-	"fmt"
-	"go/build/constraint"
-	"io"
-	"os"
-	"strings"
-	"text/tabwriter"
-	"unicode"
-
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-const (
-	maxNewlines = 2     // max. number of newlines between source text
-	debug       = false // enable for debugging
-	infinity    = 1 << 30
-)
-
-type whiteSpace byte
-
-const (
-	ignore   = whiteSpace(0)
-	blank    = whiteSpace(' ')
-	vtab     = whiteSpace('\v')
-	newline  = whiteSpace('\n')
-	formfeed = whiteSpace('\f')
-	indent   = whiteSpace('>')
-	unindent = whiteSpace('<')
-)
-
-// A pmode value represents the current printer mode.
-type pmode int
-
-const (
-	noExtraBlank     pmode = 1 << iota // disables extra blank after /*-style comment
-	noExtraLinebreak                   // disables extra line break after /*-style comment
-)
-
-type commentInfo struct {
-	cindex         int               // current comment index
-	comment        *ast.CommentGroup // = printer.comments[cindex]; or nil
-	commentOffset  int               // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
-	commentNewline bool              // true if the comment group contains newlines
-}
-
-type printer struct {
-	// Configuration (does not change after initialization)
-	Config
-	fset *token.FileSet
-
-	// Current state
-	output       []byte       // raw printer result
-	indent       int          // current indentation
-	level        int          // level == 0: outside composite literal; level > 0: inside composite literal
-	mode         pmode        // current printer mode
-	endAlignment bool         // if set, terminate alignment immediately
-	impliedSemi  bool         // if set, a linebreak implies a semicolon
-	lastTok      token.Token  // last token printed (token.ILLEGAL if it's whitespace)
-	prevOpen     token.Token  // previous non-brace "open" token (, [, or token.ILLEGAL
-	wsbuf        []whiteSpace // delayed white space
-	goBuild      []int        // start index of all //go:build comments in output
-	plusBuild    []int        // start index of all // +build comments in output
-
-	// Positions
-	// The out position differs from the pos position when the result
-	// formatting differs from the source formatting (in the amount of
-	// white space). If there's a difference and SourcePos is set in
-	// ConfigMode, //line directives are used in the output to restore
-	// original source positions for a reader.
-	pos     token.Position // current position in AST (source) space
-	out     token.Position // current position in output space
-	last    token.Position // value of pos after calling writeString
-	linePtr *int           // if set, record out.Line for the next token in *linePtr
-
-	// The list of all source comments, in order of appearance.
-	comments        []*ast.CommentGroup // may be nil
-	useNodeComments bool                // if not set, ignore lead and line comments of nodes
-
-	// Information about p.comments[p.cindex]; set up by nextComment.
-	commentInfo
-
-	// Cache of already computed node sizes.
-	nodeSizes map[ast.Node]int
-
-	// Cache of most recently computed line position.
-	cachedPos  token.Pos
-	cachedLine int // line corresponding to cachedPos
-}
-
-func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
-	p.Config = *cfg
-	p.fset = fset
-	p.pos = token.Position{Line: 1, Column: 1}
-	p.out = token.Position{Line: 1, Column: 1}
-	p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
-	p.nodeSizes = nodeSizes
-	p.cachedPos = -1
-}
-
-func (p *printer) internalError(msg ...interface{}) {
-	if debug {
-		fmt.Print(p.pos.String() + ": ")
-		fmt.Println(msg...)
-		panic("golang.org/x/website/internal/backport/go/printer")
-	}
-}
-
-// commentsHaveNewline reports whether a list of comments belonging to
-// an *ast.CommentGroup contains newlines. Because the position information
-// may only be partially correct, we also have to read the comment text.
-func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
-	// len(list) > 0
-	line := p.lineFor(list[0].Pos())
-	for i, c := range list {
-		if i > 0 && p.lineFor(list[i].Pos()) != line {
-			// not all comments on the same line
-			return true
-		}
-		if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
-			return true
-		}
-	}
-	_ = line
-	return false
-}
-
-func (p *printer) nextComment() {
-	for p.cindex < len(p.comments) {
-		c := p.comments[p.cindex]
-		p.cindex++
-		if list := c.List; len(list) > 0 {
-			p.comment = c
-			p.commentOffset = p.posFor(list[0].Pos()).Offset
-			p.commentNewline = p.commentsHaveNewline(list)
-			return
-		}
-		// we should not reach here (correct ASTs don't have empty
-		// ast.CommentGroup nodes), but be conservative and try again
-	}
-	// no more comments
-	p.commentOffset = infinity
-}
-
-// commentBefore reports whether the current comment group occurs
-// before the next position in the source code and printing it does
-// not introduce implicit semicolons.
-func (p *printer) commentBefore(next token.Position) bool {
-	return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
-}
-
-// commentSizeBefore returns the estimated size of the
-// comments on the same line before the next position.
-func (p *printer) commentSizeBefore(next token.Position) int {
-	// save/restore current p.commentInfo (p.nextComment() modifies it)
-	defer func(info commentInfo) {
-		p.commentInfo = info
-	}(p.commentInfo)
-
-	size := 0
-	for p.commentBefore(next) {
-		for _, c := range p.comment.List {
-			size += len(c.Text)
-		}
-		p.nextComment()
-	}
-	return size
-}
-
-// recordLine records the output line number for the next non-whitespace
-// token in *linePtr. It is used to compute an accurate line number for a
-// formatted construct, independent of pending (not yet emitted) whitespace
-// or comments.
-func (p *printer) recordLine(linePtr *int) {
-	p.linePtr = linePtr
-}
-
-// linesFrom returns the number of output lines between the current
-// output line and the line argument, ignoring any pending (not yet
-// emitted) whitespace or comments. It is used to compute an accurate
-// size (in number of lines) for a formatted construct.
-func (p *printer) linesFrom(line int) int {
-	return p.out.Line - line
-}
-
-func (p *printer) posFor(pos token.Pos) token.Position {
-	// not used frequently enough to cache entire token.Position
-	return p.fset.PositionFor(pos, false /* absolute position */)
-}
-
-func (p *printer) lineFor(pos token.Pos) int {
-	if pos != p.cachedPos {
-		p.cachedPos = pos
-		p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line
-	}
-	return p.cachedLine
-}
-
-// writeLineDirective writes a //line directive if necessary.
-func (p *printer) writeLineDirective(pos token.Position) {
-	if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
-		p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
-		p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
-		p.output = append(p.output, tabwriter.Escape)
-		// p.out must match the //line directive
-		p.out.Filename = pos.Filename
-		p.out.Line = pos.Line
-	}
-}
-
-// writeIndent writes indentation.
-func (p *printer) writeIndent() {
-	// use "hard" htabs - indentation columns
-	// must not be discarded by the tabwriter
-	n := p.Config.Indent + p.indent // include base indentation
-	for i := 0; i < n; i++ {
-		p.output = append(p.output, '\t')
-	}
-
-	// update positions
-	p.pos.Offset += n
-	p.pos.Column += n
-	p.out.Column += n
-}
-
-// writeByte writes ch n times to p.output and updates p.pos.
-// Only used to write formatting (white space) characters.
-func (p *printer) writeByte(ch byte, n int) {
-	if p.endAlignment {
-		// Ignore any alignment control character;
-		// and at the end of the line, break with
-		// a formfeed to indicate termination of
-		// existing columns.
-		switch ch {
-		case '\t', '\v':
-			ch = ' '
-		case '\n', '\f':
-			ch = '\f'
-			p.endAlignment = false
-		}
-	}
-
-	if p.out.Column == 1 {
-		// no need to write line directives before white space
-		p.writeIndent()
-	}
-
-	for i := 0; i < n; i++ {
-		p.output = append(p.output, ch)
-	}
-
-	// update positions
-	p.pos.Offset += n
-	if ch == '\n' || ch == '\f' {
-		p.pos.Line += n
-		p.out.Line += n
-		p.pos.Column = 1
-		p.out.Column = 1
-		return
-	}
-	p.pos.Column += n
-	p.out.Column += n
-}
-
-// writeString writes the string s to p.output and updates p.pos, p.out,
-// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
-// to protect s from being interpreted by the tabwriter.
-//
-// Note: writeString is only used to write Go tokens, literals, and
-// comments, all of which must be written literally. Thus, it is correct
-// to always set isLit = true. However, setting it explicitly only when
-// needed (i.e., when we don't know that s contains no tabs or line breaks)
-// avoids processing extra escape characters and reduces run time of the
-// printer benchmark by up to 10%.
-func (p *printer) writeString(pos token.Position, s string, isLit bool) {
-	if p.out.Column == 1 {
-		if p.Config.Mode&SourcePos != 0 {
-			p.writeLineDirective(pos)
-		}
-		p.writeIndent()
-	}
-
-	if pos.IsValid() {
-		// update p.pos (if pos is invalid, continue with existing p.pos)
-		// Note: Must do this after handling line beginnings because
-		// writeIndent updates p.pos if there's indentation, but p.pos
-		// is the position of s.
-		p.pos = pos
-	}
-
-	if isLit {
-	// Protect s such that it passes through the tabwriter
-		// unchanged. Note that valid Go programs cannot contain
-		// tabwriter.Escape bytes since they do not appear in legal
-		// UTF-8 sequences.
-		p.output = append(p.output, tabwriter.Escape)
-	}
-
-	if debug {
-		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
-	}
-	p.output = append(p.output, s...)
-
-	// update positions
-	nlines := 0
-	var li int // index of last newline; valid if nlines > 0
-	for i := 0; i < len(s); i++ {
-		// Raw string literals may contain any character except back quote (`).
-		if ch := s[i]; ch == '\n' || ch == '\f' {
-			// account for line break
-			nlines++
-			li = i
-			// A line break inside a literal will break whatever column
-			// formatting is in place; ignore any further alignment through
-			// the end of the line.
-			p.endAlignment = true
-		}
-	}
-	p.pos.Offset += len(s)
-	if nlines > 0 {
-		p.pos.Line += nlines
-		p.out.Line += nlines
-		c := len(s) - li
-		p.pos.Column = c
-		p.out.Column = c
-	} else {
-		p.pos.Column += len(s)
-		p.out.Column += len(s)
-	}
-
-	if isLit {
-		p.output = append(p.output, tabwriter.Escape)
-	}
-
-	p.last = p.pos
-}
-
-// writeCommentPrefix writes the whitespace before a comment.
-// If there is any pending whitespace, it consumes as much of
-// it as is likely to help position the comment nicely.
-// pos is the comment position, next the position of the item
-// after all pending comments, prev is the previous comment in
-// a group of comments (or nil), and tok is the next token.
-func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) {
-	if len(p.output) == 0 {
-		// the comment is the first item to be printed - don't write any whitespace
-		return
-	}
-
-	if pos.IsValid() && pos.Filename != p.last.Filename {
-		// comment in a different file - separate with newlines
-		p.writeByte('\f', maxNewlines)
-		return
-	}
-
-	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
-		// comment on the same line as last item:
-		// separate with at least one separator
-		hasSep := false
-		if prev == nil {
-			// first comment of a comment group
-			j := 0
-			for i, ch := range p.wsbuf {
-				switch ch {
-				case blank:
-					// ignore any blanks before a comment
-					p.wsbuf[i] = ignore
-					continue
-				case vtab:
-					// respect existing tabs - important
-					// for proper formatting of commented structs
-					hasSep = true
-					continue
-				case indent:
-					// apply pending indentation
-					continue
-				}
-				j = i
-				break
-			}
-			p.writeWhitespace(j)
-		}
-		// make sure there is at least one separator
-		if !hasSep {
-			sep := byte('\t')
-			if pos.Line == next.Line {
-				// next item is on the same line as the comment
-				// (which must be a /*-style comment): separate
-				// with a blank instead of a tab
-				sep = ' '
-			}
-			p.writeByte(sep, 1)
-		}
-
-	} else {
-		// comment on a different line:
-		// separate with at least one line break
-		droppedLinebreak := false
-		j := 0
-		for i, ch := range p.wsbuf {
-			switch ch {
-			case blank, vtab:
-				// ignore any horizontal whitespace before line breaks
-				p.wsbuf[i] = ignore
-				continue
-			case indent:
-				// apply pending indentation
-				continue
-			case unindent:
-				// if this is not the last unindent, apply it
-				// as it is (likely) belonging to the last
-				// construct (e.g., a multi-line expression list)
-				// and is not part of closing a block
-				if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
-					continue
-				}
-				// if the next token is not a closing }, apply the unindent
-				// if it appears that the comment is aligned with the
-				// token; otherwise assume the unindent is part of a
-				// closing block and stop (this scenario appears with
-				// comments before a case label where the comments
-				// apply to the next case instead of the current one)
-				if tok != token.RBRACE && pos.Column == next.Column {
-					continue
-				}
-			case newline, formfeed:
-				p.wsbuf[i] = ignore
-				droppedLinebreak = prev == nil // record only if first comment of a group
-			}
-			j = i
-			break
-		}
-		p.writeWhitespace(j)
-
-		// determine number of linebreaks before the comment
-		n := 0
-		if pos.IsValid() && p.last.IsValid() {
-			n = pos.Line - p.last.Line
-			if n < 0 { // should never happen
-				n = 0
-			}
-		}
-
-		// at the package scope level only (p.indent == 0),
-		// add an extra newline if we dropped one before:
-		// this preserves a blank line before documentation
-		// comments at the package scope level (issue 2570)
-		if p.indent == 0 && droppedLinebreak {
-			n++
-		}
-
-		// make sure there is at least one line break
-		// if the previous comment was a line comment
-		if n == 0 && prev != nil && prev.Text[1] == '/' {
-			n = 1
-		}
-
-		if n > 0 {
-			// use formfeeds to break columns before a comment;
-			// this is analogous to using formfeeds to separate
-			// individual lines of /*-style comments
-			p.writeByte('\f', nlimit(n))
-		}
-	}
-}
-
-// Returns true if s contains only white space
-// (only tabs and blanks can appear in the printer's context).
-func isBlank(s string) bool {
-	for i := 0; i < len(s); i++ {
-		if s[i] > ' ' {
-			return false
-		}
-	}
-	return true
-}
-
-// commonPrefix returns the common prefix of a and b.
-func commonPrefix(a, b string) string {
-	i := 0
-	for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
-		i++
-	}
-	return a[0:i]
-}
-
-// trimRight returns s with trailing whitespace removed.
-func trimRight(s string) string {
-	return strings.TrimRightFunc(s, unicode.IsSpace)
-}
-
-// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
-// comment line is indented, all but the first line have some form of space prefix).
-// The prefix is computed using heuristics such that it is likely that the comment
-// contents are nicely laid out after re-printing each line using the printer's
-// current indentation.
-func stripCommonPrefix(lines []string) {
-	if len(lines) <= 1 {
-		return // at most one line - nothing to do
-	}
-	// len(lines) > 1
-
-	// The heuristic in this function tries to handle a few
-	// common patterns of /*-style comments: Comments where
-	// the opening /* and closing */ are aligned and the
-	// rest of the comment text is aligned and indented with
-	// blanks or tabs, cases with a vertical "line of stars"
-	// on the left, and cases where the closing */ is on the
-	// same line as the last comment text.
-
-	// Compute maximum common white prefix of all but the first,
-	// last, and blank lines, and replace blank lines with empty
-	// lines (the first line starts with /* and has no prefix).
-	// In cases where only the first and last lines are not blank,
-	// such as two-line comments, or comments where all inner lines
-	// are blank, consider the last line for the prefix computation
-	// since otherwise the prefix would be empty.
-	//
-	// Note that the first and last line are never empty (they
-	// contain the opening /* and closing */ respectively) and
-	// thus they can be ignored by the blank line check.
-	prefix := ""
-	prefixSet := false
-	if len(lines) > 2 {
-		for i, line := range lines[1 : len(lines)-1] {
-			if isBlank(line) {
-				lines[1+i] = "" // range starts with lines[1]
-			} else {
-				if !prefixSet {
-					prefix = line
-					prefixSet = true
-				}
-				prefix = commonPrefix(prefix, line)
-			}
-
-		}
-	}
-	// If we don't have a prefix yet, consider the last line.
-	if !prefixSet {
-		line := lines[len(lines)-1]
-		prefix = commonPrefix(line, line)
-	}
-
-	/*
-	 * Check for vertical "line of stars" and correct prefix accordingly.
-	 */
-	lineOfStars := false
-	if p, _, ok := stringsCut(prefix, "*"); ok {
-		// remove trailing blank from prefix so stars remain aligned
-		prefix = strings.TrimSuffix(p, " ")
-		lineOfStars = true
-	} else {
-		// No line of stars present.
-		// Determine the white space on the first line after the /*
-		// and before the beginning of the comment text, assume two
-		// blanks instead of the /* unless the first character after
-		// the /* is a tab. If the first comment line is empty but
-		// for the opening /*, assume up to 3 blanks or a tab. This
-		// whitespace may be found as suffix in the common prefix.
-		first := lines[0]
-		if isBlank(first[2:]) {
-			// no comment text on the first line:
-			// reduce prefix by up to 3 blanks or a tab
-			// if present - this keeps comment text indented
-			// relative to the /* and */'s if it was indented
-			// in the first place
-			i := len(prefix)
-			for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
-				i--
-			}
-			if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
-				i--
-			}
-			prefix = prefix[0:i]
-		} else {
-			// comment text on the first line
-			suffix := make([]byte, len(first))
-			n := 2 // start after opening /*
-			for n < len(first) && first[n] <= ' ' {
-				suffix[n] = first[n]
-				n++
-			}
-			if n > 2 && suffix[2] == '\t' {
-				// assume the '\t' compensates for the /*
-				suffix = suffix[2:n]
-			} else {
-				// otherwise assume two blanks
-				suffix[0], suffix[1] = ' ', ' '
-				suffix = suffix[0:n]
-			}
-			// Shorten the computed common prefix by the length of
-			// suffix, if it is found as suffix of the prefix.
-			prefix = strings.TrimSuffix(prefix, string(suffix))
-		}
-	}
-
-	// Handle last line: If it only contains a closing */, align it
-	// with the opening /*, otherwise align the text with the other
-	// lines.
-	last := lines[len(lines)-1]
-	closing := "*/"
-	before, _, _ := stringsCut(last, closing) // closing always present
-	if isBlank(before) {
-		// last line only contains closing */
-		if lineOfStars {
-			closing = " */" // add blank to align final star
-		}
-		lines[len(lines)-1] = prefix + closing
-	} else {
-		// last line contains more comment text - assume
-		// it is aligned like the other lines and include
-		// in prefix computation
-		prefix = commonPrefix(prefix, last)
-	}
-
-	// Remove the common prefix from all but the first and empty lines.
-	for i, line := range lines {
-		if i > 0 && line != "" {
-			lines[i] = line[len(prefix):]
-		}
-	}
-}
-
-func (p *printer) writeComment(comment *ast.Comment) {
-	text := comment.Text
-	pos := p.posFor(comment.Pos())
-
-	const linePrefix = "//line "
-	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
-		// Possibly a //-style line directive.
-		// Suspend indentation temporarily to keep line directive valid.
-		defer func(indent int) { p.indent = indent }(p.indent)
-		p.indent = 0
-	}
-
-	// shortcut common case of //-style comments
-	if text[1] == '/' {
-		if constraint.IsGoBuild(text) {
-			p.goBuild = append(p.goBuild, len(p.output))
-		} else if constraint.IsPlusBuild(text) {
-			p.plusBuild = append(p.plusBuild, len(p.output))
-		}
-		p.writeString(pos, trimRight(text), true)
-		return
-	}
-
-	// for /*-style comments, print line by line and let the
-	// write function take care of the proper indentation
-	lines := strings.Split(text, "\n")
-
-	// The comment started in the first column but is going
-	// to be indented. For an idempotent result, add indentation
-	// to all lines such that they look like they were indented
-	// before - this will make sure the common prefix computation
-	// is the same independent of how many times formatting is
-	// applied (was issue 1835).
-	if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
-		for i, line := range lines[1:] {
-			lines[1+i] = "   " + line
-		}
-	}
-
-	stripCommonPrefix(lines)
-
-	// write comment lines, separated by formfeed,
-	// without a line break after the last line
-	for i, line := range lines {
-		if i > 0 {
-			p.writeByte('\f', 1)
-			pos = p.pos
-		}
-		if len(line) > 0 {
-			p.writeString(pos, trimRight(line), true)
-		}
-	}
-}
-
-// writeCommentSuffix writes a line break after a comment if indicated
-// and processes any leftover indentation information. If a line break
-// is needed, the kind of break (newline vs formfeed) depends on the
-// pending whitespace. The writeCommentSuffix result indicates if a
-// newline was written or if a formfeed was dropped from the whitespace
-// buffer.
-func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
-	for i, ch := range p.wsbuf {
-		switch ch {
-		case blank, vtab:
-			// ignore trailing whitespace
-			p.wsbuf[i] = ignore
-		case indent, unindent:
-			// don't lose indentation information
-		case newline, formfeed:
-			// if we need a line break, keep exactly one
-			// but remember if we dropped any formfeeds
-			if needsLinebreak {
-				needsLinebreak = false
-				wroteNewline = true
-			} else {
-				if ch == formfeed {
-					droppedFF = true
-				}
-				p.wsbuf[i] = ignore
-			}
-		}
-	}
-	p.writeWhitespace(len(p.wsbuf))
-
-	// make sure we have a line break
-	if needsLinebreak {
-		p.writeByte('\n', 1)
-		wroteNewline = true
-	}
-
-	return
-}
-
-// containsLinebreak reports whether the whitespace buffer contains any line breaks.
-func (p *printer) containsLinebreak() bool {
-	for _, ch := range p.wsbuf {
-		if ch == newline || ch == formfeed {
-			return true
-		}
-	}
-	return false
-}
-
-// intersperseComments consumes all comments that appear before the next token
-// tok and prints it together with the buffered whitespace (i.e., the whitespace
-// that needs to be written before the next token). A heuristic is used to mix
-// the comments and whitespace. The intersperseComments result indicates if a
-// newline was written or if a formfeed was dropped from the whitespace buffer.
-func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
-	var last *ast.Comment
-	for p.commentBefore(next) {
-		list := p.comment.List
-		changed := false
-		if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments
-			p.posFor(p.comment.Pos()).Column == 1 &&
-			p.posFor(p.comment.End()+1) == next {
-			// Unindented comment abutting next token position:
-			// a top-level doc comment.
-			list = formatDocComment(list)
-			changed = true
-
-			if len(p.comment.List) > 0 && len(list) == 0 {
-				// The doc comment was removed entirely.
-				// Keep preceding whitespace.
-				p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok)
-				// Change print state to continue at next.
-				p.pos = next
-				p.last = next
-				// There can't be any more comments.
-				p.nextComment()
-				return p.writeCommentSuffix(false)
-			}
-		}
-		for _, c := range list {
-			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok)
-			p.writeComment(c)
-			last = c
-		}
-		// In case list was rewritten, change print state to where
-		// the original list would have ended.
-		if len(p.comment.List) > 0 && changed {
-			last = p.comment.List[len(p.comment.List)-1]
-			p.pos = p.posFor(last.End())
-			p.last = p.pos
-		}
-		p.nextComment()
-	}
-
-	if last != nil {
-		// If the last comment is a /*-style comment and the next item
-		// follows on the same line but is not a comma, and not a "closing"
-		// token immediately following its corresponding "opening" token,
-		// add an extra separator unless explicitly disabled. Use a blank
-		// as separator unless we have pending linebreaks, they are not
-		// disabled, and we are outside a composite literal, in which case
-		// we want a linebreak (issue 15137).
-		// TODO(gri) This has become overly complicated. We should be able
-		// to track whether we're inside an expression or statement and
-		// use that information to decide more directly.
-		needsLinebreak := false
-		if p.mode&noExtraBlank == 0 &&
-			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
-			tok != token.COMMA &&
-			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
-			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
-			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
-				needsLinebreak = true
-			} else {
-				p.writeByte(' ', 1)
-			}
-		}
-		// Ensure that there is a line break after a //-style comment,
-		// before EOF, and before a closing '}' unless explicitly disabled.
-		if last.Text[1] == '/' ||
-			tok == token.EOF ||
-			tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
-			needsLinebreak = true
-		}
-		return p.writeCommentSuffix(needsLinebreak)
-	}
-
-	// no comment was written - we should never reach here since
-	// intersperseComments should not be called in that case
-	p.internalError("intersperseComments called without pending comments")
-	return
-}
-
-// writeWhitespace writes the first n whitespace entries.
-func (p *printer) writeWhitespace(n int) {
-	// write entries
-	for i := 0; i < n; i++ {
-		switch ch := p.wsbuf[i]; ch {
-		case ignore:
-			// ignore!
-		case indent:
-			p.indent++
-		case unindent:
-			p.indent--
-			if p.indent < 0 {
-				p.internalError("negative indentation:", p.indent)
-				p.indent = 0
-			}
-		case newline, formfeed:
-			// A line break immediately followed by a "correcting"
-			// unindent is swapped with the unindent - this permits
-			// proper label positioning. If a comment is between
-			// the line break and the label, the unindent is not
-			// part of the comment whitespace prefix and the comment
-			// will be positioned correctly indented.
-			if i+1 < n && p.wsbuf[i+1] == unindent {
-				// Use a formfeed to terminate the current section.
-				// Otherwise, a long label name on the next line leading
-				// to a wide column may increase the indentation column
-				// of lines before the label; effectively leading to wrong
-				// indentation.
-				p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
-				i-- // do it again
-				continue
-			}
-			fallthrough
-		default:
-			p.writeByte(byte(ch), 1)
-		}
-	}
-
-	// shift remaining entries down
-	l := copy(p.wsbuf, p.wsbuf[n:])
-	p.wsbuf = p.wsbuf[:l]
-}
-
-// ----------------------------------------------------------------------------
-// Printing interface
-
-// nlimit limits n to maxNewlines.
-func nlimit(n int) int {
-	if n > maxNewlines {
-		n = maxNewlines
-	}
-	return n
-}
-
-func mayCombine(prev token.Token, next byte) (b bool) {
-	switch prev {
-	case token.INT:
-		b = next == '.' // 1.
-	case token.ADD:
-		b = next == '+' // ++
-	case token.SUB:
-		b = next == '-' // --
-	case token.QUO:
-		b = next == '*' // /*
-	case token.LSS:
-		b = next == '-' || next == '<' // <- or <<
-	case token.AND:
-		b = next == '&' || next == '^' // && or &^
-	}
-	return
-}
-
-// print prints a list of "items" (roughly corresponding to syntactic
-// tokens, but also including whitespace and formatting information).
-// It is the only print function that should be called directly from
-// any of the AST printing functions in nodes.go.
-//
-// Whitespace is accumulated until a non-whitespace token appears. Any
-// comments that need to appear before that token are printed first,
-// taking into account the amount and structure of any pending white-
-// space for best comment placement. Then, any leftover whitespace is
-// printed, followed by the actual token.
-func (p *printer) print(args ...interface{}) {
-	for _, arg := range args {
-		// information about the current arg
-		var data string
-		var isLit bool
-		var impliedSemi bool // value for p.impliedSemi after this arg
-
-		// record previous opening token, if any
-		switch p.lastTok {
-		case token.ILLEGAL:
-			// ignore (white space)
-		case token.LPAREN, token.LBRACK:
-			p.prevOpen = p.lastTok
-		default:
-			// other tokens followed any opening token
-			p.prevOpen = token.ILLEGAL
-		}
-
-		switch x := arg.(type) {
-		case pmode:
-			// toggle printer mode
-			p.mode ^= x
-			continue
-
-		case whiteSpace:
-			if x == ignore {
-				// don't add ignore's to the buffer; they
-				// may screw up "correcting" unindents (see
-				// LabeledStmt)
-				continue
-			}
-			i := len(p.wsbuf)
-			if i == cap(p.wsbuf) {
-				// Whitespace sequences are very short so this should
-				// never happen. Handle gracefully (but possibly with
-				// bad comment placement) if it does happen.
-				p.writeWhitespace(i)
-				i = 0
-			}
-			p.wsbuf = p.wsbuf[0 : i+1]
-			p.wsbuf[i] = x
-			if x == newline || x == formfeed {
-				// newlines affect the current state (p.impliedSemi)
-				// and not the state after printing arg (impliedSemi)
-				// because comments can be interspersed before the arg
-				// in this case
-				p.impliedSemi = false
-			}
-			p.lastTok = token.ILLEGAL
-			continue
-
-		case *ast.Ident:
-			data = x.Name
-			impliedSemi = true
-			p.lastTok = token.IDENT
-
-		case *ast.BasicLit:
-			data = x.Value
-			isLit = true
-			impliedSemi = true
-			p.lastTok = x.Kind
-
-		case token.Token:
-			s := x.String()
-			if mayCombine(p.lastTok, s[0]) {
-				// the previous and the current token must be
-				// separated by a blank otherwise they combine
-				// into a different incorrect token sequence
-				// (except for token.INT followed by a '.' this
-				// should never happen because it is taken care
-				// of via binary expression formatting)
-				if len(p.wsbuf) != 0 {
-					p.internalError("whitespace buffer not empty")
-				}
-				p.wsbuf = p.wsbuf[0:1]
-				p.wsbuf[0] = ' '
-			}
-			data = s
-			// some keywords followed by a newline imply a semicolon
-			switch x {
-			case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
-				token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
-				impliedSemi = true
-			}
-			p.lastTok = x
-
-		case token.Pos:
-			if x.IsValid() {
-				p.pos = p.posFor(x) // accurate position of next item
-			}
-			continue
-
-		case string:
-			// incorrect AST - print error message
-			data = x
-			isLit = true
-			impliedSemi = true
-			p.lastTok = token.STRING
-
-		default:
-			fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
-			panic("go/printer type")
-		}
-		// data != ""
-
-		next := p.pos // estimated/accurate position of next item
-		wroteNewline, droppedFF := p.flush(next, p.lastTok)
-
-		// intersperse extra newlines if present in the source and
-		// if they don't cause extra semicolons (don't do this in
-		// flush as it will cause extra newlines at the end of a file)
-		if !p.impliedSemi {
-			n := nlimit(next.Line - p.pos.Line)
-			// don't exceed maxNewlines if we already wrote one
-			if wroteNewline && n == maxNewlines {
-				n = maxNewlines - 1
-			}
-			if n > 0 {
-				ch := byte('\n')
-				if droppedFF {
-					ch = '\f' // use formfeed since we dropped one before
-				}
-				p.writeByte(ch, n)
-				impliedSemi = false
-			}
-		}
-
-		// the next token starts now - record its line number if requested
-		if p.linePtr != nil {
-			*p.linePtr = p.out.Line
-			p.linePtr = nil
-		}
-
-		p.writeString(next, data, isLit)
-		p.impliedSemi = impliedSemi
-	}
-}
-
-// flush prints any pending comments and whitespace occurring textually
-// before the position of the next token tok. The flush result indicates
-// if a newline was written or if a formfeed was dropped from the whitespace
-// buffer.
-func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
-	if p.commentBefore(next) {
-		// if there are comments before the next item, intersperse them
-		wroteNewline, droppedFF = p.intersperseComments(next, tok)
-	} else {
-		// otherwise, write any leftover whitespace
-		p.writeWhitespace(len(p.wsbuf))
-	}
-	return
-}
-
-// getDoc returns the ast.CommentGroup associated with n, if any.
-func getDoc(n ast.Node) *ast.CommentGroup {
-	switch n := n.(type) {
-	case *ast.Field:
-		return n.Doc
-	case *ast.ImportSpec:
-		return n.Doc
-	case *ast.ValueSpec:
-		return n.Doc
-	case *ast.TypeSpec:
-		return n.Doc
-	case *ast.GenDecl:
-		return n.Doc
-	case *ast.FuncDecl:
-		return n.Doc
-	case *ast.File:
-		return n.Doc
-	}
-	return nil
-}
-
-func getLastComment(n ast.Node) *ast.CommentGroup {
-	switch n := n.(type) {
-	case *ast.Field:
-		return n.Comment
-	case *ast.ImportSpec:
-		return n.Comment
-	case *ast.ValueSpec:
-		return n.Comment
-	case *ast.TypeSpec:
-		return n.Comment
-	case *ast.GenDecl:
-		if len(n.Specs) > 0 {
-			return getLastComment(n.Specs[len(n.Specs)-1])
-		}
-	case *ast.File:
-		if len(n.Comments) > 0 {
-			return n.Comments[len(n.Comments)-1]
-		}
-	}
-	return nil
-}
-
-func (p *printer) printNode(node interface{}) error {
-	// unpack *CommentedNode, if any
-	var comments []*ast.CommentGroup
-	if cnode, ok := node.(*CommentedNode); ok {
-		node = cnode.Node
-		comments = cnode.Comments
-	}
-
-	if comments != nil {
-		// commented node - restrict comment list to relevant range
-		n, ok := node.(ast.Node)
-		if !ok {
-			goto unsupported
-		}
-		beg := n.Pos()
-		end := n.End()
-		// if the node has associated documentation,
-		// include that commentgroup in the range
-		// (the comment list is sorted in the order
-		// of the comment appearance in the source code)
-		if doc := getDoc(n); doc != nil {
-			beg = doc.Pos()
-		}
-		if com := getLastComment(n); com != nil {
-			if e := com.End(); e > end {
-				end = e
-			}
-		}
-		// token.Pos values are global offsets; we can
-		// compare them directly
-		i := 0
-		for i < len(comments) && comments[i].End() < beg {
-			i++
-		}
-		j := i
-		for j < len(comments) && comments[j].Pos() < end {
-			j++
-		}
-		if i < j {
-			p.comments = comments[i:j]
-		}
-	} else if n, ok := node.(*ast.File); ok {
-		// use ast.File comments, if any
-		p.comments = n.Comments
-	}
-
-	// if there are no comments, use node comments
-	p.useNodeComments = p.comments == nil
-
-	// get comments ready for use
-	p.nextComment()
-
-	p.print(pmode(0))
-
-	// format node
-	switch n := node.(type) {
-	case ast.Expr:
-		p.expr(n)
-	case ast.Stmt:
-		// A labeled statement will un-indent to position the label.
-		// Set p.indent to 1 so we don't get indent "underflow".
-		if _, ok := n.(*ast.LabeledStmt); ok {
-			p.indent = 1
-		}
-		p.stmt(n, false)
-	case ast.Decl:
-		p.decl(n)
-	case ast.Spec:
-		p.spec(n, 1, false)
-	case []ast.Stmt:
-		// A labeled statement will un-indent to position the label.
-		// Set p.indent to 1 so we don't get indent "underflow".
-		for _, s := range n {
-			if _, ok := s.(*ast.LabeledStmt); ok {
-				p.indent = 1
-			}
-		}
-		p.stmtList(n, 0, false)
-	case []ast.Decl:
-		p.declList(n)
-	case *ast.File:
-		p.file(n)
-	default:
-		goto unsupported
-	}
-
-	return nil
-
-unsupported:
-	return fmt.Errorf("go/printer: unsupported node type %T", node)
-}
-
-// ----------------------------------------------------------------------------
-// Trimmer
-
-// A trimmer is an io.Writer filter for stripping tabwriter.Escape
-// characters, trailing blanks and tabs, and for converting formfeed
-// and vtab characters into newlines and htabs (in case no tabwriter
-// is used). Text bracketed by tabwriter.Escape characters is passed
-// through unchanged.
-type trimmer struct {
-	output io.Writer
-	state  int
-	space  []byte
-}
-
-// trimmer is implemented as a state machine.
-// It can be in one of the following states:
-const (
-	inSpace  = iota // inside space
-	inEscape        // inside text bracketed by tabwriter.Escapes
-	inText          // inside text
-)
-
-func (p *trimmer) resetSpace() {
-	p.state = inSpace
-	p.space = p.space[0:0]
-}
-
-// Design note: It is tempting to eliminate extra blanks occurring in
-//              whitespace in this function as it could simplify some
-//              of the blanks logic in the node printing functions.
-//              However, this would mess up any formatting done by
-//              the tabwriter.
-
-var aNewline = []byte("\n")
-
-func (p *trimmer) Write(data []byte) (n int, err error) {
-	// invariants:
-	// p.state == inSpace:
-	//	p.space is unwritten
-	// p.state == inEscape, inText:
-	//	data[m:n] is unwritten
-	m := 0
-	var b byte
-	for n, b = range data {
-		if b == '\v' {
-			b = '\t' // convert to htab
-		}
-		switch p.state {
-		case inSpace:
-			switch b {
-			case '\t', ' ':
-				p.space = append(p.space, b)
-			case '\n', '\f':
-				p.resetSpace() // discard trailing space
-				_, err = p.output.Write(aNewline)
-			case tabwriter.Escape:
-				_, err = p.output.Write(p.space)
-				p.state = inEscape
-				m = n + 1 // +1: skip tabwriter.Escape
-			default:
-				_, err = p.output.Write(p.space)
-				p.state = inText
-				m = n
-			}
-		case inEscape:
-			if b == tabwriter.Escape {
-				_, err = p.output.Write(data[m:n])
-				p.resetSpace()
-			}
-		case inText:
-			switch b {
-			case '\t', ' ':
-				_, err = p.output.Write(data[m:n])
-				p.resetSpace()
-				p.space = append(p.space, b)
-			case '\n', '\f':
-				_, err = p.output.Write(data[m:n])
-				p.resetSpace()
-				if err == nil {
-					_, err = p.output.Write(aNewline)
-				}
-			case tabwriter.Escape:
-				_, err = p.output.Write(data[m:n])
-				p.state = inEscape
-				m = n + 1 // +1: skip tabwriter.Escape
-			}
-		default:
-			panic("unreachable")
-		}
-		if err != nil {
-			return
-		}
-	}
-	n = len(data)
-
-	switch p.state {
-	case inEscape, inText:
-		_, err = p.output.Write(data[m:n])
-		p.resetSpace()
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Public interface
-
-// A Mode value is a set of flags (or 0). They control printing.
-type Mode uint
-
-const (
-	RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
-	TabIndent                  // use tabs for indentation independent of UseSpaces
-	UseSpaces                  // use spaces instead of tabs for alignment
-	SourcePos                  // emit //line directives to preserve original source positions
-)
-
-// The mode below is not included in printer's public API because
-// editing code text is deemed out of scope. Because this mode is
-// unexported, it's also possible to modify or remove it based on
-// the evolving needs of go/format and cmd/gofmt without breaking
-// users. See discussion in CL 240683.
-const (
-	// normalizeNumbers means to canonicalize number
-	// literal prefixes and exponents while printing.
-	//
-	// This value is known in and used by go/format and cmd/gofmt.
-	// It is currently more convenient and performant for those
-	// packages to apply number normalization during printing,
-	// rather than by modifying the AST in advance.
-	normalizeNumbers Mode = 1 << 30
-)
-
-// A Config node controls the output of Fprint.
-type Config struct {
-	Mode     Mode // default: 0
-	Tabwidth int  // default: 8
-	Indent   int  // default: 0 (all code is indented at least by this much)
-}
-
-// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
-func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
-	// print node
-	var p printer
-	p.init(cfg, fset, nodeSizes)
-	if err = p.printNode(node); err != nil {
-		return
-	}
-	// print outstanding comments
-	p.impliedSemi = false // EOF acts like a newline
-	p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
-
-	// output is buffered in p.output now.
-	// fix //go:build and // +build comments if needed.
-	p.fixGoBuildLines()
-
-	// redirect output through a trimmer to eliminate trailing whitespace
-	// (Input to a tabwriter must be untrimmed since trailing tabs provide
-	// formatting information. The tabwriter could provide trimming
-	// functionality but no tabwriter is used when RawFormat is set.)
-	output = &trimmer{output: output}
-
-	// redirect output through a tabwriter if necessary
-	if cfg.Mode&RawFormat == 0 {
-		minwidth := cfg.Tabwidth
-
-		padchar := byte('\t')
-		if cfg.Mode&UseSpaces != 0 {
-			padchar = ' '
-		}
-
-		twmode := tabwriter.DiscardEmptyColumns
-		if cfg.Mode&TabIndent != 0 {
-			minwidth = 0
-			twmode |= tabwriter.TabIndent
-		}
-
-		output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
-	}
-
-	// write printer result via tabwriter/trimmer to output
-	if _, err = output.Write(p.output); err != nil {
-		return
-	}
-
-	// flush tabwriter, if any
-	if tw, _ := output.(*tabwriter.Writer); tw != nil {
-		err = tw.Flush()
-	}
-
-	return
-}
-
-// A CommentedNode bundles an AST node and corresponding comments.
-// It may be provided as argument to any of the Fprint functions.
-type CommentedNode struct {
-	Node     interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
-	Comments []*ast.CommentGroup
-}
-
-// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
-// Position information is interpreted relative to the file set fset.
-// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
-// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
-func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
-	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
-}
-
-// Fprint "pretty-prints" an AST node to output.
-// It calls Config.Fprint with default settings.
-// Note that gofmt uses tabs for indentation but spaces for alignment;
-// use format.Node (package go/format) for output that matches gofmt.
-func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
-	return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
-}
-
-func stringsCut(s, sep string) (before, after string, found bool) {
-	if i := strings.Index(s, sep); i >= 0 {
-		return s[:i], s[i+len(sep):], true
-	}
-	return s, "", false
-}
diff --git a/internal/backport/go/printer/printer_test.go b/internal/backport/go/printer/printer_test.go
deleted file mode 100644
index 9347d6b..0000000
--- a/internal/backport/go/printer/printer_test.go
+++ /dev/null
@@ -1,800 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package printer
-
-import (
-	"bytes"
-	"errors"
-	"flag"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"golang.org/x/website/internal/backport/diff"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-const (
-	dataDir  = "testdata"
-	tabwidth = 8
-)
-
-var update = flag.Bool("update", false, "update golden files")
-
-var fset = token.NewFileSet()
-
-type checkMode uint
-
-const (
-	export checkMode = 1 << iota
-	rawFormat
-	normNumber
-	idempotent
-	allowTypeParams
-)
-
-// format parses src, prints the corresponding AST, verifies the resulting
-// src is syntactically correct, and returns the resulting src or an error
-// if any.
-func format(src []byte, mode checkMode) ([]byte, error) {
-	// parse src
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		return nil, fmt.Errorf("parse: %s\n%s", err, src)
-	}
-
-	// filter exports if necessary
-	if mode&export != 0 {
-		ast.FileExports(f) // ignore result
-		f.Comments = nil   // don't print comments that are not in AST
-	}
-
-	// determine printer configuration
-	cfg := Config{Tabwidth: tabwidth}
-	if mode&rawFormat != 0 {
-		cfg.Mode |= RawFormat
-	}
-	if mode&normNumber != 0 {
-		cfg.Mode |= normalizeNumbers
-	}
-
-	// print AST
-	var buf bytes.Buffer
-	if err := cfg.Fprint(&buf, fset, f); err != nil {
-		return nil, fmt.Errorf("print: %s", err)
-	}
-
-	// make sure formatted output is syntactically correct
-	res := buf.Bytes()
-	if _, err := parser.ParseFile(fset, "", res, parser.ParseComments); err != nil {
-		return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
-	}
-
-	return res, nil
-}
-
-// lineAt returns the line in text starting at offset offs.
-func lineAt(text []byte, offs int) []byte {
-	i := offs
-	for i < len(text) && text[i] != '\n' {
-		i++
-	}
-	return text[offs:i]
-}
-
-// checkEqual compares a and b.
-func checkEqual(aname, bname string, a, b []byte) error {
-	if bytes.Equal(a, b) {
-		return nil
-	}
-	return errors.New(string(diff.Diff(aname, a, bname, b)))
-}
-
-func runcheck(t *testing.T, source, golden string, mode checkMode) {
-	src, err := os.ReadFile(source)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-
-	res, err := format(src, mode)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-
-	// update golden files if necessary
-	if *update {
-		if err := os.WriteFile(golden, res, 0644); err != nil {
-			t.Error(err)
-		}
-		return
-	}
-
-	// get golden
-	gld, err := os.ReadFile(golden)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-
-	// formatted source and golden must be the same
-	if err := checkEqual(source, golden, res, gld); err != nil {
-		t.Error(err)
-		return
-	}
-
-	if mode&idempotent != 0 {
-		// formatting golden must be idempotent
-		// (This is very difficult to achieve in general and for now
-		// it is only checked for files explicitly marked as such.)
-		res, err = format(gld, mode)
-		if err != nil {
-			t.Error(err)
-			return
-		}
-		if err := checkEqual(golden, fmt.Sprintf("format(%s)", golden), gld, res); err != nil {
-			t.Errorf("golden is not idempotent: %s", err)
-		}
-	}
-}
-
-func check(t *testing.T, source, golden string, mode checkMode) {
-	// run the test
-	cc := make(chan int, 1)
-	go func() {
-		runcheck(t, source, golden, mode)
-		cc <- 0
-	}()
-
-	// wait with timeout
-	select {
-	case <-time.After(10 * time.Second): // plenty of safety margin, even for very slow machines
-		// test is running past the timeout
-		t.Errorf("%s: running too slowly", source)
-	case <-cc:
-		// test finished within allotted time margin
-	}
-}
-
-type entry struct {
-	source, golden string
-	mode           checkMode
-}
-
-// Use go test -update to create/update the respective golden files.
-var data = []entry{
-	{"empty.input", "empty.golden", idempotent},
-	{"comments.input", "comments.golden", 0},
-	{"comments.input", "comments.x", export},
-	{"comments2.input", "comments2.golden", idempotent},
-	{"alignment.input", "alignment.golden", idempotent},
-	{"linebreaks.input", "linebreaks.golden", idempotent},
-	{"expressions.input", "expressions.golden", idempotent},
-	{"expressions.input", "expressions.raw", rawFormat | idempotent},
-	{"declarations.input", "declarations.golden", 0},
-	{"statements.input", "statements.golden", 0},
-	{"slow.input", "slow.golden", idempotent},
-	{"complit.input", "complit.x", export},
-	{"go2numbers.input", "go2numbers.golden", idempotent},
-	{"go2numbers.input", "go2numbers.norm", normNumber | idempotent},
-	{"generics.input", "generics.golden", idempotent | allowTypeParams},
-	{"gobuild1.input", "gobuild1.golden", idempotent},
-	{"gobuild2.input", "gobuild2.golden", idempotent},
-	{"gobuild3.input", "gobuild3.golden", idempotent},
-	{"gobuild4.input", "gobuild4.golden", idempotent},
-	{"gobuild5.input", "gobuild5.golden", idempotent},
-	{"gobuild6.input", "gobuild6.golden", idempotent},
-	{"gobuild7.input", "gobuild7.golden", idempotent},
-}
-
-func TestFiles(t *testing.T) {
-	t.Parallel()
-	for _, e := range data {
-		source := filepath.Join(dataDir, e.source)
-		golden := filepath.Join(dataDir, e.golden)
-		mode := e.mode
-		t.Run(e.source, func(t *testing.T) {
-			t.Parallel()
-			check(t, source, golden, mode)
-			// TODO(gri) check that golden is idempotent
-			//check(t, golden, golden, e.mode)
-		})
-	}
-}
-
-// TestLineComments, using a simple test case, checks that consecutive line
-// comments are properly terminated with a newline even if the AST position
-// information is incorrect.
-func TestLineComments(t *testing.T) {
-	const src = `// comment 1
-	// comment 2
-	// comment 3
-	package main
-	`
-
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		panic(err) // error in test
-	}
-
-	var buf bytes.Buffer
-	fset = token.NewFileSet() // use the wrong file set
-	Fprint(&buf, fset, f)
-
-	nlines := 0
-	for _, ch := range buf.Bytes() {
-		if ch == '\n' {
-			nlines++
-		}
-	}
-
-	const expected = 3
-	if nlines < expected {
-		t.Errorf("got %d, expected %d\n", nlines, expected)
-		t.Errorf("result:\n%s", buf.Bytes())
-	}
-}
-
-// Verify that the printer can be invoked during initialization.
-func init() {
-	const name = "foobar"
-	var buf bytes.Buffer
-	if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
-		panic(err) // error in test
-	}
-	// in debug mode, the result contains additional information;
-	// ignore it
-	if s := buf.String(); !debug && s != name {
-		panic("got " + s + ", want " + name)
-	}
-}
-
-// Verify that the printer doesn't crash if the AST contains BadXXX nodes.
-func TestBadNodes(t *testing.T) {
-	const src = "package p\n("
-	const res = "package p\nBadDecl\n"
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err == nil {
-		t.Error("expected illegal program") // error in test
-	}
-	var buf bytes.Buffer
-	Fprint(&buf, fset, f)
-	if buf.String() != res {
-		t.Errorf("got %q, expected %q", buf.String(), res)
-	}
-}
-
-// testComment verifies that f can be parsed again after printing it
-// with its first comment set to comment at any possible source offset.
-func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
-	f.Comments[0].List[0] = comment
-	var buf bytes.Buffer
-	for offs := 0; offs <= srclen; offs++ {
-		buf.Reset()
-		// Printing f should result in a correct program no
-		// matter what the (incorrect) comment position is.
-		if err := Fprint(&buf, fset, f); err != nil {
-			t.Error(err)
-		}
-		if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
-			t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
-		}
-		// Position information is just an offset.
-		// Move comment one byte down in the source.
-		comment.Slash++
-	}
-}
-
-// Verify that the printer produces a correct program
-// even if the position information of comments introducing newlines
-// is incorrect.
-func TestBadComments(t *testing.T) {
-	t.Parallel()
-	const src = `
-// first comment - text and position changed by test
-package p
-import "fmt"
-const pi = 3.14 // rough circle
-var (
-	x, y, z int = 1, 2, 3
-	u, v float64
-)
-func fibo(n int) {
-	if n < 2 {
-		return n /* seed values */
-	}
-	return fibo(n-1) + fibo(n-2)
-}
-`
-
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		t.Error(err) // error in test
-	}
-
-	comment := f.Comments[0].List[0]
-	pos := comment.Pos()
-	if fset.PositionFor(pos, false /* absolute position */).Offset != 1 {
-		t.Error("expected offset 1") // error in test
-	}
-
-	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
-	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
-	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
-	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
-}
-
-type visitor chan *ast.Ident
-
-func (v visitor) Visit(n ast.Node) (w ast.Visitor) {
-	if ident, ok := n.(*ast.Ident); ok {
-		v <- ident
-	}
-	return v
-}
-
-// idents is an iterator that returns all idents in f via the result channel.
-func idents(f *ast.File) <-chan *ast.Ident {
-	v := make(visitor)
-	go func() {
-		ast.Walk(v, f)
-		close(v)
-	}()
-	return v
-}
-
-// identCount returns the number of identifiers found in f.
-func identCount(f *ast.File) int {
-	n := 0
-	for range idents(f) {
-		n++
-	}
-	return n
-}
-
-// Verify that the SourcePos mode emits correct //line directives
-// by testing that position information for matching identifiers
-// is maintained.
-func TestSourcePos(t *testing.T) {
-	const src = `
-package p
-import ( "golang.org/x/website/internal/backport/go/printer"; "math" )
-const pi = 3.14; var x = 0
-type t struct{ x, y, z int; u, v, w float32 }
-func (t *t) foo(a, b, c int) int {
-	return a*t.x + b*t.y +
-		// two extra lines here
-		// ...
-		c*t.z
-}
-`
-
-	// parse original
-	f1, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// pretty-print original
-	var buf bytes.Buffer
-	err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// parse pretty printed original
-	// (//line directives must be interpreted even w/o parser.ParseComments set)
-	f2, err := parser.ParseFile(fset, "", buf.Bytes(), 0)
-	if err != nil {
-		t.Fatalf("%s\n%s", err, buf.Bytes())
-	}
-
-	// At this point the position information of identifiers in f2 should
-	// match the position information of corresponding identifiers in f1.
-
-	// number of identifiers must be > 0 (test should run) and must match
-	n1 := identCount(f1)
-	n2 := identCount(f2)
-	if n1 == 0 {
-		t.Fatal("got no idents")
-	}
-	if n2 != n1 {
-		t.Errorf("got %d idents; want %d", n2, n1)
-	}
-
-	// verify that all identifiers have correct line information
-	i2range := idents(f2)
-	for i1 := range idents(f1) {
-		i2 := <-i2range
-
-		if i2.Name != i1.Name {
-			t.Errorf("got ident %s; want %s", i2.Name, i1.Name)
-		}
-
-		// here we care about the relative (line-directive adjusted) positions
-		l1 := fset.Position(i1.Pos()).Line
-		l2 := fset.Position(i2.Pos()).Line
-		if l2 != l1 {
-			t.Errorf("got line %d; want %d for %s", l2, l1, i1.Name)
-		}
-	}
-
-	if t.Failed() {
-		t.Logf("\n%s", buf.Bytes())
-	}
-}
-
-// Verify that the SourcePos mode doesn't emit unnecessary //line directives
-// before empty lines.
-func TestIssue5945(t *testing.T) {
-	const orig = `
-package p   // line 2
-func f() {} // line 3
-
-var x, y, z int
-
-
-func g() { // line 8
-}
-`
-
-	const want = `//line src.go:2
-package p
-
-//line src.go:3
-func f() {}
-
-var x, y, z int
-
-//line src.go:8
-func g() {
-}
-`
-
-	// parse original
-	f1, err := parser.ParseFile(fset, "src.go", orig, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// pretty-print original
-	var buf bytes.Buffer
-	err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	got := buf.String()
-
-	// compare original with desired output
-	if got != want {
-		t.Errorf("got:\n%s\nwant:\n%s\n", got, want)
-	}
-}
-
-var decls = []string{
-	`import "fmt"`,
-	"const pi = 3.1415\nconst e = 2.71828\n\nvar x = pi",
-	"func sum(x, y int) int\t{ return x + y }",
-}
-
-func TestDeclLists(t *testing.T) {
-	for _, src := range decls {
-		file, err := parser.ParseFile(fset, "", "package p;"+src, parser.ParseComments)
-		if err != nil {
-			panic(err) // error in test
-		}
-
-		var buf bytes.Buffer
-		err = Fprint(&buf, fset, file.Decls) // only print declarations
-		if err != nil {
-			panic(err) // error in test
-		}
-
-		out := buf.String()
-		if out != src {
-			t.Errorf("\ngot : %q\nwant: %q\n", out, src)
-		}
-	}
-}
-
-var stmts = []string{
-	"i := 0",
-	"select {}\nvar a, b = 1, 2\nreturn a + b",
-	"go f()\ndefer func() {}()",
-}
-
-func TestStmtLists(t *testing.T) {
-	for _, src := range stmts {
-		file, err := parser.ParseFile(fset, "", "package p; func _() {"+src+"}", parser.ParseComments)
-		if err != nil {
-			panic(err) // error in test
-		}
-
-		var buf bytes.Buffer
-		err = Fprint(&buf, fset, file.Decls[0].(*ast.FuncDecl).Body.List) // only print statements
-		if err != nil {
-			panic(err) // error in test
-		}
-
-		out := buf.String()
-		if out != src {
-			t.Errorf("\ngot : %q\nwant: %q\n", out, src)
-		}
-	}
-}
-
-func TestBaseIndent(t *testing.T) {
-	t.Parallel()
-	// The testfile must not contain multi-line raw strings since those
-	// are not indented (because their values must not change) and make
-	// this test fail.
-	const filename = "printer.go"
-	src, err := os.ReadFile(filename)
-	if err != nil {
-		panic(err) // error in test
-	}
-
-	file, err := parser.ParseFile(fset, filename, src, 0)
-	if err != nil {
-		panic(err) // error in test
-	}
-
-	for indent := 0; indent < 4; indent++ {
-		indent := indent
-		t.Run(fmt.Sprint(indent), func(t *testing.T) {
-			t.Parallel()
-			var buf bytes.Buffer
-			(&Config{Tabwidth: tabwidth, Indent: indent}).Fprint(&buf, fset, file)
-			// all code must be indented by at least 'indent' tabs
-			lines := bytes.Split(buf.Bytes(), []byte{'\n'})
-			for i, line := range lines {
-				if len(line) == 0 {
-					continue // empty lines don't have indentation
-				}
-				n := 0
-				for j, b := range line {
-					if b != '\t' {
-						// end of indentation
-						n = j
-						break
-					}
-				}
-				if n < indent {
-					t.Errorf("line %d: got only %d tabs; want at least %d: %q", i, n, indent, line)
-				}
-			}
-		})
-	}
-}
-
-// TestFuncType tests that an ast.FuncType with a nil Params field
-// can be printed (per go/ast specification). Test case for issue 3870.
-func TestFuncType(t *testing.T) {
-	src := &ast.File{
-		Name: &ast.Ident{Name: "p"},
-		Decls: []ast.Decl{
-			&ast.FuncDecl{
-				Name: &ast.Ident{Name: "f"},
-				Type: &ast.FuncType{},
-			},
-		},
-	}
-
-	var buf bytes.Buffer
-	if err := Fprint(&buf, fset, src); err != nil {
-		t.Fatal(err)
-	}
-	got := buf.String()
-
-	const want = `package p
-
-func f()
-`
-
-	if got != want {
-		t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
-	}
-}
-
-type limitWriter struct {
-	remaining int
-	errCount  int
-}
-
-func (l *limitWriter) Write(buf []byte) (n int, err error) {
-	n = len(buf)
-	if n >= l.remaining {
-		n = l.remaining
-		err = io.EOF
-		l.errCount++
-	}
-	l.remaining -= n
-	return n, err
-}
-
-// Test whether the printer stops writing after the first error
-func TestWriteErrors(t *testing.T) {
-	t.Parallel()
-	const filename = "printer.go"
-	src, err := os.ReadFile(filename)
-	if err != nil {
-		panic(err) // error in test
-	}
-	file, err := parser.ParseFile(fset, filename, src, 0)
-	if err != nil {
-		panic(err) // error in test
-	}
-	for i := 0; i < 20; i++ {
-		lw := &limitWriter{remaining: i}
-		err := (&Config{Mode: RawFormat}).Fprint(lw, fset, file)
-		if lw.errCount > 1 {
-			t.Fatal("Writes continued after first error returned")
-		}
-		// We expect errCount to be 1 iff err is set
-		if (lw.errCount != 0) != (err != nil) {
-			t.Fatal("Expected err when errCount != 0")
-		}
-	}
-}
-
-// TestX is a skeleton test that can be filled in for debugging one-off cases.
-// Do not remove.
-func TestX(t *testing.T) {
-	const src = `
-package p
-func _() {}
-`
-	_, err := format([]byte(src), 0)
-	if err != nil {
-		t.Error(err)
-	}
-}
-
-func TestCommentedNode(t *testing.T) {
-	const (
-		input = `package main
-
-func foo() {
-	// comment inside func
-}
-
-// leading comment
-type bar int // comment2
-
-`
-
-		foo = `func foo() {
-	// comment inside func
-}`
-
-		bar = `// leading comment
-type bar int	// comment2
-`
-	)
-
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "input.go", input, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var buf bytes.Buffer
-
-	err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[0], Comments: f.Comments})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if buf.String() != foo {
-		t.Errorf("got %q, want %q", buf.String(), foo)
-	}
-
-	buf.Reset()
-
-	err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[1], Comments: f.Comments})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if buf.String() != bar {
-		t.Errorf("got %q, want %q", buf.String(), bar)
-	}
-}
-
-func TestIssue11151(t *testing.T) {
-	const src = "package p\t/*\r/1\r*\r/2*\r\r\r\r/3*\r\r+\r\r/4*/\n"
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var buf bytes.Buffer
-	Fprint(&buf, fset, f)
-	got := buf.String()
-	const want = "package p\t/*/1*\r/2*\r/3*+/4*/\n" // \r following opening /* should be stripped
-	if got != want {
-		t.Errorf("\ngot : %q\nwant: %q", got, want)
-	}
-
-	// the resulting program must be valid
-	_, err = parser.ParseFile(fset, "", got, 0)
-	if err != nil {
-		t.Errorf("%v\norig: %q\ngot : %q", err, src, got)
-	}
-}
-
-// If a declaration has multiple specifications, a parenthesized
-// declaration must be printed even if Lparen is token.NoPos.
-func TestParenthesizedDecl(t *testing.T) {
-	// a package with multiple specs in a single declaration
-	const src = "package p; var ( a float64; b int )"
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, "", src, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// print the original package
-	var buf bytes.Buffer
-	err = Fprint(&buf, fset, f)
-	if err != nil {
-		t.Fatal(err)
-	}
-	original := buf.String()
-
-	// now remove parentheses from the declaration
-	for i := 0; i != len(f.Decls); i++ {
-		f.Decls[i].(*ast.GenDecl).Lparen = token.NoPos
-	}
-	buf.Reset()
-	err = Fprint(&buf, fset, f)
-	if err != nil {
-		t.Fatal(err)
-	}
-	noparen := buf.String()
-
-	if noparen != original {
-		t.Errorf("got %q, want %q", noparen, original)
-	}
-}
-
-// Verify that we don't print a newline between "return" and its results, as
-// that would incorrectly cause a naked return.
-func TestIssue32854(t *testing.T) {
-	src := `package foo
-
-func f() {
-        return Composite{
-                call(),
-        }
-}`
-	fset := token.NewFileSet()
-	file, err := parser.ParseFile(fset, "", src, 0)
-	if err != nil {
-		panic(err)
-	}
-
-	// Replace the result with call(), which is on the next line.
-	fd := file.Decls[0].(*ast.FuncDecl)
-	ret := fd.Body.List[0].(*ast.ReturnStmt)
-	ret.Results[0] = ret.Results[0].(*ast.CompositeLit).Elts[0]
-
-	var buf bytes.Buffer
-	if err := Fprint(&buf, fset, ret); err != nil {
-		t.Fatal(err)
-	}
-	want := "return call()"
-	if got := buf.String(); got != want {
-		t.Fatalf("got %q, want %q", got, want)
-	}
-}
diff --git a/internal/backport/go/printer/testdata/alignment.golden b/internal/backport/go/printer/testdata/alignment.golden
deleted file mode 100644
index 96086ed..0000000
--- a/internal/backport/go/printer/testdata/alignment.golden
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package alignment
-
-// ----------------------------------------------------------------------------
-// Examples from issue #7335.
-
-func main() {
-	z := MyStruct{
-		Foo:		"foo",
-		Bar:		"bar",
-		Name:		"name",
-		LongName:	"longname",
-		Baz:		"baz",
-	}
-	y := MyStruct{
-		Foo:			"foo",
-		Bar:			"bar",
-		NameXX:			"name",
-		LongNameXXXXXXXXXXXXX:	"longname",
-		Baz:			"baz",
-	}
-	z := MyStruct{
-		Foo:			"foo",
-		Bar:			"bar",
-		Name:			"name",
-		LongNameXXXXXXXXXXXXX:	"longname",
-		Baz:			"baz",
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #10392.
-
-var kcfg = KubeletConfig{
-	Address:			s.Address,
-	AllowPrivileged:		s.AllowPrivileged,
-	HostNetworkSources:		hostNetworkSources,
-	HostnameOverride:		s.HostnameOverride,
-	RootDirectory:			s.RootDirectory,
-	ConfigFile:			s.Config,
-	ManifestURL:			s.ManifestURL,
-	FileCheckFrequency:		s.FileCheckFrequency,
-	HTTPCheckFrequency:		s.HTTPCheckFrequency,
-	PodInfraContainerImage:		s.PodInfraContainerImage,
-	SyncFrequency:			s.SyncFrequency,
-	RegistryPullQPS:		s.RegistryPullQPS,
-	RegistryBurst:			s.RegistryBurst,
-	MinimumGCAge:			s.MinimumGCAge,
-	MaxPerPodContainerCount:	s.MaxPerPodContainerCount,
-	MaxContainerCount:		s.MaxContainerCount,
-	ClusterDomain:			s.ClusterDomain,
-	ClusterDNS:			s.ClusterDNS,
-	Runonce:			s.RunOnce,
-	Port:				s.Port,
-	ReadOnlyPort:			s.ReadOnlyPort,
-	CadvisorInterface:		cadvisorInterface,
-	EnableServer:			s.EnableServer,
-	EnableDebuggingHandlers:	s.EnableDebuggingHandlers,
-	DockerClient:			dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
-	KubeClient:			client,
-	MasterServiceNamespace:		s.MasterServiceNamespace,
-	VolumePlugins:			ProbeVolumePlugins(),
-	NetworkPlugins:			ProbeNetworkPlugins(),
-	NetworkPluginName:		s.NetworkPluginName,
-	StreamingConnectionIdleTimeout:	s.StreamingConnectionIdleTimeout,
-	TLSOptions:			tlsOptions,
-	ImageGCPolicy:			imageGCPolicy, imageGCPolicy,
-	Cloud:				cloud,
-	NodeStatusUpdateFrequency:	s.NodeStatusUpdateFrequency,
-}
-
-var a = A{
-	Long:					1,
-	LongLong:				1,
-	LongLongLong:				1,
-	LongLongLongLong:			1,
-	LongLongLongLongLong:			1,
-	LongLongLongLongLongLong:		1,
-	LongLongLongLongLongLongLong:		1,
-	LongLongLongLongLongLongLongLong:	1,
-	Short:					1,
-	LongLongLongLongLongLongLongLongLong:	3,
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #22852.
-
-var fmtMap = map[string]string{
-	"1":					"123",
-	"12":					"123",
-	"123":					"123",
-	"1234":					"123",
-	"12345":				"123",
-	"123456":				"123",
-	"12345678901234567890123456789":	"123",
-	"abcde":				"123",
-	"123456789012345678901234567890":	"123",
-	"1234567":				"123",
-	"abcdefghijklmnopqrstuvwxyzabcd":	"123",
-	"abcd":					"123",
-}
-
-type Fmt struct {
-	abcdefghijklmnopqrstuvwx	string
-	abcdefghijklmnopqrstuvwxy	string
-	abcdefghijklmnopqrstuvwxyz	string
-	abcdefghijklmnopqrstuvwxyza	string
-	abcdefghijklmnopqrstuvwxyzab	string
-	abcdefghijklmnopqrstuvwxyzabc	string
-	abcde				string
-	abcdefghijklmnopqrstuvwxyzabcde	string
-	abcdefg				string
-}
-
-func main() {
-	_ := Fmt{
-		abcdefghijklmnopqrstuvwx:		"foo",
-		abcdefghijklmnopqrstuvwxyza:		"foo",
-		abcdefghijklmnopqrstuvwxyzab:		"foo",
-		abcdefghijklmnopqrstuvwxyzabc:		"foo",
-		abcde:					"foo",
-		abcdefghijklmnopqrstuvwxyzabcde:	"foo",
-		abcdefg:				"foo",
-		abcdefghijklmnopqrstuvwxy:		"foo",
-		abcdefghijklmnopqrstuvwxyz:		"foo",
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #26352.
-
-var _ = map[int]string{
-	1:	"",
-
-	12345678901234567890123456789:		"",
-	12345678901234567890123456789012345678:	"",
-}
-
-func f() {
-	_ = map[int]string{
-		1:	"",
-
-		12345678901234567:				"",
-		12345678901234567890123456789012345678901:	"",
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #26930.
-
-var _ = S{
-	F1:	[]string{},
-	F2____:	[]string{},
-}
-
-var _ = S{
-	F1:	[]string{},
-	F2____:	[]string{},
-}
-
-var _ = S{
-	F1____:	[]string{},
-	F2:	[]string{},
-}
-
-var _ = S{
-	F1____:	[]string{},
-	F2:	[]string{},
-}
diff --git a/internal/backport/go/printer/testdata/alignment.input b/internal/backport/go/printer/testdata/alignment.input
deleted file mode 100644
index 323d268..0000000
--- a/internal/backport/go/printer/testdata/alignment.input
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package alignment
-
-// ----------------------------------------------------------------------------
-// Examples from issue #7335.
-
-func main() {
-    z := MyStruct{
-        Foo:      "foo",
-        Bar:      "bar",
-        Name:     "name",
-        LongName: "longname",
-        Baz:      "baz",
-    }
-    y := MyStruct{
-        Foo:                   "foo",
-        Bar:                   "bar",
-        NameXX:                "name",
-        LongNameXXXXXXXXXXXXX: "longname",
-        Baz: "baz",
-    }
-    z := MyStruct{
-        Foo:  "foo",
-        Bar:  "bar",
-        Name: "name",
-        LongNameXXXXXXXXXXXXX: "longname",
-        Baz: "baz",
-    }
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #10392.
-
-var kcfg = KubeletConfig{
-    Address:                        s.Address,
-    AllowPrivileged:                s.AllowPrivileged,
-    HostNetworkSources:             hostNetworkSources,
-    HostnameOverride:               s.HostnameOverride,
-    RootDirectory:                  s.RootDirectory,
-    ConfigFile:                     s.Config,
-    ManifestURL:                    s.ManifestURL,
-    FileCheckFrequency:             s.FileCheckFrequency,
-    HTTPCheckFrequency:             s.HTTPCheckFrequency,
-    PodInfraContainerImage:         s.PodInfraContainerImage,
-    SyncFrequency:                  s.SyncFrequency,
-    RegistryPullQPS:                s.RegistryPullQPS,
-    RegistryBurst:                  s.RegistryBurst,
-    MinimumGCAge:                   s.MinimumGCAge,
-    MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
-    MaxContainerCount:              s.MaxContainerCount,
-    ClusterDomain:                  s.ClusterDomain,
-    ClusterDNS:                     s.ClusterDNS,
-    Runonce:                        s.RunOnce,
-    Port:                           s.Port,
-    ReadOnlyPort:                   s.ReadOnlyPort,
-    CadvisorInterface:              cadvisorInterface,
-    EnableServer:                   s.EnableServer,
-    EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
-    DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
-    KubeClient:                     client,
-    MasterServiceNamespace:         s.MasterServiceNamespace,
-    VolumePlugins:                  ProbeVolumePlugins(),
-    NetworkPlugins:                 ProbeNetworkPlugins(),
-    NetworkPluginName:              s.NetworkPluginName,
-    StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
-    TLSOptions:                     tlsOptions,
-    ImageGCPolicy:                  imageGCPolicy,imageGCPolicy,
-    Cloud:                          cloud,
-    NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
-}
-
-var a = A{
-    Long:                             1,
-    LongLong:                         1,
-    LongLongLong:                     1,
-    LongLongLongLong:                 1,
-    LongLongLongLongLong:             1,
-    LongLongLongLongLongLong:         1,
-    LongLongLongLongLongLongLong:     1,
-    LongLongLongLongLongLongLongLong: 1,
-    Short: 1,
-    LongLongLongLongLongLongLongLongLong: 3,
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #22852.
-
-var fmtMap = map[string]string{
-	"1": "123",
-	"12": "123",
-	"123": "123",
-	"1234": "123",
-	"12345": "123",
-	"123456": "123",
-	"12345678901234567890123456789": "123",
-	"abcde": "123",
-	"123456789012345678901234567890": "123",
-	"1234567": "123",
-	"abcdefghijklmnopqrstuvwxyzabcd": "123",
-	"abcd": "123",
-}
-
-type Fmt struct {
-	abcdefghijklmnopqrstuvwx string
-	abcdefghijklmnopqrstuvwxy string
-	abcdefghijklmnopqrstuvwxyz string
-	abcdefghijklmnopqrstuvwxyza string
-	abcdefghijklmnopqrstuvwxyzab string
-	abcdefghijklmnopqrstuvwxyzabc string
-	abcde string
-	abcdefghijklmnopqrstuvwxyzabcde string
-	abcdefg string
-}
-
-func main() {
-	_ := Fmt{
-		abcdefghijklmnopqrstuvwx: "foo",
-		abcdefghijklmnopqrstuvwxyza: "foo",
-		abcdefghijklmnopqrstuvwxyzab: "foo",
-		abcdefghijklmnopqrstuvwxyzabc: "foo",
-		abcde: "foo",
-		abcdefghijklmnopqrstuvwxyzabcde: "foo",
-		abcdefg: "foo",
-		abcdefghijklmnopqrstuvwxy: "foo",
-		abcdefghijklmnopqrstuvwxyz: "foo",
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #26352.
-
-var _ = map[int]string{
-	1: "",
-
-	12345678901234567890123456789: "",
-	12345678901234567890123456789012345678: "",
-}
-
-func f() {
-	_ = map[int]string{
-		1: "",
-
-		12345678901234567: "",
-		12345678901234567890123456789012345678901: "",
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Examples from issue #26930.
-
-var _ = S{
-	F1: []string{
-	},
-	F2____: []string{},
-}
-
-var _ = S{
-	F1: []string{
-
-
-	},
-	F2____: []string{},
-}
-
-var _ = S{
-	F1____: []string{
-	},
-	F2: []string{},
-}
-
-var _ = S{
-	F1____: []string{
-
-	},
-	F2: []string{},
-}
diff --git a/internal/backport/go/printer/testdata/comments.golden b/internal/backport/go/printer/testdata/comments.golden
deleted file mode 100644
index 1e5d17b..0000000
--- a/internal/backport/go/printer/testdata/comments.golden
+++ /dev/null
@@ -1,774 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a package for testing comment placement by go/printer.
-package main
-
-import "fmt"	// fmt
-
-const c0 = 0	// zero
-const (
-	c1	= iota	// c1
-	c2		// c2
-)
-
-// Alignment of comments in declarations>
-const (
-	_	T	= iota	// comment
-	_			// comment
-	_			// comment
-	_	= iota + 10
-	_	// comments
-
-	_		= 10	// comment
-	_	T	= 20	// comment
-)
-
-const (
-	_____	= iota	// foo
-	_		// bar
-	_	= 0	// bal
-	_		// bat
-)
-
-const (
-	_	T	= iota	// comment
-	_			// comment
-	_			// comment
-	_	= iota + 10
-	_		// comment
-	_		= 10
-	_		= 20	// comment
-	_	T	= 0	// comment
-)
-
-// The SZ struct; it is empty.
-type SZ struct{}
-
-// The S0 struct; no field is exported.
-type S0 struct {
-	int
-	x, y, z	int	// 3 unexported fields
-}
-
-// The S1 struct; some fields are not exported.
-type S1 struct {
-	S0
-	A, B, C	float	// 3 exported fields
-	D, b, c	int	// 2 unexported fields
-}
-
-// The S2 struct; all fields are exported.
-type S2 struct {
-	S1
-	A, B, C	float	// 3 exported fields
-}
-
-// The IZ interface; it is empty.
-type SZ interface{}
-
-// The I0 interface; no method is exported.
-type I0 interface {
-	f(x int) int	// unexported method
-}
-
-// The I1 interface; some methods are not exported.
-type I1 interface {
-	I0
-	F(x float) float	// exported methods
-	g(x int) int		// unexported method
-}
-
-// The I2 interface; all methods are exported.
-type I2 interface {
-	I0
-	F(x float) float	// exported method
-	G(x float) float	// exported method
-}
-
-// The S3 struct; all comments except for the last one must appear in the export.
-type S3 struct {
-	// lead comment for F1
-	F1	int	// line comment for F1
-	// lead comment for F2
-	F2	int	// line comment for F2
-	f3	int	// f3 is not exported
-}
-
-// Here is a comment.
-// Here is an accidentally unindented line.
-// More comment.
-//
-//dir:ect ive
-type directiveCheck struct{}
-
-// This comment group should be separated
-// with a newline from the next comment
-// group.
-
-// This comment should NOT be associated with the next declaration.
-
-var x int	// x
-var ()
-
-// This comment SHOULD be associated with f0.
-func f0() {
-	const pi = 3.14	// pi
-	var s1 struct{}	/* an empty struct */	/* foo */
-	// a struct constructor
-	// --------------------
-	var s2 struct{} = struct{}{}
-	x := pi
-}
-
-// This comment should be associated with f1, with one blank line before the comment.
-func f1() {
-	f0()
-	/* 1 */
-	// 2
-	/* 3 */
-	/* 4 */
-	f0()
-}
-
-func _() {
-	// this comment should be properly indented
-}
-
-func _(x int) int {
-	if x < 0 {	// the tab printed before this comment's // must not affect the remaining lines
-		return -x	// this statement should be properly indented
-	}
-	if x < 0 {	/* the tab printed before this comment's /* must not affect the remaining lines */
-		return -x	// this statement should be properly indented
-	}
-	return x
-}
-
-func typeswitch(x interface{}) {
-	switch v := x.(type) {
-	case bool, int, float:
-	case string:
-	default:
-	}
-
-	switch x.(type) {
-	}
-
-	switch v0, ok := x.(int); v := x.(type) {
-	}
-
-	switch v0, ok := x.(int); x.(type) {
-	case byte:	// this comment should be on the same line as the keyword
-		// this comment should be normally indented
-		_ = 0
-	case bool, int, float:
-		// this comment should be indented
-	case string:
-	default:
-		// this comment should be indented
-	}
-	// this comment should not be indented
-}
-
-//
-// Indentation of comments after possibly indented multi-line constructs
-// (test cases for issue 3147).
-//
-
-func _() {
-	s := 1 +
-		2
-	// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2	// comment
-	// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2	// comment
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-
-	// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2	// comment
-
-	// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2	// comment
-
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-
-	// should be indented like s
-	_ = 0
-}
-
-// Test case from issue 3147.
-func f() {
-	templateText := "a" +	// A
-		"b" +	// B
-		"c"	// C
-
-	// should be aligned with f()
-	f()
-}
-
-// Modified test case from issue 3147.
-func f() {
-	templateText := "a" +	// A
-		"b" +	// B
-		"c"	// C
-
-		// may not be aligned with f() (source is not aligned)
-	f()
-}
-
-//
-// Test cases for alignment of lines in general comments.
-//
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line */
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line */
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line */
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line */
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line */
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line */
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line */
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line */
-}
-
-// Issue 9751.
-func _() {
-	/*a string
-
-	b string*/
-
-	/*A string
-
-
-
-	Z string*/
-
-	/*a string
-
-	b string
-
-	c string*/
-
-	{
-		/*a string
-		b string*/
-
-		/*a string
-
-		b string*/
-
-		/*a string
-
-		b string
-
-		c string*/
-	}
-
-	{
-		/*a string
-		b string*/
-
-		/*a string
-
-		b string*/
-
-		/*a string
-
-		b string
-
-		c string*/
-	}
-
-	/*
-	 */
-
-	/*
-
-	 */
-
-	/*
-
-	 * line
-
-	 */
-}
-
-/*
- * line
- * of
- * stars
- */
-
-/* another line
- * of
- * stars */
-
-/*	and another line
- *	of
- *	stars */
-
-/* a line of
- * stars */
-
-/*	and another line of
- *	stars */
-
-/* a line of stars
- */
-
-/*	and another line of
- */
-
-/* a line of stars
- */
-
-/*	and another line of
- */
-
-/*
-aligned in middle
-here
-        not here
-*/
-
-/*
-blank line in middle:
-
-with no leading spaces on blank line.
-*/
-
-/*
-   aligned in middle
-   here
-           not here
-*/
-
-/*
-	blank line in middle:
-
-	with no leading spaces on blank line.
-*/
-
-func _() {
-	/*
-	 * line
-	 * of
-	 * stars
-	 */
-
-	/*
-		aligned in middle
-		here
-			not here
-	*/
-
-	/*
-		blank line in middle:
-
-		with no leading spaces on blank line.
-	*/
-}
-
-// Some interesting interspersed comments.
-// See below for more common cases.
-func _( /* this */ x /* is */ /* an */ int) {
-}
-
-func _( /* no params - extra blank before and after comment */ )	{}
-func _(a, b int /* params - no extra blank after comment */)		{}
-
-func _()	{ f( /* no args - extra blank before and after comment */ ) }
-func _()	{ f(a, b /* args - no extra blank after comment */) }
-
-func _() {
-	f( /* no args - extra blank before and after comment */ )
-	f(a, b /* args - no extra blank after comment */)
-}
-
-func ( /* comment1 */ T /* comment2 */) _()	{}
-
-func _()	{ /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
-func _()	{ x := 0; /* comment */ y = x /* comment */ }
-
-func _() {
-	_ = 0
-	/* closing curly brace should be on new line */
-}
-
-func _() {
-	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
-}
-
-// Test cases from issue 1542:
-// Comments must not be placed before commas and cause invalid programs.
-func _() {
-	var a = []int{1, 2	/*jasldf*/}
-	_ = a
-}
-
-func _() {
-	var a = []int{1, 2}/*jasldf
-	 */
-
-	_ = a
-}
-
-func _() {
-	var a = []int{1, 2}// jasldf
-
-	_ = a
-}
-
-// Test cases from issues 11274, 15137:
-// Semicolon must not be lost when multiple statements are on the same line with a comment.
-func _() {
-	x := 0 /**/
-	y := 1
-}
-
-func _() {
-	f()
-	f()
-	f() /* comment */
-	f()
-	f() /* comment */
-	f()
-	f() /* a */ /* b */
-	f()
-	f() /* a */ /* b */
-	f()
-	f() /* a */ /* b */
-	f()
-}
-
-func _() {
-	f() /* a */ /* b */
-}
-
-// Comments immediately adjacent to punctuation followed by a newline
-// remain after the punctuation (looks better and permits alignment of
-// comments).
-func _() {
-	_ = T{
-		1,	// comment after comma
-		2,	/* comment after comma */
-		3,	// comment after comma
-	}
-	_ = T{
-		1,	// comment after comma
-		2,	/* comment after comma */
-		3,	// comment after comma
-	}
-	_ = T{
-		/* comment before literal */ 1,
-		2,	/* comment before comma - ok to move after comma */
-		3,	/* comment before comma - ok to move after comma */
-	}
-
-	for i = 0;	// comment after semicolon
-	i < 9;		/* comment after semicolon */
-	i++ {		// comment after opening curly brace
-	}
-
-	// TODO(gri) the last comment in this example should be aligned */
-	for i = 0;	// comment after semicolon
-	i < 9;		/* comment before semicolon - ok to move after semicolon */
-	i++ /* comment before opening curly brace */ {
-	}
-}
-
-// If there is no newline following punctuation, commas move before the punctuation.
-// This way, commas interspersed in lists stay with the respective expression.
-func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
-	f(x /* comment */, y)
-	f(x,	/* comment */
-		y)
-	f(
-		x,	/* comment */
-	)
-}
-
-func g(
-	x int,	/* comment */
-) {
-}
-
-type _ struct {
-	a, b /* comment */, c int
-}
-
-type _ struct {
-	a, b /* comment */, c int
-}
-
-func _() {
-	for a /* comment */, b := range x {
-	}
-}
-
-//extern foo
-func foo()	{}
-
-//export bar
-func bar()	{}
-
-// Print line directives correctly.
-
-// The following is a legal line directive.
-//
-//line foo:1
-func _() {
-	_ = 0
-	// The following is a legal line directive. It must not be indented:
-//line foo:2
-	_ = 1
-
-	// The following is not a legal line directive (it doesn't start in column 1):
-	//line foo:2
-	_ = 2
-
-	// The following is not a legal line directive (missing colon):
-//line foo -3
-	_ = 3
-}
-
-// Line comments with tabs
-func _() {
-	var finput *bufio.Reader	// input file
-	var stderr *bufio.Writer
-	var ftable *bufio.Writer	// y.go file
-	var foutput *bufio.Writer	// y.output file
-
-	var oflag string	// -o [y.go]		- y.go file
-	var vflag string	// -v [y.output]	- y.output file
-	var lflag bool		// -l			- disable line directives
-}
-
-// Trailing white space in comments should be trimmed
-func _() {
-	// This comment has 4 blanks following that should be trimmed:
-	/* Each line of this comment has blanks or tabs following that should be trimmed:
-	   line 2:
-	   line 3:
-	*/
-}
-
-var _ = []T{ /* lone comment */ }
-
-var _ = []T{
-	/* lone comment */
-}
-
-var _ = []T{
-	// lone comments
-	// in composite lit
-}
-
-var _ = [][]T{
-	{
-		// lone comments
-		// in composite lit
-	},
-}
-
-// TODO: gofmt doesn't add these tabs; make it so that these golden
-// tests run the printer in a way that it's exactly like gofmt.
-
-var _ = []T{	// lone comment
-}
-
-var _ = []T{	// lone comments
-	// in composite lit
-}
-
-func _()	{}
-
-func _()	{}
-
-/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/internal/backport/go/printer/testdata/comments.input b/internal/backport/go/printer/testdata/comments.input
deleted file mode 100644
index 40aa55b..0000000
--- a/internal/backport/go/printer/testdata/comments.input
+++ /dev/null
@@ -1,773 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a package for testing comment placement by go/printer.
-//
-package main
-
-import "fmt"  // fmt
-
-const c0 = 0  // zero
-const (
-	c1 = iota  // c1
-	c2  // c2
-)
-
-// Alignment of comments in declarations>
-const (
-	_ T = iota  // comment
-	_  // comment
-	_  // comment
-	_ = iota+10
-	_  // comments
-
-	_ = 10  // comment
-	_ T = 20  // comment
-)
-
-const (
-	_____ = iota // foo
-	_ // bar
-	_  = 0    // bal
-	_ // bat
-)
-
-const (
-	_ T = iota // comment
-	_ // comment
-	_ // comment
-	_ = iota + 10
-	_ // comment
-	_ = 10
-	_ = 20 // comment
-	_ T = 0 // comment
-)
-
-// The SZ struct; it is empty.
-type SZ struct {}
-
-// The S0 struct; no field is exported.
-type S0 struct {
-	int
-	x, y, z int  // 3 unexported fields
-}
-
-// The S1 struct; some fields are not exported.
-type S1 struct {
-	S0
-	A, B, C float  // 3 exported fields
-	D, b, c int  // 2 unexported fields
-}
-
-// The S2 struct; all fields are exported.
-type S2 struct {
-	S1
-	A, B, C float  // 3 exported fields
-}
-
-// The IZ interface; it is empty.
-type SZ interface {}
-
-// The I0 interface; no method is exported.
-type I0 interface {
-	f(x int) int  // unexported method
-}
-
-// The I1 interface; some methods are not exported.
-type I1 interface {
-	I0
-	F(x float) float  // exported methods
-	g(x int) int  // unexported method
-}
-
-// The I2 interface; all methods are exported.
-type I2 interface {
-	I0
-	F(x float) float  // exported method
-	G(x float) float  // exported method
-}
-
-// The S3 struct; all comments except for the last one must appear in the export.
-type S3 struct {
-	// lead comment for F1
-	F1 int // line comment for F1
-	// lead comment for F2
-	F2 int // line comment for F2
-	f3 int // f3 is not exported
-}
-
-// Here is a comment.
-//Here is an accidentally unindented line.
-//dir:ect ive
-// More comment.
-type directiveCheck struct{}
-
-// This comment group should be separated
-// with a newline from the next comment
-// group.
-
-// This comment should NOT be associated with the next declaration.
-
-var x int  // x
-var ()
-
-
-// This comment SHOULD be associated with f0.
-func f0() {
-	const pi = 3.14  // pi
-	var s1 struct {}  /* an empty struct */ /* foo */
-	// a struct constructor
-	// --------------------
-	var s2 struct {} = struct {}{}
-	x := pi
-}
-//
-// This comment should be associated with f1, with one blank line before the comment.
-//
-func f1() {
-	f0()
-	/* 1 */
-	// 2
-	/* 3 */
-	/* 4 */
-	f0()
-}
-
-
-func _() {
-// this comment should be properly indented
-}
-
-
-func _(x int) int {
-	if x < 0 {  // the tab printed before this comment's // must not affect the remaining lines
-		return -x  // this statement should be properly indented
-	}
-	if x < 0 {  /* the tab printed before this comment's /* must not affect the remaining lines */
-		return -x  // this statement should be properly indented
-	}
-	return x
-}
-
-
-func typeswitch(x interface{}) {
-	switch v := x.(type) {
-	case bool, int, float:
-	case string:
-	default:
-	}
-
-	switch x.(type) {
-	}
-
-	switch v0, ok := x.(int); v := x.(type) {
-	}
-
-	switch v0, ok := x.(int); x.(type) {
-	case byte:  // this comment should be on the same line as the keyword
-		// this comment should be normally indented
-		_ = 0
-	case bool, int, float:
-		// this comment should be indented
-	case string:
-	default:
-		// this comment should be indented
-	}
-	// this comment should not be indented
-}
-
-//
-// Indentation of comments after possibly indented multi-line constructs
-// (test cases for issue 3147).
-//
-
-func _() {
-	s := 1 +
-		2
-// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2 // comment
-		// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2 // comment
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-
-// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2 // comment
-
-		// should be indented like s
-}
-
-func _() {
-	s := 1 +
-		2 // comment
-
-	// should be indented like s
-	_ = 0
-}
-
-func _() {
-	s := 1 +
-		2
-
-	// should be indented like s
-	_ = 0
-}
-
-// Test case from issue 3147.
-func f() {
-	templateText := "a" + // A
-		"b" + // B
-		"c" // C
-
-	// should be aligned with f()
-	f()
-}
-
-// Modified test case from issue 3147.
-func f() {
-	templateText := "a" + // A
-		"b" + // B
-		"c" // C
-
-		// may not be aligned with f() (source is not aligned)
-	f()
-}
-
-//
-// Test cases for alignment of lines in general comments.
-//
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line
-	   */
-}
-
-func _() {
-	/* freestanding comment
-	   aligned		line
-	   aligned line */
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line
-		*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned		line
-		aligned line */
-}
-
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line
-	   */
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned		line
-	   aligned line */
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line
-		*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned		line
-		aligned line */
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line
-	   */
-}
-
-func _() {
-	/* freestanding comment
-	   aligned line */
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line
-		*/
-}
-
-func _() {
-	/*	freestanding comment
-		aligned line */
-}
-
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line
-	*/
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line
-	   */
-}
-
-func _() {
-	/*
-	   freestanding comment
-	   aligned line */
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line
-	*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line
-		*/
-}
-
-func _() {
-	/*
-		freestanding comment
-		aligned line */
-}
-
-// Issue 9751.
-func _() {
-	/*a string
-
-	b string*/
-
-	/*A string
-
-
-
-	Z string*/
-
-	/*a string
-
-	b string
-
-	c string*/
-
-	{
-		/*a string
-b string*/
-
-		/*a string
-
-b string*/
-
-		/*a string
-
-b string
-
-c string*/
-	}
-
-	{
-		/*a string
-				b string*/
-
-		/*a string
-
-				b string*/
-
-		/*a string
-
-				b string
-
-				c string*/
-	}
-
-	/*
-	*/
-
-	/*
-
-	*/
-
-	/*
-
-	 * line
-
-	*/
-}
-
-/*
- * line
- * of
- * stars
- */
-
-/* another line
- * of
- * stars */
-
-/*	and another line
- *	of
- *	stars */
-
-/* a line of
- * stars */
-
-/*	and another line of
- *	stars */
-
-/* a line of stars
-*/
-
-/*	and another line of
-*/
-
-/* a line of stars
- */
-
-/*	and another line of
- */
-
-/*
-aligned in middle
-here
-        not here
-*/
-
-/*
-blank line in middle:
-
-with no leading spaces on blank line.
-*/
-
-/*
-   aligned in middle
-   here
-           not here
-*/
-
-/*
-	blank line in middle:
-
-	with no leading spaces on blank line.
-*/
-
-func _() {
-	/*
-	 * line
-	 * of
-	 * stars
-	 */
-
-	/*
-	aligned in middle
-	here
-		not here
-	*/
-
-	/*
-	blank line in middle:
-
-	with no leading spaces on blank line.
-*/
-}
-
-
-// Some interesting interspersed comments.
-// See below for more common cases.
-func _(/* this */x/* is *//* an */ int) {
-}
-
-func _(/* no params - extra blank before and after comment */) {}
-func _(a, b int /* params - no extra blank after comment */) {}
-
-func _() { f(/* no args - extra blank before and after comment */) }
-func _() { f(a, b /* args - no extra blank after comment */) }
-
-func _() {
-	f(/* no args - extra blank before and after comment */)
-	f(a, b /* args - no extra blank after comment */)
-}
-
-func (/* comment1 */ T /* comment2 */) _() {}
-
-func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
-func _() { x := 0; /* comment */ y = x /* comment */ }
-
-func _() {
-	_ = 0
-	/* closing curly brace should be on new line */ }
-
-func _() {
-	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
-}
-
-// Test cases from issue 1542:
-// Comments must not be placed before commas and cause invalid programs.
-func _() {
-	var a = []int{1, 2, /*jasldf*/
-	}
-	_ = a
-}
-
-func _() {
-	var a = []int{1, 2, /*jasldf
-						*/
-	}
-	_ = a
-}
-
-func _() {
-	var a = []int{1, 2, // jasldf
-	}
-	_ = a
-}
-
-// Test cases from issues 11274, 15137:
-// Semicolon must not be lost when multiple statements are on the same line with a comment.
-func _() {
-    x := 0 /**/; y := 1
-}
-
-func _() {
-	f(); f()
-	f(); /* comment */ f()
-	f() /* comment */; f()
-	f(); /* a */ /* b */ f()
-	f() /* a */ /* b */; f()
-	f() /* a */; /* b */ f()
-}
-
-func _() {
-	f() /* a */ /* b */ }
-
-// Comments immediately adjacent to punctuation followed by a newline
-// remain after the punctuation (looks better and permits alignment of
-// comments).
-func _() {
-	_ = T{
-		1,    // comment after comma
-		2,    /* comment after comma */
-		3  ,  // comment after comma
-	}
-	_ = T{
-		1  ,// comment after comma
-		2  ,/* comment after comma */
-		3,// comment after comma
-	}
-	_ = T{
-		/* comment before literal */1,
-		2/* comment before comma - ok to move after comma */,
-		3  /* comment before comma - ok to move after comma */  ,
-	}
-
-	for
-		i=0;// comment after semicolon
-		i<9;/* comment after semicolon */
-		i++{// comment after opening curly brace
-	}
-
-	// TODO(gri) the last comment in this example should be aligned */
-	for
-		i=0;// comment after semicolon
-		i<9/* comment before semicolon - ok to move after semicolon */;
-		i++ /* comment before opening curly brace */ {
-	}
-}
-
-// If there is no newline following punctuation, commas move before the punctuation.
-// This way, commas interspersed in lists stay with the respective expression.
-func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
-	f(x /* comment */, y)
-	f(x /* comment */,
-	y)
-	f(
-		x /* comment */,
-	)
-}
-
-func g(
-	x int /* comment */,
-) {}
-
-type _ struct {
-	a, b /* comment */, c int
-}
-
-type _ struct { a, b /* comment */, c int }
-
-func _() {
-	for a /* comment */, b := range x {
-	}
-}
-
-//extern foo
-func foo() {}
-
-//export bar
-func bar() {}
-
-// Print line directives correctly.
-
-// The following is a legal line directive.
-//line foo:1
-func _() {
-	_ = 0
-// The following is a legal line directive. It must not be indented:
-//line foo:2
-	_ = 1
-
-// The following is not a legal line directive (it doesn't start in column 1):
-	//line foo:2
-	_ = 2
-
-// The following is not a legal line directive (missing colon):
-//line foo -3
-	_ = 3
-}
-
-// Line comments with tabs
-func _() {
-var	finput		*bufio.Reader			// input file
-var	stderr		*bufio.Writer
-var	ftable		*bufio.Writer			// y.go file
-var	foutput		*bufio.Writer			// y.output file
-
-var	oflag		string				// -o [y.go]		- y.go file
-var	vflag		string				// -v [y.output]	- y.output file
-var	lflag		bool				// -l			- disable line directives
-}
-
-// Trailing white space in comments should be trimmed
-func _() {
-// This comment has 4 blanks following that should be trimmed:
-/* Each line of this comment has blanks or tabs following that should be trimmed:
-   line 2:
-   line 3:
-*/
-}
-
-var _ = []T{/* lone comment */}
-
-var _ = []T{
-/* lone comment */
-}
-
-var _ = []T{
-// lone comments
-// in composite lit
-}
-
-var _ = [][]T{
-	{
-		// lone comments
-		// in composite lit
-	},
-}
-
-// TODO: gofmt doesn't add these tabs; make it so that these golden
-// tests run the printer in a way that it's exactly like gofmt.
-
-var _ = []T{// lone comment
-}
-
-var _ = []T{// lone comments
-// in composite lit
-}
-
-func _() {}
-
-//
-func _() {}
-
-/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/internal/backport/go/printer/testdata/comments.x b/internal/backport/go/printer/testdata/comments.x
deleted file mode 100644
index 5d088ab..0000000
--- a/internal/backport/go/printer/testdata/comments.x
+++ /dev/null
@@ -1,55 +0,0 @@
-// This is a package for testing comment placement by go/printer.
-package main
-
-// The SZ struct; it is empty.
-type SZ struct{}
-
-// The S0 struct; no field is exported.
-type S0 struct {
-	// contains filtered or unexported fields
-}
-
-// The S1 struct; some fields are not exported.
-type S1 struct {
-	S0
-	A, B, C	float	// 3 exported fields
-	D	int	// 2 unexported fields
-	// contains filtered or unexported fields
-}
-
-// The S2 struct; all fields are exported.
-type S2 struct {
-	S1
-	A, B, C	float	// 3 exported fields
-}
-
-// The IZ interface; it is empty.
-type SZ interface{}
-
-// The I0 interface; no method is exported.
-type I0 interface {
-	// contains filtered or unexported methods
-}
-
-// The I1 interface; some methods are not exported.
-type I1 interface {
-	I0
-	F(x float) float	// exported methods
-	// contains filtered or unexported methods
-}
-
-// The I2 interface; all methods are exported.
-type I2 interface {
-	I0
-	F(x float) float	// exported method
-	G(x float) float	// exported method
-}
-
-// The S3 struct; all comments except for the last one must appear in the export.
-type S3 struct {
-	// lead comment for F1
-	F1	int	// line comment for F1
-	// lead comment for F2
-	F2	int	// line comment for F2
-	// contains filtered or unexported fields
-}
diff --git a/internal/backport/go/printer/testdata/comments2.golden b/internal/backport/go/printer/testdata/comments2.golden
deleted file mode 100644
index 83213d1..0000000
--- a/internal/backport/go/printer/testdata/comments2.golden
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a package for testing comment placement by go/printer.
-package main
-
-// Test cases for idempotent comment formatting (was issue 1835).
-/*
-c1a
-*/
-/*
-   c1b
-*/
-/* foo
-c1c
-*/
-/* foo
-   c1d
-*/
-/*
-c1e
-foo */
-/*
-   c1f
-   foo */
-
-func f() {
-	/*
-	   c2a
-	*/
-	/*
-	   c2b
-	*/
-	/* foo
-	   c2c
-	*/
-	/* foo
-	   c2d
-	*/
-	/*
-	   c2e
-	   foo */
-	/*
-	   c2f
-	   foo */
-}
-
-func g() {
-	/*
-	   c3a
-	*/
-	/*
-	   c3b
-	*/
-	/* foo
-	   c3c
-	*/
-	/* foo
-	   c3d
-	*/
-	/*
-	   c3e
-	   foo */
-	/*
-	   c3f
-	   foo */
-}
-
-// Test case taken literally from issue 1835.
-func main() {
-	/*
-	   prints test 5 times
-	*/
-	for i := 0; i < 5; i++ {
-		println("test")
-	}
-}
-
-func issue5623() {
-L:
-	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
-	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	/* comment */
-
-	_ = yyyyyyyyyyyyyyyy			/* comment - should be aligned */
-	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
-
-LLLLLLL:
-	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
-	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
-
-LL:
-LLLLL:
-	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	/* comment */
-	_ = yyyyyyyyyyyyyyyy			/* comment - should be aligned */
-
-	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
-	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
-
-	// test case from issue
-label:
-	mask := uint64(1)<<c - 1		// Allocation mask
-	used := atomic.LoadUint64(&h.used)	// Current allocations
-}
-
-// Test cases for issue 18782
-var _ = [][]int{
-	/*       a, b, c, d, e */
-	/* a */ {0, 0, 0, 0, 0},
-	/* b */ {0, 5, 4, 4, 4},
-	/* c */ {0, 4, 5, 4, 4},
-	/* d */ {0, 4, 4, 5, 4},
-	/* e */ {0, 4, 4, 4, 5},
-}
-
-var _ = T{ /* a */ 0}
-
-var _ = T{ /* a */ /* b */ 0}
-
-var _ = T{	/* a */	/* b */
-	/* c */ 0,
-}
-
-var _ = T{	/* a */	/* b */
-	/* c */
-	/* d */ 0,
-}
-
-var _ = T{
-	/* a */
-	/* b */ 0,
-}
-
-var _ = T{ /* a */ {}}
-
-var _ = T{ /* a */ /* b */ {}}
-
-var _ = T{	/* a */	/* b */
-	/* c */ {},
-}
-
-var _ = T{	/* a */	/* b */
-	/* c */
-	/* d */ {},
-}
-
-var _ = T{
-	/* a */
-	/* b */ {},
-}
-
-var _ = []T{
-	func() {
-		var _ = [][]int{
-			/*       a, b, c, d, e */
-			/* a */ {0, 0, 0, 0, 0},
-			/* b */ {0, 5, 4, 4, 4},
-			/* c */ {0, 4, 5, 4, 4},
-			/* d */ {0, 4, 4, 5, 4},
-			/* e */ {0, 4, 4, 4, 5},
-		}
-	},
-}
diff --git a/internal/backport/go/printer/testdata/comments2.input b/internal/backport/go/printer/testdata/comments2.input
deleted file mode 100644
index 8d38c41..0000000
--- a/internal/backport/go/printer/testdata/comments2.input
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a package for testing comment placement by go/printer.
-//
-package main
-
-// Test cases for idempotent comment formatting (was issue 1835).
-/*
-c1a
-*/
-/*
-   c1b
-*/
-/* foo
-c1c
-*/
-/* foo
-   c1d
-*/
-/*
-c1e
-foo */
-/*
-   c1f
-   foo */
-
-func f() {
-/*
-c2a
-*/
-/*
-   c2b
-*/
-/* foo
-c2c
-*/
-/* foo
-   c2d
-*/
-/*
-c2e
-foo */
-/*
-   c2f
-   foo */
-}
-
-func g() {
-/*
-c3a
-*/
-/*
-   c3b
-*/
-/* foo
-c3c
-*/
-/* foo
-   c3d
-*/
-/*
-c3e
-foo */
-/*
-   c3f
-   foo */
-}
-
-// Test case taken literally from issue 1835.
-func main() {
-/*
-prints test 5 times
-*/
-   for i := 0; i < 5; i++ {
-      println("test")
-   }
-}
-
-func issue5623() {
-L:
-   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
-   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
-
-   _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
-   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
-
-LLLLLLL:
-   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
-   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
-
-LL:
-LLLLL:
-   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
-   _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
-
-   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
-   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
-
-// test case from issue
-label:
-   mask := uint64(1)<<c - 1 // Allocation mask
-   used := atomic.LoadUint64(&h.used) // Current allocations
-}
-
-// Test cases for issue 18782
-var _ = [][]int{
-   /*       a, b, c, d, e */
-   /* a */ {0, 0, 0, 0, 0},
-   /* b */ {0, 5, 4, 4, 4},
-   /* c */ {0, 4, 5, 4, 4},
-   /* d */ {0, 4, 4, 5, 4},
-   /* e */ {0, 4, 4, 4, 5},
-}
-
-var _ = T{ /* a */ 0,
-}
-
-var _ = T{ /* a */ /* b */ 0,
-}
-
-var _ = T{ /* a */ /* b */
-   /* c */ 0,
-}
-
-var _ = T{ /* a */ /* b */
-   /* c */
-   /* d */ 0,
-}
-
-var _ = T{
-   /* a */
-   /* b */ 0,
-}
-
-var _ = T{ /* a */ {},
-}
-
-var _ = T{ /* a */ /* b */ {},
-}
-
-var _ = T{ /* a */ /* b */
-   /* c */ {},
-}
-
-var _ = T{ /* a */ /* b */
-   /* c */
-   /* d */ {},
-}
-
-var _ = T{
-   /* a */
-   /* b */ {},
-}
-
-var _ = []T{
-   func() {
-      var _ = [][]int{
-         /*       a, b, c, d, e */
-         /* a */ {0, 0, 0, 0, 0},
-         /* b */ {0, 5, 4, 4, 4},
-         /* c */ {0, 4, 5, 4, 4},
-         /* d */ {0, 4, 4, 5, 4},
-         /* e */ {0, 4, 4, 4, 5},
-      }
-   },
-}
diff --git a/internal/backport/go/printer/testdata/complit.input b/internal/backport/go/printer/testdata/complit.input
deleted file mode 100644
index 82806a4..0000000
--- a/internal/backport/go/printer/testdata/complit.input
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package complit
-
-var (
-	// Multi-line declarations
-	V1 = T{
-		F1: "hello",
-		f2: 1,
-	}
-	V2 = T{
-		f2: 1,
-		F1: "hello",
-	}
-	V3 = T{
-		F1: "hello",
-		F2: T2{
-			A: "world",
-			b: "hidden",
-		},
-		f3: T2{
-			A: "world",
-		},
-	}
-	V4 = T{
-		f2: 1,
-	}
-
-	// Single-line declarations
-	V5 = T{F1: "hello", f2: 1}
-	V6 = T{f2: 1, F1: "hello"}
-	V7 = T{f2: 1}
-
-	// Mixed-mode declarations
-	V8 = T{
-		F1: "hello", f2: 1,
-		F3: "world",
-		f4: 2}
-	V9 = T{
-	f2: 1, F1: "hello",}
-	V10 = T{
-		F1: "hello", f2: 1,
-		f3: 2,
-		F4: "world", f5: 3,
-	}
-
-	// Other miscellaneous declarations
-	V11 = T{
-		t{
-			A: "world",
-			b: "hidden",
-		},
-		f2: t{
-			A: "world",
-			b: "hidden",
-		},
-	}
-	V12 = T{
-		F1: make(chan int),
-		f2: []int{},
-		F3: make(map[int]string), f4: 1,
-	}
-)
\ No newline at end of file
diff --git a/internal/backport/go/printer/testdata/complit.x b/internal/backport/go/printer/testdata/complit.x
deleted file mode 100644
index 458ac61..0000000
--- a/internal/backport/go/printer/testdata/complit.x
+++ /dev/null
@@ -1,62 +0,0 @@
-package complit
-
-var (
-	// Multi-line declarations
-	V1	= T{
-		F1: "hello",
-		// contains filtered or unexported fields
-	}
-	V2	= T{
-
-		F1: "hello",
-		// contains filtered or unexported fields
-	}
-	V3	= T{
-		F1:	"hello",
-		F2: T2{
-			A: "world",
-			// contains filtered or unexported fields
-		},
-		// contains filtered or unexported fields
-	}
-	V4	= T{
-		// contains filtered or unexported fields
-	}
-
-	// Single-line declarations
-	V5	= T{F1: "hello", /* contains filtered or unexported fields */}
-	V6	= T{F1: "hello", /* contains filtered or unexported fields */}
-	V7	= T{/* contains filtered or unexported fields */}
-
-	// Mixed-mode declarations
-	V8	= T{
-		F1:	"hello",
-		F3:	"world",
-		// contains filtered or unexported fields
-	}
-	V9	= T{
-		F1: "hello",
-		// contains filtered or unexported fields
-	}
-	V10	= T{
-		F1:	"hello",
-
-		F4:	"world",
-		// contains filtered or unexported fields
-	}
-
-	// Other miscellaneous declarations
-	V11	= T{
-		t{
-			A: "world",
-			// contains filtered or unexported fields
-		},
-		// contains filtered or unexported fields
-	}
-	V12	= T{
-		F1:	make(chan int),
-
-		F3:	make(map[int]string),
-		// contains filtered or unexported fields
-	}
-)
diff --git a/internal/backport/go/printer/testdata/declarations.golden b/internal/backport/go/printer/testdata/declarations.golden
deleted file mode 100644
index fe0f783..0000000
--- a/internal/backport/go/printer/testdata/declarations.golden
+++ /dev/null
@@ -1,1008 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package imports
-
-import "io"
-
-import (
-	_ "io"
-)
-
-import _ "io"
-
-import (
-	"io"
-	"io"
-	"io"
-)
-
-import (
-	"io"
-	aLongRename "io"
-
-	b "io"
-)
-
-import (
-	"unrenamed"
-	renamed "renameMe"
-	. "io"
-	_ "io"
-	"io"
-	. "os"
-)
-
-// no newlines between consecutive single imports, but
-// respect extra line breaks in the source (at most one empty line)
-import _ "io"
-import _ "io"
-import _ "io"
-
-import _ "os"
-import _ "os"
-import _ "os"
-
-import _ "fmt"
-import _ "fmt"
-import _ "fmt"
-
-import "foo"	// a comment
-import "bar"	// a comment
-
-import (
-	_ "foo"
-	// a comment
-	"bar"
-	"foo"	// a comment
-	"bar"	// a comment
-)
-
-// comments + renames
-import (
-	"unrenamed"	// a comment
-	renamed "renameMe"
-	. "io"		/* a comment */
-	_ "io/ioutil"	// a comment
-	"io"		// testing alignment
-	. "os"
-	// a comment
-)
-
-// a case that caused problems in the past (comment placement)
-import (
-	. "fmt"
-	"io"
-	"malloc"	// for the malloc count test only
-	"math"
-	"strings"
-	"testing"
-)
-
-// more import examples
-import (
-	"xxx"
-	"much_longer_name"	// comment
-	"short_name"		// comment
-)
-
-import (
-	_ "xxx"
-	"much_longer_name"	// comment
-)
-
-import (
-	mymath "math"
-	"/foo/bar/long_package_path"	// a comment
-)
-
-import (
-	"package_a"	// comment
-	"package_b"
-	my_better_c "package_c"	// comment
-	"package_d"		// comment
-	my_e "package_e"	// comment
-
-	"package_a"	// comment
-	"package_bb"
-	"package_ccc"	// comment
-	"package_dddd"	// comment
-)
-
-// print import paths as double-quoted strings
-// (we would like more test cases but the go/parser
-// already excludes most incorrect paths, and we don't
-// bother setting up test-ASTs manually)
-import (
-	"fmt"
-	"math"
-)
-
-// at least one empty line between declarations of different kind
-import _ "io"
-
-var _ int
-
-// at least one empty line between declarations of the same kind
-// if there is associated documentation (was issue 2570)
-type T1 struct{}
-
-// T2 comment
-type T2 struct {
-}	// should be a two-line struct
-
-// T3 comment
-type T2 struct {
-}	// should be a two-line struct
-
-// printing of constant literals
-const (
-	_	= "foobar"
-	_	= "a۰۱۸"
-	_	= "foo६४"
-	_	= "bar9876"
-	_	= 0
-	_	= 1
-	_	= 123456789012345678890
-	_	= 01234567
-	_	= 0xcafebabe
-	_	= 0.
-	_	= .0
-	_	= 3.14159265
-	_	= 1e0
-	_	= 1e+100
-	_	= 1e-100
-	_	= 2.71828e-1000
-	_	= 0i
-	_	= 1i
-	_	= 012345678901234567889i
-	_	= 123456789012345678890i
-	_	= 0.i
-	_	= .0i
-	_	= 3.14159265i
-	_	= 1e0i
-	_	= 1e+100i
-	_	= 1e-100i
-	_	= 2.71828e-1000i
-	_	= 'a'
-	_	= '\000'
-	_	= '\xFF'
-	_	= '\uff16'
-	_	= '\U0000ff16'
-	_	= `foobar`
-	_	= `foo
----
----
-bar`
-)
-
-func _() {
-	type _ int
-	type _ *int
-	type _ []int
-	type _ map[string]int
-	type _ chan int
-	type _ func() int
-
-	var _ int
-	var _ *int
-	var _ []int
-	var _ map[string]int
-	var _ chan int
-	var _ func() int
-
-	type _ struct{}
-	type _ *struct{}
-	type _ []struct{}
-	type _ map[string]struct{}
-	type _ chan struct{}
-	type _ func() struct{}
-
-	type _ interface{}
-	type _ *interface{}
-	type _ []interface{}
-	type _ map[string]interface{}
-	type _ chan interface{}
-	type _ func() interface{}
-
-	var _ struct{}
-	var _ *struct{}
-	var _ []struct{}
-	var _ map[string]struct{}
-	var _ chan struct{}
-	var _ func() struct{}
-
-	var _ interface{}
-	var _ *interface{}
-	var _ []interface{}
-	var _ map[string]interface{}
-	var _ chan interface{}
-	var _ func() interface{}
-}
-
-// don't lose blank lines in grouped declarations
-const (
-	_	int	= 0
-	_	float	= 1
-
-	_	string	= "foo"
-
-	_	= iota
-	_
-
-	// a comment
-	_
-
-	_
-)
-
-type (
-	_	int
-	_	struct{}
-
-	_	interface{}
-
-	// a comment
-	_	map[string]int
-)
-
-var (
-	_	int	= 0
-	_	float	= 1
-
-	_	string	= "foo"
-
-	_	bool
-
-	// a comment
-	_	bool
-)
-
-// don't lose blank lines in this struct
-type _ struct {
-	String	struct {
-		Str, Len int
-	}
-	Slice	struct {
-		Array, Len, Cap int
-	}
-	Eface	struct {
-		Typ, Ptr int
-	}
-
-	UncommonType	struct {
-		Name, PkgPath int
-	}
-	CommonType	struct {
-		Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
-	}
-	Type	struct {
-		Typ, Ptr int
-	}
-	StructField	struct {
-		Name, PkgPath, Typ, Tag, Offset int
-	}
-	StructType	struct {
-		Fields int
-	}
-	PtrType	struct {
-		Elem int
-	}
-	SliceType	struct {
-		Elem int
-	}
-	ArrayType	struct {
-		Elem, Len int
-	}
-
-	Stktop	struct {
-		Stackguard, Stackbase, Gobuf int
-	}
-	Gobuf	struct {
-		Sp, Pc, G int
-	}
-	G	struct {
-		Stackbase, Sched, Status, Alllink int
-	}
-}
-
-// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
-type _ struct{}
-type _ struct {
-}
-
-type _ interface{}
-type _ interface {
-}
-
-// no tabs for single or ungrouped decls
-func _() {
-	const xxxxxx = 0
-	type x int
-	var xxx int
-	var yyyy float = 3.14
-	var zzzzz = "bar"
-
-	const (
-		xxxxxx = 0
-	)
-	type (
-		x int
-	)
-	var (
-		xxx int
-	)
-	var (
-		yyyy float = 3.14
-	)
-	var (
-		zzzzz = "bar"
-	)
-}
-
-// tabs for multiple or grouped decls
-func _() {
-	// no entry has a type
-	const (
-		zzzzzz	= 1
-		z	= 2
-		zzz	= 3
-	)
-	// some entries have a type
-	const (
-		xxxxxx			= 1
-		x			= 2
-		xxx			= 3
-		yyyyyyyy	float	= iota
-		yyyy			= "bar"
-		yyy
-		yy	= 2
-	)
-}
-
-func _() {
-	// no entry has a type
-	var (
-		zzzzzz	= 1
-		z	= 2
-		zzz	= 3
-	)
-	// no entry has a value
-	var (
-		_	int
-		_	float
-		_	string
-
-		_	int	// comment
-		_	float	// comment
-		_	string	// comment
-	)
-	// some entries have a type
-	var (
-		xxxxxx		int
-		x		float
-		xxx		string
-		yyyyyyyy	int	= 1234
-		y		float	= 3.14
-		yyyy			= "bar"
-		yyy		string	= "foo"
-	)
-	// mixed entries - all comments should be aligned
-	var (
-		a, b, c			int
-		x			= 10
-		d			int			// comment
-		y				= 20		// comment
-		f, ff, fff, ffff	int	= 0, 1, 2, 3	// comment
-	)
-	// respect original line breaks
-	var _ = []T{
-		T{0x20, "Telugu"},
-	}
-	var _ = []T{
-		// respect original line breaks
-		T{0x20, "Telugu"},
-	}
-}
-
-// use the formatted output rather than the input to decide when to align
-// (was issue 4505)
-const (
-	short		= 2 * (1 + 2)
-	aMuchLongerName	= 3
-)
-
-var (
-	short		= X{}
-	aMuchLongerName	= X{}
-
-	x1	= X{}	// foo
-	x2	= X{}	// foo
-)
-
-func _() {
-	type (
-		xxxxxx	int
-		x	float
-		xxx	string
-		xxxxx	[]x
-		xx	struct{}
-		xxxxxxx	struct {
-			_, _	int
-			_	float
-		}
-		xxxx	chan<- string
-	)
-}
-
-// alignment of "=" in consecutive lines (extended example from issue 1414)
-const (
-	umax	uint	= ^uint(0)		// maximum value for a uint
-	bpu		= 1 << (5 + umax>>63)	// bits per uint
-	foo
-	bar	= -1
-)
-
-// typical enum
-const (
-	a	MyType	= iota
-	abcd
-	b
-	c
-	def
-)
-
-// excerpt from godoc.go
-var (
-	goroot		= flag.String("goroot", runtime.GOROOT(), "Go root directory")
-	testDir		= flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
-	pkgPath		= flag.String("path", "", "additional package directories (colon-separated)")
-	filter		= flag.String("filter", "", "filter file containing permitted package directory paths")
-	filterMin	= flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
-	filterDelay	delayTime	// actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
-)
-
-// formatting of structs
-type _ struct{}
-
-type _ struct { /* this comment should be visible */
-}
-
-type _ struct {
-	// this comment should be visible and properly indented
-}
-
-type _ struct {	// this comment must not change indentation
-	f			int
-	f, ff, fff, ffff	int
-}
-
-type _ struct {
-	string
-}
-
-type _ struct {
-	string	// comment
-}
-
-type _ struct {
-	string "tag"
-}
-
-type _ struct {
-	string "tag"	// comment
-}
-
-type _ struct {
-	f int
-}
-
-type _ struct {
-	f int	// comment
-}
-
-type _ struct {
-	f int "tag"
-}
-
-type _ struct {
-	f int "tag"	// comment
-}
-
-type _ struct {
-	bool
-	a, b, c			int
-	int			"tag"
-	ES				// comment
-	float			"tag"	// comment
-	f			int	// comment
-	f, ff, fff, ffff	int	// comment
-	g			float	"tag"
-	h			float	"tag"	// comment
-}
-
-type _ struct {
-	a, b,
-	c, d	int	// this line should be indented
-	u, v, w, x	float	// this line should be indented
-	p, q,
-	r, s	float	// this line should be indented
-}
-
-// difficult cases
-type _ struct {
-	bool		// comment
-	text	[]byte	// comment
-}
-
-// formatting of interfaces
-type EI interface{}
-
-type _ interface {
-	EI
-}
-
-type _ interface {
-	f()
-	fffff()
-}
-
-type _ interface {
-	EI
-	f()
-	fffffg()
-}
-
-type _ interface {	// this comment must not change indentation
-	EI				// here's a comment
-	f()				// no blank between identifier and ()
-	fffff()				// no blank between identifier and ()
-	gggggggggggg(x, y, z int)	// hurray
-}
-
-// formatting of variable declarations
-func _() {
-	type day struct {
-		n		int
-		short, long	string
-	}
-	var (
-		Sunday		= day{0, "SUN", "Sunday"}
-		Monday		= day{1, "MON", "Monday"}
-		Tuesday		= day{2, "TUE", "Tuesday"}
-		Wednesday	= day{3, "WED", "Wednesday"}
-		Thursday	= day{4, "THU", "Thursday"}
-		Friday		= day{5, "FRI", "Friday"}
-		Saturday	= day{6, "SAT", "Saturday"}
-	)
-}
-
-// formatting of multi-line variable declarations
-var a1, b1, c1 int	// all on one line
-
-var a2, b2,
-	c2 int	// this line should be indented
-
-var (
-	a3, b3,
-	c3, d3	int	// this line should be indented
-	a4, b4, c4	int	// this line should be indented
-)
-
-// Test case from issue 3304: multi-line declarations must end
-// a formatting section and not influence indentation of the
-// next line.
-var (
-	minRefreshTimeSec	= flag.Int64("min_refresh_time_sec", 604800,
-		"minimum time window between two refreshes for a given user.")
-	x	= flag.Int64("refresh_user_rollout_percent", 100,
-		"temporary flag to ramp up the refresh user rpc")
-	aVeryLongVariableName	= stats.GetVarInt("refresh-user-count")
-)
-
-func _() {
-	var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
-		Headers:	map[string]string{},
-		Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
-			0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
-			0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
-			0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
-		},
-	}
-}
-
-func _() {
-	var Universe = Scope{
-		Names: map[string]*Ident{
-			// basic types
-			"bool":		nil,
-			"byte":		nil,
-			"int8":		nil,
-			"int16":	nil,
-			"int32":	nil,
-			"int64":	nil,
-			"uint8":	nil,
-			"uint16":	nil,
-			"uint32":	nil,
-			"uint64":	nil,
-			"float32":	nil,
-			"float64":	nil,
-			"string":	nil,
-
-			// convenience types
-			"int":		nil,
-			"uint":		nil,
-			"uintptr":	nil,
-			"float":	nil,
-
-			// constants
-			"false":	nil,
-			"true":		nil,
-			"iota":		nil,
-			"nil":		nil,
-
-			// functions
-			"cap":		nil,
-			"len":		nil,
-			"new":		nil,
-			"make":		nil,
-			"panic":	nil,
-			"panicln":	nil,
-			"print":	nil,
-			"println":	nil,
-		},
-	}
-}
-
-// alignment of map composite entries
-var _ = map[int]int{
-	// small key sizes: always align even if size ratios are large
-	a:			a,
-	abcdefghabcdefgh:	a,
-	ab:			a,
-	abc:			a,
-	abcdefgabcdefg:		a,
-	abcd:			a,
-	abcde:			a,
-	abcdef:			a,
-
-	// mixed key sizes: align when key sizes change within accepted ratio
-	abcdefgh:		a,
-	abcdefghabcdefg:	a,
-	abcdefghij:		a,
-	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij:	a,	// outlier - do not align with previous line
-	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij:		a,	// align with previous line
-
-	ab:	a,	// do not align with previous line
-	abcde:	a,	// align with previous line
-}
-
-// alignment of map composite entries: test cases from issue 3965
-// aligned
-var _ = T1{
-	a:			x,
-	b:			y,
-	cccccccccccccccccccc:	z,
-}
-
-// not aligned
-var _ = T2{
-	a:			x,
-	b:			y,
-	ccccccccccccccccccccc:	z,
-}
-
-// aligned
-var _ = T3{
-	aaaaaaaaaaaaaaaaaaaa:	x,
-	b:			y,
-	c:			z,
-}
-
-// not aligned
-var _ = T4{
-	aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:	x,
-	b:						y,
-	c:						z,
-}
-
-// no alignment of map composite entries if they are not the first entry on a line
-var _ = T{0: 0}	// not aligned
-var _ = T{0: 0,	// not aligned
-	1:	1,				// aligned
-	22:	22,				// aligned
-	333:	333, 1234: 12, 12345: 0,	// first on line aligned
-}
-
-// test cases form issue 8685
-// not aligned
-var _ = map[int]string{1: "spring", 2: "summer",
-	3:	"autumn", 4: "winter"}
-
-// not aligned
-var _ = map[string]string{"a": "spring", "b": "summer",
-	"c":	"autumn", "d": "winter"}
-
-// aligned
-var _ = map[string]string{"a": "spring",
-	"b":	"summer",
-	"c":	"autumn",
-	"d":	"winter"}
-
-func _() {
-	var _ = T{
-		a,	// must introduce trailing comma
-	}
-}
-
-// formatting of function results
-func _() func()				{}
-func _() func(int)			{ return nil }
-func _() func(int) int			{ return nil }
-func _() func(int) func(int) func()	{ return nil }
-
-// formatting of consecutive single-line functions
-func _()	{}
-func _()	{}
-func _()	{}
-
-func _()	{}	// an empty line before this function
-func _()	{}
-func _()	{}
-
-func _()		{ f(1, 2, 3) }
-func _(x int) int	{ y := x; return y + 1 }
-func _() int		{ type T struct{}; var x T; return x }
-
-// these must remain multi-line since they are multi-line in the source
-func _() {
-	f(1, 2, 3)
-}
-func _(x int) int {
-	y := x
-	return y + 1
-}
-func _() int {
-	type T struct{}
-	var x T
-	return x
-}
-
-// making function declarations safe for new semicolon rules
-func _()	{ /* single-line function because of "short-ish" comment */ }
-func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */
-}
-
-func _() {
-	/* multi-line func because block is on multiple lines */
-}
-
-// test case for issue #19544
-func _()	{}
-func _longer_name_() {	// this comment must not force the {} from above to alignment
-	// multiple lines
-}
-
-// ellipsis parameters
-func _(...int)
-func _(...*int)
-func _(...[]int)
-func _(...struct{})
-func _(bool, ...interface{})
-func _(bool, ...func())
-func _(bool, ...func(...int))
-func _(bool, ...map[string]int)
-func _(bool, ...chan int)
-
-func _(b bool, x ...int)
-func _(b bool, x ...*int)
-func _(b bool, x ...[]int)
-func _(b bool, x ...struct{})
-func _(x ...interface{})
-func _(x ...func())
-func _(x ...func(...int))
-func _(x ...map[string]int)
-func _(x ...chan int)
-
-// these parameter lists must remain multi-line since they are multi-line in the source
-func _(bool,
-	int) {
-}
-func _(x bool,
-	y int) {
-}
-func _(x,
-	y bool) {
-}
-func _(bool,	// comment
-	int) {
-}
-func _(x bool,	// comment
-	y int) {
-}
-func _(x,	// comment
-	y bool) {
-}
-func _(bool,	// comment
-	// comment
-	int) {
-}
-func _(x bool,	// comment
-	// comment
-	y int) {
-}
-func _(x,	// comment
-	// comment
-	y bool) {
-}
-func _(bool,
-	// comment
-	int) {
-}
-func _(x bool,
-	// comment
-	y int) {
-}
-func _(x,
-	// comment
-	y bool) {
-}
-func _(x,	// comment
-	y,	// comment
-	z bool) {
-}
-func _(x,	// comment
-	y,	// comment
-	z bool) {
-}
-func _(x int,	// comment
-	y float,	// comment
-	z bool) {
-}
-
-// properly indent multi-line signatures
-func ManageStatus(in <-chan *Status, req <-chan Request,
-	stat chan<- *TargetInfo,
-	TargetHistorySize int) {
-}
-
-func MultiLineSignature0(
-	a, b, c int,
-) {
-}
-
-func MultiLineSignature1(
-	a, b, c int,
-	u, v, w float,
-) {
-}
-
-func MultiLineSignature2(
-	a, b,
-	c int,
-) {
-}
-
-func MultiLineSignature3(
-	a, b,
-	c int, u, v,
-	w float,
-	x ...int) {
-}
-
-func MultiLineSignature4(
-	a, b, c int,
-	u, v,
-	w float,
-	x ...int) {
-}
-
-func MultiLineSignature5(
-	a, b, c int,
-	u, v, w float,
-	p, q,
-	r string,
-	x ...int) {
-}
-
-// make sure it also works for methods in interfaces
-type _ interface {
-	MultiLineSignature0(
-		a, b, c int,
-	)
-
-	MultiLineSignature1(
-		a, b, c int,
-		u, v, w float,
-	)
-
-	MultiLineSignature2(
-		a, b,
-		c int,
-	)
-
-	MultiLineSignature3(
-		a, b,
-		c int, u, v,
-		w float,
-		x ...int)
-
-	MultiLineSignature4(
-		a, b, c int,
-		u, v,
-		w float,
-		x ...int)
-
-	MultiLineSignature5(
-		a, b, c int,
-		u, v, w float,
-		p, q,
-		r string,
-		x ...int)
-}
-
-// omit superfluous parentheses in parameter lists
-func _(int)
-func _(int)
-func _(x int)
-func _(x int)
-func _(x, y int)
-func _(x, y int)
-
-func _() int
-func _() int
-func _() int
-
-func _() (x int)
-func _() (x int)
-func _() (x int)
-
-// special cases: some channel types require parentheses
-func _(x chan (<-chan int))
-func _(x chan (<-chan int))
-func _(x chan (<-chan int))
-
-func _(x chan<- (chan int))
-func _(x chan<- (chan int))
-func _(x chan<- (chan int))
-
-// don't introduce comma after last parameter if the closing ) is on the same line
-// even if the parameter type itself is multi-line (test cases from issue 4533)
-func _(...interface{})
-func _(...interface {
-	m()
-	n()
-})	// no extra comma between } and )
-
-func (t *T) _(...interface{})
-func (t *T) _(...interface {
-	m()
-	n()
-})	// no extra comma between } and )
-
-func _(interface{})
-func _(interface {
-	m()
-})	// no extra comma between } and )
-
-func _(struct{})
-func _(struct {
-	x	int
-	y	int
-})	// no extra comma between } and )
-
-// alias declarations
-
-type c0 struct{}
-type c1 = C
-type c2 = struct{ x int }
-type c3 = p.C
-type (
-	s	struct{}
-	a	= A
-	b	= A
-	c	= foo
-	d	= interface{}
-	ddd	= p.Foo
-)
diff --git a/internal/backport/go/printer/testdata/declarations.input b/internal/backport/go/printer/testdata/declarations.input
deleted file mode 100644
index f34395b..0000000
--- a/internal/backport/go/printer/testdata/declarations.input
+++ /dev/null
@@ -1,1021 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package imports
-
-import "io"
-
-import (
-	_ "io"
-)
-
-import _ "io"
-
-import (
-	"io"
-	"io"
-	"io"
-)
-
-import (
-	"io"
-	aLongRename "io"
-
-	b "io"
-)
-
-import (
-       "unrenamed"
-       renamed "renameMe"
-       . "io"
-       _ "io"
-       "io"
-       . "os"
-)
-
-// no newlines between consecutive single imports, but
-// respect extra line breaks in the source (at most one empty line)
-import _ "io"
-import _ "io"
-import _ "io"
-
-import _ "os"
-import _ "os"
-import _ "os"
-
-
-import _ "fmt"
-import _ "fmt"
-import _ "fmt"
-
-import "foo"  // a comment
-import "bar"  // a comment
-
-import (
-	_ "foo"
-	// a comment
-	"bar"
-	"foo"  // a comment
-	"bar"  // a comment
-)
-
-// comments + renames
-import (
-       "unrenamed" // a comment
-       renamed "renameMe"
-       . "io" /* a comment */
-       _ "io/ioutil" // a comment
-       "io" // testing alignment
-       . "os"
-       // a comment
-)
-
-// a case that caused problems in the past (comment placement)
-import (
-	. "fmt"
-	"io"
-	"malloc"	// for the malloc count test only
-	"math"
-	"strings"
-	"testing"
-)
-
-// more import examples
-import (
-	"xxx"
-	"much_longer_name" // comment
-	"short_name" // comment
-)
-
-import (
-	_ "xxx"
-	"much_longer_name" // comment
-)
-
-import (
-	mymath "math"
-	"/foo/bar/long_package_path" // a comment
-)
-
-import (
-	"package_a" // comment
-	"package_b"
-	my_better_c "package_c" // comment
-	"package_d" // comment
-	my_e "package_e" // comment
-
-	"package_a"    // comment
-	"package_bb"
-	"package_ccc"  // comment
-	"package_dddd" // comment
-)
-
-// print import paths as double-quoted strings
-// (we would like more test cases but the go/parser
-// already excludes most incorrect paths, and we don't
-// bother setting up test-ASTs manually)
-import (
-	`fmt`
-	"math"
-)
-
-// at least one empty line between declarations of different kind
-import _ "io"
-var _ int
-
-// at least one empty line between declarations of the same kind
-// if there is associated documentation (was issue 2570)
-type T1 struct{}
-// T2 comment
-type T2 struct {
-} // should be a two-line struct
-
-
-// T3 comment
-type T2 struct {
-
-
-} // should be a two-line struct
-
-
-// printing of constant literals
-const (
-	_ = "foobar"
-	_ = "a۰۱۸"
-	_ = "foo६४"
-	_ = "bar9876"
-	_ = 0
-	_ = 1
-	_ = 123456789012345678890
-	_ = 01234567
-	_ = 0xcafebabe
-	_ = 0.
-	_ = .0
-	_ = 3.14159265
-	_ = 1e0
-	_ = 1e+100
-	_ = 1e-100
-	_ = 2.71828e-1000
-	_ = 0i
-	_ = 1i
-	_ = 012345678901234567889i
-	_ = 123456789012345678890i
-	_ = 0.i
-	_ = .0i
-	_ = 3.14159265i
-	_ = 1e0i
-	_ = 1e+100i
-	_ = 1e-100i
-	_ = 2.71828e-1000i
-	_ = 'a'
-	_ = '\000'
-	_ = '\xFF'
-	_ = '\uff16'
-	_ = '\U0000ff16'
-	_ = `foobar`
-	_ = `foo
----
----
-bar`
-)
-
-
-func _() {
-	type _ int
-	type _ *int
-	type _ []int
-	type _ map[string]int
-	type _ chan int
-	type _ func() int
-
-	var _ int
-	var _ *int
-	var _ []int
-	var _ map[string]int
-	var _ chan int
-	var _ func() int
-
-	type _ struct{}
-	type _ *struct{}
-	type _ []struct{}
-	type _ map[string]struct{}
-	type _ chan struct{}
-	type _ func() struct{}
-
-	type _ interface{}
-	type _ *interface{}
-	type _ []interface{}
-	type _ map[string]interface{}
-	type _ chan interface{}
-	type _ func() interface{}
-
-	var _ struct{}
-	var _ *struct{}
-	var _ []struct{}
-	var _ map[string]struct{}
-	var _ chan struct{}
-	var _ func() struct{}
-
-	var _ interface{}
-	var _ *interface{}
-	var _ []interface{}
-	var _ map[string]interface{}
-	var _ chan interface{}
-	var _ func() interface{}
-}
-
-
-// don't lose blank lines in grouped declarations
-const (
-	_ int = 0
-	_ float = 1
-
-	_ string = "foo"
-
-	_ = iota
-	_
-	
-	// a comment
-	_
-
-	_
-)
-
-
-type (
-	_ int
-	_ struct {}
-	
-	_ interface{}
-	
-	// a comment
-	_ map[string]int
-)
-
-
-var (
-	_ int = 0
-	_ float = 1
-
-	_ string = "foo"
-
-	_ bool
-	
-	// a comment
-	_ bool
-)
-
-
-// don't lose blank lines in this struct
-type _ struct {
-	String struct {
-		Str, Len int
-	}
-	Slice struct {
-		Array, Len, Cap int
-	}
-	Eface struct {
-		Typ, Ptr int
-	}
-
-	UncommonType struct {
-		Name, PkgPath int
-	}
-	CommonType struct {
-		Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
-	}
-	Type struct {
-		Typ, Ptr int
-	}
-	StructField struct {
-		Name, PkgPath, Typ, Tag, Offset int
-	}
-	StructType struct {
-		Fields int
-	}
-	PtrType struct {
-		Elem int
-	}
-	SliceType struct {
-		Elem int
-	}
-	ArrayType struct {
-		Elem, Len int
-	}
-
-	Stktop struct {
-		Stackguard, Stackbase, Gobuf int
-	}
-	Gobuf struct {
-		Sp, Pc, G int
-	}
-	G struct {
-		Stackbase, Sched, Status, Alllink int
-	}
-}
-
-
-// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
-type _ struct{            }
-type _ struct {
-
-}
-
-type _ interface{            }
-type _ interface {
-
-}
-
-
-// no tabs for single or ungrouped decls
-func _() {
-	const xxxxxx = 0
-	type x int
-	var xxx int
-	var yyyy float = 3.14
-	var zzzzz = "bar"
-
-	const (
-		xxxxxx = 0
-	)
-	type (
-		x int
-	)
-	var (
-		xxx int
-	)
-	var (
-		yyyy float = 3.14
-	)
-	var (
-		zzzzz = "bar"
-	)
-}
-
-// tabs for multiple or grouped decls
-func _() {
-	// no entry has a type
-	const (
-		zzzzzz = 1
-		z = 2
-		zzz = 3
-	)
-	// some entries have a type
-	const (
-		xxxxxx = 1
-		x = 2
-		xxx = 3
-		yyyyyyyy float = iota
-		yyyy = "bar"
-		yyy
-		yy = 2
-	)
-}
-
-func _() {
-	// no entry has a type
-	var (
-		zzzzzz = 1
-		z = 2
-		zzz = 3
-	)
-	// no entry has a value
-	var (
-		_ int
-		_ float
-		_ string
-
-		_ int  // comment
-		_ float  // comment
-		_ string  // comment
-	)
-	// some entries have a type
-	var (
-		xxxxxx int
-		x float
-		xxx string
-		yyyyyyyy int = 1234
-		y float = 3.14
-		yyyy = "bar"
-		yyy string = "foo"
-	)
-	// mixed entries - all comments should be aligned
-	var (
-		a, b, c int
-		x = 10
-		d int  // comment
-		y = 20  // comment
-		f, ff, fff, ffff int = 0, 1, 2, 3  // comment
-	)
-	// respect original line breaks
-	var _ = []T {
-		T{0x20,	"Telugu"},
-	}
-	var _ = []T {
-		// respect original line breaks
-		T{0x20,	"Telugu"},
-	}
-}
-
-// use the formatted output rather than the input to decide when to align
-// (was issue 4505)
-const (
-	short = 2 * (
-	1 + 2)
-	aMuchLongerName = 3
-)
-
-var (
-	short = X{
-	}
-	aMuchLongerName = X{}
-
-	x1 = X{} // foo
-	x2 = X{
-	} // foo
-)
-
-func _() {
-	type (
-		xxxxxx int
-		x float
-		xxx string
-		xxxxx []x
-		xx struct{}
-		xxxxxxx struct {
-			_, _ int
-			_ float
-		}
-		xxxx chan<- string
-	)
-}
-
-// alignment of "=" in consecutive lines (extended example from issue 1414)
-const (
-	umax uint                  = ^uint(0) // maximum value for a uint
-	bpu  = 1 << (5 + umax>>63)            // bits per uint
-	foo
-	bar  = -1
-)
-
-// typical enum
-const (
-	a MyType = iota
-	abcd
-	b
-	c
-	def
-)
-
-// excerpt from godoc.go
-var (
-	goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
-	testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
-	pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
-	filter = flag.String("filter", "", "filter file containing permitted package directory paths")
-	filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
-	filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
-)
-
-
-// formatting of structs
-type _ struct{}
-
-type _ struct{ /* this comment should be visible */ }
-
-type _ struct{
-	// this comment should be visible and properly indented
-}
-
-type _ struct {  // this comment must not change indentation
-	f int
-	f, ff, fff, ffff int
-}
-
-type _ struct {
-	string
-}
-
-type _ struct {
-	string  // comment
-}
-
-type _ struct {
-	string "tag"
-}
-
-type _ struct {
-	string "tag"  // comment
-}
-
-type _ struct {
-	f int
-}
-
-type _ struct {
-	f int  // comment
-}
-
-type _ struct {
-	f int "tag"
-}
-
-type _ struct {
-	f int "tag"  // comment
-}
-
-type _ struct {
-	bool
-	a, b, c int
-	int "tag"
-	ES // comment
-	float "tag"  // comment
-	f int  // comment
-	f, ff, fff, ffff int  // comment
-	g float "tag"
-	h float "tag"  // comment
-}
-
-type _ struct { a, b,
-c, d int  // this line should be indented
-u, v, w, x float // this line should be indented
-p, q,
-r, s float // this line should be indented
-}
-
-
-// difficult cases
-type _ struct {
-	bool  // comment
-	text []byte  // comment
-}
-
-
-// formatting of interfaces
-type EI interface{}
-
-type _ interface {
-	EI
-}
-
-type _ interface {
-	f()
-	fffff()
-}
-
-type _ interface {
-	EI
-	f()
-	fffffg()
-}
-
-type _ interface {  // this comment must not change indentation
-	EI  // here's a comment
-	f()  // no blank between identifier and ()
-	fffff()  // no blank between identifier and ()
-	gggggggggggg(x, y, z int) ()  // hurray
-}
-
-
-// formatting of variable declarations
-func _() {
-	type day struct { n int; short, long string }
-	var (
-		Sunday = day{ 0, "SUN", "Sunday" }
-		Monday = day{ 1, "MON", "Monday" }
-		Tuesday = day{ 2, "TUE", "Tuesday" }
-		Wednesday = day{ 3, "WED", "Wednesday" }
-		Thursday = day{ 4, "THU", "Thursday" }
-		Friday = day{ 5, "FRI", "Friday" }
-		Saturday = day{ 6, "SAT", "Saturday" }
-	)
-}
-
-
-// formatting of multi-line variable declarations
-var a1, b1, c1 int  // all on one line
-
-var a2, b2,
-c2 int  // this line should be indented
-
-var (a3, b3,
-c3, d3 int  // this line should be indented
-a4, b4, c4 int  // this line should be indented
-)
-
-// Test case from issue 3304: multi-line declarations must end
-// a formatting section and not influence indentation of the
-// next line.
-var (
-	minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
-		"minimum time window between two refreshes for a given user.")
-	x = flag.Int64("refresh_user_rollout_percent", 100,
-		"temporary flag to ramp up the refresh user rpc")
-	aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
-)
-
-func _() {
-	var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
-					Headers: map[string]string{},
-					Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
-			0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
-			0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
-			0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
-		},
-	}
-}
-
-
-func _() {
-	var Universe = Scope {
-		Names: map[string]*Ident {
-			// basic types
-			"bool": nil,
-			"byte": nil,
-			"int8": nil,
-			"int16": nil,
-			"int32": nil,
-			"int64": nil,
-			"uint8": nil,
-			"uint16": nil,
-			"uint32": nil,
-			"uint64": nil,
-			"float32": nil,
-			"float64": nil,
-			"string": nil,
-
-			// convenience types
-			"int": nil,
-			"uint": nil,
-			"uintptr": nil,
-			"float": nil,
-
-			// constants
-			"false": nil,
-			"true": nil,
-			"iota": nil,
-			"nil": nil,
-
-			// functions
-			"cap": nil,
-			"len": nil,
-			"new": nil,
-			"make": nil,
-			"panic": nil,
-			"panicln": nil,
-			"print": nil,
-			"println": nil,
-		},
-	}
-}
-
-
-// alignment of map composite entries
-var _ = map[int]int{
-	// small key sizes: always align even if size ratios are large
-	a: a,
-	abcdefghabcdefgh: a,
-	ab: a,
-	abc: a,
-	abcdefgabcdefg: a,
-	abcd: a,
-	abcde: a,
-	abcdef: a,
-
-	// mixed key sizes: align when key sizes change within accepted ratio
-	abcdefgh: a,
-	abcdefghabcdefg: a,
-	abcdefghij: a,
-	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
-	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
-
-	ab: a, // do not align with previous line
-	abcde: a, // align with previous line
-}
-
-// alignment of map composite entries: test cases from issue 3965
-// aligned
-var _ = T1{
-	a:                    x,
-	b:                    y,
-	cccccccccccccccccccc: z,
-}
-
-// not aligned
-var _ = T2{
-	a: x,
-	b: y,
-	ccccccccccccccccccccc: z,
-}
-
-// aligned
-var _ = T3{
-	aaaaaaaaaaaaaaaaaaaa: x,
-	b:                    y,
-	c:                    z,
-}
-
-// not aligned
-var _ = T4{
-	aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
-	b:                                       y,
-	c:                                       z,
-}
-
-
-// no alignment of map composite entries if they are not the first entry on a line
-var _ = T{0: 0} // not aligned
-var _ = T{0: 0, // not aligned
-	1: 1, // aligned
-	22: 22, // aligned
-	333: 333, 1234: 12, 12345: 0, // first on line aligned
-}
-
-
-// test cases form issue 8685
-// not aligned
-var _ = map[int]string{1: "spring", 2: "summer",
-					3:             "autumn", 4: "winter"}
-
-// not aligned
-var _ = map[string]string{"a": "spring", "b": "summer",
-	"c": "autumn", "d": "winter"}
-
-// aligned
-var _ = map[string]string{"a": "spring",
-"b": "summer",
-	"c": "autumn",
-"d": "winter"}
-
-
-func _() {
-	var _ = T{
-		a,	// must introduce trailing comma
-	}
-}
-
-
-// formatting of function results
-func _() func() {}
-func _() func(int) { return nil }
-func _() func(int) int { return nil }
-func _() func(int) func(int) func() { return nil }
-
-
-// formatting of consecutive single-line functions
-func _() {}
-func _() {}
-func _() {}
-
-func _() {}  // an empty line before this function
-func _() {}
-func _() {}
-
-func _() { f(1, 2, 3) }
-func _(x int) int { y := x; return y+1 }
-func _() int { type T struct{}; var x T; return x }
-
-// these must remain multi-line since they are multi-line in the source
-func _() {
-	f(1, 2, 3)
-}
-func _(x int) int {
-	y := x; return y+1
-}
-func _() int {
-	type T struct{}; var x T; return x
-}
-
-
-// making function declarations safe for new semicolon rules
-func _() { /* single-line function because of "short-ish" comment */ }
-func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */ }
-
-func _() {
-/* multi-line func because block is on multiple lines */ }
-
-// test case for issue #19544
-func _() {}
-func _longer_name_() { // this comment must not force the {} from above to alignment
-	// multiple lines
-}
-
-// ellipsis parameters
-func _(...int)
-func _(...*int)
-func _(...[]int)
-func _(...struct{})
-func _(bool, ...interface{})
-func _(bool, ...func())
-func _(bool, ...func(...int))
-func _(bool, ...map[string]int)
-func _(bool, ...chan int)
-
-func _(b bool, x ...int)
-func _(b bool, x ...*int)
-func _(b bool, x ...[]int)
-func _(b bool, x ...struct{})
-func _(x ...interface{})
-func _(x ...func())
-func _(x ...func(...int))
-func _(x ...map[string]int)
-func _(x ...chan int)
-
-
-// these parameter lists must remain multi-line since they are multi-line in the source
-func _(bool,
-int) {
-}
-func _(x bool,
-y int) {
-}
-func _(x,
-y bool) {
-}
-func _(bool, // comment
-int) {
-}
-func _(x bool, // comment
-y int) {
-}
-func _(x, // comment
-y bool) {
-}
-func _(bool, // comment
-// comment
-int) {
-}
-func _(x bool, // comment
-// comment
-y int) {
-}
-func _(x, // comment
-// comment
-y bool) {
-}
-func _(bool,
-// comment
-int) {
-}
-func _(x bool,
-// comment
-y int) {
-}
-func _(x,
-// comment
-y bool) {
-}
-func _(x, // comment
-y,// comment
-z bool) {
-}
-func _(x, // comment
-	y,// comment
-	z bool) {
-}
-func _(x int,	// comment
-	y float,	// comment
-	z bool) {
-}
-
-
-// properly indent multi-line signatures
-func ManageStatus(in <-chan *Status, req <-chan Request,
-stat chan<- *TargetInfo,
-TargetHistorySize int) {
-}
-
-func MultiLineSignature0(
-a, b, c int,
-) {}
-
-func MultiLineSignature1(
-a, b, c int,
-u, v, w float,
-) {}
-
-func MultiLineSignature2(
-a, b,
-c int,
-) {}
-
-func MultiLineSignature3(
-a, b,
-c int, u, v,
-w float,
-		x ...int) {}
-
-func MultiLineSignature4(
-a, b, c int,
-u, v,
-w float,
-		x ...int) {}
-
-func MultiLineSignature5(
-a, b, c int,
-u, v, w float,
-p, q,
-r string,
-		x ...int) {}
-
-// make sure it also works for methods in interfaces
-type _ interface {
-MultiLineSignature0(
-a, b, c int,
-)
-
-MultiLineSignature1(
-a, b, c int,
-u, v, w float,
-)
-
-MultiLineSignature2(
-a, b,
-c int,
-)
-
-MultiLineSignature3(
-a, b,
-c int, u, v,
-w float,
-		x ...int)
-
-MultiLineSignature4(
-a, b, c int,
-u, v,
-w float,
-		x ...int)
-
-MultiLineSignature5(
-a, b, c int,
-u, v, w float,
-p, q,
-r string,
-		x ...int)
-}
-
-// omit superfluous parentheses in parameter lists
-func _((int))
-func _((((((int))))))
-func _(x (int))
-func _(x (((((int))))))
-func _(x, y (int))
-func _(x, y (((((int))))))
-
-func _() (int)
-func _() ((int))
-func _() ((((((int))))))
-
-func _() (x int)
-func _() (x (int))
-func _() (x (((((int))))))
-
-// special cases: some channel types require parentheses
-func _(x chan(<-chan int))
-func _(x (chan(<-chan int)))
-func _(x ((((chan(<-chan int))))))
-
-func _(x chan<-(chan int))
-func _(x (chan<-(chan int)))
-func _(x ((((chan<-(chan int))))))
-
-// don't introduce comma after last parameter if the closing ) is on the same line
-// even if the parameter type itself is multi-line (test cases from issue 4533)
-func _(...interface{})
-func _(...interface {
-	m()
-	n()
-}) // no extra comma between } and )
-
-func (t *T) _(...interface{})
-func (t *T) _(...interface {
-	m()
-	n()
-}) // no extra comma between } and )
-
-func _(interface{})
-func _(interface {
-	m()
-}) // no extra comma between } and )
-
-func _(struct{})
-func _(struct {
-	x int
-	y int
-}) // no extra comma between } and )
-
-// alias declarations
-
-type c0 struct{}
-type c1 = C
-type c2 = struct{ x int}
-type c3 = p.C
-type (
-	s struct{}
-	a = A
-	b = A
-	c = foo
-	d = interface{}
-	ddd = p.Foo
-)
diff --git a/internal/backport/go/printer/testdata/doc.golden b/internal/backport/go/printer/testdata/doc.golden
deleted file mode 100644
index 7ac241a..0000000
--- a/internal/backport/go/printer/testdata/doc.golden
+++ /dev/null
@@ -1,21 +0,0 @@
-package p
-
-/*
-Doc comment.
-
-  - List1.
-
-  - List2.
-*/
-var X int
-
-/* erroneous doc comment */
-var Y int
-
-/*
- * Another erroneous
- * doc comment.
- */
-var Z int
-
-
diff --git a/internal/backport/go/printer/testdata/doc.input b/internal/backport/go/printer/testdata/doc.input
deleted file mode 100644
index 5c057ed..0000000
--- a/internal/backport/go/printer/testdata/doc.input
+++ /dev/null
@@ -1,20 +0,0 @@
-package p
-
-/*
-Doc comment.
-  - List1.
-
-  - List2.
-*/
-var X int
-
-/* erroneous doc comment */
-var Y int
-
-/*
- * Another erroneous
- * doc comment.
- */
-var Z int
-
-
diff --git a/internal/backport/go/printer/testdata/empty.golden b/internal/backport/go/printer/testdata/empty.golden
deleted file mode 100644
index a055f47..0000000
--- a/internal/backport/go/printer/testdata/empty.golden
+++ /dev/null
@@ -1,5 +0,0 @@
-// a comment at the beginning of the file
-
-package empty
-
-// a comment at the end of the file
diff --git a/internal/backport/go/printer/testdata/empty.input b/internal/backport/go/printer/testdata/empty.input
deleted file mode 100644
index a055f47..0000000
--- a/internal/backport/go/printer/testdata/empty.input
+++ /dev/null
@@ -1,5 +0,0 @@
-// a comment at the beginning of the file
-
-package empty
-
-// a comment at the end of the file
diff --git a/internal/backport/go/printer/testdata/expressions.golden b/internal/backport/go/printer/testdata/expressions.golden
deleted file mode 100644
index 16a68c7..0000000
--- a/internal/backport/go/printer/testdata/expressions.golden
+++ /dev/null
@@ -1,743 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package expressions
-
-type T struct {
-	x, y, z int
-}
-
-var (
-	a, b, c, d, e						int
-	under_bar						int
-	longIdentifier1, longIdentifier2, longIdentifier3	int
-	t0, t1, t2						T
-	s							string
-	p							*int
-)
-
-func _() {
-	// no spaces around simple or parenthesized expressions
-	_ = (a + 0)
-	_ = a + b
-	_ = a + b + c
-	_ = a + b - c
-	_ = a - b - c
-	_ = a + (b * c)
-	_ = a + (b / c)
-	_ = a - (b % c)
-	_ = 1 + a
-	_ = a + 1
-	_ = a + b + 1
-	_ = s[a]
-	_ = s[a:]
-	_ = s[:b]
-	_ = s[1:2]
-	_ = s[a:b]
-	_ = s[0:len(s)]
-	_ = s[0] << 1
-	_ = (s[0] << 1) & 0xf
-	_ = s[0]<<2 | s[1]>>4
-	_ = "foo" + s
-	_ = s + "foo"
-	_ = 'a' + 'b'
-	_ = len(s) / 2
-	_ = len(t0.x) / a
-
-	// spaces around expressions of different precedence or expressions containing spaces
-	_ = a + -b
-	_ = a - ^b
-	_ = a / *p
-	_ = a + b*c
-	_ = 1 + b*c
-	_ = a + 2*c
-	_ = a + c*2
-	_ = 1 + 2*3
-	_ = s[1 : 2*3]
-	_ = s[a : b-c]
-	_ = s[0:]
-	_ = s[a+b]
-	_ = s[:b-c]
-	_ = s[a+b:]
-	_ = a[a<<b+1]
-	_ = a[a<<b+1:]
-	_ = s[a+b : len(s)]
-	_ = s[len(s):-a]
-	_ = s[a : len(s)+1]
-	_ = s[a:len(s)+1] + s
-
-	// spaces around operators with equal or lower precedence than comparisons
-	_ = a == b
-	_ = a != b
-	_ = a > b
-	_ = a >= b
-	_ = a < b
-	_ = a <= b
-	_ = a < b && c > d
-	_ = a < b || c > d
-
-	// spaces around "long" operands
-	_ = a + longIdentifier1
-	_ = longIdentifier1 + a
-	_ = longIdentifier1 + longIdentifier2*longIdentifier3
-	_ = s + "a longer string"
-
-	// some selected cases
-	_ = a + t0.x
-	_ = a + t0.x + t1.x*t2.x
-	_ = a + b + c + d + e + 2*3
-	_ = a + b + c + 2*3 + d + e
-	_ = (a + b + c) * 2
-	_ = a - b + c - d + (a + b + c) + d&e
-	_ = under_bar - 1
-	_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
-	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-
-	// test case for issue 8021
-	// want:
-	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
-	_ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
-
-	// the parser does not restrict expressions that may appear as statements
-	true
-	42
-	"foo"
-	x
-	(x)
-	a + b
-	a + b + c
-	a + (b * c)
-	a + (b / c)
-	1 + a
-	a + 1
-	s[a]
-	x << 1
-	(s[0] << 1) & 0xf
-	"foo" + s
-	x == y
-	x < y || z > 42
-}
-
-// slice expressions with cap
-func _() {
-	_ = x[a:b:c]
-	_ = x[a : b : c+d]
-	_ = x[a : b+d : c]
-	_ = x[a : b+d : c+d]
-	_ = x[a+d : b : c]
-	_ = x[a+d : b : c+d]
-	_ = x[a+d : b+d : c]
-	_ = x[a+d : b+d : c+d]
-
-	_ = x[:b:c]
-	_ = x[: b : c+d]
-	_ = x[: b+d : c]
-	_ = x[: b+d : c+d]
-}
-
-func issue22111() {
-	_ = x[:]
-
-	_ = x[:b]
-	_ = x[:b+1]
-
-	_ = x[a:]
-	_ = x[a+1:]
-
-	_ = x[a:b]
-	_ = x[a+1 : b]
-	_ = x[a : b+1]
-	_ = x[a+1 : b+1]
-
-	_ = x[:b:c]
-	_ = x[: b+1 : c]
-	_ = x[: b : c+1]
-	_ = x[: b+1 : c+1]
-
-	_ = x[a:b:c]
-	_ = x[a+1 : b : c]
-	_ = x[a : b+1 : c]
-	_ = x[a+1 : b+1 : c]
-	_ = x[a : b : c+1]
-	_ = x[a+1 : b : c+1]
-	_ = x[a : b+1 : c+1]
-	_ = x[a+1 : b+1 : c+1]
-}
-
-func _() {
-	_ = a + b
-	_ = a + b + c
-	_ = a + b*c
-	_ = a + (b * c)
-	_ = (a + b) * c
-	_ = a + (b * c * d)
-	_ = a + (b*c + d)
-
-	_ = 1 << x
-	_ = -1 << x
-	_ = 1<<x - 1
-	_ = -1<<x - 1
-
-	_ = f(a + b)
-	_ = f(a + b + c)
-	_ = f(a + b*c)
-	_ = f(a + (b * c))
-	_ = f(1<<x-1, 1<<x-2)
-
-	_ = 1<<d.logWindowSize - 1
-
-	buf = make(x, 2*cap(b.buf)+n)
-
-	dst[i*3+2] = dbuf[0] << 2
-	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
-
-	b.buf = b.buf[0 : b.off+m+n]
-	b.buf = b.buf[0 : b.off+m*n]
-	f(b.buf[0 : b.off+m+n])
-
-	signed += ' ' * 8
-	tw.octal(header[148:155], chksum)
-
-	_ = x > 0 && i >= 0
-
-	x1, x0 := x>>w2, x&m2
-	z0 = t1<<w2 + t0
-	z1 = (t1 + t0>>w2) >> w2
-	q1, r1 := x1/d1, x1%d1
-	r1 = r1*b2 | x0>>w2
-	x1 = (x1 << z) | (x0 >> (uint(w) - z))
-	x1 = x1<<z | x0>>(uint(w)-z)
-
-	_ = buf[0 : len(buf)+1]
-	_ = buf[0 : n+1]
-
-	a, b = b, a
-	a = b + c
-	a = b*c + d
-	_ = a*b + c
-	_ = a - b - c
-	_ = a - (b - c)
-	_ = a - b*c
-	_ = a - (b * c)
-	_ = a * b / c
-	_ = a / *b
-	_ = x[a|^b]
-	_ = x[a / *b]
-	_ = a & ^b
-	_ = a + +b
-	_ = a - -b
-	_ = x[a*-b]
-	_ = x[a + +b]
-	_ = x ^ y ^ z
-	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
-	_ = len(longVariableName) * 2
-
-	_ = token(matchType + xlength<<lengthShift + xoffset)
-}
-
-func f(x int, args ...int) {
-	f(0, args...)
-	f(1, args)
-	f(2, args[0])
-
-	// make sure syntactically legal code remains syntactically legal
-	f(3, 42 ...)	// a blank must remain between 42 and ...
-	f(4, 42....)
-	f(5, 42....)
-	f(6, 42.0...)
-	f(7, 42.0...)
-	f(8, .42...)
-	f(9, .42...)
-	f(10, 42e0...)
-	f(11, 42e0...)
-
-	_ = 42 .x	// a blank must remain between 42 and .x
-	_ = 42..x
-	_ = 42..x
-	_ = 42.0.x
-	_ = 42.0.x
-	_ = .42.x
-	_ = .42.x
-	_ = 42e0.x
-	_ = 42e0.x
-
-	// a blank must remain between the binary operator and the 2nd operand
-	_ = x / *y
-	_ = x < -1
-	_ = x < <-1
-	_ = x + +1
-	_ = x - -1
-	_ = x & &x
-	_ = x & ^x
-
-	_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
-}
-
-func _() {
-	_ = T{}
-	_ = struct{}{}
-	_ = [10]T{}
-	_ = [...]T{}
-	_ = []T{}
-	_ = map[int]T{}
-}
-
-// one-line structs/interfaces in composite literals (up to a threshold)
-func _() {
-	_ = struct{}{}
-	_ = struct{ x int }{0}
-	_ = struct{ x, y, z int }{0, 1, 2}
-	_ = struct{ int }{0}
-	_ = struct{ s struct{ int } }{struct{ int }{0}}
-
-	_ = (interface{})(nil)
-	_ = (interface{ String() string })(nil)
-	_ = (interface {
-		String() string
-	})(nil)
-	_ = (interface{ fmt.Stringer })(nil)
-	_ = (interface {
-		fmt.Stringer
-	})(nil)
-}
-
-func _() {
-	// do not modify literals
-	_ = "tab1	tab2	tab3	end"	// string contains 3 tabs
-	_ = "tab1 tab2 tab3 end"	// same string with 3 blanks - may be unaligned because editors see tabs in strings
-	_ = ""				// this comment should be aligned with the one on the previous line
-	_ = ``
-	_ = `
-`
-	_ = `foo
-		bar`
-	_ = `three spaces before the end of the line starting here:   
-they must not be removed`
-}
-
-func _() {
-	// smart handling of indentation for multi-line raw strings
-	var _ = ``
-	var _ = `foo`
-	var _ = `foo
-bar`
-
-	var _ = ``
-	var _ = `foo`
-	var _ =
-	// the next line should remain indented
-	`foo
-bar`
-
-	var _ =	// comment
-	``
-	var _ =	// comment
-	`foo`
-	var _ =	// comment
-	// the next line should remain indented
-	`foo
-bar`
-
-	var _ = /* comment */ ``
-	var _ = /* comment */ `foo`
-	var _ = /* comment */ `foo
-bar`
-
-	var _ =	/* comment */
-	``
-	var _ =	/* comment */
-	`foo`
-	var _ =	/* comment */
-	// the next line should remain indented
-	`foo
-bar`
-
-	var board = []int(
-		`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`)
-
-	var state = S{
-		"foo",
-		// the next line should remain indented
-		`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`,
-		"bar",
-	}
-}
-
-func _() {
-	// one-line function literals (body is on a single line)
-	_ = func() {}
-	_ = func() int { return 0 }
-	_ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
-
-	// multi-line function literals (body is not on one line)
-	_ = func() {
-	}
-	_ = func() int {
-		return 0
-	}
-	_ = func(x, y int) bool {
-		m := (x + y) / 2
-		return x < y
-	}
-
-	f(func() {
-	})
-	f(func() int {
-		return 0
-	})
-	f(func(x, y int) bool {
-		m := (x + y) / 2
-		return x < y
-	})
-}
-
-func _() {
-	_ = [][]int{
-		[]int{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int{
-		{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int{
-		{1},
-		{1, 2},
-		{1, 2, 3},
-	}
-	_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
-}
-
-// various multi-line expressions
-func _() {
-	// do not add extra indentation to multi-line string lists
-	_ = "foo" + "bar"
-	_ = "foo" +
-		"bar" +
-		"bah"
-	_ = []string{
-		"abc" +
-			"def",
-		"foo" +
-			"bar",
-	}
-}
-
-const _ = F1 +
-	`string = "%s";` +
-	`ptr = *;` +
-	`datafmt.T2 = s ["-" p "-"];`
-
-const _ = `datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-
-const _ = `datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-
-func _() {
-	_ = F1 +
-		`string = "%s";` +
-		`ptr = *;` +
-		`datafmt.T2 = s ["-" p "-"];`
-
-	_ =
-		`datafmt "datafmt";` +
-			`default = "%v";` +
-			`array = *;` +
-			`datafmt.T3 = s  {" " a a / ","};`
-
-	_ = `datafmt "datafmt";` +
-		`default = "%v";` +
-		`array = *;` +
-		`datafmt.T3 = s  {" " a a / ","};`
-}
-
-func _() {
-	// respect source lines in multi-line expressions
-	_ = a +
-		b +
-		c
-	_ = a < b ||
-		b < a
-	_ = "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000"	// 100!
-	_ = "170141183460469231731687303715884105727"	// prime
-}
-
-// Alignment after overlong lines
-const (
-	_	= "991"
-	_	= "2432902008176640000"	// 20!
-	_	= "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000"	// 100!
-	_	= "170141183460469231731687303715884105727"	// prime
-)
-
-// Correct placement of operators and comments in multi-line expressions
-func _() {
-	_ = a +	// comment
-		b +	// comment
-		c
-	_ = "a" +
-		"b" +	// comment
-		"c"
-	_ = "ba0408" + "7265717569726564"	// field 71, encoding 2, string "required"
-}
-
-// Correct placement of terminating comma/closing parentheses in multi-line calls.
-func _() {
-	f(1,
-		2,
-		3)
-	f(1,
-		2,
-		3,
-	)
-	f(1,
-		2,
-		3)	// comment
-	f(1,
-		2,
-		3,	// comment
-	)
-	f(1,
-		2,
-		3)	// comment
-	f(1,
-		2,
-		3,	// comment
-	)
-}
-
-// Align comments in multi-line lists of single-line expressions.
-var txpix = [NCOL]draw.Color{
-	draw.Yellow,		// yellow
-	draw.Cyan,		// cyan
-	draw.Green,		// lime green
-	draw.GreyBlue,		// slate
-	draw.Red,		/* red */
-	draw.GreyGreen,		/* olive green */
-	draw.Blue,		/* blue */
-	draw.Color(0xFF55AAFF),	/* pink */
-	draw.Color(0xFFAAFFFF),	/* lavender */
-	draw.Color(0xBB005DFF),	/* maroon */
-}
-
-func same(t, u *Time) bool {
-	// respect source lines in multi-line expressions
-	return t.Year == u.Year &&
-		t.Month == u.Month &&
-		t.Day == u.Day &&
-		t.Hour == u.Hour &&
-		t.Minute == u.Minute &&
-		t.Second == u.Second &&
-		t.Weekday == u.Weekday &&
-		t.ZoneOffset == u.ZoneOffset &&
-		t.Zone == u.Zone
-}
-
-func (p *parser) charClass() {
-	// respect source lines in multi-line expressions
-	if cc.negate && len(cc.ranges) == 2 &&
-		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
-		nl := new(_NotNl)
-		p.re.add(nl)
-	}
-}
-
-func addState(s []state, inst instr, match []int) {
-	// handle comments correctly in multi-line expressions
-	for i := 0; i < l; i++ {
-		if s[i].inst.index() == index &&	// same instruction
-			s[i].match[0] < pos {	// earlier match already going; leftmost wins
-			return s
-		}
-	}
-}
-
-func (self *T) foo(x int) *T	{ return self }
-
-func _()	{ module.Func1().Func2() }
-
-func _() {
-	_ = new(T).
-		foo(1).
-		foo(2).
-		foo(3)
-
-	_ = new(T).
-		foo(1).
-		foo(2).	// inline comments
-		foo(3)
-
-	_ = new(T).foo(1).foo(2).foo(3)
-
-	// handle multiline argument list correctly
-	_ = new(T).
-		foo(
-			1).
-		foo(2)
-
-	_ = new(T).foo(
-		1).foo(2)
-
-	_ = Array[3+
-		4]
-
-	_ = Method(1, 2,
-		3)
-
-	_ = new(T).
-		foo().
-		bar().(*Type)
-
-	_ = new(T).
-		foo().
-		bar().(*Type).
-		baz()
-
-	_ = new(T).
-		foo().
-		bar()["idx"]
-
-	_ = new(T).
-		foo().
-		bar()["idx"].
-		baz()
-
-	_ = new(T).
-		foo().
-		bar()[1:2]
-
-	_ = new(T).
-		foo().
-		bar()[1:2].
-		baz()
-
-	_ = new(T).
-		Field.
-		Array[3+
-		4].
-		Table["foo"].
-		Blob.(*Type).
-		Slices[1:4].
-		Method(1, 2,
-			3).
-		Thingy
-
-	_ = a.b.c
-	_ = a.
-		b.
-		c
-	_ = a.b().c
-	_ = a.
-		b().
-		c
-	_ = a.b[0].c
-	_ = a.
-		b[0].
-		c
-	_ = a.b[0:].c
-	_ = a.
-		b[0:].
-		c
-	_ = a.b.(T).c
-	_ = a.
-		b.(T).
-		c
-}
-
-// Don't introduce extra newlines in strangely formatted expression lists.
-func f() {
-	// os.Open parameters should remain on two lines
-	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
-		os.O_TRUNC, 0666); err != nil {
-		log.Fatal(err)
-	}
-}
-
-// Handle multi-line argument lists ending in ... correctly.
-// Was issue 3130.
-func _() {
-	_ = append(s, a...)
-	_ = append(
-		s, a...)
-	_ = append(s,
-		a...)
-	_ = append(
-		s,
-		a...)
-	_ = append(s, a...,
-	)
-	_ = append(s,
-		a...,
-	)
-	_ = append(
-		s,
-		a...,
-	)
-}
-
-// Literal function types in conversions must be parenthesized;
-// for now go/parser accepts the unparenthesized form where it
-// is non-ambiguous.
-func _() {
-	// these conversions should be rewritten to look
-	// the same as the parenthesized conversions below
-	_ = (func())(nil)
-	_ = (func(x int) float)(nil)
-	_ = (func() func() func())(nil)
-
-	_ = (func())(nil)
-	_ = (func(x int) float)(nil)
-	_ = (func() func() func())(nil)
-}
-
-func _() {
-	_ = f().
-		f(func() {
-			f()
-		}).
-		f(map[int]int{
-			1:	2,
-			3:	4,
-		})
-
-	_ = f().
-		f(
-			func() {
-				f()
-			},
-		)
-}
diff --git a/internal/backport/go/printer/testdata/expressions.input b/internal/backport/go/printer/testdata/expressions.input
deleted file mode 100644
index 8c523b6..0000000
--- a/internal/backport/go/printer/testdata/expressions.input
+++ /dev/null
@@ -1,771 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package expressions
-
-type T struct {
-	x, y, z int
-}
-
-var (
-	a, b, c, d, e int
-	under_bar int
-	longIdentifier1, longIdentifier2, longIdentifier3 int
-	t0, t1, t2 T
-	s string
-	p *int
-)
-
-
-func _() {
-	// no spaces around simple or parenthesized expressions
-	_ = (a+0)
-	_ = a+b
-	_ = a+b+c
-	_ = a+b-c
-	_ = a-b-c
-	_ = a+(b*c)
-	_ = a+(b/c)
-	_ = a-(b%c)
-	_ = 1+a
-	_ = a+1
-	_ = a+b+1
-	_ = s[a]
-	_ = s[a:]
-	_ = s[:b]
-	_ = s[1:2]
-	_ = s[a:b]
-	_ = s[0:len(s)]
-	_ = s[0]<<1
-	_ = (s[0]<<1)&0xf
-	_ = s[0] << 2 | s[1] >> 4
-	_ = "foo"+s
-	_ = s+"foo"
-	_ = 'a'+'b'
-	_ = len(s)/2
-	_ = len(t0.x)/a
-
-	// spaces around expressions of different precedence or expressions containing spaces
-	_ = a + -b
-	_ = a - ^b
-	_ = a / *p
-	_ = a + b*c
-	_ = 1 + b*c
-	_ = a + 2*c
-	_ = a + c*2
-	_ = 1 + 2*3
-	_ = s[1 : 2*3]
-	_ = s[a : b-c]
-	_ = s[0:]
-	_ = s[a+b]
-	_ = s[: b-c]
-	_ = s[a+b :]
-	_ = a[a<<b+1]
-	_ = a[a<<b+1 :]
-	_ = s[a+b : len(s)]
-	_ = s[len(s) : -a]
-	_ = s[a : len(s)+1]
-	_ = s[a : len(s)+1]+s
-
-	// spaces around operators with equal or lower precedence than comparisons
-	_ = a == b
-	_ = a != b
-	_ = a > b
-	_ = a >= b
-	_ = a < b
-	_ = a <= b
-	_ = a < b && c > d
-	_ = a < b || c > d
-
-	// spaces around "long" operands
-	_ = a + longIdentifier1
-	_ = longIdentifier1 + a
-	_ = longIdentifier1 + longIdentifier2 * longIdentifier3
-	_ = s + "a longer string"
-
-	// some selected cases
-	_ = a + t0.x
-	_ = a + t0.x + t1.x * t2.x
-	_ = a + b + c + d + e + 2*3
-	_ = a + b + c + 2*3 + d + e
-	_ = (a+b+c)*2
-	_ = a - b + c - d + (a+b+c) + d&e
-	_ = under_bar-1
-	_ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666)
-	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-
-	// test case for issue 8021
-	// want:
-	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
-	_ = ([]bool{})[([]int{})[((1) + (((((1) + (((((((1) * (((((1) + (1))) + (1))))) + (1))) * (1))))) + (1))))]]
-
-	// the parser does not restrict expressions that may appear as statements
-	true
-	42
-	"foo"
-	x
-	(x)
-	a+b
-	a+b+c
-	a+(b*c)
-	a+(b/c)
-	1+a
-	a+1
-	s[a]
-	x<<1
-	(s[0]<<1)&0xf
-	"foo"+s
-	x == y
-	x < y || z > 42
-}
-
-
-// slice expressions with cap
-func _() {
-	_ = x[a:b:c]
-	_ = x[a:b:c+d]
-	_ = x[a:b+d:c]
-	_ = x[a:b+d:c+d]
-	_ = x[a+d:b:c]
-	_ = x[a+d:b:c+d]
-	_ = x[a+d:b+d:c]
-	_ = x[a+d:b+d:c+d]
-
-	_ = x[:b:c]
-	_ = x[:b:c+d]
-	_ = x[:b+d:c]
-	_ = x[:b+d:c+d]
-}
-
-func issue22111() {
-	_ = x[:]
-
-	_ = x[:b]
-	_ = x[:b+1]
-
-	_ = x[a:]
-	_ = x[a+1:]
-
-	_ = x[a:b]
-	_ = x[a+1:b]
-	_ = x[a:b+1]
-	_ = x[a+1:b+1]
-
-	_ = x[:b:c]
-	_ = x[:b+1:c]
-	_ = x[:b:c+1]
-	_ = x[:b+1:c+1]
-
-	_ = x[a:b:c]
-	_ = x[a+1:b:c]
-	_ = x[a:b+1:c]
-	_ = x[a+1:b+1:c]
-	_ = x[a:b:c+1]
-	_ = x[a+1:b:c+1]
-	_ = x[a:b+1:c+1]
-	_ = x[a+1:b+1:c+1]
-}
-
-func _() {
-	_ = a+b
-	_ = a+b+c
-	_ = a+b*c
-	_ = a+(b*c)
-	_ = (a+b)*c
-	_ = a+(b*c*d)
-	_ = a+(b*c+d)
-
-	_ = 1<<x
-	_ = -1<<x
-	_ = 1<<x-1
-	_ = -1<<x-1
-
-	_ = f(a+b)
-	_ = f(a+b+c)
-	_ = f(a+b*c)
-	_ = f(a+(b*c))
-	_ = f(1<<x-1, 1<<x-2)
-
-	_ = 1<<d.logWindowSize-1
-
-	buf = make(x, 2*cap(b.buf) + n)
-
-	dst[i*3+2] = dbuf[0]<<2
-	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
-
-	b.buf = b.buf[0:b.off+m+n]
-	b.buf = b.buf[0:b.off+m*n]
-	f(b.buf[0:b.off+m+n])
-
-	signed += ' '*8
-	tw.octal(header[148:155], chksum)
-
-	_ = x > 0 && i >= 0
-
-	x1, x0 := x>>w2, x&m2
-	z0 = t1<<w2+t0
-	z1 = (t1+t0>>w2)>>w2
-	q1, r1 := x1/d1, x1%d1
-	r1 = r1*b2 | x0>>w2
-	x1 = (x1<<z)|(x0>>(uint(w)-z))
-	x1 = x1<<z | x0>>(uint(w)-z)
-
-	_ = buf[0:len(buf)+1]
-	_ = buf[0:n+1]
-
-	a,b = b,a
-	a = b+c
-	a = b*c+d
-	_ = a*b+c
-	_ = a-b-c
-	_ = a-(b-c)
-	_ = a-b*c
-	_ = a-(b*c)
-	_ = a*b/c
-	_ = a/ *b
-	_ = x[a|^b]
-	_ = x[a/ *b]
-	_ = a& ^b
-	_ = a+ +b
-	_ = a- -b
-	_ = x[a*-b]
-	_ = x[a+ +b]
-	_ = x^y^z
-	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
-	_ = len(longVariableName)*2
-
-	_ = token(matchType + xlength<<lengthShift + xoffset)
-}
-
-
-func f(x int, args ...int) {
-	f(0, args...)
-	f(1, args)
-	f(2, args[0])
-
-	// make sure syntactically legal code remains syntactically legal
-	f(3, 42 ...) // a blank must remain between 42 and ...
-	f(4, 42. ...)
-	f(5, 42....)
-	f(6, 42.0 ...)
-	f(7, 42.0...)
-	f(8, .42 ...)
-	f(9, .42...)
-	f(10, 42e0 ...)
-	f(11, 42e0...)
-
-	_ = 42 .x // a blank must remain between 42 and .x
-	_ = 42. .x
-	_ = 42..x
-	_ = 42.0 .x
-	_ = 42.0.x
-	_ = .42 .x
-	_ = .42.x
-	_ = 42e0 .x
-	_ = 42e0.x
-
-	// a blank must remain between the binary operator and the 2nd operand
-	_ = x/ *y
-	_ = x< -1
-	_ = x< <-1
-	_ = x+ +1
-	_ = x- -1
-	_ = x& &x
-	_ = x& ^x
-
-	_ = f(x/ *y, x< -1, x< <-1, x+ +1, x- -1, x& &x, x& ^x)
-}
-
-
-func _() {
-	_ = T{}
-	_ = struct{}{}
-	_ = [10]T{}
-	_ = [...]T{}
-	_ = []T{}
-	_ = map[int]T{}
-}
-
-
-// one-line structs/interfaces in composite literals (up to a threshold)
-func _() {
-	_ = struct{}{}
-	_ = struct{ x int }{0}
-	_ = struct{ x, y, z int }{0, 1, 2}
-	_ = struct{ int }{0}
-	_ = struct{ s struct { int } }{struct{ int}{0} }
-
-	_ = (interface{})(nil)
-	_ = (interface{String() string})(nil)
-	_ = (interface{
-		String()    string
-	})(nil)
-	_ = (interface{fmt.Stringer})(nil)
-	_ = (interface{
-		    fmt.Stringer
-	})(nil)
-}
-
-func _() {
-	// do not modify literals
-	_ = "tab1	tab2	tab3	end"  // string contains 3 tabs
-	_ = "tab1 tab2 tab3 end"  // same string with 3 blanks - may be unaligned because editors see tabs in strings
-	_ = ""  // this comment should be aligned with the one on the previous line
-	_ = ``
-	_ = `
-`
-_ = `foo
-		bar`
-	_ = `three spaces before the end of the line starting here:   
-they must not be removed`
-}
-
-
-func _() {
-	// smart handling of indentation for multi-line raw strings
-	var _ = ``
-	var _ = `foo`
-	var _ = `foo
-bar`
-
-
-var _ =
-	``
-var _ =
-	`foo`
-var _ =
-	// the next line should remain indented
-	`foo
-bar`
-
-
-	var _ = // comment
-		``
-	var _ = // comment
-		`foo`
-	var _ = // comment
-		// the next line should remain indented
-		`foo
-bar`
-
-
-var _ = /* comment */ ``
-var _ = /* comment */ `foo`
-var _ = /* comment */ `foo
-bar`
-
-
-	var _ = /* comment */
-		``
-	var _ = /* comment */
-		`foo`
-	var _ = /* comment */
-		// the next line should remain indented
-		`foo
-bar`
-
-
-var board = []int(
-	`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`)
-
-
-	var state = S{
-		"foo",
-		// the next line should remain indented
-		`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`,
-		"bar",
-	}
-}
-
-
-func _() {
-	// one-line function literals (body is on a single line)
-	_ = func() {}
-	_ = func() int { return 0 }
-	_ = func(x, y int) bool { m := (x+y)/2; return m < 0 }
-
-	// multi-line function literals (body is not on one line)
-	_ = func() {
-	}
-	_ = func() int {
-		return 0
-	}
-	_ = func(x, y int) bool {
-		m := (x+y)/2; return x < y }
-
-	f(func() {
-	})
-	f(func() int {
-		return 0
-	})
-	f(func(x, y int) bool {
-		m := (x+y)/2; return x < y })
-}
-
-
-func _() {
-	_ = [][]int {
-		[]int{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int {
-		{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int {
-		{1},
-		{1, 2},
-		{1, 2, 3},
-	}
-	_ = [][]int {{1}, {1, 2}, {1, 2, 3}}
-}
-
-
-// various multi-line expressions
-func _() {
-	// do not add extra indentation to multi-line string lists
-	_ = "foo" + "bar"
-	_ = "foo" +
-	"bar" +
-	"bah"
-	_ = []string {
-		"abc" +
-		"def",
-		"foo" +
-		"bar",
-	}
-}
-
-
-const _ = F1 +
-	`string = "%s";` +
-	`ptr = *;` +
-	`datafmt.T2 = s ["-" p "-"];`
-
-
-const _ =
-	`datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-
-
-const _ = `datafmt "datafmt";` +
-`default = "%v";` +
-`array = *;` +
-`datafmt.T3 = s  {" " a a / ","};`
-
-
-func _() {
-	_ = F1 +
-		`string = "%s";` +
-		`ptr = *;` +
-		`datafmt.T2 = s ["-" p "-"];`
-
-	_ =
-		`datafmt "datafmt";` +
-		`default = "%v";` +
-		`array = *;` +
-		`datafmt.T3 = s  {" " a a / ","};`
-
-	_ = `datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-}
-
-
-func _() {
-	// respect source lines in multi-line expressions
-	_ = a+
-	b+
-	c
-	_ = a < b ||
-		b < a
-	_ = "933262154439441526816992388562667004907159682643816214685929" +
-	"638952175999932299156089414639761565182862536979208272237582" +
-	"51185210916864000000000000000000000000"  // 100!
-	_ = "170141183460469231731687303715884105727"  // prime
-}
-
-
-// Alignment after overlong lines
-const (
-	_ = "991"
-	_ = "2432902008176640000"  // 20!
-	_ = "933262154439441526816992388562667004907159682643816214685929" +
-	"638952175999932299156089414639761565182862536979208272237582" +
-	"51185210916864000000000000000000000000"  // 100!
-	_ = "170141183460469231731687303715884105727"  // prime
-)
-
-
-// Correct placement of operators and comments in multi-line expressions
-func _() {
-	_ = a +  // comment
-		b +  // comment
-		c
-	_ = "a"	+
-		"b" +	// comment
-		"c"
-	_ = "ba0408" + "7265717569726564"     // field 71, encoding 2, string "required"
-}
-
-
-// Correct placement of terminating comma/closing parentheses in multi-line calls.
-func _() {
-	f(1,
-		2,
-		3)
-	f(1,
-		2,
-		3,
-	)
-	f(1,
-		2,
-		3)  // comment
-	f(1,
-		2,
-		3,  // comment
-	)
-	f(1,
-		2,
-		3)// comment
-	f(1,
-		2,
-		3,// comment
-	)
-}
-
-
-// Align comments in multi-line lists of single-line expressions.
-var txpix = [NCOL]draw.Color{
-	draw.Yellow, // yellow
-	draw.Cyan, // cyan
-	draw.Green, // lime green
-	draw.GreyBlue, // slate
-	draw.Red, /* red */
-	draw.GreyGreen, /* olive green */
-	draw.Blue, /* blue */
-	draw.Color(0xFF55AAFF), /* pink */
-	draw.Color(0xFFAAFFFF), /* lavender */
-	draw.Color(0xBB005DFF), /* maroon */
-}
-
-
-func same(t, u *Time) bool {
-	// respect source lines in multi-line expressions
-	return t.Year == u.Year &&
-		t.Month == u.Month &&
-		t.Day == u.Day &&
-		t.Hour == u.Hour &&
-		t.Minute == u.Minute &&
-		t.Second == u.Second &&
-		t.Weekday == u.Weekday &&
-		t.ZoneOffset == u.ZoneOffset &&
-		t.Zone == u.Zone
-}
-
-
-func (p *parser) charClass() {
-	// respect source lines in multi-line expressions
-	if cc.negate && len(cc.ranges) == 2 &&
-		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
-		nl := new(_NotNl)
-		p.re.add(nl)
-	}
-}
-
-
-func addState(s []state, inst instr, match []int) {
-	// handle comments correctly in multi-line expressions
-	for i := 0; i < l; i++ {
-		if s[i].inst.index() == index && // same instruction
-		   s[i].match[0] < pos {	// earlier match already going; leftmost wins
-		   	return s
-		 }
-	}
-}
-
-func (self *T) foo(x int) *T { return self }
-
-func _() { module.Func1().Func2() }
-
-func _() {
-	_ = new(T).
-		foo(1).
-			foo(2).
-		foo(3)
-
-	_ = new(T).
-	foo(1).
-	foo(2). // inline comments
-	foo(3)
-
-	_ = new(T).foo(1).foo(2).foo(3)
-
-	// handle multiline argument list correctly
-	_ = new(T).
-	foo(
-		1).
-		foo(2)
-
-	_ = new(T).foo(
-		1).foo(2)
-
-	_ = Array[3 +
-4]
-
-	_ = Method(1, 2,
-		3)
-
-	_ = new(T).
-   foo().
-   bar() . (*Type)
-
-	_ = new(T).
-foo().
-bar().(*Type).
-baz()
-
-	_ = new(T).
-	foo().
-	bar()["idx"]
-
-	_ = new(T).
-	foo().
-	bar()["idx"]	.
-	baz()
-
-	_ = new(T).
-	foo().
-	bar()[1:2]
-
-	_ = new(T).
-	foo().
-	bar()[1:2].
-	baz()
-
-	_ = new(T).
-		Field.
-		Array[3+
-       		4].
-		Table ["foo"].
-		Blob. (*Type).
-	Slices[1:4].
-	Method(1, 2,
-	3).
-		Thingy
-
-	_ = a.b.c
-	_ = a.
-	b.
-	c
-	_ = a.b().c
-	_ = a.
-	b().
-	c
-	_ = a.b[0].c
-	_ = a.
-	b[0].
-	c
-	_ = a.b[0:].c
-	_ = a.
-	b[0:].
-	c
-	_ = a.b.(T).c
-	_ = a.
-	b.
-	(T).
-	c
-}
-
-
-// Don't introduce extra newlines in strangely formatted expression lists.
-func f() {
-	// os.Open parameters should remain on two lines
-	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
-		os.O_TRUNC, 0666); err != nil {
-	    log.Fatal(err)
-	}
-}
-
-// Handle multi-line argument lists ending in ... correctly.
-// Was issue 3130.
-func _() {
-	_ = append(s, a...)
-	_ = append(
-		s, a...)
-	_ = append(s,
-		a...)
-	_ = append(
-		s,
-		a...)
-	_ = append(s, a...,
-	)
-	_ = append(s,
-		a...,
-	)
-	_ = append(
-		s,
-		a...,
-	)
-}
-
-// Literal function types in conversions must be parenthesized;
-// for now go/parser accepts the unparenthesized form where it
-// is non-ambiguous.
-func _() {
-	// these conversions should be rewritten to look
-	// the same as the parenthesized conversions below
-	_ = func()()(nil)
-	_ = func(x int)(float)(nil)
-	_ = func() func() func()()(nil)
-
-	_ = (func()())(nil)
-	_ = (func(x int)(float))(nil)
-	_ = (func() func() func()())(nil)
-}
-
-func _() {
-	_ = f().
-	f(func() {
-		f()
-	}).
-	f(map[int]int{
-	1: 2,
-	3: 4,
-})
-
-	_ = f().
-	f(
-	func() {
-		f()
-	},
-	)
-}
diff --git a/internal/backport/go/printer/testdata/expressions.raw b/internal/backport/go/printer/testdata/expressions.raw
deleted file mode 100644
index 058fded..0000000
--- a/internal/backport/go/printer/testdata/expressions.raw
+++ /dev/null
@@ -1,743 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package expressions
-
-type T struct {
-	x, y, z int
-}
-
-var (
-	a, b, c, d, e	int
-	under_bar	int
-	longIdentifier1, longIdentifier2, longIdentifier3	int
-	t0, t1, t2	T
-	s	string
-	p	*int
-)
-
-func _() {
-	// no spaces around simple or parenthesized expressions
-	_ = (a + 0)
-	_ = a + b
-	_ = a + b + c
-	_ = a + b - c
-	_ = a - b - c
-	_ = a + (b * c)
-	_ = a + (b / c)
-	_ = a - (b % c)
-	_ = 1 + a
-	_ = a + 1
-	_ = a + b + 1
-	_ = s[a]
-	_ = s[a:]
-	_ = s[:b]
-	_ = s[1:2]
-	_ = s[a:b]
-	_ = s[0:len(s)]
-	_ = s[0] << 1
-	_ = (s[0] << 1) & 0xf
-	_ = s[0]<<2 | s[1]>>4
-	_ = "foo" + s
-	_ = s + "foo"
-	_ = 'a' + 'b'
-	_ = len(s) / 2
-	_ = len(t0.x) / a
-
-	// spaces around expressions of different precedence or expressions containing spaces
-	_ = a + -b
-	_ = a - ^b
-	_ = a / *p
-	_ = a + b*c
-	_ = 1 + b*c
-	_ = a + 2*c
-	_ = a + c*2
-	_ = 1 + 2*3
-	_ = s[1 : 2*3]
-	_ = s[a : b-c]
-	_ = s[0:]
-	_ = s[a+b]
-	_ = s[:b-c]
-	_ = s[a+b:]
-	_ = a[a<<b+1]
-	_ = a[a<<b+1:]
-	_ = s[a+b : len(s)]
-	_ = s[len(s):-a]
-	_ = s[a : len(s)+1]
-	_ = s[a:len(s)+1] + s
-
-	// spaces around operators with equal or lower precedence than comparisons
-	_ = a == b
-	_ = a != b
-	_ = a > b
-	_ = a >= b
-	_ = a < b
-	_ = a <= b
-	_ = a < b && c > d
-	_ = a < b || c > d
-
-	// spaces around "long" operands
-	_ = a + longIdentifier1
-	_ = longIdentifier1 + a
-	_ = longIdentifier1 + longIdentifier2*longIdentifier3
-	_ = s + "a longer string"
-
-	// some selected cases
-	_ = a + t0.x
-	_ = a + t0.x + t1.x*t2.x
-	_ = a + b + c + d + e + 2*3
-	_ = a + b + c + 2*3 + d + e
-	_ = (a + b + c) * 2
-	_ = a - b + c - d + (a + b + c) + d&e
-	_ = under_bar - 1
-	_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
-	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-
-	// test case for issue 8021
-	// want:
-	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
-	_ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
-
-	// the parser does not restrict expressions that may appear as statements
-	true
-	42
-	"foo"
-	x
-	(x)
-	a + b
-	a + b + c
-	a + (b * c)
-	a + (b / c)
-	1 + a
-	a + 1
-	s[a]
-	x << 1
-	(s[0] << 1) & 0xf
-	"foo" + s
-	x == y
-	x < y || z > 42
-}
-
-// slice expressions with cap
-func _() {
-	_ = x[a:b:c]
-	_ = x[a : b : c+d]
-	_ = x[a : b+d : c]
-	_ = x[a : b+d : c+d]
-	_ = x[a+d : b : c]
-	_ = x[a+d : b : c+d]
-	_ = x[a+d : b+d : c]
-	_ = x[a+d : b+d : c+d]
-
-	_ = x[:b:c]
-	_ = x[: b : c+d]
-	_ = x[: b+d : c]
-	_ = x[: b+d : c+d]
-}
-
-func issue22111() {
-	_ = x[:]
-
-	_ = x[:b]
-	_ = x[:b+1]
-
-	_ = x[a:]
-	_ = x[a+1:]
-
-	_ = x[a:b]
-	_ = x[a+1 : b]
-	_ = x[a : b+1]
-	_ = x[a+1 : b+1]
-
-	_ = x[:b:c]
-	_ = x[: b+1 : c]
-	_ = x[: b : c+1]
-	_ = x[: b+1 : c+1]
-
-	_ = x[a:b:c]
-	_ = x[a+1 : b : c]
-	_ = x[a : b+1 : c]
-	_ = x[a+1 : b+1 : c]
-	_ = x[a : b : c+1]
-	_ = x[a+1 : b : c+1]
-	_ = x[a : b+1 : c+1]
-	_ = x[a+1 : b+1 : c+1]
-}
-
-func _() {
-	_ = a + b
-	_ = a + b + c
-	_ = a + b*c
-	_ = a + (b * c)
-	_ = (a + b) * c
-	_ = a + (b * c * d)
-	_ = a + (b*c + d)
-
-	_ = 1 << x
-	_ = -1 << x
-	_ = 1<<x - 1
-	_ = -1<<x - 1
-
-	_ = f(a + b)
-	_ = f(a + b + c)
-	_ = f(a + b*c)
-	_ = f(a + (b * c))
-	_ = f(1<<x-1, 1<<x-2)
-
-	_ = 1<<d.logWindowSize - 1
-
-	buf = make(x, 2*cap(b.buf)+n)
-
-	dst[i*3+2] = dbuf[0] << 2
-	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
-
-	b.buf = b.buf[0 : b.off+m+n]
-	b.buf = b.buf[0 : b.off+m*n]
-	f(b.buf[0 : b.off+m+n])
-
-	signed += ' ' * 8
-	tw.octal(header[148:155], chksum)
-
-	_ = x > 0 && i >= 0
-
-	x1, x0 := x>>w2, x&m2
-	z0 = t1<<w2 + t0
-	z1 = (t1 + t0>>w2) >> w2
-	q1, r1 := x1/d1, x1%d1
-	r1 = r1*b2 | x0>>w2
-	x1 = (x1 << z) | (x0 >> (uint(w) - z))
-	x1 = x1<<z | x0>>(uint(w)-z)
-
-	_ = buf[0 : len(buf)+1]
-	_ = buf[0 : n+1]
-
-	a, b = b, a
-	a = b + c
-	a = b*c + d
-	_ = a*b + c
-	_ = a - b - c
-	_ = a - (b - c)
-	_ = a - b*c
-	_ = a - (b * c)
-	_ = a * b / c
-	_ = a / *b
-	_ = x[a|^b]
-	_ = x[a / *b]
-	_ = a & ^b
-	_ = a + +b
-	_ = a - -b
-	_ = x[a*-b]
-	_ = x[a + +b]
-	_ = x ^ y ^ z
-	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
-	_ = len(longVariableName) * 2
-
-	_ = token(matchType + xlength<<lengthShift + xoffset)
-}
-
-func f(x int, args ...int) {
-	f(0, args...)
-	f(1, args)
-	f(2, args[0])
-
-	// make sure syntactically legal code remains syntactically legal
-	f(3, 42 ...)	// a blank must remain between 42 and ...
-	f(4, 42....)
-	f(5, 42....)
-	f(6, 42.0...)
-	f(7, 42.0...)
-	f(8, .42...)
-	f(9, .42...)
-	f(10, 42e0...)
-	f(11, 42e0...)
-
-	_ = 42 .x	// a blank must remain between 42 and .x
-	_ = 42..x
-	_ = 42..x
-	_ = 42.0.x
-	_ = 42.0.x
-	_ = .42.x
-	_ = .42.x
-	_ = 42e0.x
-	_ = 42e0.x
-
-	// a blank must remain between the binary operator and the 2nd operand
-	_ = x / *y
-	_ = x < -1
-	_ = x < <-1
-	_ = x + +1
-	_ = x - -1
-	_ = x & &x
-	_ = x & ^x
-
-	_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
-}
-
-func _() {
-	_ = T{}
-	_ = struct{}{}
-	_ = [10]T{}
-	_ = [...]T{}
-	_ = []T{}
-	_ = map[int]T{}
-}
-
-// one-line structs/interfaces in composite literals (up to a threshold)
-func _() {
-	_ = struct{}{}
-	_ = struct{ x int }{0}
-	_ = struct{ x, y, z int }{0, 1, 2}
-	_ = struct{ int }{0}
-	_ = struct{ s struct{ int } }{struct{ int }{0}}
-
-	_ = (interface{})(nil)
-	_ = (interface{ String() string })(nil)
-	_ = (interface {
-		String() string
-	})(nil)
-	_ = (interface{ fmt.Stringer })(nil)
-	_ = (interface {
-		fmt.Stringer
-	})(nil)
-}
-
-func _() {
-	// do not modify literals
-	_ = "tab1	tab2	tab3	end"	// string contains 3 tabs
-	_ = "tab1 tab2 tab3 end"	// same string with 3 blanks - may be unaligned because editors see tabs in strings
-	_ = ""	// this comment should be aligned with the one on the previous line
-	_ = ``
-	_ = `
-`
-	_ = `foo
-		bar`
-	_ = `three spaces before the end of the line starting here:   
-they must not be removed`
-}
-
-func _() {
-	// smart handling of indentation for multi-line raw strings
-	var _ = ``
-	var _ = `foo`
-	var _ = `foo
-bar`
-
-	var _ = ``
-	var _ = `foo`
-	var _ =
-	// the next line should remain indented
-	`foo
-bar`
-
-	var _ =	// comment
-	``
-	var _ =	// comment
-	`foo`
-	var _ =	// comment
-	// the next line should remain indented
-	`foo
-bar`
-
-	var _ = /* comment */ ``
-	var _ = /* comment */ `foo`
-	var _ = /* comment */ `foo
-bar`
-
-	var _ =	/* comment */
-	``
-	var _ =	/* comment */
-	`foo`
-	var _ =	/* comment */
-	// the next line should remain indented
-	`foo
-bar`
-
-	var board = []int(
-		`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`)
-
-	var state = S{
-		"foo",
-		// the next line should remain indented
-		`...........
-...........
-....●●●....
-....●●●....
-..●●●●●●●..
-..●●●○●●●..
-..●●●●●●●..
-....●●●....
-....●●●....
-...........
-...........
-`,
-		"bar",
-	}
-}
-
-func _() {
-	// one-line function literals (body is on a single line)
-	_ = func() {}
-	_ = func() int { return 0 }
-	_ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
-
-	// multi-line function literals (body is not on one line)
-	_ = func() {
-	}
-	_ = func() int {
-		return 0
-	}
-	_ = func(x, y int) bool {
-		m := (x + y) / 2
-		return x < y
-	}
-
-	f(func() {
-	})
-	f(func() int {
-		return 0
-	})
-	f(func(x, y int) bool {
-		m := (x + y) / 2
-		return x < y
-	})
-}
-
-func _() {
-	_ = [][]int{
-		[]int{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int{
-		{1},
-		[]int{1, 2},
-		[]int{1, 2, 3},
-	}
-	_ = [][]int{
-		{1},
-		{1, 2},
-		{1, 2, 3},
-	}
-	_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
-}
-
-// various multi-line expressions
-func _() {
-	// do not add extra indentation to multi-line string lists
-	_ = "foo" + "bar"
-	_ = "foo" +
-		"bar" +
-		"bah"
-	_ = []string{
-		"abc" +
-			"def",
-		"foo" +
-			"bar",
-	}
-}
-
-const _ = F1 +
-	`string = "%s";` +
-	`ptr = *;` +
-	`datafmt.T2 = s ["-" p "-"];`
-
-const _ = `datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-
-const _ = `datafmt "datafmt";` +
-	`default = "%v";` +
-	`array = *;` +
-	`datafmt.T3 = s  {" " a a / ","};`
-
-func _() {
-	_ = F1 +
-		`string = "%s";` +
-		`ptr = *;` +
-		`datafmt.T2 = s ["-" p "-"];`
-
-	_ =
-		`datafmt "datafmt";` +
-			`default = "%v";` +
-			`array = *;` +
-			`datafmt.T3 = s  {" " a a / ","};`
-
-	_ = `datafmt "datafmt";` +
-		`default = "%v";` +
-		`array = *;` +
-		`datafmt.T3 = s  {" " a a / ","};`
-}
-
-func _() {
-	// respect source lines in multi-line expressions
-	_ = a +
-		b +
-		c
-	_ = a < b ||
-		b < a
-	_ = "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000"	// 100!
-	_ = "170141183460469231731687303715884105727"	// prime
-}
-
-// Alignment after overlong lines
-const (
-	_	= "991"
-	_	= "2432902008176640000"		// 20!
-	_	= "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000"	// 100!
-	_	= "170141183460469231731687303715884105727"		// prime
-)
-
-// Correct placement of operators and comments in multi-line expressions
-func _() {
-	_ = a +	// comment
-		b +	// comment
-		c
-	_ = "a" +
-		"b" +	// comment
-		"c"
-	_ = "ba0408" + "7265717569726564"	// field 71, encoding 2, string "required"
-}
-
-// Correct placement of terminating comma/closing parentheses in multi-line calls.
-func _() {
-	f(1,
-		2,
-		3)
-	f(1,
-		2,
-		3,
-	)
-	f(1,
-		2,
-		3)	// comment
-	f(1,
-		2,
-		3,	// comment
-	)
-	f(1,
-		2,
-		3)	// comment
-	f(1,
-		2,
-		3,	// comment
-	)
-}
-
-// Align comments in multi-line lists of single-line expressions.
-var txpix = [NCOL]draw.Color{
-	draw.Yellow,	// yellow
-	draw.Cyan,	// cyan
-	draw.Green,	// lime green
-	draw.GreyBlue,	// slate
-	draw.Red,	/* red */
-	draw.GreyGreen,	/* olive green */
-	draw.Blue,	/* blue */
-	draw.Color(0xFF55AAFF),	/* pink */
-	draw.Color(0xFFAAFFFF),	/* lavender */
-	draw.Color(0xBB005DFF),	/* maroon */
-}
-
-func same(t, u *Time) bool {
-	// respect source lines in multi-line expressions
-	return t.Year == u.Year &&
-		t.Month == u.Month &&
-		t.Day == u.Day &&
-		t.Hour == u.Hour &&
-		t.Minute == u.Minute &&
-		t.Second == u.Second &&
-		t.Weekday == u.Weekday &&
-		t.ZoneOffset == u.ZoneOffset &&
-		t.Zone == u.Zone
-}
-
-func (p *parser) charClass() {
-	// respect source lines in multi-line expressions
-	if cc.negate && len(cc.ranges) == 2 &&
-		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
-		nl := new(_NotNl)
-		p.re.add(nl)
-	}
-}
-
-func addState(s []state, inst instr, match []int) {
-	// handle comments correctly in multi-line expressions
-	for i := 0; i < l; i++ {
-		if s[i].inst.index() == index &&	// same instruction
-			s[i].match[0] < pos {	// earlier match already going; leftmost wins
-			return s
-		}
-	}
-}
-
-func (self *T) foo(x int) *T	{ return self }
-
-func _()	{ module.Func1().Func2() }
-
-func _() {
-	_ = new(T).
-		foo(1).
-		foo(2).
-		foo(3)
-
-	_ = new(T).
-		foo(1).
-		foo(2).	// inline comments
-		foo(3)
-
-	_ = new(T).foo(1).foo(2).foo(3)
-
-	// handle multiline argument list correctly
-	_ = new(T).
-		foo(
-			1).
-		foo(2)
-
-	_ = new(T).foo(
-		1).foo(2)
-
-	_ = Array[3+
-		4]
-
-	_ = Method(1, 2,
-		3)
-
-	_ = new(T).
-		foo().
-		bar().(*Type)
-
-	_ = new(T).
-		foo().
-		bar().(*Type).
-		baz()
-
-	_ = new(T).
-		foo().
-		bar()["idx"]
-
-	_ = new(T).
-		foo().
-		bar()["idx"].
-		baz()
-
-	_ = new(T).
-		foo().
-		bar()[1:2]
-
-	_ = new(T).
-		foo().
-		bar()[1:2].
-		baz()
-
-	_ = new(T).
-		Field.
-		Array[3+
-		4].
-		Table["foo"].
-		Blob.(*Type).
-		Slices[1:4].
-		Method(1, 2,
-			3).
-		Thingy
-
-	_ = a.b.c
-	_ = a.
-		b.
-		c
-	_ = a.b().c
-	_ = a.
-		b().
-		c
-	_ = a.b[0].c
-	_ = a.
-		b[0].
-		c
-	_ = a.b[0:].c
-	_ = a.
-		b[0:].
-		c
-	_ = a.b.(T).c
-	_ = a.
-		b.(T).
-		c
-}
-
-// Don't introduce extra newlines in strangely formatted expression lists.
-func f() {
-	// os.Open parameters should remain on two lines
-	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
-		os.O_TRUNC, 0666); err != nil {
-		log.Fatal(err)
-	}
-}
-
-// Handle multi-line argument lists ending in ... correctly.
-// Was issue 3130.
-func _() {
-	_ = append(s, a...)
-	_ = append(
-		s, a...)
-	_ = append(s,
-		a...)
-	_ = append(
-		s,
-		a...)
-	_ = append(s, a...,
-	)
-	_ = append(s,
-		a...,
-	)
-	_ = append(
-		s,
-		a...,
-	)
-}
-
-// Literal function types in conversions must be parenthesized;
-// for now go/parser accepts the unparenthesized form where it
-// is non-ambiguous.
-func _() {
-	// these conversions should be rewritten to look
-	// the same as the parenthesized conversions below
-	_ = (func())(nil)
-	_ = (func(x int) float)(nil)
-	_ = (func() func() func())(nil)
-
-	_ = (func())(nil)
-	_ = (func(x int) float)(nil)
-	_ = (func() func() func())(nil)
-}
-
-func _() {
-	_ = f().
-		f(func() {
-			f()
-		}).
-		f(map[int]int{
-			1:	2,
-			3:	4,
-		})
-
-	_ = f().
-		f(
-			func() {
-				f()
-			},
-		)
-}
diff --git a/internal/backport/go/printer/testdata/generics.golden b/internal/backport/go/printer/testdata/generics.golden
deleted file mode 100644
index 7ddf20b..0000000
--- a/internal/backport/go/printer/testdata/generics.golden
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package generics
-
-func _[A, B any](a A, b B) int	{}
-func _[T any](x, y T) T
-
-type T[P any] struct{}
-type T[P1, P2, P3 any] struct{}
-
-type T[P C] struct{}
-type T[P1, P2, P3 C] struct{}
-
-type T[P C[P]] struct{}
-type T[P1, P2, P3 C[P1, P2, P3]] struct{}
-
-func f[P any](x P)
-func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
-
-func f[P interface{}](x P)
-func f[P1, P2, P3 interface {
-	m1(P1)
-	~P2 | ~P3
-}](x1 P1, x2 P2, x3 P3) struct{}
-func f[P any](T1[P], T2[P]) T3[P]
-
-func (x T[P]) m()
-func (T[P]) m(x T[P]) P
-
-func _() {
-	type _ []T[P]
-	var _ []T[P]
-	_ = []T[P]{}
-}
-
-// type constraint literals with elided interfaces
-func _[P ~int, Q int | string]()	{}
-func _[P struct{ f int }, Q *P]()	{}
-
-// various potentially ambiguous type parameter lists (issue #49482)
-type _[P *T,] struct{}
-type _[P T | T] struct{}
-type _[P T | T | T | T] struct{}
-type _[P *T, _ any] struct{}
-type _[P *T,] struct{}
-type _[P *T, _ any] struct{}
-type _[P T] struct{}
-type _[P T, _ any] struct{}
-
-type _[P *struct{}] struct{}
-type _ [P(*struct{})]struct{}
-type _[P []int] struct{}
-
-// a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type)
-type _[P *[]int] struct{}
-type _[P *T | T, Q T] struct{}
-type _[P *[]T | T] struct{}
-type _[P *T | T | T | T | ~T] struct{}
-type _[P *T | T | T | ~T | T] struct{}
-type _[P *T | T | struct{} | T] struct{}
-type _[P <-chan int] struct{}
-type _[P *T | struct{} | T] struct{}
-
-// a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type)
-type _[P *T,] struct{}
-type _[P *T | T,] struct{}
-type _[P *T | <-T | T,] struct{}
-
-// slice/array type declarations (no blank between array length and element type)
-type _ []byte
-type _ [n]byte
-type _ [P(T)]byte
-type _ [P((T))]byte
-type _ [P * *T]byte
-type _ [P * T]byte
-type _ [P(*T)]byte
-type _ [P(**T)]byte
-type _ [P*T - T]byte
-type _ [P*T - T]byte
-type _ [P*T | T]byte
-type _ [P*T | <-T | T]byte
-
-// equivalent test cases for potentially ambiguous type parameter lists, except
-// for function declarations there is no ambiguity (issue #51548)
-func _[P *T]()		{}
-func _[P *T, _ any]()	{}
-func _[P *T]()		{}
-func _[P *T, _ any]()	{}
-func _[P T]()		{}
-func _[P T, _ any]()	{}
-
-func _[P *struct{}]()	{}
-func _[P *struct{}]()	{}
-func _[P []int]()	{}
-
-func _[P T]()	{}
-func _[P T]()	{}
-func _[P **T]()	{}
-func _[P *T]()	{}
-func _[P *T]()	{}
-func _[P **T]()	{}
-func _[P *T]()	{}
-
-func _[
-	P *T,
-]() {
-}
diff --git a/internal/backport/go/printer/testdata/generics.input b/internal/backport/go/printer/testdata/generics.input
deleted file mode 100644
index 4940f93..0000000
--- a/internal/backport/go/printer/testdata/generics.input
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package generics
-
-func _[A, B any](a A, b B) int {}
-func _[T any](x, y T) T
-
-type T[P any] struct{}
-type T[P1, P2, P3 any] struct{}
-
-type T[P C] struct{}
-type T[P1, P2, P3 C] struct{}
-
-type T[P C[P]] struct{}
-type T[P1, P2, P3 C[P1, P2, P3]] struct{}
-
-func f[P any](x P)
-func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
-
-func f[P interface{}](x P)
-func f[P1, P2, P3 interface{ m1(P1); ~P2|~P3 }](x1 P1, x2 P2, x3 P3) struct{}
-func f[P any](T1[P], T2[P]) T3[P]
-
-func (x T[P]) m()
-func ((T[P])) m(x T[P]) P
-
-func _() {
-	type _ []T[P]
-	var _ []T[P]
-	_ = []T[P]{}
-}
-
-// type constraint literals with elided interfaces
-func _[P ~int, Q int | string]() {}
-func _[P struct{f int}, Q *P]() {}
-
-// various potentially ambiguous type parameter lists (issue #49482)
-type _[P *T,] struct{}
-type _[P T | T] struct{}
-type _[P T | T | T | T] struct{}
-type _[P *T, _ any] struct{}
-type _[P (*T),] struct{}
-type _[P (*T), _ any] struct{}
-type _[P (T),] struct{}
-type _[P (T), _ any] struct{}
-
-type _[P *struct{}] struct{}
-type _[P (*struct{})] struct{}
-type _[P ([]int)] struct{}
-
-// a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type)
-type _[P *[]int] struct{}
-type _[P *T | T, Q T] struct{}
-type _[P *[]T | T] struct{}
-type _[P *T | T | T | T | ~T] struct{}
-type _[P *T | T | T | ~T | T] struct{}
-type _[P *T | T | struct{} | T] struct{}
-type _[P <-chan int] struct{}
-type _[P *T | struct{} | T] struct{}
-
-// a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type)
-type _[P *T,] struct{}
-type _[P *T | T,] struct{}
-type _[P *T | <-T | T,] struct{}
-
-// slice/array type declarations (no blank between array length and element type)
-type _ []byte
-type _ [n]byte
-type _ [P(T)]byte
-type _ [P((T))]byte
-type _ [P * *T]byte
-type _ [P * T]byte
-type _ [P(*T)]byte
-type _ [P(**T)]byte
-type _ [P * T - T]byte
-type _ [P * T - T]byte
-type _ [P * T | T]byte
-type _ [P * T | <-T | T]byte
-
-// equivalent test cases for potentially ambiguous type parameter lists, except
-// for function declarations there is no ambiguity (issue #51548)
-func _[P *T,]() {}
-func _[P *T, _ any]() {}
-func _[P (*T),]() {}
-func _[P (*T), _ any]() {}
-func _[P (T),]() {}
-func _[P (T), _ any]() {}
-
-func _[P *struct{}] () {}
-func _[P (*struct{})] () {}
-func _[P ([]int)] () {}
-
-func _ [P(T)]() {}
-func _ [P((T))]() {}
-func _ [P * *T]() {}
-func _ [P * T]() {}
-func _ [P(*T)]() {}
-func _ [P(**T)]() {}
-func _ [P * T]() {}
-
-func _[
-	P *T,
-]() {}
diff --git a/internal/backport/go/printer/testdata/go2numbers.golden b/internal/backport/go/printer/testdata/go2numbers.golden
deleted file mode 100644
index 3c12049..0000000
--- a/internal/backport/go/printer/testdata/go2numbers.golden
+++ /dev/null
@@ -1,186 +0,0 @@
-package p
-
-const (
-	// 0-octals
-	_	= 0
-	_	= 0123
-	_	= 0123456
-
-	_	= 0_123
-	_	= 0123_456
-
-	// decimals
-	_	= 1
-	_	= 1234
-	_	= 1234567
-
-	_	= 1_234
-	_	= 1_234_567
-
-	// hexadecimals
-	_	= 0x0
-	_	= 0x1234
-	_	= 0xcafef00d
-
-	_	= 0X0
-	_	= 0X1234
-	_	= 0XCAFEf00d
-
-	_	= 0X_0
-	_	= 0X_1234
-	_	= 0X_CAFE_f00d
-
-	// octals
-	_	= 0o0
-	_	= 0o1234
-	_	= 0o01234567
-
-	_	= 0O0
-	_	= 0O1234
-	_	= 0O01234567
-
-	_	= 0o_0
-	_	= 0o_1234
-	_	= 0o0123_4567
-
-	_	= 0O_0
-	_	= 0O_1234
-	_	= 0O0123_4567
-
-	// binaries
-	_	= 0b0
-	_	= 0b1011
-	_	= 0b00101101
-
-	_	= 0B0
-	_	= 0B1011
-	_	= 0B00101101
-
-	_	= 0b_0
-	_	= 0b10_11
-	_	= 0b_0010_1101
-
-	// decimal floats
-	_	= 0.
-	_	= 123.
-	_	= 0123.
-
-	_	= .0
-	_	= .123
-	_	= .0123
-
-	_	= 0e0
-	_	= 123e+0
-	_	= 0123E-1
-
-	_	= 0e-0
-	_	= 123E+0
-	_	= 0123E123
-
-	_	= 0.e+1
-	_	= 123.E-10
-	_	= 0123.e123
-
-	_	= .0e-1
-	_	= .123E+10
-	_	= .0123E123
-
-	_	= 0.0
-	_	= 123.123
-	_	= 0123.0123
-
-	_	= 0.0e1
-	_	= 123.123E-10
-	_	= 0123.0123e+456
-
-	_	= 1_2_3.
-	_	= 0_123.
-
-	_	= 0_0e0
-	_	= 1_2_3e0
-	_	= 0_123e0
-
-	_	= 0e-0_0
-	_	= 1_2_3E+0
-	_	= 0123E1_2_3
-
-	_	= 0.e+1
-	_	= 123.E-1_0
-	_	= 01_23.e123
-
-	_	= .0e-1
-	_	= .123E+10
-	_	= .0123E123
-
-	_	= 1_2_3.123
-	_	= 0123.01_23
-
-	// hexadecimal floats
-	_	= 0x0.p+0
-	_	= 0Xdeadcafe.p-10
-	_	= 0x1234.P123
-
-	_	= 0x.1p-0
-	_	= 0X.deadcafep2
-	_	= 0x.1234P+10
-
-	_	= 0x0p0
-	_	= 0Xdeadcafep+1
-	_	= 0x1234P-10
-
-	_	= 0x0.0p0
-	_	= 0Xdead.cafep+1
-	_	= 0x12.34P-10
-
-	_	= 0Xdead_cafep+1
-	_	= 0x_1234P-10
-
-	_	= 0X_dead_cafe.p-10
-	_	= 0x12_34.P1_2_3
-	_	= 0X1_2_3_4.P-1_2_3
-
-	// imaginaries
-	_	= 0i
-	_	= 00i
-	_	= 08i
-	_	= 0000000000i
-	_	= 0123i
-	_	= 0000000123i
-	_	= 0000056789i
-	_	= 1234i
-	_	= 1234567i
-
-	_	= 0i
-	_	= 0_0i
-	_	= 0_8i
-	_	= 0_000_000_000i
-	_	= 0_123i
-	_	= 0_000_000_123i
-	_	= 0_000_056_789i
-	_	= 1_234i
-	_	= 1_234_567i
-
-	_	= 0.i
-	_	= 123.i
-	_	= 0123.i
-	_	= 000123.i
-
-	_	= 0e0i
-	_	= 123e0i
-	_	= 0123E0i
-	_	= 000123E0i
-
-	_	= 0.e+1i
-	_	= 123.E-1_0i
-	_	= 01_23.e123i
-	_	= 00_01_23.e123i
-
-	_	= 0b1010i
-	_	= 0B1010i
-	_	= 0o660i
-	_	= 0O660i
-	_	= 0xabcDEFi
-	_	= 0XabcDEFi
-	_	= 0xabcDEFP0i
-	_	= 0XabcDEFp0i
-)
diff --git a/internal/backport/go/printer/testdata/go2numbers.input b/internal/backport/go/printer/testdata/go2numbers.input
deleted file mode 100644
index f3e7828..0000000
--- a/internal/backport/go/printer/testdata/go2numbers.input
+++ /dev/null
@@ -1,186 +0,0 @@
-package p
-
-const (
-	// 0-octals
-	_ = 0
-	_ = 0123
-	_ = 0123456
-
-	_ = 0_123
-	_ = 0123_456
-
-	// decimals
-	_ = 1
-	_ = 1234
-	_ = 1234567
-
-	_ = 1_234
-	_ = 1_234_567
-
-	// hexadecimals
-	_ = 0x0
-	_ = 0x1234
-	_ = 0xcafef00d
-
-	_ = 0X0
-	_ = 0X1234
-	_ = 0XCAFEf00d
-
-	_ = 0X_0
-	_ = 0X_1234
-	_ = 0X_CAFE_f00d
-
-	// octals
-	_ = 0o0
-	_ = 0o1234
-	_ = 0o01234567
-
-	_ = 0O0
-	_ = 0O1234
-	_ = 0O01234567
-
-	_ = 0o_0
-	_ = 0o_1234
-	_ = 0o0123_4567
-
-	_ = 0O_0
-	_ = 0O_1234
-	_ = 0O0123_4567
-
-	// binaries
-	_ = 0b0
-	_ = 0b1011
-	_ = 0b00101101
-
-	_ = 0B0
-	_ = 0B1011
-	_ = 0B00101101
-
-	_ = 0b_0
-	_ = 0b10_11
-	_ = 0b_0010_1101
-
-	// decimal floats
-	_ = 0.
-	_ = 123.
-	_ = 0123.
-
-	_ = .0
-	_ = .123
-	_ = .0123
-
-	_ = 0e0
-	_ = 123e+0
-	_ = 0123E-1
-
-	_ = 0e-0
-	_ = 123E+0
-	_ = 0123E123
-
-	_ = 0.e+1
-	_ = 123.E-10
-	_ = 0123.e123
-
-	_ = .0e-1
-	_ = .123E+10
-	_ = .0123E123
-
-	_ = 0.0
-	_ = 123.123
-	_ = 0123.0123
-
-	_ = 0.0e1
-	_ = 123.123E-10
-	_ = 0123.0123e+456
-
-	_ = 1_2_3.
-	_ = 0_123.
-
-	_ = 0_0e0
-	_ = 1_2_3e0
-	_ = 0_123e0
-
-	_ = 0e-0_0
-	_ = 1_2_3E+0
-	_ = 0123E1_2_3
-
-	_ = 0.e+1
-	_ = 123.E-1_0
-	_ = 01_23.e123
-
-	_ = .0e-1
-	_ = .123E+10
-	_ = .0123E123
-
-	_ = 1_2_3.123
-	_ = 0123.01_23
-
-	// hexadecimal floats
-	_ = 0x0.p+0
-	_ = 0Xdeadcafe.p-10
-	_ = 0x1234.P123
-
-	_ = 0x.1p-0
-	_ = 0X.deadcafep2
-	_ = 0x.1234P+10
-
-	_ = 0x0p0
-	_ = 0Xdeadcafep+1
-	_ = 0x1234P-10
-
-	_ = 0x0.0p0
-	_ = 0Xdead.cafep+1
-	_ = 0x12.34P-10
-
-	_ = 0Xdead_cafep+1
-	_ = 0x_1234P-10
-
-	_ = 0X_dead_cafe.p-10
-	_ = 0x12_34.P1_2_3
-	_ = 0X1_2_3_4.P-1_2_3
-
-	// imaginaries
-	_ = 0i
-	_ = 00i
-	_ = 08i
-	_ = 0000000000i
-	_ = 0123i
-	_ = 0000000123i
-	_ = 0000056789i
-	_ = 1234i
-	_ = 1234567i
-
-	_ = 0i
-	_ = 0_0i
-	_ = 0_8i
-	_ = 0_000_000_000i
-	_ = 0_123i
-	_ = 0_000_000_123i
-	_ = 0_000_056_789i
-	_ = 1_234i
-	_ = 1_234_567i
-
-	_ = 0.i
-	_ = 123.i
-	_ = 0123.i
-	_ = 000123.i
-
-	_ = 0e0i
-	_ = 123e0i
-	_ = 0123E0i
-	_ = 000123E0i
-
-	_ = 0.e+1i
-	_ = 123.E-1_0i
-	_ = 01_23.e123i
-	_ = 00_01_23.e123i
-
-	_ = 0b1010i
-	_ = 0B1010i
-	_ = 0o660i
-	_ = 0O660i
-	_ = 0xabcDEFi
-	_ = 0XabcDEFi
-	_ = 0xabcDEFP0i
-	_ = 0XabcDEFp0i
-)
diff --git a/internal/backport/go/printer/testdata/go2numbers.norm b/internal/backport/go/printer/testdata/go2numbers.norm
deleted file mode 100644
index 855f0fc..0000000
--- a/internal/backport/go/printer/testdata/go2numbers.norm
+++ /dev/null
@@ -1,186 +0,0 @@
-package p
-
-const (
-	// 0-octals
-	_	= 0
-	_	= 0123
-	_	= 0123456
-
-	_	= 0_123
-	_	= 0123_456
-
-	// decimals
-	_	= 1
-	_	= 1234
-	_	= 1234567
-
-	_	= 1_234
-	_	= 1_234_567
-
-	// hexadecimals
-	_	= 0x0
-	_	= 0x1234
-	_	= 0xcafef00d
-
-	_	= 0x0
-	_	= 0x1234
-	_	= 0xCAFEf00d
-
-	_	= 0x_0
-	_	= 0x_1234
-	_	= 0x_CAFE_f00d
-
-	// octals
-	_	= 0o0
-	_	= 0o1234
-	_	= 0o01234567
-
-	_	= 0o0
-	_	= 0o1234
-	_	= 0o01234567
-
-	_	= 0o_0
-	_	= 0o_1234
-	_	= 0o0123_4567
-
-	_	= 0o_0
-	_	= 0o_1234
-	_	= 0o0123_4567
-
-	// binaries
-	_	= 0b0
-	_	= 0b1011
-	_	= 0b00101101
-
-	_	= 0b0
-	_	= 0b1011
-	_	= 0b00101101
-
-	_	= 0b_0
-	_	= 0b10_11
-	_	= 0b_0010_1101
-
-	// decimal floats
-	_	= 0.
-	_	= 123.
-	_	= 0123.
-
-	_	= .0
-	_	= .123
-	_	= .0123
-
-	_	= 0e0
-	_	= 123e+0
-	_	= 0123e-1
-
-	_	= 0e-0
-	_	= 123e+0
-	_	= 0123e123
-
-	_	= 0.e+1
-	_	= 123.e-10
-	_	= 0123.e123
-
-	_	= .0e-1
-	_	= .123e+10
-	_	= .0123e123
-
-	_	= 0.0
-	_	= 123.123
-	_	= 0123.0123
-
-	_	= 0.0e1
-	_	= 123.123e-10
-	_	= 0123.0123e+456
-
-	_	= 1_2_3.
-	_	= 0_123.
-
-	_	= 0_0e0
-	_	= 1_2_3e0
-	_	= 0_123e0
-
-	_	= 0e-0_0
-	_	= 1_2_3e+0
-	_	= 0123e1_2_3
-
-	_	= 0.e+1
-	_	= 123.e-1_0
-	_	= 01_23.e123
-
-	_	= .0e-1
-	_	= .123e+10
-	_	= .0123e123
-
-	_	= 1_2_3.123
-	_	= 0123.01_23
-
-	// hexadecimal floats
-	_	= 0x0.p+0
-	_	= 0xdeadcafe.p-10
-	_	= 0x1234.p123
-
-	_	= 0x.1p-0
-	_	= 0x.deadcafep2
-	_	= 0x.1234p+10
-
-	_	= 0x0p0
-	_	= 0xdeadcafep+1
-	_	= 0x1234p-10
-
-	_	= 0x0.0p0
-	_	= 0xdead.cafep+1
-	_	= 0x12.34p-10
-
-	_	= 0xdead_cafep+1
-	_	= 0x_1234p-10
-
-	_	= 0x_dead_cafe.p-10
-	_	= 0x12_34.p1_2_3
-	_	= 0x1_2_3_4.p-1_2_3
-
-	// imaginaries
-	_	= 0i
-	_	= 0i
-	_	= 8i
-	_	= 0i
-	_	= 123i
-	_	= 123i
-	_	= 56789i
-	_	= 1234i
-	_	= 1234567i
-
-	_	= 0i
-	_	= 0i
-	_	= 8i
-	_	= 0i
-	_	= 123i
-	_	= 123i
-	_	= 56_789i
-	_	= 1_234i
-	_	= 1_234_567i
-
-	_	= 0.i
-	_	= 123.i
-	_	= 0123.i
-	_	= 000123.i
-
-	_	= 0e0i
-	_	= 123e0i
-	_	= 0123e0i
-	_	= 000123e0i
-
-	_	= 0.e+1i
-	_	= 123.e-1_0i
-	_	= 01_23.e123i
-	_	= 00_01_23.e123i
-
-	_	= 0b1010i
-	_	= 0b1010i
-	_	= 0o660i
-	_	= 0o660i
-	_	= 0xabcDEFi
-	_	= 0xabcDEFi
-	_	= 0xabcDEFp0i
-	_	= 0xabcDEFp0i
-)
diff --git a/internal/backport/go/printer/testdata/gobuild1.golden b/internal/backport/go/printer/testdata/gobuild1.golden
deleted file mode 100644
index 649da40..0000000
--- a/internal/backport/go/printer/testdata/gobuild1.golden
+++ /dev/null
@@ -1,6 +0,0 @@
-//go:build x
-// +build x
-
-package p
-
-func f()
diff --git a/internal/backport/go/printer/testdata/gobuild1.input b/internal/backport/go/printer/testdata/gobuild1.input
deleted file mode 100644
index 6538ee6..0000000
--- a/internal/backport/go/printer/testdata/gobuild1.input
+++ /dev/null
@@ -1,7 +0,0 @@
-package p
-
-//go:build x
-
-func f()
-
-// +build y
diff --git a/internal/backport/go/printer/testdata/gobuild2.golden b/internal/backport/go/printer/testdata/gobuild2.golden
deleted file mode 100644
index c46fd34..0000000
--- a/internal/backport/go/printer/testdata/gobuild2.golden
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build x
-// +build x
-
-// other comment
-
-package p
-
-func f()
diff --git a/internal/backport/go/printer/testdata/gobuild2.input b/internal/backport/go/printer/testdata/gobuild2.input
deleted file mode 100644
index f0f772a..0000000
--- a/internal/backport/go/printer/testdata/gobuild2.input
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build y
-
-// other comment
-
-package p
-
-func f()
-
-//go:build x
diff --git a/internal/backport/go/printer/testdata/gobuild3.golden b/internal/backport/go/printer/testdata/gobuild3.golden
deleted file mode 100644
index db92c57..0000000
--- a/internal/backport/go/printer/testdata/gobuild3.golden
+++ /dev/null
@@ -1,10 +0,0 @@
-// other comment
-
-//go:build x
-// +build x
-
-// yet another comment
-
-package p
-
-func f()
diff --git a/internal/backport/go/printer/testdata/gobuild3.input b/internal/backport/go/printer/testdata/gobuild3.input
deleted file mode 100644
index d0c97b2..0000000
--- a/internal/backport/go/printer/testdata/gobuild3.input
+++ /dev/null
@@ -1,11 +0,0 @@
-// other comment
-
-// +build y
-
-// yet another comment
-
-package p
-
-//go:build x
-
-func f()
diff --git a/internal/backport/go/printer/testdata/gobuild4.golden b/internal/backport/go/printer/testdata/gobuild4.golden
deleted file mode 100644
index b16477f..0000000
--- a/internal/backport/go/printer/testdata/gobuild4.golden
+++ /dev/null
@@ -1,6 +0,0 @@
-//go:build (x || y) && z
-// +build x y
-// +build z
-
-// doc comment
-package p
diff --git a/internal/backport/go/printer/testdata/gobuild4.input b/internal/backport/go/printer/testdata/gobuild4.input
deleted file mode 100644
index 29d5a0a..0000000
--- a/internal/backport/go/printer/testdata/gobuild4.input
+++ /dev/null
@@ -1,5 +0,0 @@
-// doc comment
-package p
-
-// +build x y
-// +build z
diff --git a/internal/backport/go/printer/testdata/gobuild5.golden b/internal/backport/go/printer/testdata/gobuild5.golden
deleted file mode 100644
index 2808a53..0000000
--- a/internal/backport/go/printer/testdata/gobuild5.golden
+++ /dev/null
@@ -1,4 +0,0 @@
-//go:build !(x || y) && z
-// +build !x,!y,z
-
-package p
diff --git a/internal/backport/go/printer/testdata/gobuild5.input b/internal/backport/go/printer/testdata/gobuild5.input
deleted file mode 100644
index ec5815c..0000000
--- a/internal/backport/go/printer/testdata/gobuild5.input
+++ /dev/null
@@ -1,4 +0,0 @@
-//go:build !(x || y) && z
-// +build something else
-
-package p
diff --git a/internal/backport/go/printer/testdata/gobuild6.golden b/internal/backport/go/printer/testdata/gobuild6.golden
deleted file mode 100644
index abb1e2a..0000000
--- a/internal/backport/go/printer/testdata/gobuild6.golden
+++ /dev/null
@@ -1,5 +0,0 @@
-//go:build !(x || y) && z
-
-// no +build line
-
-package p
diff --git a/internal/backport/go/printer/testdata/gobuild6.input b/internal/backport/go/printer/testdata/gobuild6.input
deleted file mode 100644
index 1621897..0000000
--- a/internal/backport/go/printer/testdata/gobuild6.input
+++ /dev/null
@@ -1,4 +0,0 @@
-//go:build !(x || y) && z
-// no +build line
-
-package p
diff --git a/internal/backport/go/printer/testdata/gobuild7.golden b/internal/backport/go/printer/testdata/gobuild7.golden
deleted file mode 100644
index bf41dd4..0000000
--- a/internal/backport/go/printer/testdata/gobuild7.golden
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
-
-//go:build !go1.16
-// +build !go1.16
-
-// Package buildtag defines an Analyzer that checks build tags.
-package buildtag
diff --git a/internal/backport/go/printer/testdata/gobuild7.input b/internal/backport/go/printer/testdata/gobuild7.input
deleted file mode 100644
index bf41dd4..0000000
--- a/internal/backport/go/printer/testdata/gobuild7.input
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
-
-//go:build !go1.16
-// +build !go1.16
-
-// Package buildtag defines an Analyzer that checks build tags.
-package buildtag
diff --git a/internal/backport/go/printer/testdata/linebreaks.golden b/internal/backport/go/printer/testdata/linebreaks.golden
deleted file mode 100644
index 17d2b5c..0000000
--- a/internal/backport/go/printer/testdata/linebreaks.golden
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package linebreaks
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-type writerTestEntry struct {
-	header		*Header
-	contents	string
-}
-
-type writerTest struct {
-	file	string	// filename of expected output
-	entries	[]*writerTestEntry
-}
-
-var writerTests = []*writerTest{
-	&writerTest{
-		file:	"testdata/writer.tar",
-		entries: []*writerTestEntry{
-			&writerTestEntry{
-				header: &Header{
-					Name:		"small.txt",
-					Mode:		0640,
-					Uid:		73025,
-					Gid:		5000,
-					Size:		5,
-					Mtime:		1246508266,
-					Typeflag:	'0',
-					Uname:		"dsymonds",
-					Gname:		"eng",
-				},
-				contents:	"Kilts",
-			},
-			&writerTestEntry{
-				header: &Header{
-					Name:		"small2.txt",
-					Mode:		0640,
-					Uid:		73025,
-					Gid:		5000,
-					Size:		11,
-					Mtime:		1245217492,
-					Typeflag:	'0',
-					Uname:		"dsymonds",
-					Gname:		"eng",
-				},
-				contents:	"Google.com\n",
-			},
-		},
-	},
-	// The truncated test file was produced using these commands:
-	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
-	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
-	&writerTest{
-		file:	"testdata/writer-big.tar",
-		entries: []*writerTestEntry{
-			&writerTestEntry{
-				header: &Header{
-					Name:		"tmp/16gig.txt",
-					Mode:		0640,
-					Uid:		73025,
-					Gid:		5000,
-					Size:		16 << 30,
-					Mtime:		1254699560,
-					Typeflag:	'0',
-					Uname:		"dsymonds",
-					Gname:		"eng",
-				},
-				// no contents
-			},
-		},
-	},
-}
-
-type untarTest struct {
-	file	string
-	headers	[]*Header
-}
-
-var untarTests = []*untarTest{
-	&untarTest{
-		file:	"testdata/gnu.tar",
-		headers: []*Header{
-			&Header{
-				Name:		"small.txt",
-				Mode:		0640,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		5,
-				Mtime:		1244428340,
-				Typeflag:	'0',
-				Uname:		"dsymonds",
-				Gname:		"eng",
-			},
-			&Header{
-				Name:		"small2.txt",
-				Mode:		0640,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		11,
-				Mtime:		1244436044,
-				Typeflag:	'0',
-				Uname:		"dsymonds",
-				Gname:		"eng",
-			},
-		},
-	},
-	&untarTest{
-		file:	"testdata/star.tar",
-		headers: []*Header{
-			&Header{
-				Name:		"small.txt",
-				Mode:		0640,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		5,
-				Mtime:		1244592783,
-				Typeflag:	'0',
-				Uname:		"dsymonds",
-				Gname:		"eng",
-				Atime:		1244592783,
-				Ctime:		1244592783,
-			},
-			&Header{
-				Name:		"small2.txt",
-				Mode:		0640,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		11,
-				Mtime:		1244592783,
-				Typeflag:	'0',
-				Uname:		"dsymonds",
-				Gname:		"eng",
-				Atime:		1244592783,
-				Ctime:		1244592783,
-			},
-		},
-	},
-	&untarTest{
-		file:	"testdata/v7.tar",
-		headers: []*Header{
-			&Header{
-				Name:		"small.txt",
-				Mode:		0444,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		5,
-				Mtime:		1244593104,
-				Typeflag:	'\x00',
-			},
-			&Header{
-				Name:		"small2.txt",
-				Mode:		0444,
-				Uid:		73025,
-				Gid:		5000,
-				Size:		11,
-				Mtime:		1244593104,
-				Typeflag:	'\x00',
-			},
-		},
-	},
-}
-
-var facts = map[int]string{
-	0:	"1",
-	1:	"1",
-	2:	"2",
-	10:	"3628800",
-	20:	"2432902008176640000",
-	100: "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000",
-}
-
-func usage() {
-	fmt.Fprintf(os.Stderr,
-		// TODO(gri): the 2nd string of this string list should not be indented
-		"usage: godoc package [name ...]\n"+
-			"	godoc -http=:6060\n")
-	flag.PrintDefaults()
-	os.Exit(2)
-}
-
-func TestReader(t *testing.T) {
-testLoop:
-	for i, test := range untarTests {
-		f, err := os.Open(test.file, os.O_RDONLY, 0444)
-		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
-			continue
-		}
-		tr := NewReader(f)
-		for j, header := range test.headers {
-			hdr, err := tr.Next()
-			if err != nil || hdr == nil {
-				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
-				f.Close()
-				continue testLoop
-			}
-			if !reflect.DeepEqual(hdr, header) {
-				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
-					i, j, *hdr, *header)
-			}
-		}
-		hdr, err := tr.Next()
-		if hdr != nil || err != nil {
-			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
-		}
-		f.Close()
-	}
-}
-
-// Respect line breaks in function calls.
-func _() {
-	f(x)
-	f(x,
-		x)
-	f(x,
-		x,
-	)
-	f(
-		x,
-		x)
-	f(
-		x,
-		x,
-	)
-}
-
-// Respect line breaks in function declarations.
-func _(x T)	{}
-func _(x T,
-	y T) {
-}
-func _(x T,
-	y T,
-) {
-}
-func _(
-	x T,
-	y T) {
-}
-func _(
-	x T,
-	y T,
-) {
-}
-
-// Example from issue #2597.
-func ManageStatus0(
-	in <-chan *Status,
-	req <-chan Request,
-	stat chan<- *TargetInfo,
-	TargetHistorySize int) {
-}
-
-func ManageStatus1(
-	in <-chan *Status,
-	req <-chan Request,
-	stat chan<- *TargetInfo,
-	TargetHistorySize int,
-) {
-}
-
-// Example from issue #9064.
-func (y *y) xerrors() error {
-	_ = "xerror.test"	//TODO-
-	_ = []byte(`
-foo bar foo bar foo bar
-`) //TODO-
-}
-
-func _() {
-	_ = "abc"		// foo
-	_ = `abc_0123456789_`	// foo
-}
-
-func _() {
-	_ = "abc"	// foo
-	_ = `abc
-0123456789
-` // foo
-}
-
-// There should be exactly one linebreak after this comment.
diff --git a/internal/backport/go/printer/testdata/linebreaks.input b/internal/backport/go/printer/testdata/linebreaks.input
deleted file mode 100644
index 9e714f3..0000000
--- a/internal/backport/go/printer/testdata/linebreaks.input
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package linebreaks
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-type writerTestEntry struct {
-	header *Header
-	contents string
-}
-
-type writerTest struct {
-	file string  // filename of expected output
-	entries []*writerTestEntry
-}
-
-var writerTests = []*writerTest{
-	&writerTest{
-		file: "testdata/writer.tar",
-		entries: []*writerTestEntry{
-			&writerTestEntry{
-				header: &Header{
-					Name: "small.txt",
-					Mode: 0640,
-					Uid: 73025,
-					Gid: 5000,
-					Size: 5,
-					Mtime: 1246508266,
-					Typeflag: '0',
-					Uname: "dsymonds",
-					Gname: "eng",
-				},
-				contents: "Kilts",
-			},
-			&writerTestEntry{
-				header: &Header{
-					Name: "small2.txt",
-					Mode: 0640,
-					Uid: 73025,
-					Gid: 5000,
-					Size: 11,
-					Mtime: 1245217492,
-					Typeflag: '0',
-					Uname: "dsymonds",
-					Gname: "eng",
-				},
-				contents: "Google.com\n",
-			},
-		},
-	},
-	// The truncated test file was produced using these commands:
-	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
-	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
-	&writerTest{
-		file: "testdata/writer-big.tar",
-		entries: []*writerTestEntry{
-			&writerTestEntry{
-				header: &Header{
-					Name: "tmp/16gig.txt",
-					Mode: 0640,
-					Uid: 73025,
-					Gid: 5000,
-					Size: 16 << 30,
-					Mtime: 1254699560,
-					Typeflag: '0',
-					Uname: "dsymonds",
-					Gname: "eng",
-				},
-				// no contents
-			},
-		},
-	},
-}
-
-type untarTest struct {
-	file string
-	headers []*Header
-}
-
-var untarTests = []*untarTest{
-	&untarTest{
-		file: "testdata/gnu.tar",
-		headers: []*Header{
-			&Header{
-				Name: "small.txt",
-				Mode: 0640,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 5,
-				Mtime: 1244428340,
-				Typeflag: '0',
-				Uname: "dsymonds",
-				Gname: "eng",
-			},
-			&Header{
-				Name: "small2.txt",
-				Mode: 0640,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 11,
-				Mtime: 1244436044,
-				Typeflag: '0',
-				Uname: "dsymonds",
-				Gname: "eng",
-			},
-		},
-	},
-	&untarTest{
-		file: "testdata/star.tar",
-		headers: []*Header{
-			&Header{
-				Name: "small.txt",
-				Mode: 0640,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 5,
-				Mtime: 1244592783,
-				Typeflag: '0',
-				Uname: "dsymonds",
-				Gname: "eng",
-				Atime: 1244592783,
-				Ctime: 1244592783,
-			},
-			&Header{
-				Name: "small2.txt",
-				Mode: 0640,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 11,
-				Mtime: 1244592783,
-				Typeflag: '0',
-				Uname: "dsymonds",
-				Gname: "eng",
-				Atime: 1244592783,
-				Ctime: 1244592783,
-			},
-		},
-	},
-	&untarTest{
-		file: "testdata/v7.tar",
-		headers: []*Header{
-			&Header{
-				Name: "small.txt",
-				Mode: 0444,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 5,
-				Mtime: 1244593104,
-				Typeflag: '\x00',
-			},
-			&Header{
-				Name: "small2.txt",
-				Mode: 0444,
-				Uid: 73025,
-				Gid: 5000,
-				Size: 11,
-				Mtime: 1244593104,
-				Typeflag: '\x00',
-			},
-		},
-	},
-}
-
-var facts = map[int] string {
-	0: "1",
-	1: "1",
-	2: "2",
-	10: "3628800",
-	20: "2432902008176640000",
-	100: "933262154439441526816992388562667004907159682643816214685929" +
-		"638952175999932299156089414639761565182862536979208272237582" +
-		"51185210916864000000000000000000000000",
-}
-
-func usage() {
-	fmt.Fprintf(os.Stderr,
-		// TODO(gri): the 2nd string of this string list should not be indented
-		"usage: godoc package [name ...]\n" +
-		"	godoc -http=:6060\n")
-	flag.PrintDefaults()
-	os.Exit(2)
-}
-
-func TestReader(t *testing.T) {
-testLoop:
-	for i, test := range untarTests {
-		f, err := os.Open(test.file, os.O_RDONLY, 0444)
-		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
-			continue
-		}
-		tr := NewReader(f)
-		for j, header := range test.headers {
-			hdr, err := tr.Next()
-			if err != nil || hdr == nil {
-				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
-				f.Close()
-				continue testLoop
-			}
-			if !reflect.DeepEqual(hdr, header) {
-				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
-					 i, j, *hdr, *header)
-			}
-		}
-		hdr, err := tr.Next()
-		if hdr != nil || err != nil {
-			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
-		}
-		f.Close()
-	}
-}
-
-// Respect line breaks in function calls.
-func _() {
-	f(x)
-	f(x,
-	  x)
-	f(x,
-	  x,
-	)
-	f(
-	  x,
-	  x)
-	f(
-	  x,
-	  x,
-	)
-}
-
-// Respect line breaks in function declarations.
-func _(x T) {}
-func _(x T,
-       y T) {}
-func _(x T,
-       y T,
-) {}
-func _(
-       x T,
-       y T) {}
-func _(
-       x T,
-       y T,
-) {}
-
-// Example from issue #2597.
-func ManageStatus0(
-	in <-chan *Status,
-	req <-chan Request,
-	stat chan<- *TargetInfo,
-	TargetHistorySize int) {
-}
-    
-func ManageStatus1(
-	in <-chan *Status,
-	req <-chan Request,
-	stat chan<- *TargetInfo,
-	TargetHistorySize int,
-) {
-}
-
-// Example from issue #9064.
-func (y *y) xerrors() error {
-	_ = "xerror.test" //TODO-
-	_ = []byte(`
-foo bar foo bar foo bar
-`) //TODO-
-}
-
-func _() {
-	_ = "abc" // foo
-	_ = `abc_0123456789_` // foo
-}
-
-func _() {
-	_ = "abc" // foo
-	_ = `abc
-0123456789
-` // foo
-}
-
-// There should be exactly one linebreak after this comment.
diff --git a/internal/backport/go/printer/testdata/parser.go b/internal/backport/go/printer/testdata/parser.go
deleted file mode 100644
index 8479cb6..0000000
--- a/internal/backport/go/printer/testdata/parser.go
+++ /dev/null
@@ -1,2148 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parser implements a parser for Go source files. Input may be
-// provided in a variety of forms (see the various Parse* functions); the
-// output is an abstract syntax tree (AST) representing the Go source. The
-// parser is invoked through one of the Parse* functions.
-
-package parser
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-// The mode parameter to the Parse* functions is a set of flags (or 0).
-// They control the amount of source code parsed and other optional
-// parser functionality.
-const (
-	PackageClauseOnly uint = 1 << iota // parsing stops after package clause
-	ImportsOnly                        // parsing stops after import declarations
-	ParseComments                      // parse comments and add them to AST
-	Trace                              // print a trace of parsed productions
-	DeclarationErrors                  // report declaration errors
-)
-
-// The parser structure holds the parser's internal state.
-type parser struct {
-	file *token.File
-	scanner.ErrorVector
-	scanner scanner.Scanner
-
-	// Tracing/debugging
-	mode   uint // parsing mode
-	trace  bool // == (mode & Trace != 0)
-	indent uint // indentation used for tracing output
-
-	// Comments
-	comments    []*ast.CommentGroup
-	leadComment *ast.CommentGroup // last lead comment
-	lineComment *ast.CommentGroup // last line comment
-
-	// Next token
-	pos token.Pos   // token position
-	tok token.Token // one token look-ahead
-	lit string      // token literal
-
-	// Non-syntactic parser control
-	exprLev int // < 0: in control clause, >= 0: in expression
-
-	// Ordinary identifier scopes
-	pkgScope   *ast.Scope        // pkgScope.Outer == nil
-	topScope   *ast.Scope        // top-most scope; may be pkgScope
-	unresolved []*ast.Ident      // unresolved identifiers
-	imports    []*ast.ImportSpec // list of imports
-
-	// Label scope
-	// (maintained by open/close LabelScope)
-	labelScope  *ast.Scope     // label scope for current function
-	targetStack [][]*ast.Ident // stack of unresolved labels
-}
-
-// scannerMode returns the scanner mode bits given the parser's mode bits.
-func scannerMode(mode uint) uint {
-	var m uint = scanner.InsertSemis
-	if mode&ParseComments != 0 {
-		m |= scanner.ScanComments
-	}
-	return m
-}
-
-func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
-	p.file = fset.AddFile(filename, fset.Base(), len(src))
-	p.scanner.Init(p.file, src, p, scannerMode(mode))
-
-	p.mode = mode
-	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
-
-	p.next()
-
-	// set up the pkgScope here (as opposed to in parseFile) because
-	// there are other parser entry points (ParseExpr, etc.)
-	p.openScope()
-	p.pkgScope = p.topScope
-
-	// for the same reason, set up a label scope
-	p.openLabelScope()
-}
-
-// ----------------------------------------------------------------------------
-// Scoping support
-
-func (p *parser) openScope() {
-	p.topScope = ast.NewScope(p.topScope)
-}
-
-func (p *parser) closeScope() {
-	p.topScope = p.topScope.Outer
-}
-
-func (p *parser) openLabelScope() {
-	p.labelScope = ast.NewScope(p.labelScope)
-	p.targetStack = append(p.targetStack, nil)
-}
-
-func (p *parser) closeLabelScope() {
-	// resolve labels
-	n := len(p.targetStack) - 1
-	scope := p.labelScope
-	for _, ident := range p.targetStack[n] {
-		ident.Obj = scope.Lookup(ident.Name)
-		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
-			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
-		}
-	}
-	// pop label scope
-	p.targetStack = p.targetStack[0:n]
-	p.labelScope = p.labelScope.Outer
-}
-
-func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
-	for _, ident := range idents {
-		assert(ident.Obj == nil, "identifier already declared or resolved")
-		if ident.Name != "_" {
-			obj := ast.NewObj(kind, ident.Name)
-			// remember the corresponding declaration for redeclaration
-			// errors and global variable resolution/typechecking phase
-			obj.Decl = decl
-			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
-				prevDecl := ""
-				if pos := alt.Pos(); pos.IsValid() {
-					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
-				}
-				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
-			}
-			ident.Obj = obj
-		}
-	}
-}
-
-func (p *parser) shortVarDecl(idents []*ast.Ident) {
-	// Go spec: A short variable declaration may redeclare variables
-	// provided they were originally declared in the same block with
-	// the same type, and at least one of the non-blank variables is new.
-	n := 0 // number of new variables
-	for _, ident := range idents {
-		assert(ident.Obj == nil, "identifier already declared or resolved")
-		if ident.Name != "_" {
-			obj := ast.NewObj(ast.Var, ident.Name)
-			// short var declarations cannot have redeclaration errors
-			// and are not global => no need to remember the respective
-			// declaration
-			alt := p.topScope.Insert(obj)
-			if alt == nil {
-				n++ // new declaration
-				alt = obj
-			}
-			ident.Obj = alt
-		}
-	}
-	if n == 0 && p.mode&DeclarationErrors != 0 {
-		p.error(idents[0].Pos(), "no new variables on left side of :=")
-	}
-}
-
-// The unresolved object is a sentinel to mark identifiers that have been added
-// to the list of unresolved identifiers. The sentinel is only used for verifying
-// internal consistency.
-var unresolved = new(ast.Object)
-
-func (p *parser) resolve(x ast.Expr) {
-	// nothing to do if x is not an identifier or the blank identifier
-	ident, _ := x.(*ast.Ident)
-	if ident == nil {
-		return
-	}
-	assert(ident.Obj == nil, "identifier already declared or resolved")
-	if ident.Name == "_" {
-		return
-	}
-	// try to resolve the identifier
-	for s := p.topScope; s != nil; s = s.Outer {
-		if obj := s.Lookup(ident.Name); obj != nil {
-			ident.Obj = obj
-			return
-		}
-	}
-	// all local scopes are known, so any unresolved identifier
-	// must be found either in the file scope, package scope
-	// (perhaps in another file), or universe scope --- collect
-	// them so that they can be resolved later
-	ident.Obj = unresolved
-	p.unresolved = append(p.unresolved, ident)
-}
-
-// ----------------------------------------------------------------------------
-// Parsing support
-
-func (p *parser) printTrace(a ...interface{}) {
-	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
-		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
-	const n = uint(len(dots))
-	pos := p.file.Position(p.pos)
-	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
-	i := 2 * p.indent
-	for ; i > n; i -= n {
-		fmt.Print(dots)
-	}
-	fmt.Print(dots[0:i])
-	fmt.Println(a...)
-}
-
-func trace(p *parser, msg string) *parser {
-	p.printTrace(msg, "(")
-	p.indent++
-	return p
-}
-
-// Usage pattern: defer un(trace(p, "..."));
-func un(p *parser) {
-	p.indent--
-	p.printTrace(")")
-}
-
-// Advance to the next token.
-func (p *parser) next0() {
-	// Because of one-token look-ahead, print the previous token
-	// when tracing as it provides a more readable output. The
-	// very first token (!p.pos.IsValid()) is not initialized
-	// (it is token.ILLEGAL), so don't print it.
-	if p.trace && p.pos.IsValid() {
-		s := p.tok.String()
-		switch {
-		case p.tok.IsLiteral():
-			p.printTrace(s, p.lit)
-		case p.tok.IsOperator(), p.tok.IsKeyword():
-			p.printTrace("\"" + s + "\"")
-		default:
-			p.printTrace(s)
-		}
-	}
-
-	p.pos, p.tok, p.lit = p.scanner.Scan()
-}
-
-// Consume a comment and return it and the line on which it ends.
-func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
-	// /*-style comments may end on a different line than where they start.
-	// Scan the comment for '\n' chars and adjust endline accordingly.
-	endline = p.file.Line(p.pos)
-	if p.lit[1] == '*' {
-		// don't use range here - no need to decode Unicode code points
-		for i := 0; i < len(p.lit); i++ {
-			if p.lit[i] == '\n' {
-				endline++
-			}
-		}
-	}
-
-	comment = &ast.Comment{p.pos, p.lit}
-	p.next0()
-
-	return
-}
-
-// Consume a group of adjacent comments, add it to the parser's
-// comments list, and return it together with the line at which
-// the last comment in the group ends. An empty line or non-comment
-// token terminates a comment group.
-func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
-	var list []*ast.Comment
-	endline = p.file.Line(p.pos)
-	for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
-		var comment *ast.Comment
-		comment, endline = p.consumeComment()
-		list = append(list, comment)
-	}
-
-	// add comment group to the comments list
-	comments = &ast.CommentGroup{list}
-	p.comments = append(p.comments, comments)
-
-	return
-}
-
-// Advance to the next non-comment token. In the process, collect
-// any comment groups encountered, and remember the last lead and
-// line comments.
-//
-// A lead comment is a comment group that starts and ends in a
-// line without any other tokens and that is followed by a non-comment
-// token on the line immediately after the comment group.
-//
-// A line comment is a comment group that follows a non-comment
-// token on the same line, and that has no tokens after it on the line
-// where it ends.
-//
-// Lead and line comments may be considered documentation that is
-// stored in the AST.
-func (p *parser) next() {
-	p.leadComment = nil
-	p.lineComment = nil
-	line := p.file.Line(p.pos) // current line
-	p.next0()
-
-	if p.tok == token.COMMENT {
-		var comment *ast.CommentGroup
-		var endline int
-
-		if p.file.Line(p.pos) == line {
-			// The comment is on same line as the previous token; it
-			// cannot be a lead comment but may be a line comment.
-			comment, endline = p.consumeCommentGroup()
-			if p.file.Line(p.pos) != endline {
-				// The next token is on a different line, thus
-				// the last comment group is a line comment.
-				p.lineComment = comment
-			}
-		}
-
-		// consume successor comments, if any
-		endline = -1
-		for p.tok == token.COMMENT {
-			comment, endline = p.consumeCommentGroup()
-		}
-
-		if endline+1 == p.file.Line(p.pos) {
-			// The next token is following on the line immediately after the
-			// comment group, thus the last comment group is a lead comment.
-			p.leadComment = comment
-		}
-	}
-}
-
-func (p *parser) error(pos token.Pos, msg string) {
-	p.Error(p.file.Position(pos), msg)
-}
-
-func (p *parser) errorExpected(pos token.Pos, msg string) {
-	msg = "expected " + msg
-	if pos == p.pos {
-		// the error happened at the current position;
-		// make the error message more specific
-		if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
-			msg += ", found newline"
-		} else {
-			msg += ", found '" + p.tok.String() + "'"
-			if p.tok.IsLiteral() {
-				msg += " " + p.lit
-			}
-		}
-	}
-	p.error(pos, msg)
-}
-
-func (p *parser) expect(tok token.Token) token.Pos {
-	pos := p.pos
-	if p.tok != tok {
-		p.errorExpected(pos, "'"+tok.String()+"'")
-	}
-	p.next() // make progress
-	return pos
-}
-
-func (p *parser) expectSemi() {
-	if p.tok != token.RPAREN && p.tok != token.RBRACE {
-		p.expect(token.SEMICOLON)
-	}
-}
-
-func assert(cond bool, msg string) {
-	if !cond {
-		panic("go/parser internal error: " + msg)
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Identifiers
-
-func (p *parser) parseIdent() *ast.Ident {
-	pos := p.pos
-	name := "_"
-	if p.tok == token.IDENT {
-		name = p.lit
-		p.next()
-	} else {
-		p.expect(token.IDENT) // use expect() error handling
-	}
-	return &ast.Ident{pos, name, nil}
-}
-
-func (p *parser) parseIdentList() (list []*ast.Ident) {
-	if p.trace {
-		defer un(trace(p, "IdentList"))
-	}
-
-	list = append(list, p.parseIdent())
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.parseIdent())
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Common productions
-
-// If lhs is set, result list elements which are identifiers are not resolved.
-func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "ExpressionList"))
-	}
-
-	list = append(list, p.parseExpr(lhs))
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.parseExpr(lhs))
-	}
-
-	return
-}
-
-func (p *parser) parseLhsList() []ast.Expr {
-	list := p.parseExprList(true)
-	switch p.tok {
-	case token.DEFINE:
-		// lhs of a short variable declaration
-		p.shortVarDecl(p.makeIdentList(list))
-	case token.COLON:
-		// lhs of a label declaration or a communication clause of a select
-		// statement (parseLhsList is not called when parsing the case clause
-		// of a switch statement):
-		// - labels are declared by the caller of parseLhsList
-		// - for communication clauses, if there is a stand-alone identifier
-		//   followed by a colon, we have a syntax error; there is no need
-		//   to resolve the identifier in that case
-	default:
-		// identifiers must be declared elsewhere
-		for _, x := range list {
-			p.resolve(x)
-		}
-	}
-	return list
-}
-
-func (p *parser) parseRhsList() []ast.Expr {
-	return p.parseExprList(false)
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-func (p *parser) parseType() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Type"))
-	}
-
-	typ := p.tryType()
-
-	if typ == nil {
-		pos := p.pos
-		p.errorExpected(pos, "type")
-		p.next() // make progress
-		return &ast.BadExpr{pos, p.pos}
-	}
-
-	return typ
-}
-
-// If the result is an identifier, it is not resolved.
-func (p *parser) parseTypeName() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "TypeName"))
-	}
-
-	ident := p.parseIdent()
-	// don't resolve ident yet - it may be a parameter or field name
-
-	if p.tok == token.PERIOD {
-		// ident is a package name
-		p.next()
-		p.resolve(ident)
-		sel := p.parseIdent()
-		return &ast.SelectorExpr{ident, sel}
-	}
-
-	return ident
-}
-
-func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "ArrayType"))
-	}
-
-	lbrack := p.expect(token.LBRACK)
-	var len ast.Expr
-	if ellipsisOk && p.tok == token.ELLIPSIS {
-		len = &ast.Ellipsis{p.pos, nil}
-		p.next()
-	} else if p.tok != token.RBRACK {
-		len = p.parseRhs()
-	}
-	p.expect(token.RBRACK)
-	elt := p.parseType()
-
-	return &ast.ArrayType{lbrack, len, elt}
-}
-
-func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
-	idents := make([]*ast.Ident, len(list))
-	for i, x := range list {
-		ident, isIdent := x.(*ast.Ident)
-		if !isIdent {
-			pos := x.(ast.Expr).Pos()
-			p.errorExpected(pos, "identifier")
-			ident = &ast.Ident{pos, "_", nil}
-		}
-		idents[i] = ident
-	}
-	return idents
-}
-
-func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
-	if p.trace {
-		defer un(trace(p, "FieldDecl"))
-	}
-
-	doc := p.leadComment
-
-	// fields
-	list, typ := p.parseVarList(false)
-
-	// optional tag
-	var tag *ast.BasicLit
-	if p.tok == token.STRING {
-		tag = &ast.BasicLit{p.pos, p.tok, p.lit}
-		p.next()
-	}
-
-	// analyze case
-	var idents []*ast.Ident
-	if typ != nil {
-		// IdentifierList Type
-		idents = p.makeIdentList(list)
-	} else {
-		// ["*"] TypeName (AnonymousField)
-		typ = list[0] // we always have at least one element
-		p.resolve(typ)
-		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
-			pos := typ.Pos()
-			p.errorExpected(pos, "anonymous field")
-			typ = &ast.BadExpr{pos, list[n-1].End()}
-		}
-	}
-
-	p.expectSemi() // call before accessing p.linecomment
-
-	field := &ast.Field{doc, idents, typ, tag, p.lineComment}
-	p.declare(field, scope, ast.Var, idents...)
-
-	return field
-}
-
-func (p *parser) parseStructType() *ast.StructType {
-	if p.trace {
-		defer un(trace(p, "StructType"))
-	}
-
-	pos := p.expect(token.STRUCT)
-	lbrace := p.expect(token.LBRACE)
-	scope := ast.NewScope(nil) // struct scope
-	var list []*ast.Field
-	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
-		// a field declaration cannot start with a '(' but we accept
-		// it here for more robust parsing and better error messages
-		// (parseFieldDecl will check and complain if necessary)
-		list = append(list, p.parseFieldDecl(scope))
-	}
-	rbrace := p.expect(token.RBRACE)
-
-	// TODO(gri): store struct scope in AST
-	return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
-}
-
-func (p *parser) parsePointerType() *ast.StarExpr {
-	if p.trace {
-		defer un(trace(p, "PointerType"))
-	}
-
-	star := p.expect(token.MUL)
-	base := p.parseType()
-
-	return &ast.StarExpr{star, base}
-}
-
-func (p *parser) tryVarType(isParam bool) ast.Expr {
-	if isParam && p.tok == token.ELLIPSIS {
-		pos := p.pos
-		p.next()
-		typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
-		if typ == nil {
-			p.error(pos, "'...' parameter is missing type")
-			typ = &ast.BadExpr{pos, p.pos}
-		}
-		if p.tok != token.RPAREN {
-			p.error(pos, "can use '...' with last parameter type only")
-		}
-		return &ast.Ellipsis{pos, typ}
-	}
-	return p.tryIdentOrType(false)
-}
-
-func (p *parser) parseVarType(isParam bool) ast.Expr {
-	typ := p.tryVarType(isParam)
-	if typ == nil {
-		pos := p.pos
-		p.errorExpected(pos, "type")
-		p.next() // make progress
-		typ = &ast.BadExpr{pos, p.pos}
-	}
-	return typ
-}
-
-func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "VarList"))
-	}
-
-	// a list of identifiers looks like a list of type names
-	for {
-		// parseVarType accepts any type (including parenthesized ones)
-		// even though the syntax does not permit them here: we
-		// accept them all for more robust parsing and complain
-		// afterwards
-		list = append(list, p.parseVarType(isParam))
-		if p.tok != token.COMMA {
-			break
-		}
-		p.next()
-	}
-
-	// if we had a list of identifiers, it must be followed by a type
-	typ = p.tryVarType(isParam)
-	if typ != nil {
-		p.resolve(typ)
-	}
-
-	return
-}
-
-func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
-	if p.trace {
-		defer un(trace(p, "ParameterList"))
-	}
-
-	list, typ := p.parseVarList(ellipsisOk)
-	if typ != nil {
-		// IdentifierList Type
-		idents := p.makeIdentList(list)
-		field := &ast.Field{nil, idents, typ, nil, nil}
-		params = append(params, field)
-		// Go spec: The scope of an identifier denoting a function
-		// parameter or result variable is the function body.
-		p.declare(field, scope, ast.Var, idents...)
-		if p.tok == token.COMMA {
-			p.next()
-		}
-
-		for p.tok != token.RPAREN && p.tok != token.EOF {
-			idents := p.parseIdentList()
-			typ := p.parseVarType(ellipsisOk)
-			field := &ast.Field{nil, idents, typ, nil, nil}
-			params = append(params, field)
-			// Go spec: The scope of an identifier denoting a function
-			// parameter or result variable is the function body.
-			p.declare(field, scope, ast.Var, idents...)
-			if p.tok != token.COMMA {
-				break
-			}
-			p.next()
-		}
-
-	} else {
-		// Type { "," Type } (anonymous parameters)
-		params = make([]*ast.Field, len(list))
-		for i, x := range list {
-			p.resolve(x)
-			params[i] = &ast.Field{Type: x}
-		}
-	}
-
-	return
-}
-
-func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
-	if p.trace {
-		defer un(trace(p, "Parameters"))
-	}
-
-	var params []*ast.Field
-	lparen := p.expect(token.LPAREN)
-	if p.tok != token.RPAREN {
-		params = p.parseParameterList(scope, ellipsisOk)
-	}
-	rparen := p.expect(token.RPAREN)
-
-	return &ast.FieldList{lparen, params, rparen}
-}
-
-func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
-	if p.trace {
-		defer un(trace(p, "Result"))
-	}
-
-	if p.tok == token.LPAREN {
-		return p.parseParameters(scope, false)
-	}
-
-	typ := p.tryType()
-	if typ != nil {
-		list := make([]*ast.Field, 1)
-		list[0] = &ast.Field{Type: typ}
-		return &ast.FieldList{List: list}
-	}
-
-	return nil
-}
-
-func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
-	if p.trace {
-		defer un(trace(p, "Signature"))
-	}
-
-	params = p.parseParameters(scope, true)
-	results = p.parseResult(scope)
-
-	return
-}
-
-func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
-	if p.trace {
-		defer un(trace(p, "FuncType"))
-	}
-
-	pos := p.expect(token.FUNC)
-	scope := ast.NewScope(p.topScope) // function scope
-	params, results := p.parseSignature(scope)
-
-	return &ast.FuncType{pos, params, results}, scope
-}
-
-func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
-	if p.trace {
-		defer un(trace(p, "MethodSpec"))
-	}
-
-	doc := p.leadComment
-	var idents []*ast.Ident
-	var typ ast.Expr
-	x := p.parseTypeName()
-	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
-		// method
-		idents = []*ast.Ident{ident}
-		scope := ast.NewScope(nil) // method scope
-		params, results := p.parseSignature(scope)
-		typ = &ast.FuncType{token.NoPos, params, results}
-	} else {
-		// embedded interface
-		typ = x
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
-	p.declare(spec, scope, ast.Fun, idents...)
-
-	return spec
-}
-
-func (p *parser) parseInterfaceType() *ast.InterfaceType {
-	if p.trace {
-		defer un(trace(p, "InterfaceType"))
-	}
-
-	pos := p.expect(token.INTERFACE)
-	lbrace := p.expect(token.LBRACE)
-	scope := ast.NewScope(nil) // interface scope
-	var list []*ast.Field
-	for p.tok == token.IDENT {
-		list = append(list, p.parseMethodSpec(scope))
-	}
-	rbrace := p.expect(token.RBRACE)
-
-	// TODO(gri): store interface scope in AST
-	return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
-}
-
-func (p *parser) parseMapType() *ast.MapType {
-	if p.trace {
-		defer un(trace(p, "MapType"))
-	}
-
-	pos := p.expect(token.MAP)
-	p.expect(token.LBRACK)
-	key := p.parseType()
-	p.expect(token.RBRACK)
-	value := p.parseType()
-
-	return &ast.MapType{pos, key, value}
-}
-
-func (p *parser) parseChanType() *ast.ChanType {
-	if p.trace {
-		defer un(trace(p, "ChanType"))
-	}
-
-	pos := p.pos
-	dir := ast.SEND | ast.RECV
-	if p.tok == token.CHAN {
-		p.next()
-		if p.tok == token.ARROW {
-			p.next()
-			dir = ast.SEND
-		}
-	} else {
-		p.expect(token.ARROW)
-		p.expect(token.CHAN)
-		dir = ast.RECV
-	}
-	value := p.parseType()
-
-	return &ast.ChanType{pos, dir, value}
-}
-
-// If the result is an identifier, it is not resolved.
-func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
-	switch p.tok {
-	case token.IDENT:
-		return p.parseTypeName()
-	case token.LBRACK:
-		return p.parseArrayType(ellipsisOk)
-	case token.STRUCT:
-		return p.parseStructType()
-	case token.MUL:
-		return p.parsePointerType()
-	case token.FUNC:
-		typ, _ := p.parseFuncType()
-		return typ
-	case token.INTERFACE:
-		return p.parseInterfaceType()
-	case token.MAP:
-		return p.parseMapType()
-	case token.CHAN, token.ARROW:
-		return p.parseChanType()
-	case token.LPAREN:
-		lparen := p.pos
-		p.next()
-		typ := p.parseType()
-		rparen := p.expect(token.RPAREN)
-		return &ast.ParenExpr{lparen, typ, rparen}
-	}
-
-	// no type found
-	return nil
-}
-
-func (p *parser) tryType() ast.Expr {
-	typ := p.tryIdentOrType(false)
-	if typ != nil {
-		p.resolve(typ)
-	}
-	return typ
-}
-
-// ----------------------------------------------------------------------------
-// Blocks
-
-func (p *parser) parseStmtList() (list []ast.Stmt) {
-	if p.trace {
-		defer un(trace(p, "StatementList"))
-	}
-
-	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
-		list = append(list, p.parseStmt())
-	}
-
-	return
-}
-
-func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
-	if p.trace {
-		defer un(trace(p, "Body"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	p.topScope = scope // open function scope
-	p.openLabelScope()
-	list := p.parseStmtList()
-	p.closeLabelScope()
-	p.closeScope()
-	rbrace := p.expect(token.RBRACE)
-
-	return &ast.BlockStmt{lbrace, list, rbrace}
-}
-
-func (p *parser) parseBlockStmt() *ast.BlockStmt {
-	if p.trace {
-		defer un(trace(p, "BlockStmt"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	p.openScope()
-	list := p.parseStmtList()
-	p.closeScope()
-	rbrace := p.expect(token.RBRACE)
-
-	return &ast.BlockStmt{lbrace, list, rbrace}
-}
-
-// ----------------------------------------------------------------------------
-// Expressions
-
-func (p *parser) parseFuncTypeOrLit() ast.Expr {
-	if p.trace {
-		defer un(trace(p, "FuncTypeOrLit"))
-	}
-
-	typ, scope := p.parseFuncType()
-	if p.tok != token.LBRACE {
-		// function type only
-		return typ
-	}
-
-	p.exprLev++
-	body := p.parseBody(scope)
-	p.exprLev--
-
-	return &ast.FuncLit{typ, body}
-}
-
-// parseOperand may return an expression or a raw type (incl. array
-// types of the form [...]T. Callers must verify the result.
-// If lhs is set and the result is an identifier, it is not resolved.
-func (p *parser) parseOperand(lhs bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Operand"))
-	}
-
-	switch p.tok {
-	case token.IDENT:
-		x := p.parseIdent()
-		if !lhs {
-			p.resolve(x)
-		}
-		return x
-
-	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
-		x := &ast.BasicLit{p.pos, p.tok, p.lit}
-		p.next()
-		return x
-
-	case token.LPAREN:
-		lparen := p.pos
-		p.next()
-		p.exprLev++
-		x := p.parseRhs()
-		p.exprLev--
-		rparen := p.expect(token.RPAREN)
-		return &ast.ParenExpr{lparen, x, rparen}
-
-	case token.FUNC:
-		return p.parseFuncTypeOrLit()
-
-	default:
-		if typ := p.tryIdentOrType(true); typ != nil {
-			// could be type for composite literal or conversion
-			_, isIdent := typ.(*ast.Ident)
-			assert(!isIdent, "type cannot be identifier")
-			return typ
-		}
-	}
-
-	pos := p.pos
-	p.errorExpected(pos, "operand")
-	p.next() // make progress
-	return &ast.BadExpr{pos, p.pos}
-}
-
-func (p *parser) parseSelector(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Selector"))
-	}
-
-	sel := p.parseIdent()
-
-	return &ast.SelectorExpr{x, sel}
-}
-
-func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "TypeAssertion"))
-	}
-
-	p.expect(token.LPAREN)
-	var typ ast.Expr
-	if p.tok == token.TYPE {
-		// type switch: typ == nil
-		p.next()
-	} else {
-		typ = p.parseType()
-	}
-	p.expect(token.RPAREN)
-
-	return &ast.TypeAssertExpr{x, typ}
-}
-
-func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "IndexOrSlice"))
-	}
-
-	lbrack := p.expect(token.LBRACK)
-	p.exprLev++
-	var low, high ast.Expr
-	isSlice := false
-	if p.tok != token.COLON {
-		low = p.parseRhs()
-	}
-	if p.tok == token.COLON {
-		isSlice = true
-		p.next()
-		if p.tok != token.RBRACK {
-			high = p.parseRhs()
-		}
-	}
-	p.exprLev--
-	rbrack := p.expect(token.RBRACK)
-
-	if isSlice {
-		return &ast.SliceExpr{x, lbrack, low, high, rbrack}
-	}
-	return &ast.IndexExpr{x, lbrack, low, rbrack}
-}
-
-func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
-	if p.trace {
-		defer un(trace(p, "CallOrConversion"))
-	}
-
-	lparen := p.expect(token.LPAREN)
-	p.exprLev++
-	var list []ast.Expr
-	var ellipsis token.Pos
-	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
-		list = append(list, p.parseRhs())
-		if p.tok == token.ELLIPSIS {
-			ellipsis = p.pos
-			p.next()
-		}
-		if p.tok != token.COMMA {
-			break
-		}
-		p.next()
-	}
-	p.exprLev--
-	rparen := p.expect(token.RPAREN)
-
-	return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
-}
-
-func (p *parser) parseElement(keyOk bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Element"))
-	}
-
-	if p.tok == token.LBRACE {
-		return p.parseLiteralValue(nil)
-	}
-
-	x := p.parseExpr(keyOk) // don't resolve if map key
-	if keyOk {
-		if p.tok == token.COLON {
-			colon := p.pos
-			p.next()
-			return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
-		}
-		p.resolve(x) // not a map key
-	}
-
-	return x
-}
-
-func (p *parser) parseElementList() (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "ElementList"))
-	}
-
-	for p.tok != token.RBRACE && p.tok != token.EOF {
-		list = append(list, p.parseElement(true))
-		if p.tok != token.COMMA {
-			break
-		}
-		p.next()
-	}
-
-	return
-}
-
-func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "LiteralValue"))
-	}
-
-	lbrace := p.expect(token.LBRACE)
-	var elts []ast.Expr
-	p.exprLev++
-	if p.tok != token.RBRACE {
-		elts = p.parseElementList()
-	}
-	p.exprLev--
-	rbrace := p.expect(token.RBRACE)
-	return &ast.CompositeLit{typ, lbrace, elts, rbrace}
-}
-
-// checkExpr checks that x is an expression (and not a type).
-func (p *parser) checkExpr(x ast.Expr) ast.Expr {
-	switch t := unparen(x).(type) {
-	case *ast.BadExpr:
-	case *ast.Ident:
-	case *ast.BasicLit:
-	case *ast.FuncLit:
-	case *ast.CompositeLit:
-	case *ast.ParenExpr:
-		panic("unreachable")
-	case *ast.SelectorExpr:
-	case *ast.IndexExpr:
-	case *ast.SliceExpr:
-	case *ast.TypeAssertExpr:
-		if t.Type == nil {
-			// the form X.(type) is only allowed in type switch expressions
-			p.errorExpected(x.Pos(), "expression")
-			x = &ast.BadExpr{x.Pos(), x.End()}
-		}
-	case *ast.CallExpr:
-	case *ast.StarExpr:
-	case *ast.UnaryExpr:
-		if t.Op == token.RANGE {
-			// the range operator is only allowed at the top of a for statement
-			p.errorExpected(x.Pos(), "expression")
-			x = &ast.BadExpr{x.Pos(), x.End()}
-		}
-	case *ast.BinaryExpr:
-	default:
-		// all other nodes are not proper expressions
-		p.errorExpected(x.Pos(), "expression")
-		x = &ast.BadExpr{x.Pos(), x.End()}
-	}
-	return x
-}
-
-// isTypeName reports whether x is a (qualified) TypeName.
-func isTypeName(x ast.Expr) bool {
-	switch t := x.(type) {
-	case *ast.BadExpr:
-	case *ast.Ident:
-	case *ast.SelectorExpr:
-		_, isIdent := t.X.(*ast.Ident)
-		return isIdent
-	default:
-		return false // all other nodes are not type names
-	}
-	return true
-}
-
-// isLiteralType reports whether x is a legal composite literal type.
-func isLiteralType(x ast.Expr) bool {
-	switch t := x.(type) {
-	case *ast.BadExpr:
-	case *ast.Ident:
-	case *ast.SelectorExpr:
-		_, isIdent := t.X.(*ast.Ident)
-		return isIdent
-	case *ast.ArrayType:
-	case *ast.StructType:
-	case *ast.MapType:
-	default:
-		return false // all other nodes are not legal composite literal types
-	}
-	return true
-}
-
-// If x is of the form *T, deref returns T, otherwise it returns x.
-func deref(x ast.Expr) ast.Expr {
-	if p, isPtr := x.(*ast.StarExpr); isPtr {
-		x = p.X
-	}
-	return x
-}
-
-// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
-func unparen(x ast.Expr) ast.Expr {
-	if p, isParen := x.(*ast.ParenExpr); isParen {
-		x = unparen(p.X)
-	}
-	return x
-}
-
-// checkExprOrType checks that x is an expression or a type
-// (and not a raw type such as [...]T).
-func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
-	switch t := unparen(x).(type) {
-	case *ast.ParenExpr:
-		panic("unreachable")
-	case *ast.UnaryExpr:
-		if t.Op == token.RANGE {
-			// the range operator is only allowed at the top of a for statement
-			p.errorExpected(x.Pos(), "expression")
-			x = &ast.BadExpr{x.Pos(), x.End()}
-		}
-	case *ast.ArrayType:
-		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
-			p.error(len.Pos(), "expected array length, found '...'")
-			x = &ast.BadExpr{x.Pos(), x.End()}
-		}
-	}
-
-	// all other nodes are expressions or types
-	return x
-}
-
-// If lhs is set and the result is an identifier, it is not resolved.
-func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "PrimaryExpr"))
-	}
-
-	x := p.parseOperand(lhs)
-L:
-	for {
-		switch p.tok {
-		case token.PERIOD:
-			p.next()
-			if lhs {
-				p.resolve(x)
-			}
-			switch p.tok {
-			case token.IDENT:
-				x = p.parseSelector(p.checkExpr(x))
-			case token.LPAREN:
-				x = p.parseTypeAssertion(p.checkExpr(x))
-			default:
-				pos := p.pos
-				p.next() // make progress
-				p.errorExpected(pos, "selector or type assertion")
-				x = &ast.BadExpr{pos, p.pos}
-			}
-		case token.LBRACK:
-			if lhs {
-				p.resolve(x)
-			}
-			x = p.parseIndexOrSlice(p.checkExpr(x))
-		case token.LPAREN:
-			if lhs {
-				p.resolve(x)
-			}
-			x = p.parseCallOrConversion(p.checkExprOrType(x))
-		case token.LBRACE:
-			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
-				if lhs {
-					p.resolve(x)
-				}
-				x = p.parseLiteralValue(x)
-			} else {
-				break L
-			}
-		default:
-			break L
-		}
-		lhs = false // no need to try to resolve again
-	}
-
-	return x
-}
-
-// If lhs is set and the result is an identifier, it is not resolved.
-func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "UnaryExpr"))
-	}
-
-	switch p.tok {
-	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
-		pos, op := p.pos, p.tok
-		p.next()
-		x := p.parseUnaryExpr(false)
-		return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
-
-	case token.ARROW:
-		// channel type or receive expression
-		pos := p.pos
-		p.next()
-		if p.tok == token.CHAN {
-			p.next()
-			value := p.parseType()
-			return &ast.ChanType{pos, ast.RECV, value}
-		}
-
-		x := p.parseUnaryExpr(false)
-		return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
-
-	case token.MUL:
-		// pointer type or unary "*" expression
-		pos := p.pos
-		p.next()
-		x := p.parseUnaryExpr(false)
-		return &ast.StarExpr{pos, p.checkExprOrType(x)}
-	}
-
-	return p.parsePrimaryExpr(lhs)
-}
-
-// If lhs is set and the result is an identifier, it is not resolved.
-func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "BinaryExpr"))
-	}
-
-	x := p.parseUnaryExpr(lhs)
-	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
-		for p.tok.Precedence() == prec {
-			pos, op := p.pos, p.tok
-			p.next()
-			if lhs {
-				p.resolve(x)
-				lhs = false
-			}
-			y := p.parseBinaryExpr(false, prec+1)
-			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
-		}
-	}
-
-	return x
-}
-
-// If lhs is set and the result is an identifier, it is not resolved.
-// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
-// should reject when a type/raw type is obviously not allowed
-func (p *parser) parseExpr(lhs bool) ast.Expr {
-	if p.trace {
-		defer un(trace(p, "Expression"))
-	}
-
-	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
-}
-
-func (p *parser) parseRhs() ast.Expr {
-	return p.parseExpr(false)
-}
-
-// ----------------------------------------------------------------------------
-// Statements
-
-func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "SimpleStmt"))
-	}
-
-	x := p.parseLhsList()
-
-	switch p.tok {
-	case
-		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
-		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
-		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
-		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
-		// assignment statement
-		pos, tok := p.pos, p.tok
-		p.next()
-		y := p.parseRhsList()
-		return &ast.AssignStmt{x, pos, tok, y}
-	}
-
-	if len(x) > 1 {
-		p.errorExpected(x[0].Pos(), "1 expression")
-		// continue with first expression
-	}
-
-	switch p.tok {
-	case token.COLON:
-		// labeled statement
-		colon := p.pos
-		p.next()
-		if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
-			// Go spec: The scope of a label is the body of the function
-			// in which it is declared and excludes the body of any nested
-			// function.
-			stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
-			p.declare(stmt, p.labelScope, ast.Lbl, label)
-			return stmt
-		}
-		p.error(x[0].Pos(), "illegal label declaration")
-		return &ast.BadStmt{x[0].Pos(), colon + 1}
-
-	case token.ARROW:
-		// send statement
-		arrow := p.pos
-		p.next() // consume "<-"
-		y := p.parseRhs()
-		return &ast.SendStmt{x[0], arrow, y}
-
-	case token.INC, token.DEC:
-		// increment or decrement
-		s := &ast.IncDecStmt{x[0], p.pos, p.tok}
-		p.next() // consume "++" or "--"
-		return s
-	}
-
-	// expression
-	return &ast.ExprStmt{x[0]}
-}
-
-func (p *parser) parseCallExpr() *ast.CallExpr {
-	x := p.parseRhs()
-	if call, isCall := x.(*ast.CallExpr); isCall {
-		return call
-	}
-	p.errorExpected(x.Pos(), "function/method call")
-	return nil
-}
-
-func (p *parser) parseGoStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "GoStmt"))
-	}
-
-	pos := p.expect(token.GO)
-	call := p.parseCallExpr()
-	p.expectSemi()
-	if call == nil {
-		return &ast.BadStmt{pos, pos + 2} // len("go")
-	}
-
-	return &ast.GoStmt{pos, call}
-}
-
-func (p *parser) parseDeferStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "DeferStmt"))
-	}
-
-	pos := p.expect(token.DEFER)
-	call := p.parseCallExpr()
-	p.expectSemi()
-	if call == nil {
-		return &ast.BadStmt{pos, pos + 5} // len("defer")
-	}
-
-	return &ast.DeferStmt{pos, call}
-}
-
-func (p *parser) parseReturnStmt() *ast.ReturnStmt {
-	if p.trace {
-		defer un(trace(p, "ReturnStmt"))
-	}
-
-	pos := p.pos
-	p.expect(token.RETURN)
-	var x []ast.Expr
-	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
-		x = p.parseRhsList()
-	}
-	p.expectSemi()
-
-	return &ast.ReturnStmt{pos, x}
-}
-
-func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
-	if p.trace {
-		defer un(trace(p, "BranchStmt"))
-	}
-
-	pos := p.expect(tok)
-	var label *ast.Ident
-	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
-		label = p.parseIdent()
-		// add to list of unresolved targets
-		n := len(p.targetStack) - 1
-		p.targetStack[n] = append(p.targetStack[n], label)
-	}
-	p.expectSemi()
-
-	return &ast.BranchStmt{pos, tok, label}
-}
-
-func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
-	if s == nil {
-		return nil
-	}
-	if es, isExpr := s.(*ast.ExprStmt); isExpr {
-		return p.checkExpr(es.X)
-	}
-	p.error(s.Pos(), "expected condition, found simple statement")
-	return &ast.BadExpr{s.Pos(), s.End()}
-}
-
-func (p *parser) parseIfStmt() *ast.IfStmt {
-	if p.trace {
-		defer un(trace(p, "IfStmt"))
-	}
-
-	pos := p.expect(token.IF)
-	p.openScope()
-	defer p.closeScope()
-
-	var s ast.Stmt
-	var x ast.Expr
-	{
-		prevLev := p.exprLev
-		p.exprLev = -1
-		if p.tok == token.SEMICOLON {
-			p.next()
-			x = p.parseRhs()
-		} else {
-			s = p.parseSimpleStmt(false)
-			if p.tok == token.SEMICOLON {
-				p.next()
-				x = p.parseRhs()
-			} else {
-				x = p.makeExpr(s)
-				s = nil
-			}
-		}
-		p.exprLev = prevLev
-	}
-
-	body := p.parseBlockStmt()
-	var else_ ast.Stmt
-	if p.tok == token.ELSE {
-		p.next()
-		else_ = p.parseStmt()
-	} else {
-		p.expectSemi()
-	}
-
-	return &ast.IfStmt{pos, s, x, body, else_}
-}
-
-func (p *parser) parseTypeList() (list []ast.Expr) {
-	if p.trace {
-		defer un(trace(p, "TypeList"))
-	}
-
-	list = append(list, p.parseType())
-	for p.tok == token.COMMA {
-		p.next()
-		list = append(list, p.parseType())
-	}
-
-	return
-}
-
-func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
-	if p.trace {
-		defer un(trace(p, "CaseClause"))
-	}
-
-	pos := p.pos
-	var list []ast.Expr
-	if p.tok == token.CASE {
-		p.next()
-		if exprSwitch {
-			list = p.parseRhsList()
-		} else {
-			list = p.parseTypeList()
-		}
-	} else {
-		p.expect(token.DEFAULT)
-	}
-
-	colon := p.expect(token.COLON)
-	p.openScope()
-	body := p.parseStmtList()
-	p.closeScope()
-
-	return &ast.CaseClause{pos, list, colon, body}
-}
-
-func isExprSwitch(s ast.Stmt) bool {
-	if s == nil {
-		return true
-	}
-	if e, ok := s.(*ast.ExprStmt); ok {
-		if a, ok := e.X.(*ast.TypeAssertExpr); ok {
-			return a.Type != nil // regular type assertion
-		}
-		return true
-	}
-	return false
-}
-
-func (p *parser) parseSwitchStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "SwitchStmt"))
-	}
-
-	pos := p.expect(token.SWITCH)
-	p.openScope()
-	defer p.closeScope()
-
-	var s1, s2 ast.Stmt
-	if p.tok != token.LBRACE {
-		prevLev := p.exprLev
-		p.exprLev = -1
-		if p.tok != token.SEMICOLON {
-			s2 = p.parseSimpleStmt(false)
-		}
-		if p.tok == token.SEMICOLON {
-			p.next()
-			s1 = s2
-			s2 = nil
-			if p.tok != token.LBRACE {
-				s2 = p.parseSimpleStmt(false)
-			}
-		}
-		p.exprLev = prevLev
-	}
-
-	exprSwitch := isExprSwitch(s2)
-	lbrace := p.expect(token.LBRACE)
-	var list []ast.Stmt
-	for p.tok == token.CASE || p.tok == token.DEFAULT {
-		list = append(list, p.parseCaseClause(exprSwitch))
-	}
-	rbrace := p.expect(token.RBRACE)
-	p.expectSemi()
-	body := &ast.BlockStmt{lbrace, list, rbrace}
-
-	if exprSwitch {
-		return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
-	}
-	// type switch
-	// TODO(gri): do all the checks!
-	return &ast.TypeSwitchStmt{pos, s1, s2, body}
-}
-
-func (p *parser) parseCommClause() *ast.CommClause {
-	if p.trace {
-		defer un(trace(p, "CommClause"))
-	}
-
-	p.openScope()
-	pos := p.pos
-	var comm ast.Stmt
-	if p.tok == token.CASE {
-		p.next()
-		lhs := p.parseLhsList()
-		if p.tok == token.ARROW {
-			// SendStmt
-			if len(lhs) > 1 {
-				p.errorExpected(lhs[0].Pos(), "1 expression")
-				// continue with first expression
-			}
-			arrow := p.pos
-			p.next()
-			rhs := p.parseRhs()
-			comm = &ast.SendStmt{lhs[0], arrow, rhs}
-		} else {
-			// RecvStmt
-			pos := p.pos
-			tok := p.tok
-			var rhs ast.Expr
-			if tok == token.ASSIGN || tok == token.DEFINE {
-				// RecvStmt with assignment
-				if len(lhs) > 2 {
-					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
-					// continue with first two expressions
-					lhs = lhs[0:2]
-				}
-				p.next()
-				rhs = p.parseRhs()
-			} else {
-				// rhs must be single receive operation
-				if len(lhs) > 1 {
-					p.errorExpected(lhs[0].Pos(), "1 expression")
-					// continue with first expression
-				}
-				rhs = lhs[0]
-				lhs = nil // there is no lhs
-			}
-			if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
-				p.errorExpected(rhs.Pos(), "send or receive operation")
-				rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
-			}
-			if lhs != nil {
-				comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
-			} else {
-				comm = &ast.ExprStmt{rhs}
-			}
-		}
-	} else {
-		p.expect(token.DEFAULT)
-	}
-
-	colon := p.expect(token.COLON)
-	body := p.parseStmtList()
-	p.closeScope()
-
-	return &ast.CommClause{pos, comm, colon, body}
-}
-
-func (p *parser) parseSelectStmt() *ast.SelectStmt {
-	if p.trace {
-		defer un(trace(p, "SelectStmt"))
-	}
-
-	pos := p.expect(token.SELECT)
-	lbrace := p.expect(token.LBRACE)
-	var list []ast.Stmt
-	for p.tok == token.CASE || p.tok == token.DEFAULT {
-		list = append(list, p.parseCommClause())
-	}
-	rbrace := p.expect(token.RBRACE)
-	p.expectSemi()
-	body := &ast.BlockStmt{lbrace, list, rbrace}
-
-	return &ast.SelectStmt{pos, body}
-}
-
-func (p *parser) parseForStmt() ast.Stmt {
-	if p.trace {
-		defer un(trace(p, "ForStmt"))
-	}
-
-	pos := p.expect(token.FOR)
-	p.openScope()
-	defer p.closeScope()
-
-	var s1, s2, s3 ast.Stmt
-	if p.tok != token.LBRACE {
-		prevLev := p.exprLev
-		p.exprLev = -1
-		if p.tok != token.SEMICOLON {
-			s2 = p.parseSimpleStmt(false)
-		}
-		if p.tok == token.SEMICOLON {
-			p.next()
-			s1 = s2
-			s2 = nil
-			if p.tok != token.SEMICOLON {
-				s2 = p.parseSimpleStmt(false)
-			}
-			p.expectSemi()
-			if p.tok != token.LBRACE {
-				s3 = p.parseSimpleStmt(false)
-			}
-		}
-		p.exprLev = prevLev
-	}
-
-	body := p.parseBlockStmt()
-	p.expectSemi()
-
-	if as, isAssign := s2.(*ast.AssignStmt); isAssign {
-		// possibly a for statement with a range clause; check assignment operator
-		if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
-			p.errorExpected(as.TokPos, "'=' or ':='")
-			return &ast.BadStmt{pos, body.End()}
-		}
-		// check lhs
-		var key, value ast.Expr
-		switch len(as.Lhs) {
-		case 2:
-			key, value = as.Lhs[0], as.Lhs[1]
-		case 1:
-			key = as.Lhs[0]
-		default:
-			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
-			return &ast.BadStmt{pos, body.End()}
-		}
-		// check rhs
-		if len(as.Rhs) != 1 {
-			p.errorExpected(as.Rhs[0].Pos(), "1 expression")
-			return &ast.BadStmt{pos, body.End()}
-		}
-		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
-			// rhs is range expression
-			// (any short variable declaration was handled by parseSimpleStat above)
-			return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
-		}
-		p.errorExpected(s2.Pos(), "range clause")
-		return &ast.BadStmt{pos, body.End()}
-	}
-
-	// regular for statement
-	return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
-}
-
-func (p *parser) parseStmt() (s ast.Stmt) {
-	if p.trace {
-		defer un(trace(p, "Statement"))
-	}
-
-	switch p.tok {
-	case token.CONST, token.TYPE, token.VAR:
-		s = &ast.DeclStmt{p.parseDecl()}
-	case
-		// tokens that may start a top-level expression
-		token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
-		token.LBRACK, token.STRUCT, // composite type
-		token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
-		s = p.parseSimpleStmt(true)
-		// because of the required look-ahead, labeled statements are
-		// parsed by parseSimpleStmt - don't expect a semicolon after
-		// them
-		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
-			p.expectSemi()
-		}
-	case token.GO:
-		s = p.parseGoStmt()
-	case token.DEFER:
-		s = p.parseDeferStmt()
-	case token.RETURN:
-		s = p.parseReturnStmt()
-	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
-		s = p.parseBranchStmt(p.tok)
-	case token.LBRACE:
-		s = p.parseBlockStmt()
-		p.expectSemi()
-	case token.IF:
-		s = p.parseIfStmt()
-	case token.SWITCH:
-		s = p.parseSwitchStmt()
-	case token.SELECT:
-		s = p.parseSelectStmt()
-	case token.FOR:
-		s = p.parseForStmt()
-	case token.SEMICOLON:
-		s = &ast.EmptyStmt{p.pos}
-		p.next()
-	case token.RBRACE:
-		// a semicolon may be omitted before a closing "}"
-		s = &ast.EmptyStmt{p.pos}
-	default:
-		// no statement found
-		pos := p.pos
-		p.errorExpected(pos, "statement")
-		p.next() // make progress
-		s = &ast.BadStmt{pos, p.pos}
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
-
-func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "ImportSpec"))
-	}
-
-	var ident *ast.Ident
-	switch p.tok {
-	case token.PERIOD:
-		ident = &ast.Ident{p.pos, ".", nil}
-		p.next()
-	case token.IDENT:
-		ident = p.parseIdent()
-	}
-
-	var path *ast.BasicLit
-	if p.tok == token.STRING {
-		path = &ast.BasicLit{p.pos, p.tok, p.lit}
-		p.next()
-	} else {
-		p.expect(token.STRING) // use expect() error handling
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	// collect imports
-	spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
-	p.imports = append(p.imports, spec)
-
-	return spec
-}
-
-func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "ConstSpec"))
-	}
-
-	idents := p.parseIdentList()
-	typ := p.tryType()
-	var values []ast.Expr
-	if typ != nil || p.tok == token.ASSIGN || iota == 0 {
-		p.expect(token.ASSIGN)
-		values = p.parseRhsList()
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	// Go spec: The scope of a constant or variable identifier declared inside
-	// a function begins at the end of the ConstSpec or VarSpec and ends at
-	// the end of the innermost containing block.
-	// (Global identifiers are resolved in a separate phase after parsing.)
-	spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
-	p.declare(spec, p.topScope, ast.Con, idents...)
-
-	return spec
-}
-
-func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "TypeSpec"))
-	}
-
-	ident := p.parseIdent()
-
-	// Go spec: The scope of a type identifier declared inside a function begins
-	// at the identifier in the TypeSpec and ends at the end of the innermost
-	// containing block.
-	// (Global identifiers are resolved in a separate phase after parsing.)
-	spec := &ast.TypeSpec{doc, ident, nil, nil}
-	p.declare(spec, p.topScope, ast.Typ, ident)
-
-	spec.Type = p.parseType()
-	p.expectSemi() // call before accessing p.linecomment
-	spec.Comment = p.lineComment
-
-	return spec
-}
-
-func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
-	if p.trace {
-		defer un(trace(p, "VarSpec"))
-	}
-
-	idents := p.parseIdentList()
-	typ := p.tryType()
-	var values []ast.Expr
-	if typ == nil || p.tok == token.ASSIGN {
-		p.expect(token.ASSIGN)
-		values = p.parseRhsList()
-	}
-	p.expectSemi() // call before accessing p.linecomment
-
-	// Go spec: The scope of a constant or variable identifier declared inside
-	// a function begins at the end of the ConstSpec or VarSpec and ends at
-	// the end of the innermost containing block.
-	// (Global identifiers are resolved in a separate phase after parsing.)
-	spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
-	p.declare(spec, p.topScope, ast.Var, idents...)
-
-	return spec
-}
-
-func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
-	if p.trace {
-		defer un(trace(p, "GenDecl("+keyword.String()+")"))
-	}
-
-	doc := p.leadComment
-	pos := p.expect(keyword)
-	var lparen, rparen token.Pos
-	var list []ast.Spec
-	if p.tok == token.LPAREN {
-		lparen = p.pos
-		p.next()
-		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
-			list = append(list, f(p, p.leadComment, iota))
-		}
-		rparen = p.expect(token.RPAREN)
-		p.expectSemi()
-	} else {
-		list = append(list, f(p, nil, 0))
-	}
-
-	return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
-}
-
-func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
-	if p.trace {
-		defer un(trace(p, "Receiver"))
-	}
-
-	pos := p.pos
-	par := p.parseParameters(scope, false)
-
-	// must have exactly one receiver
-	if par.NumFields() != 1 {
-		p.errorExpected(pos, "exactly one receiver")
-		// TODO determine a better range for BadExpr below
-		par.List = []*ast.Field{{Type: &ast.BadExpr{pos, pos}}}
-		return par
-	}
-
-	// recv type must be of the form ["*"] identifier
-	recv := par.List[0]
-	base := deref(recv.Type)
-	if _, isIdent := base.(*ast.Ident); !isIdent {
-		p.errorExpected(base.Pos(), "(unqualified) identifier")
-		par.List = []*ast.Field{{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
-	}
-
-	return par
-}
-
-func (p *parser) parseFuncDecl() *ast.FuncDecl {
-	if p.trace {
-		defer un(trace(p, "FunctionDecl"))
-	}
-
-	doc := p.leadComment
-	pos := p.expect(token.FUNC)
-	scope := ast.NewScope(p.topScope) // function scope
-
-	var recv *ast.FieldList
-	if p.tok == token.LPAREN {
-		recv = p.parseReceiver(scope)
-	}
-
-	ident := p.parseIdent()
-
-	params, results := p.parseSignature(scope)
-
-	var body *ast.BlockStmt
-	if p.tok == token.LBRACE {
-		body = p.parseBody(scope)
-	}
-	p.expectSemi()
-
-	decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
-	if recv == nil {
-		// Go spec: The scope of an identifier denoting a constant, type,
-		// variable, or function (but not method) declared at top level
-		// (outside any function) is the package block.
-		//
-		// init() functions cannot be referred to and there may
-		// be more than one - don't put them in the pkgScope
-		if ident.Name != "init" {
-			p.declare(decl, p.pkgScope, ast.Fun, ident)
-		}
-	}
-
-	return decl
-}
-
-func (p *parser) parseDecl() ast.Decl {
-	if p.trace {
-		defer un(trace(p, "Declaration"))
-	}
-
-	var f parseSpecFunction
-	switch p.tok {
-	case token.CONST:
-		f = parseConstSpec
-
-	case token.TYPE:
-		f = parseTypeSpec
-
-	case token.VAR:
-		f = parseVarSpec
-
-	case token.FUNC:
-		return p.parseFuncDecl()
-
-	default:
-		pos := p.pos
-		p.errorExpected(pos, "declaration")
-		p.next() // make progress
-		decl := &ast.BadDecl{pos, p.pos}
-		return decl
-	}
-
-	return p.parseGenDecl(p.tok, f)
-}
-
-func (p *parser) parseDeclList() (list []ast.Decl) {
-	if p.trace {
-		defer un(trace(p, "DeclList"))
-	}
-
-	for p.tok != token.EOF {
-		list = append(list, p.parseDecl())
-	}
-
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Source files
-
-func (p *parser) parseFile() *ast.File {
-	if p.trace {
-		defer un(trace(p, "File"))
-	}
-
-	// package clause
-	doc := p.leadComment
-	pos := p.expect(token.PACKAGE)
-	// Go spec: The package clause is not a declaration;
-	// the package name does not appear in any scope.
-	ident := p.parseIdent()
-	if ident.Name == "_" {
-		p.error(p.pos, "invalid package name _")
-	}
-	p.expectSemi()
-
-	var decls []ast.Decl
-
-	// Don't bother parsing the rest if we had errors already.
-	// Likely not a Go source file at all.
-
-	if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
-		// import decls
-		for p.tok == token.IMPORT {
-			decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
-		}
-
-		if p.mode&ImportsOnly == 0 {
-			// rest of package body
-			for p.tok != token.EOF {
-				decls = append(decls, p.parseDecl())
-			}
-		}
-	}
-
-	assert(p.topScope == p.pkgScope, "imbalanced scopes")
-
-	// resolve global identifiers within the same file
-	i := 0
-	for _, ident := range p.unresolved {
-		// i <= index for current ident
-		assert(ident.Obj == unresolved, "object already resolved")
-		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
-		if ident.Obj == nil {
-			p.unresolved[i] = ident
-			i++
-		}
-	}
-
-	// TODO(gri): store p.imports in AST
-	return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
-}
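
The file above is a frozen copy of an old parser, kept only as test input for the backported go/printer. After this deletion, the standard go/parser provides the equivalent functionality; a minimal sketch (the file name and source below are made up for illustration):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "log"
    )

    func main() {
        const src = "package p\nfunc Add(a, b int) int { return a + b }\n"
        fset := token.NewFileSet()
        // Positions are reported relative to fset.
        f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
        if err != nil {
            log.Fatal(err)
        }
        // Walk the AST and report each function declaration.
        ast.Inspect(f, func(n ast.Node) bool {
            if fn, ok := n.(*ast.FuncDecl); ok {
                fmt.Printf("%s: func %s\n", fset.Position(fn.Pos()), fn.Name.Name)
            }
            return true
        })
    }
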
diff --git a/internal/backport/go/printer/testdata/slow.golden b/internal/backport/go/printer/testdata/slow.golden
deleted file mode 100644
index 43a15cb..0000000
--- a/internal/backport/go/printer/testdata/slow.golden
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package deepequal_test
-
-import (
-	"testing"
-	"google3/spam/archer/frontend/deepequal"
-)
-
-func TestTwoNilValues(t *testing.T) {
-	if err := deepequal.Check(nil, nil); err != nil {
-		t.Errorf("expected nil, saw %v", err)
-	}
-}
-
-type Foo struct {
-	bar	*Bar
-	bang	*Bar
-}
-
-type Bar struct {
-	baz	*Baz
-	foo	[]*Foo
-}
-
-type Baz struct {
-	entries		map[int]interface{}
-	whatever	string
-}
-
-func newFoo() *Foo {
-	return &Foo{bar: &Bar{baz: &Baz{
-		entries: map[int]interface{}{
-			42:	&Foo{},
-			21:	&Bar{},
-			11:	&Baz{whatever: "it's just a test"}}}},
-		bang: &Bar{foo: []*Foo{
-			&Foo{bar: &Bar{baz: &Baz{
-				entries: map[int]interface{}{
-					43:	&Foo{},
-					22:	&Bar{},
-					13:	&Baz{whatever: "this is nuts"}}}},
-				bang: &Bar{foo: []*Foo{
-					&Foo{bar: &Bar{baz: &Baz{
-						entries: map[int]interface{}{
-							61:	&Foo{},
-							71:	&Bar{},
-							11:	&Baz{whatever: "no, it's Go"}}}},
-						bang: &Bar{foo: []*Foo{
-							&Foo{bar: &Bar{baz: &Baz{
-								entries: map[int]interface{}{
-									0:	&Foo{},
-									-2:	&Bar{},
-									-11:	&Baz{whatever: "we need to go deeper"}}}},
-								bang: &Bar{foo: []*Foo{
-									&Foo{bar: &Bar{baz: &Baz{
-										entries: map[int]interface{}{
-											-2:	&Foo{},
-											-5:	&Bar{},
-											-7:	&Baz{whatever: "are you serious?"}}}},
-										bang:	&Bar{foo: []*Foo{}}},
-									&Foo{bar: &Bar{baz: &Baz{
-										entries: map[int]interface{}{
-											-100:	&Foo{},
-											50:	&Bar{},
-											20:	&Baz{whatever: "na, not really ..."}}}},
-										bang:	&Bar{foo: []*Foo{}}}}}}}}},
-					&Foo{bar: &Bar{baz: &Baz{
-						entries: map[int]interface{}{
-							2:	&Foo{},
-							1:	&Bar{},
-							-1:	&Baz{whatever: "... it's just a test."}}}},
-						bang:	&Bar{foo: []*Foo{}}}}}}}}}
-}
-
-func TestElaborate(t *testing.T) {
-	a := newFoo()
-	b := newFoo()
-
-	if err := deepequal.Check(a, b); err != nil {
-		t.Errorf("expected nil, saw %v", err)
-	}
-}
diff --git a/internal/backport/go/printer/testdata/slow.input b/internal/backport/go/printer/testdata/slow.input
deleted file mode 100644
index 0e5a23d..0000000
--- a/internal/backport/go/printer/testdata/slow.input
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package deepequal_test
-
-import (
-        "testing"
-        "google3/spam/archer/frontend/deepequal"
-)
-
-func TestTwoNilValues(t *testing.T) {
-        if err := deepequal.Check(nil, nil); err != nil {
-                t.Errorf("expected nil, saw %v", err)
-        }
-}
-
-type Foo struct {
-        bar *Bar
-        bang *Bar
-}
-
-type Bar struct {
-        baz *Baz
-        foo []*Foo
-}
-
-type Baz struct {
-        entries  map[int]interface{}
-        whatever string
-}
-
-func newFoo() (*Foo) {
-return &Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
-42: &Foo{},
-21: &Bar{},
-11: &Baz{ whatever: "it's just a test" }}}},
-        bang: &Bar{foo: []*Foo{
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
-43: &Foo{},
-22: &Bar{},
-13: &Baz{ whatever: "this is nuts" }}}},
-        bang: &Bar{foo: []*Foo{
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
-61: &Foo{},
-71: &Bar{},
-11: &Baz{ whatever: "no, it's Go" }}}},
-        bang: &Bar{foo: []*Foo{
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
-0: &Foo{},
--2: &Bar{},
--11: &Baz{ whatever: "we need to go deeper" }}}},
-        bang: &Bar{foo: []*Foo{
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
--2: &Foo{},
--5: &Bar{},
--7: &Baz{ whatever: "are you serious?" }}}},
-        bang: &Bar{foo: []*Foo{}}},
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
--100: &Foo{},
-50: &Bar{},
-20: &Baz{ whatever: "na, not really ..." }}}},
-        bang: &Bar{foo: []*Foo{}}}}}}}}},
-&Foo{bar: &Bar{ baz: &Baz{
-entries: map[int]interface{}{
-2: &Foo{},
-1: &Bar{},
--1: &Baz{ whatever: "... it's just a test." }}}},
-        bang: &Bar{foo: []*Foo{}}}}}}}}}
-}
-
-func TestElaborate(t *testing.T) {
-        a := newFoo()
-        b := newFoo()
-
-        if err := deepequal.Check(a, b); err != nil {
-                t.Errorf("expected nil, saw %v", err)
-        }
-}
diff --git a/internal/backport/go/printer/testdata/statements.golden b/internal/backport/go/printer/testdata/statements.golden
deleted file mode 100644
index 4b13460..0000000
--- a/internal/backport/go/printer/testdata/statements.golden
+++ /dev/null
@@ -1,644 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package statements
-
-var expr bool
-
-func use(x interface{})	{}
-
-// Formatting of multi-line return statements.
-func _f() {
-	return
-	return x, y, z
-	return T{}
-	return T{1, 2, 3},
-		x, y, z
-	return T{1, 2, 3},
-		x, y,
-		z
-	return T{1,
-		2,
-		3}
-	return T{1,
-		2,
-		3,
-	}
-	return T{
-		1,
-		2,
-		3}
-	return T{
-		1,
-		2,
-		3,
-	}
-	return T{
-		1,
-		T{1, 2, 3},
-		3,
-	}
-	return T{
-		1,
-		T{1,
-			2, 3},
-		3,
-	}
-	return T{
-		1,
-		T{1,
-			2,
-			3},
-		3,
-	}
-	return T{
-		1,
-		2,
-	}, nil
-	return T{
-			1,
-			2,
-		},
-		T{
-			x:	3,
-			y:	4,
-		}, nil
-	return T{
-			1,
-			2,
-		},
-		nil
-	return T{
-			1,
-			2,
-		},
-		T{
-			x:	3,
-			y:	4,
-		},
-		nil
-	return x + y +
-		z
-	return func() {}
-	return func() {
-			_ = 0
-		}, T{
-			1, 2,
-		}
-	return func() {
-		_ = 0
-	}
-	return func() T {
-		return T{
-			1, 2,
-		}
-	}
-}
-
-// Formatting of multi-line returns: test cases from issue 1207.
-func F() (*T, os.Error) {
-	return &T{
-			X:	1,
-			Y:	2,
-		},
-		nil
-}
-
-func G() (*T, *T, os.Error) {
-	return &T{
-			X:	1,
-			Y:	2,
-		},
-		&T{
-			X:	3,
-			Y:	4,
-		},
-		nil
-}
-
-func _() interface{} {
-	return &fileStat{
-		name:		basename(file.name),
-		size:		mkSize(d.FileSizeHigh, d.FileSizeLow),
-		modTime:	mkModTime(d.LastWriteTime),
-		mode:		mkMode(d.FileAttributes),
-		sys:		mkSysFromFI(&d),
-	}, nil
-}
-
-// Formatting of if-statement headers.
-func _() {
-	if true {
-	}
-	if true {
-	}	// no semicolon printed
-	if expr {
-	}
-	if expr {
-	}	// no semicolon printed
-	if expr {
-	}	// no parens printed
-	if expr {
-	}	// no semicolon and parens printed
-	if x := expr; true {
-		use(x)
-	}
-	if x := expr; expr {
-		use(x)
-	}
-}
-
-// Formatting of switch-statement headers.
-func _() {
-	switch {
-	}
-	switch {
-	}	// no semicolon printed
-	switch expr {
-	}
-	switch expr {
-	}	// no semicolon printed
-	switch expr {
-	}	// no parens printed
-	switch expr {
-	}	// no semicolon and parens printed
-	switch x := expr; {
-	default:
-		use(
-			x)
-	}
-	switch x := expr; expr {
-	default:
-		use(x)
-	}
-}
-
-// Formatting of switch statement bodies.
-func _() {
-	switch {
-	}
-
-	switch x := 0; x {
-	case 1:
-		use(x)
-		use(x)	// followed by an empty line
-
-	case 2:	// followed by an empty line
-
-		use(x)	// followed by an empty line
-
-	case 3:	// no empty lines
-		use(x)
-		use(x)
-	}
-
-	switch x {
-	case 0:
-		use(x)
-	case 1:	// this comment should have no effect on the previous or next line
-		use(x)
-	}
-
-	switch x := 0; x {
-	case 1:
-		x = 0
-		// this comment should be indented
-	case 2:
-		x = 0
-	// this comment should not be indented, it is aligned with the next case
-	case 3:
-		x = 0
-		/* indented comment
-		   aligned
-		   aligned
-		*/
-		// bla
-		/* and more */
-	case 4:
-		x = 0
-	/* not indented comment
-	   aligned
-	   aligned
-	*/
-	// bla
-	/* and more */
-	case 5:
-	}
-}
-
-// Formatting of selected select statements.
-func _() {
-	select {}
-	select { /* this comment should not be tab-aligned because the closing } is on the same line */
-	}
-	select {	/* this comment should be tab-aligned */
-	}
-	select {	// this comment should be tab-aligned
-	}
-	select {
-	case <-c:
-	}
-}
-
-// Formatting of for-statement headers for single-line for-loops.
-func _() {
-	for {
-	}
-	for expr {
-	}
-	for expr {
-	}	// no parens printed
-	for {
-	}	// no semicolons printed
-	for x := expr; ; {
-		use(x)
-	}
-	for expr {
-	}	// no semicolons printed
-	for expr {
-	}	// no semicolons and parens printed
-	for ; ; expr = false {
-	}
-	for x := expr; expr; {
-		use(x)
-	}
-	for x := expr; ; expr = false {
-		use(x)
-	}
-	for ; expr; expr = false {
-	}
-	for x := expr; expr; expr = false {
-		use(x)
-	}
-	for x := range []int{} {
-		use(x)
-	}
-	for x := range []int{} {
-		use(x)
-	}	// no parens printed
-}
-
-// Formatting of for-statement headers for multi-line for-loops.
-func _() {
-	for {
-	}
-	for expr {
-	}
-	for expr {
-	}	// no parens printed
-	for {
-	}	// no semicolons printed
-	for x := expr; ; {
-		use(x)
-	}
-	for expr {
-	}	// no semicolons printed
-	for expr {
-	}	// no semicolons and parens printed
-	for ; ; expr = false {
-	}
-	for x := expr; expr; {
-		use(x)
-	}
-	for x := expr; ; expr = false {
-		use(x)
-	}
-	for ; expr; expr = false {
-	}
-	for x := expr; expr; expr = false {
-		use(x)
-	}
-	for range []int{} {
-		println("foo")
-	}
-	for x := range []int{} {
-		use(x)
-	}
-	for x := range []int{} {
-		use(x)
-	}	// no parens printed
-}
-
-// Formatting of selected short single- and multi-line statements.
-func _() {
-	if cond {
-	}
-	if cond {
-	}	// multiple lines
-	if cond {
-	} else {
-	}	// else clause always requires multiple lines
-
-	for {
-	}
-	for i := 0; i < len(a); 1++ {
-	}
-	for i := 0; i < len(a); 1++ {
-		a[i] = i
-	}
-	for i := 0; i < len(a); 1++ {
-		a[i] = i
-	}	// multiple lines
-
-	for range a {
-	}
-	for _ = range a {
-	}
-	for _, _ = range a {
-	}
-	for i := range a {
-	}
-	for i := range a {
-		a[i] = i
-	}
-	for i := range a {
-		a[i] = i
-	}	// multiple lines
-
-	go func() {
-		for {
-			a <- <-b
-		}
-	}()
-	defer func() {
-		if x := recover(); x != nil {
-			err = fmt.Sprintf("error: %s", x.msg)
-		}
-	}()
-}
-
-// Don't remove mandatory parentheses around composite literals in control clauses.
-func _() {
-	// strip parentheses - no composite literals or composite literals don't start with a type name
-	if x {
-	}
-	if x {
-	}
-	if []T{} {
-	}
-	if []T{} {
-	}
-	if []T{} {
-	}
-
-	for x {
-	}
-	for x {
-	}
-	for []T{} {
-	}
-	for []T{} {
-	}
-	for []T{} {
-	}
-
-	switch x {
-	}
-	switch x {
-	}
-	switch []T{} {
-	}
-	switch []T{} {
-	}
-
-	for _ = range []T{T{42}} {
-	}
-
-	// leave parentheses - composite literals start with a type name
-	if (T{}) {
-	}
-	if (T{}) {
-	}
-	if (T{}) {
-	}
-
-	for (T{}) {
-	}
-	for (T{}) {
-	}
-	for (T{}) {
-	}
-
-	switch (T{}) {
-	}
-	switch (T{}) {
-	}
-
-	for _ = range (T1{T{42}}) {
-	}
-
-	if x == (T{42}[0]) {
-	}
-	if (x == T{42}[0]) {
-	}
-	if x == (T{42}[0]) {
-	}
-	if x == (T{42}[0]) {
-	}
-	if x == (T{42}[0]) {
-	}
-	if x == a+b*(T{42}[0]) {
-	}
-	if (x == a+b*T{42}[0]) {
-	}
-	if x == a+b*(T{42}[0]) {
-	}
-	if x == a+(b*(T{42}[0])) {
-	}
-	if x == a+b*(T{42}[0]) {
-	}
-	if (a + b*(T{42}[0])) == x {
-	}
-	if (a + b*(T{42}[0])) == x {
-	}
-
-	if struct{ x bool }{false}.x {
-	}
-	if (struct{ x bool }{false}.x) == false {
-	}
-	if struct{ x bool }{false}.x == false {
-	}
-}
-
-// Extra empty lines inside functions. Do respect source code line
-// breaks between statement boundaries but print at most one empty
-// line at a time.
-func _() {
-
-	const _ = 0
-
-	const _ = 1
-	type _ int
-	type _ float
-
-	var _ = 0
-	var x = 1
-
-	// Each use(x) call below should have at most one empty line before and after.
-	// Known bug: The first use call may have more than one empty line before
-	//            (see go/printer/nodes.go, func linebreak).
-
-	use(x)
-
-	if x < x {
-
-		use(x)
-
-	} else {
-
-		use(x)
-
-	}
-}
-
-// Formatting around labels.
-func _() {
-L:
-}
-
-func _() {
-	// this comment should be indented
-L:	// no semicolon needed
-}
-
-func _() {
-	switch 0 {
-	case 0:
-	L0:
-		;	// semicolon required
-	case 1:
-	L1:
-		;	// semicolon required
-	default:
-	L2:	// no semicolon needed
-	}
-}
-
-func _() {
-	f()
-L1:
-	f()
-L2:
-	;
-L3:
-}
-
-func _() {
-	// this comment should be indented
-L:
-}
-
-func _() {
-L:
-	_ = 0
-}
-
-func _() {
-	// this comment should be indented
-L:
-	_ = 0
-}
-
-func _() {
-	for {
-	L1:
-		_ = 0
-	L2:
-		_ = 0
-	}
-}
-
-func _() {
-	// this comment should be indented
-	for {
-	L1:
-		_ = 0
-	L2:
-		_ = 0
-	}
-}
-
-func _() {
-	if true {
-		_ = 0
-	}
-	_ = 0	// the indentation here should not be affected by the long label name
-AnOverlongLabel:
-	_ = 0
-
-	if true {
-		_ = 0
-	}
-	_ = 0
-
-L:
-	_ = 0
-}
-
-func _() {
-	for {
-		goto L
-	}
-L:
-
-	MoreCode()
-}
-
-func _() {
-	for {
-		goto L
-	}
-L:	// A comment on the same line as the label, followed by a single empty line.
-	// Known bug: There may be more than one empty line before MoreCode()
-	//            (see go/printer/nodes.go, func linebreak).
-
-	MoreCode()
-}
-
-func _() {
-	for {
-		goto L
-	}
-L:
-
-	// There should be a single empty line before this comment.
-	MoreCode()
-}
-
-func _() {
-	for {
-		goto AVeryLongLabelThatShouldNotAffectFormatting
-	}
-AVeryLongLabelThatShouldNotAffectFormatting:
-	// There should be a single empty line after this comment.
-
-	// There should be a single empty line before this comment.
-	MoreCode()
-}
-
-// Formatting of empty statements.
-func _() {
-
-}
-
-func _() {
-}
-
-func _() {
-}
-
-func _() {
-	f()
-}
-
-func _() {
-L:
-	;
-}
-
-func _() {
-L:
-	;
-	f()
-}
diff --git a/internal/backport/go/printer/testdata/statements.input b/internal/backport/go/printer/testdata/statements.input
deleted file mode 100644
index cade157..0000000
--- a/internal/backport/go/printer/testdata/statements.input
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package statements
-
-var expr bool
-
-func use(x interface{}) {}
-
-// Formatting of multi-line return statements.
-func _f() {
-	return
-	return x, y, z
-	return T{}
-	return T{1, 2, 3},
-		x, y, z
-	return T{1, 2, 3},
-		x, y,
-		z
-	return T{1,
-		2,
-		3}
-	return T{1,
-		2,
-		3,
-	}
-	return T{
-		1,
-		2,
-		3}
-	return T{
-		1,
-		2,
-		3,
-	}
-	return T{
-		1,
-		T{1, 2, 3},
-		3,
-	}
-	return T{
-		1,
-		T{1,
-			2, 3},
-		3,
-	}
-	return T{
-		1,
-		T{1,
-			2,
-			3},
-		3,
-	}
-	return T{
-			1,
-			2,
-		}, nil
-	return T{
-			1,
-			2,
-		},
-		T{
-			x: 3,
-			y: 4,
-		}, nil
-	return T{
-			1,
-			2,
-		},
-		nil
-	return T{
-			1,
-			2,
-		},
-		T{
-			x: 3,
-			y: 4,
-		},
-		nil
-	return x + y +
-		z
-	return func() {}
-	return func() {
-		_ = 0
-	}, T{
-		1, 2,
-	}
-	return func() {
-		_ = 0
-	}
-	return func() T {
-		return T {
-			1, 2,
-		}
-	}
-}
-
-// Formatting of multi-line returns: test cases from issue 1207.
-func F() (*T, os.Error) {
-       return &T{
-               X: 1,
-               Y: 2,
-       },
-               nil
-}
-
-func G() (*T, *T, os.Error) {
-       return &T{
-               X: 1,
-               Y: 2,
-       },
-               &T{
-                       X: 3,
-                       Y: 4,
-               },
-               nil
-}
-
-func _() interface{} {
-	return &fileStat{
-			name:    basename(file.name),
-			size:    mkSize(d.FileSizeHigh, d.FileSizeLow),
-			modTime: mkModTime(d.LastWriteTime),
-			mode:    mkMode(d.FileAttributes),
-			sys:     mkSysFromFI(&d),
-		}, nil
-}
-
-// Formatting of if-statement headers.
-func _() {
-	if true {}
-	if; true {}  // no semicolon printed
-	if expr{}
-	if;expr{}  // no semicolon printed
-	if (expr){}  // no parens printed
-	if;((expr)){}  // no semicolon and parens printed
-	if x:=expr;true{
-	use(x)}
-	if x:=expr; expr {use(x)}
-}
-
-
-// Formatting of switch-statement headers.
-func _() {
-	switch {}
-	switch;{}  // no semicolon printed
-	switch expr {}
-	switch;expr{}  // no semicolon printed
-	switch (expr) {}  // no parens printed
-	switch;((expr)){}  // no semicolon and parens printed
-	switch x := expr; { default:use(
-x)
-	}
-	switch x := expr; expr {default:use(x)}
-}
-
-
-// Formatting of switch statement bodies.
-func _() {
-	switch {
-	}
-
-	switch x := 0; x {
-	case 1:
-		use(x)
-		use(x)  // followed by an empty line
-
-	case 2:  // followed by an empty line
-
-		use(x)  // followed by an empty line
-
-	case 3:  // no empty lines
-		use(x)
-		use(x)
-	}
-
-	switch x {
-	case 0:
-		use(x)
-	case 1:  // this comment should have no effect on the previous or next line
-		use(x)
-	}
-
-	switch x := 0; x {
-	case 1:
-		x = 0
-		// this comment should be indented
-	case 2:
-		x = 0
-	// this comment should not be indented, it is aligned with the next case
-	case 3:
-		x = 0
-		/* indented comment
-		   aligned
-		   aligned
-		*/
-		// bla
-		/* and more */
-	case 4:
-		x = 0
-	/* not indented comment
-	   aligned
-	   aligned
-	*/
-	// bla
-	/* and more */
-	case 5:
-	}
-}
-
-
-// Formatting of selected select statements.
-func _() {
-	select {
-	}
-	select { /* this comment should not be tab-aligned because the closing } is on the same line */ }
-	select { /* this comment should be tab-aligned */
-	}
-	select { // this comment should be tab-aligned
-	}
-	select { case <-c: }
-}
-
-
-// Formatting of for-statement headers for single-line for-loops.
-func _() {
-	for{}
-	for expr {}
-	for (expr) {}  // no parens printed
-	for;;{}  // no semicolons printed
-	for x :=expr;; {use( x)}
-	for; expr;{}  // no semicolons printed
-	for; ((expr));{}  // no semicolons and parens printed
-	for; ; expr = false {}
-	for x :=expr; expr; {use(x)}
-	for x := expr;; expr=false {use(x)}
-	for;expr;expr =false {}
-	for x := expr;expr;expr = false { use(x) }
-	for x := range []int{} { use(x) }
-	for x := range (([]int{})) { use(x) }  // no parens printed
-}
-
-
-// Formatting of for-statement headers for multi-line for-loops.
-func _() {
-	for{
-	}
-	for expr {
-	}
-	for (expr) {
-	}  // no parens printed
-	for;;{
-	}  // no semicolons printed
-	for x :=expr;; {use( x)
-	}
-	for; expr;{
-	}  // no semicolons printed
-	for; ((expr));{
-	}  // no semicolons and parens printed
-	for; ; expr = false {
-	}
-	for x :=expr; expr; {use(x)
-	}
-	for x := expr;; expr=false {use(x)
-	}
-	for;expr;expr =false {
-	}
-	for x := expr;expr;expr = false {
-	use(x)
-	}
-	for range []int{} {
-	println("foo")}
-	for x := range []int{} {
-	use(x) }
-	for x := range (([]int{})) {
-	use(x) }  // no parens printed
-}
-
-
-// Formatting of selected short single- and multi-line statements.
-func _() {
-	if cond {}
-	if cond {
-	} // multiple lines
-	if cond {} else {} // else clause always requires multiple lines
-
-	for {}
-	for i := 0; i < len(a); 1++ {}
-	for i := 0; i < len(a); 1++ { a[i] = i }
-	for i := 0; i < len(a); 1++ { a[i] = i
-	} // multiple lines
-
-	for range a{}
-	for _ = range a{}
-	for _, _ = range a{}
-	for i := range a {}
-	for i := range a { a[i] = i }
-	for i := range a { a[i] = i
-	} // multiple lines
-
-	go func() { for { a <- <-b } }()
-	defer func() { if x := recover(); x != nil { err = fmt.Sprintf("error: %s", x.msg) } }()
-}
-
-
-// Don't remove mandatory parentheses around composite literals in control clauses.
-func _() {
-	// strip parentheses - no composite literals or composite literals don't start with a type name
-	if (x) {}
-	if (((x))) {}
-	if ([]T{}) {}
-	if (([]T{})) {}
-	if ; (((([]T{})))) {}
-
-	for (x) {}
-	for (((x))) {}
-	for ([]T{}) {}
-	for (([]T{})) {}
-	for ; (((([]T{})))) ; {}
-
-	switch (x) {}
-	switch (((x))) {}
-	switch ([]T{}) {}
-	switch ; (((([]T{})))) {}
-
-	for _ = range ((([]T{T{42}}))) {}
-
-	// leave parentheses - composite literals start with a type name
-	if (T{}) {}
-	if ((T{})) {}
-	if ; ((((T{})))) {}
-
-	for (T{}) {}
-	for ((T{})) {}
-	for ; ((((T{})))) ; {}
-
-	switch (T{}) {}
-	switch ; ((((T{})))) {}
-
-	for _ = range (((T1{T{42}}))) {}
-
-	if x == (T{42}[0]) {}
-	if (x == T{42}[0]) {}
-	if (x == (T{42}[0])) {}
-	if (x == (((T{42}[0])))) {}
-	if (((x == (T{42}[0])))) {}
-	if x == a + b*(T{42}[0]) {}
-	if (x == a + b*T{42}[0]) {}
-	if (x == a + b*(T{42}[0])) {}
-	if (x == a + ((b * (T{42}[0])))) {}
-	if (((x == a + b * (T{42}[0])))) {}
-	if (((a + b * (T{42}[0])) == x)) {}
-	if (((a + b * (T{42}[0])))) == x {}
-
-	if (struct{x bool}{false}.x) {}
-	if (struct{x bool}{false}.x) == false {}
-	if (struct{x bool}{false}.x == false) {}
-}
-
-
-// Extra empty lines inside functions. Do respect source code line
-// breaks between statement boundaries but print at most one empty
-// line at a time.
-func _() {
-
-	const _ = 0
-
-	const _ = 1
-	type _ int
-	type _ float
-
-	var _ = 0
-	var x = 1
-
-	// Each use(x) call below should have at most one empty line before and after.
-	// Known bug: The first use call may have more than one empty line before
-	//            (see go/printer/nodes.go, func linebreak).
-
-
-
-	use(x)
-
-	if x < x {
-
-		use(x)
-
-	} else {
-
-		use(x)
-
-	}
-}
-
-
-// Formatting around labels.
-func _() {
-	L:
-}
-
-
-func _() {
-	// this comment should be indented
-	L: ;  // no semicolon needed
-}
-
-
-func _() {
-	switch 0 {
-	case 0:
-		L0: ;  // semicolon required
-	case 1:
-		L1: ;  // semicolon required
-	default:
-		L2: ;  // no semicolon needed
-	}
-}
-
-
-func _() {
-	f()
-L1:
-	f()
-L2:
-	;
-L3:
-}
-
-
-func _() {
-	// this comment should be indented
-	L:
-}
-
-
-func _() {
-	L: _ = 0
-}
-
-
-func _() {
-	// this comment should be indented
-	L: _ = 0
-}
-
-
-func _() {
-	for {
-	L1: _ = 0
-	L2:
-		_ = 0
-	}
-}
-
-
-func _() {
-		// this comment should be indented
-	for {
-	L1: _ = 0
-	L2:
-		_ = 0
-	}
-}
-
-
-func _() {
-	if true {
-		_ = 0
-	}
-	_ = 0  // the indentation here should not be affected by the long label name
-AnOverlongLabel:
-	_ = 0
-	
-	if true {
-		_ = 0
-	}
-	_ = 0
-
-L:	_ = 0
-}
-
-
-func _() {
-	for {
-		goto L
-	}
-L:
-
-	MoreCode()
-}
-
-
-func _() {
-	for {
-		goto L
-	}
-L:	// A comment on the same line as the label, followed by a single empty line.
-	// Known bug: There may be more than one empty line before MoreCode()
-	//            (see go/printer/nodes.go, func linebreak).
-
-
-
-
-	MoreCode()
-}
-
-
-func _() {
-	for {
-		goto L
-	}
-L:
-
-
-
-
-	// There should be a single empty line before this comment.
-	MoreCode()
-}
-
-
-func _() {
-	for {
-		goto AVeryLongLabelThatShouldNotAffectFormatting
-	}
-AVeryLongLabelThatShouldNotAffectFormatting:
-	// There should be a single empty line after this comment.
-
-	// There should be a single empty line before this comment.
-	MoreCode()
-}
-
-
-// Formatting of empty statements.
-func _() {
-	;;;;;;;;;;;;;;;;;;;;;;;;;
-}
-
-func _() {;;;;;;;;;;;;;;;;;;;;;;;;;
-}
-
-func _() {;;;;;;;;;;;;;;;;;;;;;;;;;}
-
-func _() {
-f();;;;;;;;;;;;;;;;;;;;;;;;;
-}
-
-func _() {
-L:;;;;;;;;;;;;
-}
-
-func _() {
-L:;;;;;;;;;;;;
-	f()
-}
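
statements.input and statements.golden exercise statement-level formatting: parenthesis and semicolon stripping in if/for/switch headers, label indentation, and empty-statement handling. The standard go/printer covers the same ground; a minimal sketch with gofmt-compatible settings (the snippet is illustrative):

    package main

    import (
        "go/parser"
        "go/printer"
        "go/token"
        "log"
        "os"
    )

    func main() {
        // Redundant parentheses and semicolons, as in the .input cases above.
        const src = "package p\nfunc f(x bool) { if (x) { }; for ;; { break } }\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            log.Fatal(err)
        }
        // The same mode flags gofmt uses: spaces for alignment, tabs for indentation.
        cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
        if err := cfg.Fprint(os.Stdout, fset, f); err != nil {
            log.Fatal(err)
        }
    }
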
diff --git a/internal/backport/go/scanner/errors.go b/internal/backport/go/scanner/errors.go
deleted file mode 100644
index 539bbfd..0000000
--- a/internal/backport/go/scanner/errors.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/token"
-	"io"
-	"sort"
-)
-
-// In an ErrorList, an error is represented by an *Error.
-// The position Pos, if valid, points to the beginning of
-// the offending token, and the error condition is described
-// by Msg.
-type Error struct {
-	Pos token.Position
-	Msg string
-}
-
-// Error implements the error interface.
-func (e Error) Error() string {
-	if e.Pos.Filename != "" || e.Pos.IsValid() {
-		// don't print "<unknown position>"
-		// TODO(gri) reconsider the semantics of Position.IsValid
-		return e.Pos.String() + ": " + e.Msg
-	}
-	return e.Msg
-}
-
-// ErrorList is a list of *Errors.
-// The zero value for an ErrorList is an empty ErrorList ready to use.
-type ErrorList []*Error
-
-// Add adds an Error with given position and error message to an ErrorList.
-func (p *ErrorList) Add(pos token.Position, msg string) {
-	*p = append(*p, &Error{pos, msg})
-}
-
-// Reset resets an ErrorList to no errors.
-func (p *ErrorList) Reset() { *p = (*p)[0:0] }
-
-// ErrorList implements the sort Interface.
-func (p ErrorList) Len() int      { return len(p) }
-func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p ErrorList) Less(i, j int) bool {
-	e := &p[i].Pos
-	f := &p[j].Pos
-	// Note that it is not sufficient to simply compare file offsets because
-	// the offsets do not reflect modified line information (through //line
-	// comments).
-	if e.Filename != f.Filename {
-		return e.Filename < f.Filename
-	}
-	if e.Line != f.Line {
-		return e.Line < f.Line
-	}
-	if e.Column != f.Column {
-		return e.Column < f.Column
-	}
-	return p[i].Msg < p[j].Msg
-}
-
-// Sort sorts an ErrorList. *Error entries are sorted by position,
-// other errors are sorted by error message, and before any *Error
-// entry.
-func (p ErrorList) Sort() {
-	sort.Sort(p)
-}
-
-// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
-func (p *ErrorList) RemoveMultiples() {
-	sort.Sort(p)
-	var last token.Position // initial last.Line is != any legal error line
-	i := 0
-	for _, e := range *p {
-		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
-			last = e.Pos
-			(*p)[i] = e
-			i++
-		}
-	}
-	*p = (*p)[0:i]
-}
-
-// An ErrorList implements the error interface.
-func (p ErrorList) Error() string {
-	switch len(p) {
-	case 0:
-		return "no errors"
-	case 1:
-		return p[0].Error()
-	}
-	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
-}
-
-// Err returns an error equivalent to this error list.
-// If the list is empty, Err returns nil.
-func (p ErrorList) Err() error {
-	if len(p) == 0 {
-		return nil
-	}
-	return p
-}
-
-// PrintError is a utility function that prints a list of errors to w,
-// one error per line, if the err parameter is an ErrorList. Otherwise
-// it prints the err string.
-func PrintError(w io.Writer, err error) {
-	if list, ok := err.(ErrorList); ok {
-		for _, e := range list {
-			fmt.Fprintf(w, "%s\n", e)
-		}
-	} else if err != nil {
-		fmt.Fprintf(w, "%s\n", err)
-	}
-}
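
The ErrorList above mirrors the one in the standard go/scanner, which is also the concrete type the standard go/parser returns for syntax errors. A small sketch of the standard-library equivalent (the broken source is made up):

    package main

    import (
        "fmt"
        "go/parser"
        "go/scanner"
        "go/token"
        "os"
    )

    func main() {
        // Syntactically broken input so the parser reports several errors.
        const src = "package p\nfunc f() { if }\nvar = 3\n"
        fset := token.NewFileSet()
        _, err := parser.ParseFile(fset, "bad.go", src, 0)
        // Syntax errors come back as a scanner.ErrorList.
        if list, ok := err.(scanner.ErrorList); ok {
            fmt.Printf("%d errors:\n", len(list))
        }
        scanner.PrintError(os.Stderr, err) // one error per line
    }
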
diff --git a/internal/backport/go/scanner/example_test.go b/internal/backport/go/scanner/example_test.go
deleted file mode 100644
index e909237..0000000
--- a/internal/backport/go/scanner/example_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner_test
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func ExampleScanner_Scan() {
-	// src is the input that we want to tokenize.
-	src := []byte("cos(x) + 1i*sin(x) // Euler")
-
-	// Initialize the scanner.
-	var s scanner.Scanner
-	fset := token.NewFileSet()                      // positions are relative to fset
-	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
-	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
-
-	// Repeated calls to Scan yield the token sequence found in the input.
-	for {
-		pos, tok, lit := s.Scan()
-		if tok == token.EOF {
-			break
-		}
-		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
-	}
-
-	// output:
-	// 1:1	IDENT	"cos"
-	// 1:4	(	""
-	// 1:5	IDENT	"x"
-	// 1:6	)	""
-	// 1:8	+	""
-	// 1:10	IMAG	"1i"
-	// 1:12	*	""
-	// 1:13	IDENT	"sin"
-	// 1:16	(	""
-	// 1:17	IDENT	"x"
-	// 1:18	)	""
-	// 1:20	;	"\n"
-	// 1:20	COMMENT	"// Euler"
-}
diff --git a/internal/backport/go/scanner/scanner.go b/internal/backport/go/scanner/scanner.go
deleted file mode 100644
index f96569e..0000000
--- a/internal/backport/go/scanner/scanner.go
+++ /dev/null
@@ -1,983 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scanner implements a scanner for Go source text.
-// It takes a []byte as source which can then be tokenized
-// through repeated calls to the Scan method.
-package scanner
-
-import (
-	"bytes"
-	"fmt"
-	"golang.org/x/website/internal/backport/go/token"
-	"path/filepath"
-	"strconv"
-	"unicode"
-	"unicode/utf8"
-)
-
-// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
-// encountered and a handler was installed, the handler is called with a
-// position and an error message. The position points to the beginning of
-// the offending token.
-type ErrorHandler func(pos token.Position, msg string)
-
-// A Scanner holds the scanner's internal state while processing
-// a given text. It can be allocated as part of another data
-// structure but must be initialized via Init before use.
-type Scanner struct {
-	// immutable state
-	file *token.File  // source file handle
-	dir  string       // directory portion of file.Name()
-	src  []byte       // source
-	err  ErrorHandler // error reporting; or nil
-	mode Mode         // scanning mode
-
-	// scanning state
-	ch         rune // current character
-	offset     int  // character offset
-	rdOffset   int  // reading offset (position after current character)
-	lineOffset int  // current line offset
-	insertSemi bool // insert a semicolon before next newline
-
-	// public state - ok to modify
-	ErrorCount int // number of errors encountered
-}
-
-const (
-	bom = 0xFEFF // byte order mark, only permitted as very first character
-	eof = -1     // end of file
-)
-
-// Read the next Unicode char into s.ch.
-// s.ch < 0 means end-of-file.
-//
-// For optimization, there is some overlap between this method and
-// s.scanIdentifier.
-func (s *Scanner) next() {
-	if s.rdOffset < len(s.src) {
-		s.offset = s.rdOffset
-		if s.ch == '\n' {
-			s.lineOffset = s.offset
-			s.file.AddLine(s.offset)
-		}
-		r, w := rune(s.src[s.rdOffset]), 1
-		switch {
-		case r == 0:
-			s.error(s.offset, "illegal character NUL")
-		case r >= utf8.RuneSelf:
-			// not ASCII
-			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
-			if r == utf8.RuneError && w == 1 {
-				s.error(s.offset, "illegal UTF-8 encoding")
-			} else if r == bom && s.offset > 0 {
-				s.error(s.offset, "illegal byte order mark")
-			}
-		}
-		s.rdOffset += w
-		s.ch = r
-	} else {
-		s.offset = len(s.src)
-		if s.ch == '\n' {
-			s.lineOffset = s.offset
-			s.file.AddLine(s.offset)
-		}
-		s.ch = eof
-	}
-}
-
-// peek returns the byte following the most recently read character without
-// advancing the scanner. If the scanner is at EOF, peek returns 0.
-func (s *Scanner) peek() byte {
-	if s.rdOffset < len(s.src) {
-		return s.src[s.rdOffset]
-	}
-	return 0
-}
-
-// A mode value is a set of flags (or 0).
-// They control scanner behavior.
-type Mode uint
-
-const (
-	ScanComments    Mode = 1 << iota // return comments as COMMENT tokens
-	dontInsertSemis                  // do not automatically insert semicolons - for testing only
-)
-
-// Init prepares the scanner s to tokenize the text src by setting the
-// scanner at the beginning of src. The scanner uses the file set file
-// for position information and it adds line information for each line.
-// It is ok to re-use the same file when re-scanning the same file as
-// line information which is already present is ignored. Init causes a
-// panic if the file size does not match the src size.
-//
-// Calls to Scan will invoke the error handler err if they encounter a
-// syntax error and err is not nil. Also, for each error encountered,
-// the Scanner field ErrorCount is incremented by one. The mode parameter
-// determines how comments are handled.
-//
-// Note that Init may call err if there is an error in the first character
-// of the file.
-func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
-	// Explicitly initialize all fields since a scanner may be reused.
-	if file.Size() != len(src) {
-		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
-	}
-	s.file = file
-	s.dir, _ = filepath.Split(file.Name())
-	s.src = src
-	s.err = err
-	s.mode = mode
-
-	s.ch = ' '
-	s.offset = 0
-	s.rdOffset = 0
-	s.lineOffset = 0
-	s.insertSemi = false
-	s.ErrorCount = 0
-
-	s.next()
-	if s.ch == bom {
-		s.next() // ignore BOM at file beginning
-	}
-}
-
-func (s *Scanner) error(offs int, msg string) {
-	if s.err != nil {
-		s.err(s.file.Position(s.file.Pos(offs)), msg)
-	}
-	s.ErrorCount++
-}
-
-func (s *Scanner) errorf(offs int, format string, args ...interface{}) {
-	s.error(offs, fmt.Sprintf(format, args...))
-}
-
-func (s *Scanner) scanComment() string {
-	// initial '/' already consumed; s.ch == '/' || s.ch == '*'
-	offs := s.offset - 1 // position of initial '/'
-	next := -1           // position immediately following the comment; < 0 means invalid comment
-	numCR := 0
-
-	if s.ch == '/' {
-		//-style comment
-		// (the final '\n' is not considered part of the comment)
-		s.next()
-		for s.ch != '\n' && s.ch >= 0 {
-			if s.ch == '\r' {
-				numCR++
-			}
-			s.next()
-		}
-		// if we are at '\n', the position following the comment is afterwards
-		next = s.offset
-		if s.ch == '\n' {
-			next++
-		}
-		goto exit
-	}
-
-	/*-style comment */
-	s.next()
-	for s.ch >= 0 {
-		ch := s.ch
-		if ch == '\r' {
-			numCR++
-		}
-		s.next()
-		if ch == '*' && s.ch == '/' {
-			s.next()
-			next = s.offset
-			goto exit
-		}
-	}
-
-	s.error(offs, "comment not terminated")
-
-exit:
-	lit := s.src[offs:s.offset]
-
-	// On Windows, a (//-comment) line may end in "\r\n".
-	// Remove the final '\r' before analyzing the text for
-	// line directives (matching the compiler). Remove any
-	// other '\r' afterwards (matching the pre-existing be-
-	// havior of the scanner).
-	if numCR > 0 && len(lit) >= 2 && lit[1] == '/' && lit[len(lit)-1] == '\r' {
-		lit = lit[:len(lit)-1]
-		numCR--
-	}
-
-	// interpret line directives
-	// (//line directives must start at the beginning of the current line)
-	if next >= 0 /* implies valid comment */ && (lit[1] == '*' || offs == s.lineOffset) && bytes.HasPrefix(lit[2:], prefix) {
-		s.updateLineInfo(next, offs, lit)
-	}
-
-	if numCR > 0 {
-		lit = stripCR(lit, lit[1] == '*')
-	}
-
-	return string(lit)
-}
-
-var prefix = []byte("line ")
-
-// updateLineInfo parses the incoming comment text at offset offs
-// as a line directive. If successful, it updates the line info table
-// for the position next per the line directive.
-func (s *Scanner) updateLineInfo(next, offs int, text []byte) {
-	// extract comment text
-	if text[1] == '*' {
-		text = text[:len(text)-2] // lop off trailing "*/"
-	}
-	text = text[7:] // lop off leading "//line " or "/*line "
-	offs += 7
-
-	i, n, ok := trailingDigits(text)
-	if i == 0 {
-		return // ignore (not a line directive)
-	}
-	// i > 0
-
-	if !ok {
-		// text has a suffix :xxx but xxx is not a number
-		s.error(offs+i, "invalid line number: "+string(text[i:]))
-		return
-	}
-
-	var line, col int
-	i2, n2, ok2 := trailingDigits(text[:i-1])
-	if ok2 {
-		//line filename:line:col
-		i, i2 = i2, i
-		line, col = n2, n
-		if col == 0 {
-			s.error(offs+i2, "invalid column number: "+string(text[i2:]))
-			return
-		}
-		text = text[:i2-1] // lop off ":col"
-	} else {
-		//line filename:line
-		line = n
-	}
-
-	if line == 0 {
-		s.error(offs+i, "invalid line number: "+string(text[i:]))
-		return
-	}
-
-	// If we have a column (//line filename:line:col form),
-	// an empty filename means to use the previous filename.
-	filename := string(text[:i-1]) // lop off ":line", and trim white space
-	if filename == "" && ok2 {
-		filename = s.file.Position(s.file.Pos(offs)).Filename
-	} else if filename != "" {
-		// Put a relative filename in the current directory.
-		// This is for compatibility with earlier releases.
-		// See issue 26671.
-		filename = filepath.Clean(filename)
-		if !filepath.IsAbs(filename) {
-			filename = filepath.Join(s.dir, filename)
-		}
-	}
-
-	s.file.AddLineColumnInfo(next, filename, line, col)
-}
-
-func trailingDigits(text []byte) (int, int, bool) {
-	i := bytes.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':')
-	if i < 0 {
-		return 0, 0, false // no ":"
-	}
-	// i >= 0
-	n, err := strconv.ParseUint(string(text[i+1:]), 10, 0)
-	return i + 1, int(n), err == nil
-}
-
-func (s *Scanner) findLineEnd() bool {
-	// initial '/' already consumed
-
-	defer func(offs int) {
-		// reset scanner state to where it was upon calling findLineEnd
-		s.ch = '/'
-		s.offset = offs
-		s.rdOffset = offs + 1
-		s.next() // consume initial '/' again
-	}(s.offset - 1)
-
-	// read ahead until a newline, EOF, or non-comment token is found
-	for s.ch == '/' || s.ch == '*' {
-		if s.ch == '/' {
-			//-style comment always contains a newline
-			return true
-		}
-		/*-style comment: look for newline */
-		s.next()
-		for s.ch >= 0 {
-			ch := s.ch
-			if ch == '\n' {
-				return true
-			}
-			s.next()
-			if ch == '*' && s.ch == '/' {
-				s.next()
-				break
-			}
-		}
-		s.skipWhitespace() // s.insertSemi is set
-		if s.ch < 0 || s.ch == '\n' {
-			return true
-		}
-		if s.ch != '/' {
-			// non-comment token
-			return false
-		}
-		s.next() // consume '/'
-	}
-
-	return false
-}
-
-func isLetter(ch rune) bool {
-	return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
-}
-
-func isDigit(ch rune) bool {
-	return isDecimal(ch) || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
-}
-
-// scanIdentifier reads the string of valid identifier characters at s.offset.
-// It must only be called when s.ch is known to be a valid letter.
-//
-// Be careful when making changes to this function: it is optimized and affects
-// scanning performance significantly.
-func (s *Scanner) scanIdentifier() string {
-	offs := s.offset
-
-	// Optimize for the common case of an ASCII identifier.
-	//
-	// Ranging over s.src[s.rdOffset:] lets us avoid some bounds checks, and
-	// avoids conversions to runes.
-	//
-	// In case we encounter a non-ASCII character, fall back on the slower path
-	// of calling into s.next().
-	for rdOffset, b := range s.src[s.rdOffset:] {
-		if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || b == '_' || '0' <= b && b <= '9' {
-			// Avoid assigning a rune for the common case of an ascii character.
-			continue
-		}
-		s.rdOffset += rdOffset
-		if 0 < b && b < utf8.RuneSelf {
-			// Optimization: we've encountered an ASCII character that's not a letter
-			// or number. Avoid the call into s.next() and corresponding set up.
-			//
-			// Note that s.next() does some line accounting if s.ch is '\n', so this
-			// shortcut is only possible because we know that the preceding character
-			// is not '\n'.
-			s.ch = rune(b)
-			s.offset = s.rdOffset
-			s.rdOffset++
-			goto exit
-		}
-		// We know that the preceding character is valid for an identifier because
-		// scanIdentifier is only called when s.ch is a letter, so calling s.next()
-		// at s.rdOffset resets the scanner state.
-		s.next()
-		for isLetter(s.ch) || isDigit(s.ch) {
-			s.next()
-		}
-		goto exit
-	}
-	s.offset = len(s.src)
-	s.rdOffset = len(s.src)
-	s.ch = eof
-
-exit:
-	return string(s.src[offs:s.offset])
-}
-
-func digitVal(ch rune) int {
-	switch {
-	case '0' <= ch && ch <= '9':
-		return int(ch - '0')
-	case 'a' <= lower(ch) && lower(ch) <= 'f':
-		return int(lower(ch) - 'a' + 10)
-	}
-	return 16 // larger than any legal digit val
-}
-
-func lower(ch rune) rune     { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
-func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
-func isHex(ch rune) bool     { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
-
-// digits accepts the sequence { digit | '_' }.
-// If base <= 10, digits accepts any decimal digit but records
-// the offset (relative to the source start) of a digit >= base
-// in *invalid, if *invalid < 0.
-// digits returns a bitset describing whether the sequence contained
-// digits (bit 0 is set), or separators '_' (bit 1 is set).
-func (s *Scanner) digits(base int, invalid *int) (digsep int) {
-	if base <= 10 {
-		max := rune('0' + base)
-		for isDecimal(s.ch) || s.ch == '_' {
-			ds := 1
-			if s.ch == '_' {
-				ds = 2
-			} else if s.ch >= max && *invalid < 0 {
-				*invalid = s.offset // record invalid rune offset
-			}
-			digsep |= ds
-			s.next()
-		}
-	} else {
-		for isHex(s.ch) || s.ch == '_' {
-			ds := 1
-			if s.ch == '_' {
-				ds = 2
-			}
-			digsep |= ds
-			s.next()
-		}
-	}
-	return
-}
-
-func (s *Scanner) scanNumber() (token.Token, string) {
-	offs := s.offset
-	tok := token.ILLEGAL
-
-	base := 10        // number base
-	prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
-	digsep := 0       // bit 0: digit present, bit 1: '_' present
-	invalid := -1     // index of invalid digit in literal, or < 0
-
-	// integer part
-	if s.ch != '.' {
-		tok = token.INT
-		if s.ch == '0' {
-			s.next()
-			switch lower(s.ch) {
-			case 'x':
-				s.next()
-				base, prefix = 16, 'x'
-			case 'o':
-				s.next()
-				base, prefix = 8, 'o'
-			case 'b':
-				s.next()
-				base, prefix = 2, 'b'
-			default:
-				base, prefix = 8, '0'
-				digsep = 1 // leading 0
-			}
-		}
-		digsep |= s.digits(base, &invalid)
-	}
-
-	// fractional part
-	if s.ch == '.' {
-		tok = token.FLOAT
-		if prefix == 'o' || prefix == 'b' {
-			s.error(s.offset, "invalid radix point in "+litname(prefix))
-		}
-		s.next()
-		digsep |= s.digits(base, &invalid)
-	}
-
-	if digsep&1 == 0 {
-		s.error(s.offset, litname(prefix)+" has no digits")
-	}
-
-	// exponent
-	if e := lower(s.ch); e == 'e' || e == 'p' {
-		switch {
-		case e == 'e' && prefix != 0 && prefix != '0':
-			s.errorf(s.offset, "%q exponent requires decimal mantissa", s.ch)
-		case e == 'p' && prefix != 'x':
-			s.errorf(s.offset, "%q exponent requires hexadecimal mantissa", s.ch)
-		}
-		s.next()
-		tok = token.FLOAT
-		if s.ch == '+' || s.ch == '-' {
-			s.next()
-		}
-		ds := s.digits(10, nil)
-		digsep |= ds
-		if ds&1 == 0 {
-			s.error(s.offset, "exponent has no digits")
-		}
-	} else if prefix == 'x' && tok == token.FLOAT {
-		s.error(s.offset, "hexadecimal mantissa requires a 'p' exponent")
-	}
-
-	// suffix 'i'
-	if s.ch == 'i' {
-		tok = token.IMAG
-		s.next()
-	}
-
-	lit := string(s.src[offs:s.offset])
-	if tok == token.INT && invalid >= 0 {
-		s.errorf(invalid, "invalid digit %q in %s", lit[invalid-offs], litname(prefix))
-	}
-	if digsep&2 != 0 {
-		if i := invalidSep(lit); i >= 0 {
-			s.error(offs+i, "'_' must separate successive digits")
-		}
-	}
-
-	return tok, lit
-}
-
-func litname(prefix rune) string {
-	switch prefix {
-	case 'x':
-		return "hexadecimal literal"
-	case 'o', '0':
-		return "octal literal"
-	case 'b':
-		return "binary literal"
-	}
-	return "decimal literal"
-}
-
-// invalidSep returns the index of the first invalid separator in x, or -1.
-func invalidSep(x string) int {
-	x1 := ' ' // prefix char, we only care if it's 'x'
-	d := '.'  // digit, one of '_', '0' (a digit), or '.' (anything else)
-	i := 0
-
-	// a prefix counts as a digit
-	if len(x) >= 2 && x[0] == '0' {
-		x1 = lower(rune(x[1]))
-		if x1 == 'x' || x1 == 'o' || x1 == 'b' {
-			d = '0'
-			i = 2
-		}
-	}
-
-	// mantissa and exponent
-	for ; i < len(x); i++ {
-		p := d // previous digit
-		d = rune(x[i])
-		switch {
-		case d == '_':
-			if p != '0' {
-				return i
-			}
-		case isDecimal(d) || x1 == 'x' && isHex(d):
-			d = '0'
-		default:
-			if p == '_' {
-				return i - 1
-			}
-			d = '.'
-		}
-	}
-	if d == '_' {
-		return len(x) - 1
-	}
-
-	return -1
-}
-
-// scanEscape parses an escape sequence where rune is the accepted
-// escaped quote. In case of a syntax error, it stops at the offending
-// character (without consuming it) and returns false. Otherwise
-// it returns true.
-func (s *Scanner) scanEscape(quote rune) bool {
-	offs := s.offset
-
-	var n int
-	var base, max uint32
-	switch s.ch {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
-		s.next()
-		return true
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		n, base, max = 3, 8, 255
-	case 'x':
-		s.next()
-		n, base, max = 2, 16, 255
-	case 'u':
-		s.next()
-		n, base, max = 4, 16, unicode.MaxRune
-	case 'U':
-		s.next()
-		n, base, max = 8, 16, unicode.MaxRune
-	default:
-		msg := "unknown escape sequence"
-		if s.ch < 0 {
-			msg = "escape sequence not terminated"
-		}
-		s.error(offs, msg)
-		return false
-	}
-
-	var x uint32
-	for n > 0 {
-		d := uint32(digitVal(s.ch))
-		if d >= base {
-			msg := fmt.Sprintf("illegal character %#U in escape sequence", s.ch)
-			if s.ch < 0 {
-				msg = "escape sequence not terminated"
-			}
-			s.error(s.offset, msg)
-			return false
-		}
-		x = x*base + d
-		s.next()
-		n--
-	}
-
-	if x > max || 0xD800 <= x && x < 0xE000 {
-		s.error(offs, "escape sequence is invalid Unicode code point")
-		return false
-	}
-
-	return true
-}
-
-func (s *Scanner) scanRune() string {
-	// '\'' opening already consumed
-	offs := s.offset - 1
-
-	valid := true
-	n := 0
-	for {
-		ch := s.ch
-		if ch == '\n' || ch < 0 {
-			// only report error if we don't have one already
-			if valid {
-				s.error(offs, "rune literal not terminated")
-				valid = false
-			}
-			break
-		}
-		s.next()
-		if ch == '\'' {
-			break
-		}
-		n++
-		if ch == '\\' {
-			if !s.scanEscape('\'') {
-				valid = false
-			}
-			// continue to read to closing quote
-		}
-	}
-
-	if valid && n != 1 {
-		s.error(offs, "illegal rune literal")
-	}
-
-	return string(s.src[offs:s.offset])
-}
-
-func (s *Scanner) scanString() string {
-	// '"' opening already consumed
-	offs := s.offset - 1
-
-	for {
-		ch := s.ch
-		if ch == '\n' || ch < 0 {
-			s.error(offs, "string literal not terminated")
-			break
-		}
-		s.next()
-		if ch == '"' {
-			break
-		}
-		if ch == '\\' {
-			s.scanEscape('"')
-		}
-	}
-
-	return string(s.src[offs:s.offset])
-}
-
-func stripCR(b []byte, comment bool) []byte {
-	c := make([]byte, len(b))
-	i := 0
-	for j, ch := range b {
-		// In a /*-style comment, don't strip \r from *\r/ (incl.
-		// sequences of \r from *\r\r...\r/) since the resulting
-		// */ would terminate the comment too early unless the \r
-		// is immediately following the opening /* in which case
-		// it's ok because /*/ is not closed yet (issue #11151).
-		if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' {
-			c[i] = ch
-			i++
-		}
-	}
-	return c[:i]
-}
-
-func (s *Scanner) scanRawString() string {
-	// '`' opening already consumed
-	offs := s.offset - 1
-
-	hasCR := false
-	for {
-		ch := s.ch
-		if ch < 0 {
-			s.error(offs, "raw string literal not terminated")
-			break
-		}
-		s.next()
-		if ch == '`' {
-			break
-		}
-		if ch == '\r' {
-			hasCR = true
-		}
-	}
-
-	lit := s.src[offs:s.offset]
-	if hasCR {
-		lit = stripCR(lit, false)
-	}
-
-	return string(lit)
-}
-
-func (s *Scanner) skipWhitespace() {
-	for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' {
-		s.next()
-	}
-}
-
-// Helper functions for scanning multi-byte tokens such as >> += >>= .
-// Different routines recognize different length tok_i based on matches
-// of ch_i. If a token ends in '=', the result is tok1 or tok3
-// respectively. Otherwise, the result is tok0 if there was no other
-// matching character, or tok2 if the matching character was ch2.
-
-func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
-	if s.ch == '=' {
-		s.next()
-		return tok1
-	}
-	return tok0
-}
-
-func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
-	if s.ch == '=' {
-		s.next()
-		return tok1
-	}
-	if s.ch == ch2 {
-		s.next()
-		return tok2
-	}
-	return tok0
-}
-
-func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
-	if s.ch == '=' {
-		s.next()
-		return tok1
-	}
-	if s.ch == ch2 {
-		s.next()
-		if s.ch == '=' {
-			s.next()
-			return tok3
-		}
-		return tok2
-	}
-	return tok0
-}
-
-// Scan scans the next token and returns the token position, the token,
-// and its literal string if applicable. The source end is indicated by
-// token.EOF.
-//
-// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT,
-// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
-// has the corresponding value.
-//
-// If the returned token is a keyword, the literal string is the keyword.
-//
-// If the returned token is token.SEMICOLON, the corresponding
-// literal string is ";" if the semicolon was present in the source,
-// and "\n" if the semicolon was inserted because of a newline or
-// at EOF.
-//
-// If the returned token is token.ILLEGAL, the literal string is the
-// offending character.
-//
-// In all other cases, Scan returns an empty literal string.
-//
-// For more tolerant parsing, Scan will return a valid token if
-// possible even if a syntax error was encountered. Thus, even
-// if the resulting token sequence contains no illegal tokens,
-// a client may not assume that no error occurred. Instead it
-// must check the scanner's ErrorCount or the number of calls
-// of the error handler, if there was one installed.
-//
-// Scan adds line information to the file added to the file
-// set with Init. Token positions are relative to that file
-// and thus relative to the file set.
-func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
-scanAgain:
-	s.skipWhitespace()
-
-	// current token start
-	pos = s.file.Pos(s.offset)
-
-	// determine token value
-	insertSemi := false
-	switch ch := s.ch; {
-	case isLetter(ch):
-		lit = s.scanIdentifier()
-		if len(lit) > 1 {
-			// keywords are longer than one letter - avoid lookup otherwise
-			tok = token.Lookup(lit)
-			switch tok {
-			case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
-				insertSemi = true
-			}
-		} else {
-			insertSemi = true
-			tok = token.IDENT
-		}
-	case isDecimal(ch) || ch == '.' && isDecimal(rune(s.peek())):
-		insertSemi = true
-		tok, lit = s.scanNumber()
-	default:
-		s.next() // always make progress
-		switch ch {
-		case -1:
-			if s.insertSemi {
-				s.insertSemi = false // EOF consumed
-				return pos, token.SEMICOLON, "\n"
-			}
-			tok = token.EOF
-		case '\n':
-			// we only reach here if s.insertSemi was
-			// set in the first place and exited early
-			// from s.skipWhitespace()
-			s.insertSemi = false // newline consumed
-			return pos, token.SEMICOLON, "\n"
-		case '"':
-			insertSemi = true
-			tok = token.STRING
-			lit = s.scanString()
-		case '\'':
-			insertSemi = true
-			tok = token.CHAR
-			lit = s.scanRune()
-		case '`':
-			insertSemi = true
-			tok = token.STRING
-			lit = s.scanRawString()
-		case ':':
-			tok = s.switch2(token.COLON, token.DEFINE)
-		case '.':
-			// fractions starting with a '.' are handled by outer switch
-			tok = token.PERIOD
-			if s.ch == '.' && s.peek() == '.' {
-				s.next()
-				s.next() // consume last '.'
-				tok = token.ELLIPSIS
-			}
-		case ',':
-			tok = token.COMMA
-		case ';':
-			tok = token.SEMICOLON
-			lit = ";"
-		case '(':
-			tok = token.LPAREN
-		case ')':
-			insertSemi = true
-			tok = token.RPAREN
-		case '[':
-			tok = token.LBRACK
-		case ']':
-			insertSemi = true
-			tok = token.RBRACK
-		case '{':
-			tok = token.LBRACE
-		case '}':
-			insertSemi = true
-			tok = token.RBRACE
-		case '+':
-			tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
-			if tok == token.INC {
-				insertSemi = true
-			}
-		case '-':
-			tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
-			if tok == token.DEC {
-				insertSemi = true
-			}
-		case '*':
-			tok = s.switch2(token.MUL, token.MUL_ASSIGN)
-		case '/':
-			if s.ch == '/' || s.ch == '*' {
-				// comment
-				if s.insertSemi && s.findLineEnd() {
-					// reset position to the beginning of the comment
-					s.ch = '/'
-					s.offset = s.file.Offset(pos)
-					s.rdOffset = s.offset + 1
-					s.insertSemi = false // newline consumed
-					return pos, token.SEMICOLON, "\n"
-				}
-				comment := s.scanComment()
-				if s.mode&ScanComments == 0 {
-					// skip comment
-					s.insertSemi = false // newline consumed
-					goto scanAgain
-				}
-				tok = token.COMMENT
-				lit = comment
-			} else {
-				tok = s.switch2(token.QUO, token.QUO_ASSIGN)
-			}
-		case '%':
-			tok = s.switch2(token.REM, token.REM_ASSIGN)
-		case '^':
-			tok = s.switch2(token.XOR, token.XOR_ASSIGN)
-		case '<':
-			if s.ch == '-' {
-				s.next()
-				tok = token.ARROW
-			} else {
-				tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
-			}
-		case '>':
-			tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
-		case '=':
-			tok = s.switch2(token.ASSIGN, token.EQL)
-		case '!':
-			tok = s.switch2(token.NOT, token.NEQ)
-		case '&':
-			if s.ch == '^' {
-				s.next()
-				tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
-			} else {
-				tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
-			}
-		case '|':
-			tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
-		case '~':
-			tok = token.TILDE
-		default:
-			// next reports unexpected BOMs - don't repeat
-			if ch != bom {
-				s.errorf(s.file.Offset(pos), "illegal character %#U", ch)
-			}
-			insertSemi = s.insertSemi // preserve insertSemi info
-			tok = token.ILLEGAL
-			lit = string(ch)
-		}
-	}
-	if s.mode&dontInsertSemis == 0 {
-		s.insertSemi = insertSemi
-	}
-
-	return
-}
diff --git a/internal/backport/go/scanner/scanner_test.go b/internal/backport/go/scanner/scanner_test.go
deleted file mode 100644
index d56abcb..0000000
--- a/internal/backport/go/scanner/scanner_test.go
+++ /dev/null
@@ -1,1127 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner
-
-import (
-	"golang.org/x/website/internal/backport/go/token"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"testing"
-)
-
-var fset = token.NewFileSet()
-
-const /* class */ (
-	special = iota
-	literal
-	operator
-	keyword
-)
-
-func tokenclass(tok token.Token) int {
-	switch {
-	case tok.IsLiteral():
-		return literal
-	case tok.IsOperator():
-		return operator
-	case tok.IsKeyword():
-		return keyword
-	}
-	return special
-}
-
-type elt struct {
-	tok   token.Token
-	lit   string
-	class int
-}
-
-var tokens = []elt{
-	// Special tokens
-	{token.COMMENT, "/* a comment */", special},
-	{token.COMMENT, "// a comment \n", special},
-	{token.COMMENT, "/*\r*/", special},
-	{token.COMMENT, "/**\r/*/", special}, // issue 11151
-	{token.COMMENT, "/**\r\r/*/", special},
-	{token.COMMENT, "//\r\n", special},
-
-	// Identifiers and basic type literals
-	{token.IDENT, "foobar", literal},
-	{token.IDENT, "a۰۱۸", literal},
-	{token.IDENT, "foo६४", literal},
-	{token.IDENT, "bar9876", literal},
-	{token.IDENT, "ŝ", literal},    // was bug (issue 4000)
-	{token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
-	{token.INT, "0", literal},
-	{token.INT, "1", literal},
-	{token.INT, "123456789012345678890", literal},
-	{token.INT, "01234567", literal},
-	{token.INT, "0xcafebabe", literal},
-	{token.FLOAT, "0.", literal},
-	{token.FLOAT, ".0", literal},
-	{token.FLOAT, "3.14159265", literal},
-	{token.FLOAT, "1e0", literal},
-	{token.FLOAT, "1e+100", literal},
-	{token.FLOAT, "1e-100", literal},
-	{token.FLOAT, "2.71828e-1000", literal},
-	{token.IMAG, "0i", literal},
-	{token.IMAG, "1i", literal},
-	{token.IMAG, "012345678901234567889i", literal},
-	{token.IMAG, "123456789012345678890i", literal},
-	{token.IMAG, "0.i", literal},
-	{token.IMAG, ".0i", literal},
-	{token.IMAG, "3.14159265i", literal},
-	{token.IMAG, "1e0i", literal},
-	{token.IMAG, "1e+100i", literal},
-	{token.IMAG, "1e-100i", literal},
-	{token.IMAG, "2.71828e-1000i", literal},
-	{token.CHAR, "'a'", literal},
-	{token.CHAR, "'\\000'", literal},
-	{token.CHAR, "'\\xFF'", literal},
-	{token.CHAR, "'\\uff16'", literal},
-	{token.CHAR, "'\\U0000ff16'", literal},
-	{token.STRING, "`foobar`", literal},
-	{token.STRING, "`" + `foo
-	                        bar` +
-		"`",
-		literal,
-	},
-	{token.STRING, "`\r`", literal},
-	{token.STRING, "`foo\r\nbar`", literal},
-
-	// Operators and delimiters
-	{token.ADD, "+", operator},
-	{token.SUB, "-", operator},
-	{token.MUL, "*", operator},
-	{token.QUO, "/", operator},
-	{token.REM, "%", operator},
-
-	{token.AND, "&", operator},
-	{token.OR, "|", operator},
-	{token.XOR, "^", operator},
-	{token.SHL, "<<", operator},
-	{token.SHR, ">>", operator},
-	{token.AND_NOT, "&^", operator},
-
-	{token.ADD_ASSIGN, "+=", operator},
-	{token.SUB_ASSIGN, "-=", operator},
-	{token.MUL_ASSIGN, "*=", operator},
-	{token.QUO_ASSIGN, "/=", operator},
-	{token.REM_ASSIGN, "%=", operator},
-
-	{token.AND_ASSIGN, "&=", operator},
-	{token.OR_ASSIGN, "|=", operator},
-	{token.XOR_ASSIGN, "^=", operator},
-	{token.SHL_ASSIGN, "<<=", operator},
-	{token.SHR_ASSIGN, ">>=", operator},
-	{token.AND_NOT_ASSIGN, "&^=", operator},
-
-	{token.LAND, "&&", operator},
-	{token.LOR, "||", operator},
-	{token.ARROW, "<-", operator},
-	{token.INC, "++", operator},
-	{token.DEC, "--", operator},
-
-	{token.EQL, "==", operator},
-	{token.LSS, "<", operator},
-	{token.GTR, ">", operator},
-	{token.ASSIGN, "=", operator},
-	{token.NOT, "!", operator},
-
-	{token.NEQ, "!=", operator},
-	{token.LEQ, "<=", operator},
-	{token.GEQ, ">=", operator},
-	{token.DEFINE, ":=", operator},
-	{token.ELLIPSIS, "...", operator},
-
-	{token.LPAREN, "(", operator},
-	{token.LBRACK, "[", operator},
-	{token.LBRACE, "{", operator},
-	{token.COMMA, ",", operator},
-	{token.PERIOD, ".", operator},
-
-	{token.RPAREN, ")", operator},
-	{token.RBRACK, "]", operator},
-	{token.RBRACE, "}", operator},
-	{token.SEMICOLON, ";", operator},
-	{token.COLON, ":", operator},
-	{token.TILDE, "~", operator},
-
-	// Keywords
-	{token.BREAK, "break", keyword},
-	{token.CASE, "case", keyword},
-	{token.CHAN, "chan", keyword},
-	{token.CONST, "const", keyword},
-	{token.CONTINUE, "continue", keyword},
-
-	{token.DEFAULT, "default", keyword},
-	{token.DEFER, "defer", keyword},
-	{token.ELSE, "else", keyword},
-	{token.FALLTHROUGH, "fallthrough", keyword},
-	{token.FOR, "for", keyword},
-
-	{token.FUNC, "func", keyword},
-	{token.GO, "go", keyword},
-	{token.GOTO, "goto", keyword},
-	{token.IF, "if", keyword},
-	{token.IMPORT, "import", keyword},
-
-	{token.INTERFACE, "interface", keyword},
-	{token.MAP, "map", keyword},
-	{token.PACKAGE, "package", keyword},
-	{token.RANGE, "range", keyword},
-	{token.RETURN, "return", keyword},
-
-	{token.SELECT, "select", keyword},
-	{token.STRUCT, "struct", keyword},
-	{token.SWITCH, "switch", keyword},
-	{token.TYPE, "type", keyword},
-	{token.VAR, "var", keyword},
-}
-
-const whitespace = "  \t  \n\n\n" // to separate tokens
-
-var source = func() []byte {
-	var src []byte
-	for _, t := range tokens {
-		src = append(src, t.lit...)
-		src = append(src, whitespace...)
-	}
-	return src
-}()
-
-func newlineCount(s string) int {
-	n := 0
-	for i := 0; i < len(s); i++ {
-		if s[i] == '\n' {
-			n++
-		}
-	}
-	return n
-}
-
-func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
-	pos := fset.Position(p)
-	// Check cleaned filenames so that we don't have to worry about
-	// different os.PathSeparator values.
-	if pos.Filename != expected.Filename && filepath.Clean(pos.Filename) != filepath.Clean(expected.Filename) {
-		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
-	}
-	if pos.Offset != expected.Offset {
-		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
-	}
-	if pos.Line != expected.Line {
-		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
-	}
-	if pos.Column != expected.Column {
-		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
-	}
-}
-
-// Verify that calling Scan() provides the correct results.
-func TestScan(t *testing.T) {
-	whitespace_linecount := newlineCount(whitespace)
-
-	// error handler
-	eh := func(_ token.Position, msg string) {
-		t.Errorf("error handler called (msg = %s)", msg)
-	}
-
-	// verify scan
-	var s Scanner
-	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)
-
-	// set up expected position
-	epos := token.Position{
-		Filename: "",
-		Offset:   0,
-		Line:     1,
-		Column:   1,
-	}
-
-	index := 0
-	for {
-		pos, tok, lit := s.Scan()
-
-		// check position
-		if tok == token.EOF {
-			// correction for EOF
-			epos.Line = newlineCount(string(source))
-			epos.Column = 2
-		}
-		checkPos(t, lit, pos, epos)
-
-		// check token
-		e := elt{token.EOF, "", special}
-		if index < len(tokens) {
-			e = tokens[index]
-			index++
-		}
-		if tok != e.tok {
-			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
-		}
-
-		// check token class
-		if tokenclass(tok) != e.class {
-			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
-		}
-
-		// check literal
-		elit := ""
-		switch e.tok {
-		case token.COMMENT:
-			// no CRs in comments
-			elit = string(stripCR([]byte(e.lit), e.lit[1] == '*'))
-			//-style comment literal doesn't contain newline
-			if elit[1] == '/' {
-				elit = elit[0 : len(elit)-1]
-			}
-		case token.IDENT:
-			elit = e.lit
-		case token.SEMICOLON:
-			elit = ";"
-		default:
-			if e.tok.IsLiteral() {
-				// no CRs in raw string literals
-				elit = e.lit
-				if elit[0] == '`' {
-					elit = string(stripCR([]byte(elit), false))
-				}
-			} else if e.tok.IsKeyword() {
-				elit = e.lit
-			}
-		}
-		if lit != elit {
-			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
-		}
-
-		if tok == token.EOF {
-			break
-		}
-
-		// update position
-		epos.Offset += len(e.lit) + len(whitespace)
-		epos.Line += newlineCount(e.lit) + whitespace_linecount
-
-	}
-
-	if s.ErrorCount != 0 {
-		t.Errorf("found %d errors", s.ErrorCount)
-	}
-}
-
-func TestStripCR(t *testing.T) {
-	for _, test := range []struct{ have, want string }{
-		{"//\n", "//\n"},
-		{"//\r\n", "//\n"},
-		{"//\r\r\r\n", "//\n"},
-		{"//\r*\r/\r\n", "//*/\n"},
-		{"/**/", "/**/"},
-		{"/*\r/*/", "/*/*/"},
-		{"/*\r*/", "/**/"},
-		{"/**\r/*/", "/**\r/*/"},
-		{"/*\r/\r*\r/*/", "/*/*\r/*/"},
-		{"/*\r\r\r\r*/", "/**/"},
-	} {
-		got := string(stripCR([]byte(test.have), len(test.have) >= 2 && test.have[1] == '*'))
-		if got != test.want {
-			t.Errorf("stripCR(%q) = %q; want %q", test.have, got, test.want)
-		}
-	}
-}
-
-func checkSemi(t *testing.T, line string, mode Mode) {
-	var S Scanner
-	file := fset.AddFile("TestSemis", fset.Base(), len(line))
-	S.Init(file, []byte(line), nil, mode)
-	pos, tok, lit := S.Scan()
-	for tok != token.EOF {
-		if tok == token.ILLEGAL {
-			// the illegal token literal indicates what
-			// kind of semicolon literal to expect
-			semiLit := "\n"
-			if lit[0] == '#' {
-				semiLit = ";"
-			}
-			// next token must be a semicolon
-			semiPos := file.Position(pos)
-			semiPos.Offset++
-			semiPos.Column++
-			pos, tok, lit = S.Scan()
-			if tok == token.SEMICOLON {
-				if lit != semiLit {
-					t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
-				}
-				checkPos(t, line, pos, semiPos)
-			} else {
-				t.Errorf("bad token for %q: got %s, expected ;", line, tok)
-			}
-		} else if tok == token.SEMICOLON {
-			t.Errorf("bad token for %q: got ;, expected no ;", line)
-		}
-		pos, tok, lit = S.Scan()
-	}
-}
-
-var lines = []string{
-	// # indicates a semicolon present in the source
-	// $ indicates an automatically inserted semicolon
-	"",
-	"\ufeff#;", // first BOM is ignored
-	"#;",
-	"foo$\n",
-	"123$\n",
-	"1.2$\n",
-	"'x'$\n",
-	`"x"` + "$\n",
-	"`x`$\n",
-
-	"+\n",
-	"-\n",
-	"*\n",
-	"/\n",
-	"%\n",
-
-	"&\n",
-	"|\n",
-	"^\n",
-	"<<\n",
-	">>\n",
-	"&^\n",
-
-	"+=\n",
-	"-=\n",
-	"*=\n",
-	"/=\n",
-	"%=\n",
-
-	"&=\n",
-	"|=\n",
-	"^=\n",
-	"<<=\n",
-	">>=\n",
-	"&^=\n",
-
-	"&&\n",
-	"||\n",
-	"<-\n",
-	"++$\n",
-	"--$\n",
-
-	"==\n",
-	"<\n",
-	">\n",
-	"=\n",
-	"!\n",
-
-	"!=\n",
-	"<=\n",
-	">=\n",
-	":=\n",
-	"...\n",
-
-	"(\n",
-	"[\n",
-	"{\n",
-	",\n",
-	".\n",
-
-	")$\n",
-	"]$\n",
-	"}$\n",
-	"#;\n",
-	":\n",
-
-	"break$\n",
-	"case\n",
-	"chan\n",
-	"const\n",
-	"continue$\n",
-
-	"default\n",
-	"defer\n",
-	"else\n",
-	"fallthrough$\n",
-	"for\n",
-
-	"func\n",
-	"go\n",
-	"goto\n",
-	"if\n",
-	"import\n",
-
-	"interface\n",
-	"map\n",
-	"package\n",
-	"range\n",
-	"return$\n",
-
-	"select\n",
-	"struct\n",
-	"switch\n",
-	"type\n",
-	"var\n",
-
-	"foo$//comment\n",
-	"foo$//comment",
-	"foo$/*comment*/\n",
-	"foo$/*\n*/",
-	"foo$/*comment*/    \n",
-	"foo$/*\n*/    ",
-
-	"foo    $// comment\n",
-	"foo    $// comment",
-	"foo    $/*comment*/\n",
-	"foo    $/*\n*/",
-	"foo    $/*  */ /* \n */ bar$/**/\n",
-	"foo    $/*0*/ /*1*/ /*2*/\n",
-
-	"foo    $/*comment*/    \n",
-	"foo    $/*0*/ /*1*/ /*2*/    \n",
-	"foo	$/**/ /*-------------*/       /*----\n*/bar       $/*  \n*/baa$\n",
-	"foo    $/* an EOF terminates a line */",
-	"foo    $/* an EOF terminates a line */ /*",
-	"foo    $/* an EOF terminates a line */ //",
-
-	"package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
-	"package main$",
-}
-
-func TestSemis(t *testing.T) {
-	for _, line := range lines {
-		checkSemi(t, line, 0)
-		checkSemi(t, line, ScanComments)
-
-		// if the input ended in newlines, the input must tokenize the
-		// same with or without those newlines
-		for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
-			checkSemi(t, line[0:i], 0)
-			checkSemi(t, line[0:i], ScanComments)
-		}
-	}
-}
-
-type segment struct {
-	srcline      string // a line of source text
-	filename     string // filename for current token; error message for invalid line directives
-	line, column int    // line and column for current token; error position for invalid line directives
-}
-
-var segments = []segment{
-	// exactly one token per line since the test consumes one token per segment
-	{"  line1", "TestLineDirectives", 1, 3},
-	{"\nline2", "TestLineDirectives", 2, 1},
-	{"\nline3  //line File1.go:100", "TestLineDirectives", 3, 1}, // bad line comment, ignored
-	{"\nline4", "TestLineDirectives", 4, 1},
-	{"\n//line File1.go:100\n  line100", "File1.go", 100, 0},
-	{"\n//line  \t :42\n  line1", " \t ", 42, 0},
-	{"\n//line File2.go:200\n  line200", "File2.go", 200, 0},
-	{"\n//line foo\t:42\n  line42", "foo\t", 42, 0},
-	{"\n //line foo:42\n  line43", "foo\t", 44, 0}, // bad line comment, ignored (use existing, prior filename)
-	{"\n//line foo 42\n  line44", "foo\t", 46, 0},  // bad line comment, ignored (use existing, prior filename)
-	{"\n//line /bar:42\n  line45", "/bar", 42, 0},
-	{"\n//line ./foo:42\n  line46", "foo", 42, 0},
-	{"\n//line a/b/c/File1.go:100\n  line100", "a/b/c/File1.go", 100, 0},
-	{"\n//line c:\\bar:42\n  line200", "c:\\bar", 42, 0},
-	{"\n//line c:\\dir\\File1.go:100\n  line201", "c:\\dir\\File1.go", 100, 0},
-
-	// tests for new line directive syntax
-	{"\n//line :100\na1", "", 100, 0}, // missing filename means empty filename
-	{"\n//line bar:100\nb1", "bar", 100, 0},
-	{"\n//line :100:10\nc1", "bar", 100, 10}, // missing filename means current filename
-	{"\n//line foo:100:10\nd1", "foo", 100, 10},
-
-	{"\n/*line :100*/a2", "", 100, 0}, // missing filename means empty filename
-	{"\n/*line bar:100*/b2", "bar", 100, 0},
-	{"\n/*line :100:10*/c2", "bar", 100, 10}, // missing filename means current filename
-	{"\n/*line foo:100:10*/d2", "foo", 100, 10},
-	{"\n/*line foo:100:10*/    e2", "foo", 100, 14}, // line-directive relative column
-	{"\n/*line foo:100:10*/\n\nf2", "foo", 102, 1},  // absolute column since on new line
-}
-
-var dirsegments = []segment{
-	// exactly one token per line since the test consumes one token per segment
-	{"  line1", "TestLineDir/TestLineDirectives", 1, 3},
-	{"\n//line File1.go:100\n  line100", "TestLineDir/File1.go", 100, 0},
-}
-
-var dirUnixSegments = []segment{
-	{"\n//line /bar:42\n  line42", "/bar", 42, 0},
-}
-
-var dirWindowsSegments = []segment{
-	{"\n//line c:\\bar:42\n  line42", "c:\\bar", 42, 0},
-}
-
-// Verify that line directives are interpreted correctly.
-func TestLineDirectives(t *testing.T) {
-	testSegments(t, segments, "TestLineDirectives")
-	testSegments(t, dirsegments, "TestLineDir/TestLineDirectives")
-	if runtime.GOOS == "windows" {
-		testSegments(t, dirWindowsSegments, "TestLineDir/TestLineDirectives")
-	} else {
-		testSegments(t, dirUnixSegments, "TestLineDir/TestLineDirectives")
-	}
-}
-
-func testSegments(t *testing.T, segments []segment, filename string) {
-	var src string
-	for _, e := range segments {
-		src += e.srcline
-	}
-
-	// verify scan
-	var S Scanner
-	file := fset.AddFile(filename, fset.Base(), len(src))
-	S.Init(file, []byte(src), func(pos token.Position, msg string) { t.Error(Error{pos, msg}) }, dontInsertSemis)
-	for _, s := range segments {
-		p, _, lit := S.Scan()
-		pos := file.Position(p)
-		checkPos(t, lit, p, token.Position{
-			Filename: s.filename,
-			Offset:   pos.Offset,
-			Line:     s.line,
-			Column:   s.column,
-		})
-	}
-
-	if S.ErrorCount != 0 {
-		t.Errorf("got %d errors", S.ErrorCount)
-	}
-}
-
-// The filename is used for the error message in these test cases.
-// The first line directive is valid and used to control the expected error line.
-var invalidSegments = []segment{
-	{"\n//line :1:1\n//line foo:42 extra text\ndummy", "invalid line number: 42 extra text", 1, 12},
-	{"\n//line :2:1\n//line foobar:\ndummy", "invalid line number: ", 2, 15},
-	{"\n//line :5:1\n//line :0\ndummy", "invalid line number: 0", 5, 9},
-	{"\n//line :10:1\n//line :1:0\ndummy", "invalid column number: 0", 10, 11},
-	{"\n//line :1:1\n//line :foo:0\ndummy", "invalid line number: 0", 1, 13}, // foo is considered part of the filename
-}
-
-// Verify that invalid line directives get the correct error message.
-func TestInvalidLineDirectives(t *testing.T) {
-	// make source
-	var src string
-	for _, e := range invalidSegments {
-		src += e.srcline
-	}
-
-	// verify scan
-	var S Scanner
-	var s segment // current segment
-	file := fset.AddFile(filepath.Join("dir", "TestInvalidLineDirectives"), fset.Base(), len(src))
-	S.Init(file, []byte(src), func(pos token.Position, msg string) {
-		if msg != s.filename {
-			t.Errorf("got error %q; want %q", msg, s.filename)
-		}
-		if pos.Line != s.line || pos.Column != s.column {
-			t.Errorf("got position %d:%d; want %d:%d", pos.Line, pos.Column, s.line, s.column)
-		}
-	}, dontInsertSemis)
-	for _, s = range invalidSegments {
-		S.Scan()
-	}
-
-	if S.ErrorCount != len(invalidSegments) {
-		t.Errorf("got %d errors; want %d", S.ErrorCount, len(invalidSegments))
-	}
-}
-
-// Verify that initializing the same scanner more than once works correctly.
-func TestInit(t *testing.T) {
-	var s Scanner
-
-	// 1st init
-	src1 := "if true { }"
-	f1 := fset.AddFile("src1", fset.Base(), len(src1))
-	s.Init(f1, []byte(src1), nil, dontInsertSemis)
-	if f1.Size() != len(src1) {
-		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
-	}
-	s.Scan()              // if
-	s.Scan()              // true
-	_, tok, _ := s.Scan() // {
-	if tok != token.LBRACE {
-		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
-	}
-
-	// 2nd init
-	src2 := "go true { ]"
-	f2 := fset.AddFile("src2", fset.Base(), len(src2))
-	s.Init(f2, []byte(src2), nil, dontInsertSemis)
-	if f2.Size() != len(src2) {
-		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
-	}
-	_, tok, _ = s.Scan() // go
-	if tok != token.GO {
-		t.Errorf("bad token: got %s, expected %s", tok, token.GO)
-	}
-
-	if s.ErrorCount != 0 {
-		t.Errorf("found %d errors", s.ErrorCount)
-	}
-}
-
-func TestStdErrorHander(t *testing.T) {
-	const src = "@\n" + // illegal character, cause an error
-		"@ @\n" + // two errors on the same line
-		"//line File2:20\n" +
-		"@\n" + // different file, but same line
-		"//line File2:1\n" +
-		"@ @\n" + // same file, decreasing line number
-		"//line File1:1\n" +
-		"@ @ @" // original file, line 1 again
-
-	var list ErrorList
-	eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
-
-	var s Scanner
-	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
-	for {
-		if _, tok, _ := s.Scan(); tok == token.EOF {
-			break
-		}
-	}
-
-	if len(list) != s.ErrorCount {
-		t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
-	}
-
-	if len(list) != 9 {
-		t.Errorf("found %d raw errors, expected 9", len(list))
-		PrintError(os.Stderr, list)
-	}
-
-	list.Sort()
-	if len(list) != 9 {
-		t.Errorf("found %d sorted errors, expected 9", len(list))
-		PrintError(os.Stderr, list)
-	}
-
-	list.RemoveMultiples()
-	if len(list) != 4 {
-		t.Errorf("found %d one-per-line errors, expected 4", len(list))
-		PrintError(os.Stderr, list)
-	}
-}
-
-type errorCollector struct {
-	cnt int            // number of errors encountered
-	msg string         // last error message encountered
-	pos token.Position // last error position encountered
-}
-
-func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) {
-	var s Scanner
-	var h errorCollector
-	eh := func(pos token.Position, msg string) {
-		h.cnt++
-		h.msg = msg
-		h.pos = pos
-	}
-	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
-	_, tok0, lit0 := s.Scan()
-	if tok0 != tok {
-		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
-	}
-	if tok0 != token.ILLEGAL && lit0 != lit {
-		t.Errorf("%q: got literal %q, expected %q", src, lit0, lit)
-	}
-	cnt := 0
-	if err != "" {
-		cnt = 1
-	}
-	if h.cnt != cnt {
-		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
-	}
-	if h.msg != err {
-		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
-	}
-	if h.pos.Offset != pos {
-		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
-	}
-}
-
-var errors = []struct {
-	src string
-	tok token.Token
-	pos int
-	lit string
-	err string
-}{
-	{"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
-	{`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
-	{`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
-	{"..", token.PERIOD, 0, "", ""}, // two periods, not invalid token (issue #28112)
-	{`' '`, token.CHAR, 0, `' '`, ""},
-	{`''`, token.CHAR, 0, `''`, "illegal rune literal"},
-	{`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
-	{`'123'`, token.CHAR, 0, `'123'`, "illegal rune literal"},
-	{`'\0'`, token.CHAR, 3, `'\0'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\07'`, token.CHAR, 4, `'\07'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\8'`, token.CHAR, 2, `'\8'`, "unknown escape sequence"},
-	{`'\08'`, token.CHAR, 3, `'\08'`, "illegal character U+0038 '8' in escape sequence"},
-	{`'\x'`, token.CHAR, 3, `'\x'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\x0'`, token.CHAR, 4, `'\x0'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\x0g'`, token.CHAR, 4, `'\x0g'`, "illegal character U+0067 'g' in escape sequence"},
-	{`'\u'`, token.CHAR, 3, `'\u'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\u0'`, token.CHAR, 4, `'\u0'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\u00'`, token.CHAR, 5, `'\u00'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\u000'`, token.CHAR, 6, `'\u000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\u000`, token.CHAR, 6, `'\u000`, "escape sequence not terminated"},
-	{`'\u0000'`, token.CHAR, 0, `'\u0000'`, ""},
-	{`'\U'`, token.CHAR, 3, `'\U'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U0'`, token.CHAR, 4, `'\U0'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U00'`, token.CHAR, 5, `'\U00'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U000'`, token.CHAR, 6, `'\U000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U0000'`, token.CHAR, 7, `'\U0000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U00000'`, token.CHAR, 8, `'\U00000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U000000'`, token.CHAR, 9, `'\U000000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U0000000'`, token.CHAR, 10, `'\U0000000'`, "illegal character U+0027 ''' in escape sequence"},
-	{`'\U0000000`, token.CHAR, 10, `'\U0000000`, "escape sequence not terminated"},
-	{`'\U00000000'`, token.CHAR, 0, `'\U00000000'`, ""},
-	{`'\Uffffffff'`, token.CHAR, 2, `'\Uffffffff'`, "escape sequence is invalid Unicode code point"},
-	{`'`, token.CHAR, 0, `'`, "rune literal not terminated"},
-	{`'\`, token.CHAR, 2, `'\`, "escape sequence not terminated"},
-	{"'\n", token.CHAR, 0, "'", "rune literal not terminated"},
-	{"'\n   ", token.CHAR, 0, "'", "rune literal not terminated"},
-	{`""`, token.STRING, 0, `""`, ""},
-	{`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"},
-	{"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"},
-	{"\"abc\n   ", token.STRING, 0, `"abc`, "string literal not terminated"},
-	{"``", token.STRING, 0, "``", ""},
-	{"`", token.STRING, 0, "`", "raw string literal not terminated"},
-	{"/**/", token.COMMENT, 0, "/**/", ""},
-	{"/*", token.COMMENT, 0, "/*", "comment not terminated"},
-	{"077", token.INT, 0, "077", ""},
-	{"078.", token.FLOAT, 0, "078.", ""},
-	{"07801234567.", token.FLOAT, 0, "07801234567.", ""},
-	{"078e0", token.FLOAT, 0, "078e0", ""},
-	{"0E", token.FLOAT, 2, "0E", "exponent has no digits"}, // issue 17621
-	{"078", token.INT, 2, "078", "invalid digit '8' in octal literal"},
-	{"07090000008", token.INT, 3, "07090000008", "invalid digit '9' in octal literal"},
-	{"0x", token.INT, 2, "0x", "hexadecimal literal has no digits"},
-	{"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"},
-	{"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"},
-	{"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"},                        // only first BOM is ignored
-	{"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"},                                // only first BOM is ignored
-	{"'\ufeff" + `'`, token.CHAR, 1, "'\ufeff" + `'`, "illegal byte order mark"},                         // only first BOM is ignored
-	{`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
-	{"abc\x00def", token.IDENT, 3, "abc", "illegal character NUL"},
-	{"abc\x00", token.IDENT, 3, "abc", "illegal character NUL"},
-}
-
-func TestScanErrors(t *testing.T) {
-	for _, e := range errors {
-		checkError(t, e.src, e.tok, e.pos, e.lit, e.err)
-	}
-}
-
-// Verify that no comments show up as literal values when skipping comments.
-func TestIssue10213(t *testing.T) {
-	const src = `
-		var (
-			A = 1 // foo
-		)
-
-		var (
-			B = 2
-			// foo
-		)
-
-		var C = 3 // foo
-
-		var D = 4
-		// foo
-
-		func anycode() {
-		// foo
-		}
-	`
-	var s Scanner
-	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
-	for {
-		pos, tok, lit := s.Scan()
-		class := tokenclass(tok)
-		if lit != "" && class != keyword && class != literal && tok != token.SEMICOLON {
-			t.Errorf("%s: tok = %s, lit = %q", fset.Position(pos), tok, lit)
-		}
-		if tok <= token.EOF {
-			break
-		}
-	}
-}
-
-func TestIssue28112(t *testing.T) {
-	const src = "... .. 0.. .." // make sure to have stand-alone ".." immediately before EOF to test EOF behavior
-	tokens := []token.Token{token.ELLIPSIS, token.PERIOD, token.PERIOD, token.FLOAT, token.PERIOD, token.PERIOD, token.PERIOD, token.EOF}
-	var s Scanner
-	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
-	for _, want := range tokens {
-		pos, got, lit := s.Scan()
-		if got != want {
-			t.Errorf("%s: got %s, want %s", fset.Position(pos), got, want)
-		}
-		// literals expect to have a (non-empty) literal string and we don't care about other tokens for this test
-		if tokenclass(got) == literal && lit == "" {
-			t.Errorf("%s: for %s got empty literal string", fset.Position(pos), got)
-		}
-	}
-}
-
-func BenchmarkScan(b *testing.B) {
-	b.StopTimer()
-	fset := token.NewFileSet()
-	file := fset.AddFile("", fset.Base(), len(source))
-	var s Scanner
-	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		s.Init(file, source, nil, ScanComments)
-		for {
-			_, tok, _ := s.Scan()
-			if tok == token.EOF {
-				break
-			}
-		}
-	}
-}
-
-func BenchmarkScanFiles(b *testing.B) {
-	// Scan a few arbitrary large files, and one small one, to provide some
-	// variety in benchmarks.
-	for _, p := range []string{
-		"golang.org/x/website/internal/backport/go/types/expr.go",
-		"golang.org/x/website/internal/backport/go/parser/parser.go",
-		"net/http/server.go",
-		"golang.org/x/website/internal/backport/go/scanner/errors.go",
-	} {
-		b.Run(p, func(b *testing.B) {
-			b.StopTimer()
-			filename := filepath.Join("..", "..", filepath.FromSlash(p))
-			src, err := os.ReadFile(filename)
-			if err != nil {
-				b.Fatal(err)
-			}
-			fset := token.NewFileSet()
-			file := fset.AddFile(filename, fset.Base(), len(src))
-			b.SetBytes(int64(len(src)))
-			var s Scanner
-			b.StartTimer()
-			for i := 0; i < b.N; i++ {
-				s.Init(file, src, nil, ScanComments)
-				for {
-					_, tok, _ := s.Scan()
-					if tok == token.EOF {
-						break
-					}
-				}
-			}
-		})
-	}
-}
-
-func TestNumbers(t *testing.T) {
-	for _, test := range []struct {
-		tok              token.Token
-		src, tokens, err string
-	}{
-		// binaries
-		{token.INT, "0b0", "0b0", ""},
-		{token.INT, "0b1010", "0b1010", ""},
-		{token.INT, "0B1110", "0B1110", ""},
-
-		{token.INT, "0b", "0b", "binary literal has no digits"},
-		{token.INT, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
-		{token.INT, "0b01a0", "0b01 a0", ""}, // only accept 0-9
-
-		{token.FLOAT, "0b.", "0b.", "invalid radix point in binary literal"},
-		{token.FLOAT, "0b.1", "0b.1", "invalid radix point in binary literal"},
-		{token.FLOAT, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
-		{token.FLOAT, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
-		{token.FLOAT, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
-
-		{token.IMAG, "0b10i", "0b10i", ""},
-		{token.IMAG, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},
-
-		// octals
-		{token.INT, "0o0", "0o0", ""},
-		{token.INT, "0o1234", "0o1234", ""},
-		{token.INT, "0O1234", "0O1234", ""},
-
-		{token.INT, "0o", "0o", "octal literal has no digits"},
-		{token.INT, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
-		{token.INT, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
-		{token.INT, "0o12a3", "0o12 a3", ""}, // only accept 0-9
-
-		{token.FLOAT, "0o.", "0o.", "invalid radix point in octal literal"},
-		{token.FLOAT, "0o.2", "0o.2", "invalid radix point in octal literal"},
-		{token.FLOAT, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
-		{token.FLOAT, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
-		{token.FLOAT, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
-
-		{token.IMAG, "0o10i", "0o10i", ""},
-		{token.IMAG, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},
-
-		// 0-octals
-		{token.INT, "0", "0", ""},
-		{token.INT, "0123", "0123", ""},
-
-		{token.INT, "08123", "08123", "invalid digit '8' in octal literal"},
-		{token.INT, "01293", "01293", "invalid digit '9' in octal literal"},
-		{token.INT, "0F.", "0 F .", ""}, // only accept 0-9
-		{token.INT, "0123F.", "0123 F .", ""},
-		{token.INT, "0123456x", "0123456 x", ""},
-
-		// decimals
-		{token.INT, "1", "1", ""},
-		{token.INT, "1234", "1234", ""},
-
-		{token.INT, "1f", "1 f", ""}, // only accept 0-9
-
-		{token.IMAG, "0i", "0i", ""},
-		{token.IMAG, "0678i", "0678i", ""},
-
-		// decimal floats
-		{token.FLOAT, "0.", "0.", ""},
-		{token.FLOAT, "123.", "123.", ""},
-		{token.FLOAT, "0123.", "0123.", ""},
-
-		{token.FLOAT, ".0", ".0", ""},
-		{token.FLOAT, ".123", ".123", ""},
-		{token.FLOAT, ".0123", ".0123", ""},
-
-		{token.FLOAT, "0.0", "0.0", ""},
-		{token.FLOAT, "123.123", "123.123", ""},
-		{token.FLOAT, "0123.0123", "0123.0123", ""},
-
-		{token.FLOAT, "0e0", "0e0", ""},
-		{token.FLOAT, "123e+0", "123e+0", ""},
-		{token.FLOAT, "0123E-1", "0123E-1", ""},
-
-		{token.FLOAT, "0.e+1", "0.e+1", ""},
-		{token.FLOAT, "123.E-10", "123.E-10", ""},
-		{token.FLOAT, "0123.e123", "0123.e123", ""},
-
-		{token.FLOAT, ".0e-1", ".0e-1", ""},
-		{token.FLOAT, ".123E+10", ".123E+10", ""},
-		{token.FLOAT, ".0123E123", ".0123E123", ""},
-
-		{token.FLOAT, "0.0e1", "0.0e1", ""},
-		{token.FLOAT, "123.123E-10", "123.123E-10", ""},
-		{token.FLOAT, "0123.0123e+456", "0123.0123e+456", ""},
-
-		{token.FLOAT, "0e", "0e", "exponent has no digits"},
-		{token.FLOAT, "0E+", "0E+", "exponent has no digits"},
-		{token.FLOAT, "1e+f", "1e+ f", "exponent has no digits"},
-		{token.FLOAT, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
-		{token.FLOAT, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
-
-		{token.IMAG, "0.i", "0.i", ""},
-		{token.IMAG, ".123i", ".123i", ""},
-		{token.IMAG, "123.123i", "123.123i", ""},
-		{token.IMAG, "123e+0i", "123e+0i", ""},
-		{token.IMAG, "123.E-10i", "123.E-10i", ""},
-		{token.IMAG, ".123E+10i", ".123E+10i", ""},
-
-		// hexadecimals
-		{token.INT, "0x0", "0x0", ""},
-		{token.INT, "0x1234", "0x1234", ""},
-		{token.INT, "0xcafef00d", "0xcafef00d", ""},
-		{token.INT, "0XCAFEF00D", "0XCAFEF00D", ""},
-
-		{token.INT, "0x", "0x", "hexadecimal literal has no digits"},
-		{token.INT, "0x1g", "0x1 g", ""},
-
-		{token.IMAG, "0xf00i", "0xf00i", ""},
-
-		// hexadecimal floats
-		{token.FLOAT, "0x0p0", "0x0p0", ""},
-		{token.FLOAT, "0x12efp-123", "0x12efp-123", ""},
-		{token.FLOAT, "0xABCD.p+0", "0xABCD.p+0", ""},
-		{token.FLOAT, "0x.0189P-0", "0x.0189P-0", ""},
-		{token.FLOAT, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
-
-		{token.FLOAT, "0x.", "0x.", "hexadecimal literal has no digits"},
-		{token.FLOAT, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
-		{token.FLOAT, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
-		{token.FLOAT, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
-		{token.FLOAT, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
-		{token.FLOAT, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
-		{token.FLOAT, "0x0p", "0x0p", "exponent has no digits"},
-		{token.FLOAT, "0xeP-", "0xeP-", "exponent has no digits"},
-		{token.FLOAT, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
-		{token.FLOAT, "0x1.2p1a", "0x1.2p1 a", ""},
-
-		{token.IMAG, "0xf00.bap+12i", "0xf00.bap+12i", ""},
-
-		// separators
-		{token.INT, "0b_1000_0001", "0b_1000_0001", ""},
-		{token.INT, "0o_600", "0o_600", ""},
-		{token.INT, "0_466", "0_466", ""},
-		{token.INT, "1_000", "1_000", ""},
-		{token.FLOAT, "1_000.000_1", "1_000.000_1", ""},
-		{token.IMAG, "10e+1_2_3i", "10e+1_2_3i", ""},
-		{token.INT, "0x_f00d", "0x_f00d", ""},
-		{token.FLOAT, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
-
-		{token.INT, "0b__1000", "0b__1000", "'_' must separate successive digits"},
-		{token.INT, "0o60___0", "0o60___0", "'_' must separate successive digits"},
-		{token.INT, "0466_", "0466_", "'_' must separate successive digits"},
-		{token.FLOAT, "1_.", "1_.", "'_' must separate successive digits"},
-		{token.FLOAT, "0._1", "0._1", "'_' must separate successive digits"},
-		{token.FLOAT, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
-		{token.IMAG, "10e+12_i", "10e+12_i", "'_' must separate successive digits"},
-		{token.INT, "0x___0", "0x___0", "'_' must separate successive digits"},
-		{token.FLOAT, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
-	} {
-		var s Scanner
-		var err string
-		s.Init(fset.AddFile("", fset.Base(), len(test.src)), []byte(test.src), func(_ token.Position, msg string) {
-			if err == "" {
-				err = msg
-			}
-		}, 0)
-		for i, want := range strings.Split(test.tokens, " ") {
-			err = ""
-			_, tok, lit := s.Scan()
-
-			// compute lit where for tokens where lit is not defined
-			switch tok {
-			case token.PERIOD:
-				lit = "."
-			case token.ADD:
-				lit = "+"
-			case token.SUB:
-				lit = "-"
-			}
-
-			if i == 0 {
-				if tok != test.tok {
-					t.Errorf("%q: got token %s; want %s", test.src, tok, test.tok)
-				}
-				if err != test.err {
-					t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
-				}
-			}
-
-			if lit != want {
-				t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, tok, want)
-			}
-		}
-
-		// make sure we read all
-		_, tok, _ := s.Scan()
-		if tok == token.SEMICOLON {
-			_, tok, _ = s.Scan()
-		}
-		if tok != token.EOF {
-			t.Errorf("%q: got %s; want EOF", test.src, tok)
-		}
-	}
-}
diff --git a/internal/backport/go/token/example_test.go b/internal/backport/go/token/example_test.go
deleted file mode 100644
index e41a958..0000000
--- a/internal/backport/go/token/example_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token_test
-
-import (
-	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
-)
-
-func Example_retrievePositionInfo() {
-	fset := token.NewFileSet()
-
-	const src = `package main
-
-import "fmt"
-
-import "go/token"
-
-//line :1:5
-type p = token.Pos
-
-const bad = token.NoPos
-
-//line fake.go:42:11
-func ok(pos p) bool {
-	return pos != bad
-}
-
-/*line :7:9*/func main() {
-	fmt.Println(ok(bad) == bad.IsValid())
-}
-`
-
-	f, err := parser.ParseFile(fset, "main.go", src, 0)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	// Print the location and kind of each declaration in f.
-	for _, decl := range f.Decls {
-		// Get the filename, line, and column back via the file set.
-		// We get both the relative and absolute position.
-		// The relative position is relative to the last line directive.
-		// The absolute position is the exact position in the source.
-		pos := decl.Pos()
-		relPosition := fset.Position(pos)
-		absPosition := fset.PositionFor(pos, false)
-
-		// Either a FuncDecl or GenDecl, since we exit on error.
-		kind := "func"
-		if gen, ok := decl.(*ast.GenDecl); ok {
-			kind = gen.Tok.String()
-		}
-
-		// If the relative and absolute positions differ, show both.
-		fmtPosition := relPosition.String()
-		if relPosition != absPosition {
-			fmtPosition += "[" + absPosition.String() + "]"
-		}
-
-		fmt.Printf("%s: %s\n", fmtPosition, kind)
-	}
-
-	//Output:
-	//
-	// main.go:3:1: import
-	// main.go:5:1: import
-	// main.go:1:5[main.go:8:1]: type
-	// main.go:3:1[main.go:10:1]: const
-	// fake.go:42:11[main.go:13:1]: func
-	// fake.go:7:9[main.go:17:14]: func
-}
diff --git a/internal/backport/go/token/position.go b/internal/backport/go/token/position.go
deleted file mode 100644
index 00f2453..0000000
--- a/internal/backport/go/token/position.go
+++ /dev/null
@@ -1,529 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-import (
-	"fmt"
-	"sort"
-	"sync"
-)
-
-// -----------------------------------------------------------------------------
-// Positions
-
-// Position describes an arbitrary source position
-// including the file, line, and column location.
-// A Position is valid if the line number is > 0.
-type Position struct {
-	Filename string // filename, if any
-	Offset   int    // offset, starting at 0
-	Line     int    // line number, starting at 1
-	Column   int    // column number, starting at 1 (byte count)
-}
-
-// IsValid reports whether the position is valid.
-func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
-// String returns a string in one of several forms:
-//
-//	file:line:column    valid position with file name
-//	file:line           valid position with file name but no column (column == 0)
-//	line:column         valid position without file name
-//	line                valid position without file name and no column (column == 0)
-//	file                invalid position with file name
-//	-                   invalid position without file name
-func (pos Position) String() string {
-	s := pos.Filename
-	if pos.IsValid() {
-		if s != "" {
-			s += ":"
-		}
-		s += fmt.Sprintf("%d", pos.Line)
-		if pos.Column != 0 {
-			s += fmt.Sprintf(":%d", pos.Column)
-		}
-	}
-	if s == "" {
-		s = "-"
-	}
-	return s
-}
-
-// Pos is a compact encoding of a source position within a file set.
-// It can be converted into a Position for a more convenient, but much
-// larger, representation.
-//
-// The Pos value for a given file is a number in the range [base, base+size],
-// where base and size are specified when a file is added to the file set.
-// The difference between a Pos value and the corresponding file base
-// corresponds to the byte offset of that position (represented by the Pos value)
-// from the beginning of the file. Thus, the file base offset is the Pos value
-// representing the first byte in the file.
-//
-// To create the Pos value for a specific source offset (measured in bytes),
-// first add the respective file to the current file set using FileSet.AddFile
-// and then call File.Pos(offset) for that file. Given a Pos value p
-// for a specific file set fset, the corresponding Position value is
-// obtained by calling fset.Position(p).
-//
-// Pos values can be compared directly with the usual comparison operators:
-// If two Pos values p and q are in the same file, comparing p and q is
-// equivalent to comparing the respective source file offsets. If p and q
-// are in different files, p < q is true if the file implied by p was added
-// to the respective file set before the file implied by q.
-type Pos int
-
-// The zero value for Pos is NoPos; there is no file and line information
-// associated with it, and NoPos.IsValid() is false. NoPos is always
-// smaller than any other Pos value. The corresponding Position value
-// for NoPos is the zero value for Position.
-const NoPos Pos = 0
-
-// IsValid reports whether the position is valid.
-func (p Pos) IsValid() bool {
-	return p != NoPos
-}
-
-// -----------------------------------------------------------------------------
-// File
-
-// A File is a handle for a file belonging to a FileSet.
-// A File has a name, size, and line offset table.
-type File struct {
-	set  *FileSet
-	name string // file name as provided to AddFile
-	base int    // Pos value range for this file is [base...base+size]
-	size int    // file size as provided to AddFile
-
-	// lines and infos are protected by mutex
-	mutex sync.Mutex
-	lines []int // lines contains the offset of the first character for each line (the first entry is always 0)
-	infos []lineInfo
-}
-
-// Name returns the file name of file f as registered with AddFile.
-func (f *File) Name() string {
-	return f.name
-}
-
-// Base returns the base offset of file f as registered with AddFile.
-func (f *File) Base() int {
-	return f.base
-}
-
-// Size returns the size of file f as registered with AddFile.
-func (f *File) Size() int {
-	return f.size
-}
-
-// LineCount returns the number of lines in file f.
-func (f *File) LineCount() int {
-	f.mutex.Lock()
-	n := len(f.lines)
-	f.mutex.Unlock()
-	return n
-}
-
-// AddLine adds the line offset for a new line.
-// The line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise the line offset is ignored.
-func (f *File) AddLine(offset int) {
-	f.mutex.Lock()
-	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
-		f.lines = append(f.lines, offset)
-	}
-	f.mutex.Unlock()
-}
-
-// MergeLine merges a line with the following line. It is akin to replacing
-// the newline character at the end of the line with a space (to not change the
-// remaining offsets). To obtain the line number, consult e.g. Position.Line.
-// MergeLine will panic if given an invalid line number.
-func (f *File) MergeLine(line int) {
-	if line < 1 {
-		panic(fmt.Sprintf("invalid line number %d (should be >= 1)", line))
-	}
-	f.mutex.Lock()
-	defer f.mutex.Unlock()
-	if line >= len(f.lines) {
-		panic(fmt.Sprintf("invalid line number %d (should be < %d)", line, len(f.lines)))
-	}
-	// To merge the line numbered <line> with the line numbered <line+1>,
-	// we need to remove the entry in lines corresponding to the line
-	// numbered <line+1>. The entry in lines corresponding to the line
-	// numbered <line+1> is located at index <line>, since indices in lines
-	// are 0-based and line numbers are 1-based.
-	copy(f.lines[line:], f.lines[line+1:])
-	f.lines = f.lines[:len(f.lines)-1]
-}
-
-// SetLines sets the line offsets for a file and reports whether it succeeded.
-// The line offsets are the offsets of the first character of each line;
-// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
-// An empty file has an empty line offset table.
-// Each line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise SetLines fails and returns
-// false.
-// Callers must not mutate the provided slice after SetLines returns.
-func (f *File) SetLines(lines []int) bool {
-	// verify validity of lines table
-	size := f.size
-	for i, offset := range lines {
-		if i > 0 && offset <= lines[i-1] || size <= offset {
-			return false
-		}
-	}
-
-	// set lines table
-	f.mutex.Lock()
-	f.lines = lines
-	f.mutex.Unlock()
-	return true
-}
-
-// SetLinesForContent sets the line offsets for the given file content.
-// It ignores position-altering //line comments.
-func (f *File) SetLinesForContent(content []byte) {
-	var lines []int
-	line := 0
-	for offset, b := range content {
-		if line >= 0 {
-			lines = append(lines, line)
-		}
-		line = -1
-		if b == '\n' {
-			line = offset + 1
-		}
-	}
-
-	// set lines table
-	f.mutex.Lock()
-	f.lines = lines
-	f.mutex.Unlock()
-}
-
-// LineStart returns the Pos value of the start of the specified line.
-// It ignores any alternative positions set using AddLineColumnInfo.
-// LineStart panics if the 1-based line number is invalid.
-func (f *File) LineStart(line int) Pos {
-	if line < 1 {
-		panic(fmt.Sprintf("invalid line number %d (should be >= 1)", line))
-	}
-	f.mutex.Lock()
-	defer f.mutex.Unlock()
-	if line > len(f.lines) {
-		panic(fmt.Sprintf("invalid line number %d (should be < %d)", line, len(f.lines)))
-	}
-	return Pos(f.base + f.lines[line-1])
-}
-
-// A lineInfo object describes alternative file, line, and column
-// number information (such as provided via a //line directive)
-// for a given file offset.
-type lineInfo struct {
-	// fields are exported to make them accessible to gob
-	Offset       int
-	Filename     string
-	Line, Column int
-}
-
-// AddLineInfo is like AddLineColumnInfo with a column = 1 argument.
-// It is here for backward compatibility with code written before Go 1.11.
-func (f *File) AddLineInfo(offset int, filename string, line int) {
-	f.AddLineColumnInfo(offset, filename, line, 1)
-}
-
-// AddLineColumnInfo adds alternative file, line, and column number
-// information for a given file offset. The offset must be larger
-// than the offset for the previously added alternative line info
-// and smaller than the file size; otherwise the information is
-// ignored.
-//
-// AddLineColumnInfo is typically used to register alternative position
-// information for line directives such as //line filename:line:column.
-func (f *File) AddLineColumnInfo(offset int, filename string, line, column int) {
-	f.mutex.Lock()
-	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
-		f.infos = append(f.infos, lineInfo{offset, filename, line, column})
-	}
-	f.mutex.Unlock()
-}
-
-// Pos returns the Pos value for the given file offset;
-// the offset must be <= f.Size().
-// f.Pos(f.Offset(p)) == p.
-func (f *File) Pos(offset int) Pos {
-	if offset > f.size {
-		panic(fmt.Sprintf("invalid file offset %d (should be <= %d)", offset, f.size))
-	}
-	return Pos(f.base + offset)
-}
-
-// Offset returns the offset for the given file position p;
-// p must be a valid Pos value in that file.
-// f.Offset(f.Pos(offset)) == offset.
-func (f *File) Offset(p Pos) int {
-	if int(p) < f.base || int(p) > f.base+f.size {
-		panic(fmt.Sprintf("invalid Pos value %d (should be in [%d, %d])", p, f.base, f.base+f.size))
-	}
-	return int(p) - f.base
-}
-
-// Line returns the line number for the given file position p;
-// p must be a Pos value in that file or NoPos.
-func (f *File) Line(p Pos) int {
-	return f.Position(p).Line
-}
-
-func searchLineInfos(a []lineInfo, x int) int {
-	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
-}
-
-// unpack returns the filename and line and column number for a file offset.
-// If adjusted is set, unpack will return the filename and line information
-// possibly adjusted by //line comments; otherwise those comments are ignored.
-func (f *File) unpack(offset int, adjusted bool) (filename string, line, column int) {
-	f.mutex.Lock()
-	defer f.mutex.Unlock()
-	filename = f.name
-	if i := searchInts(f.lines, offset); i >= 0 {
-		line, column = i+1, offset-f.lines[i]+1
-	}
-	if adjusted && len(f.infos) > 0 {
-		// few files have extra line infos
-		if i := searchLineInfos(f.infos, offset); i >= 0 {
-			alt := &f.infos[i]
-			filename = alt.Filename
-			if i := searchInts(f.lines, alt.Offset); i >= 0 {
-				// i+1 is the line at which the alternative position was recorded
-				d := line - (i + 1) // line distance from alternative position base
-				line = alt.Line + d
-				if alt.Column == 0 {
-					// alternative column is unknown => relative column is unknown
-					// (the current specification for line directives requires
-					// this to apply until the next PosBase/line directive,
-					// not just until the new newline)
-					column = 0
-				} else if d == 0 {
-					// the alternative position base is on the current line
-					// => column is relative to alternative column
-					column = alt.Column + (offset - alt.Offset)
-				}
-			}
-		}
-	}
-	return
-}
-
-func (f *File) position(p Pos, adjusted bool) (pos Position) {
-	offset := int(p) - f.base
-	pos.Offset = offset
-	pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
-	return
-}
-
-// PositionFor returns the Position value for the given file position p.
-// If adjusted is set, the position may be adjusted by position-altering
-// //line comments; otherwise those comments are ignored.
-// p must be a Pos value in f or NoPos.
-func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
-	if p != NoPos {
-		if int(p) < f.base || int(p) > f.base+f.size {
-			panic(fmt.Sprintf("invalid Pos value %d (should be in [%d, %d])", p, f.base, f.base+f.size))
-		}
-		pos = f.position(p, adjusted)
-	}
-	return
-}
-
-// Position returns the Position value for the given file position p.
-// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true).
-func (f *File) Position(p Pos) (pos Position) {
-	return f.PositionFor(p, true)
-}
-
-// -----------------------------------------------------------------------------
-// FileSet
-
-// A FileSet represents a set of source files.
-// Methods of file sets are synchronized; multiple goroutines
-// may invoke them concurrently.
-//
-// The byte offsets for each file in a file set are mapped into
-// distinct (integer) intervals, one interval [base, base+size]
-// per file. Base represents the first byte in the file, and size
-// is the corresponding file size. A Pos value is a value in such
-// an interval. By determining the interval a Pos value belongs
-// to, the file, its file base, and thus the byte offset (position)
-// the Pos value is representing can be computed.
-//
-// When adding a new file, a file base must be provided. That can
-// be any integer value that is past the end of any interval of any
-// file already in the file set. For convenience, FileSet.Base provides
-// such a value, which is simply the end of the Pos interval of the most
-// recently added file, plus one. Unless there is a need to extend an
-// interval later, FileSet.Base should be used as the argument for
-// FileSet.AddFile.
-type FileSet struct {
-	mutex sync.RWMutex // protects the file set
-	base  int          // base offset for the next file
-	files []*File      // list of files in the order added to the set
-	last  *File        // cache of last file looked up
-}
-
-// NewFileSet creates a new file set.
-func NewFileSet() *FileSet {
-	return &FileSet{
-		base: 1, // 0 == NoPos
-	}
-}
-
-// Base returns the minimum base offset that must be provided to
-// AddFile when adding the next file.
-func (s *FileSet) Base() int {
-	s.mutex.RLock()
-	b := s.base
-	s.mutex.RUnlock()
-	return b
-
-}
-
-// AddFile adds a new file with a given filename, base offset, and file size
-// to the file set s and returns the file. Multiple files may have the same
-// name. The base offset must not be smaller than the FileSet's Base(), and
-// size must not be negative. As a special case, if a negative base is provided,
-// the current value of the FileSet's Base() is used instead.
-//
-// Adding the file will set the file set's Base() value to base + size + 1
-// as the minimum base value for the next file. The following relationship
-// exists between a Pos value p for a given file offset offs:
-//
-//	int(p) = base + offs
-//
-// with offs in the range [0, size] and thus p in the range [base, base+size].
-// For convenience, File.Pos may be used to create file-specific position
-// values from a file offset.
-func (s *FileSet) AddFile(filename string, base, size int) *File {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	if base < 0 {
-		base = s.base
-	}
-	if base < s.base {
-		panic(fmt.Sprintf("invalid base %d (should be >= %d)", base, s.base))
-	}
-	if size < 0 {
-		panic(fmt.Sprintf("invalid size %d (should be >= 0)", size))
-	}
-	// base >= s.base && size >= 0
-	f := &File{set: s, name: filename, base: base, size: size, lines: []int{0}}
-	base += size + 1 // +1 because EOF also has a position
-	if base < 0 {
-		panic("token.Pos offset overflow (> 2G of source code in file set)")
-	}
-	// add the file to the file set
-	s.base = base
-	s.files = append(s.files, f)
-	s.last = f
-	return f
-}
-
-// Iterate calls f for the files in the file set in the order they were added
-// until f returns false.
-func (s *FileSet) Iterate(f func(*File) bool) {
-	for i := 0; ; i++ {
-		var file *File
-		s.mutex.RLock()
-		if i < len(s.files) {
-			file = s.files[i]
-		}
-		s.mutex.RUnlock()
-		if file == nil || !f(file) {
-			break
-		}
-	}
-}
-
-func searchFiles(a []*File, x int) int {
-	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
-}
-
-func (s *FileSet) file(p Pos) *File {
-	s.mutex.RLock()
-	// common case: p is in last file
-	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
-		s.mutex.RUnlock()
-		return f
-	}
-	// p is not in last file - search all files
-	if i := searchFiles(s.files, int(p)); i >= 0 {
-		f := s.files[i]
-		// f.base <= int(p) by definition of searchFiles
-		if int(p) <= f.base+f.size {
-			s.mutex.RUnlock()
-			s.mutex.Lock()
-			s.last = f // race is ok - s.last is only a cache
-			s.mutex.Unlock()
-			return f
-		}
-	}
-	s.mutex.RUnlock()
-	return nil
-}
-
-// File returns the file that contains the position p.
-// If no such file is found (for instance for p == NoPos),
-// the result is nil.
-func (s *FileSet) File(p Pos) (f *File) {
-	if p != NoPos {
-		f = s.file(p)
-	}
-	return
-}
-
-// PositionFor converts a Pos p in the fileset into a Position value.
-// If adjusted is set, the position may be adjusted by position-altering
-// //line comments; otherwise those comments are ignored.
-// p must be a Pos value in s or NoPos.
-func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) {
-	if p != NoPos {
-		if f := s.file(p); f != nil {
-			return f.position(p, adjusted)
-		}
-	}
-	return
-}
-
-// Position converts a Pos p in the fileset into a Position value.
-// Calling s.Position(p) is equivalent to calling s.PositionFor(p, true).
-func (s *FileSet) Position(p Pos) (pos Position) {
-	return s.PositionFor(p, true)
-}
-
-// -----------------------------------------------------------------------------
-// Helper functions
-
-func searchInts(a []int, x int) int {
-	// This function body is a manually inlined version of:
-	//
-	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
-	//
-	// With better compiler optimizations, this may not be needed in the
-	// future, but at the moment this change improves the go/printer
-	// benchmark performance by ~30%. This has a direct impact on the
-	// speed of gofmt and thus seems worthwhile (2011-04-29).
-	// TODO(gri): Remove this when compilers have caught up.
-	i, j := 0, len(a)
-	for i < j {
-		h := int(uint(i+j) >> 1) // avoid overflow when computing h
-		// i ≤ h < j
-		if a[h] <= x {
-			i = h + 1
-		} else {
-			j = h
-		}
-	}
-	return i - 1
-}
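
For reference, the standard library's go/token package, which replaces this
deleted copy, exposes the same FileSet/Pos API described in the doc comments
above. A minimal illustrative sketch of the offset/Pos round trip (file name,
source, and offsets are invented for the example):

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		src := []byte("package p\n\nvar x int\n")

		fset := token.NewFileSet()
		// Register the file at fset.Base() with its size, as recommended above.
		f := fset.AddFile("p.go", fset.Base(), len(src))
		f.SetLinesForContent(src)

		// Round-trip a byte offset through the compact Pos encoding.
		offs := 11 // offset of "var", the start of line 3
		p := f.Pos(offs)
		fmt.Println(f.Offset(p) == offs) // true

		// Expand the Pos into a full Position via the file set.
		fmt.Println(fset.Position(p)) // p.go:3:1
	}
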
diff --git a/internal/backport/go/token/position_bench_test.go b/internal/backport/go/token/position_bench_test.go
deleted file mode 100644
index 41be728..0000000
--- a/internal/backport/go/token/position_bench_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-import (
-	"testing"
-)
-
-func BenchmarkSearchInts(b *testing.B) {
-	data := make([]int, 10000)
-	for i := 0; i < 10000; i++ {
-		data[i] = i
-	}
-	const x = 8
-	if r := searchInts(data, x); r != x {
-		b.Errorf("got index = %d; want %d", r, x)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		searchInts(data, x)
-	}
-}
diff --git a/internal/backport/go/token/position_test.go b/internal/backport/go/token/position_test.go
deleted file mode 100644
index 7d465df..0000000
--- a/internal/backport/go/token/position_test.go
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-import (
-	"fmt"
-	"math/rand"
-	"sync"
-	"testing"
-)
-
-func checkPos(t *testing.T, msg string, got, want Position) {
-	if got.Filename != want.Filename {
-		t.Errorf("%s: got filename = %q; want %q", msg, got.Filename, want.Filename)
-	}
-	if got.Offset != want.Offset {
-		t.Errorf("%s: got offset = %d; want %d", msg, got.Offset, want.Offset)
-	}
-	if got.Line != want.Line {
-		t.Errorf("%s: got line = %d; want %d", msg, got.Line, want.Line)
-	}
-	if got.Column != want.Column {
-		t.Errorf("%s: got column = %d; want %d", msg, got.Column, want.Column)
-	}
-}
-
-func TestNoPos(t *testing.T) {
-	if NoPos.IsValid() {
-		t.Errorf("NoPos should not be valid")
-	}
-	var fset *FileSet
-	checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
-	fset = NewFileSet()
-	checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
-}
-
-var tests = []struct {
-	filename string
-	source   []byte // may be nil
-	size     int
-	lines    []int
-}{
-	{"a", []byte{}, 0, []int{}},
-	{"b", []byte("01234"), 5, []int{0}},
-	{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
-	{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
-	{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
-	{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
-	{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
-	{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
-}
-
-func linecol(lines []int, offs int) (int, int) {
-	prevLineOffs := 0
-	for line, lineOffs := range lines {
-		if offs < lineOffs {
-			return line, offs - prevLineOffs + 1
-		}
-		prevLineOffs = lineOffs
-	}
-	return len(lines), offs - prevLineOffs + 1
-}
-
-func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
-	for offs := 0; offs < f.Size(); offs++ {
-		p := f.Pos(offs)
-		offs2 := f.Offset(p)
-		if offs2 != offs {
-			t.Errorf("%s, Offset: got offset %d; want %d", f.Name(), offs2, offs)
-		}
-		line, col := linecol(lines, offs)
-		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
-		checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
-		checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
-	}
-}
-
-func makeTestSource(size int, lines []int) []byte {
-	src := make([]byte, size)
-	for _, offs := range lines {
-		if offs > 0 {
-			src[offs-1] = '\n'
-		}
-	}
-	return src
-}
-
-func TestPositions(t *testing.T) {
-	const delta = 7 // a non-zero base offset increment
-	fset := NewFileSet()
-	for _, test := range tests {
-		// verify consistency of test case
-		if test.source != nil && len(test.source) != test.size {
-			t.Errorf("%s: inconsistent test case: got file size %d; want %d", test.filename, len(test.source), test.size)
-		}
-
-		// add file and verify name and size
-		f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
-		if f.Name() != test.filename {
-			t.Errorf("got filename %q; want %q", f.Name(), test.filename)
-		}
-		if f.Size() != test.size {
-			t.Errorf("%s: got file size %d; want %d", f.Name(), f.Size(), test.size)
-		}
-		if fset.File(f.Pos(0)) != f {
-			t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
-		}
-
-		// add lines individually and verify all positions
-		for i, offset := range test.lines {
-			f.AddLine(offset)
-			if f.LineCount() != i+1 {
-				t.Errorf("%s, AddLine: got line count %d; want %d", f.Name(), f.LineCount(), i+1)
-			}
-			// adding the same offset again should be ignored
-			f.AddLine(offset)
-			if f.LineCount() != i+1 {
-				t.Errorf("%s, AddLine: got unchanged line count %d; want %d", f.Name(), f.LineCount(), i+1)
-			}
-			verifyPositions(t, fset, f, test.lines[0:i+1])
-		}
-
-		// add lines with SetLines and verify all positions
-		if ok := f.SetLines(test.lines); !ok {
-			t.Errorf("%s: SetLines failed", f.Name())
-		}
-		if f.LineCount() != len(test.lines) {
-			t.Errorf("%s, SetLines: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
-		}
-		verifyPositions(t, fset, f, test.lines)
-
-		// add lines with SetLinesForContent and verify all positions
-		src := test.source
-		if src == nil {
-			// no test source available - create one from scratch
-			src = makeTestSource(test.size, test.lines)
-		}
-		f.SetLinesForContent(src)
-		if f.LineCount() != len(test.lines) {
-			t.Errorf("%s, SetLinesForContent: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
-		}
-		verifyPositions(t, fset, f, test.lines)
-	}
-}
-
-func TestLineInfo(t *testing.T) {
-	fset := NewFileSet()
-	f := fset.AddFile("foo", fset.Base(), 500)
-	lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
-	// add lines individually and provide alternative line information
-	for _, offs := range lines {
-		f.AddLine(offs)
-		f.AddLineInfo(offs, "bar", 42)
-	}
-	// verify positions for all offsets
-	for offs := 0; offs <= f.Size(); offs++ {
-		p := f.Pos(offs)
-		_, col := linecol(lines, offs)
-		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
-		checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
-		checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
-	}
-}
-
-func TestFiles(t *testing.T) {
-	fset := NewFileSet()
-	for i, test := range tests {
-		base := fset.Base()
-		if i%2 == 1 {
-			// Setting a negative base is equivalent to
-			// fset.Base(), so test some of each.
-			base = -1
-		}
-		fset.AddFile(test.filename, base, test.size)
-		j := 0
-		fset.Iterate(func(f *File) bool {
-			if f.Name() != tests[j].filename {
-				t.Errorf("got filename = %s; want %s", f.Name(), tests[j].filename)
-			}
-			j++
-			return true
-		})
-		if j != i+1 {
-			t.Errorf("got %d files; want %d", j, i+1)
-		}
-	}
-}
-
-// FileSet.File should return nil if Pos is past the end of the FileSet.
-func TestFileSetPastEnd(t *testing.T) {
-	fset := NewFileSet()
-	for _, test := range tests {
-		fset.AddFile(test.filename, fset.Base(), test.size)
-	}
-	if f := fset.File(Pos(fset.Base())); f != nil {
-		t.Errorf("got %v, want nil", f)
-	}
-}
-
-func TestFileSetCacheUnlikely(t *testing.T) {
-	fset := NewFileSet()
-	offsets := make(map[string]int)
-	for _, test := range tests {
-		offsets[test.filename] = fset.Base()
-		fset.AddFile(test.filename, fset.Base(), test.size)
-	}
-	for file, pos := range offsets {
-		f := fset.File(Pos(pos))
-		if f.Name() != file {
-			t.Errorf("got %q at position %d, want %q", f.Name(), pos, file)
-		}
-	}
-}
-
-// issue 4345. Test that concurrent use of FileSet.Pos does not trigger a
-// race in the FileSet position cache.
-func TestFileSetRace(t *testing.T) {
-	fset := NewFileSet()
-	for i := 0; i < 100; i++ {
-		fset.AddFile(fmt.Sprintf("file-%d", i), fset.Base(), 1031)
-	}
-	max := int32(fset.Base())
-	var stop sync.WaitGroup
-	r := rand.New(rand.NewSource(7))
-	for i := 0; i < 2; i++ {
-		r := rand.New(rand.NewSource(r.Int63()))
-		stop.Add(1)
-		go func() {
-			for i := 0; i < 1000; i++ {
-				fset.Position(Pos(r.Int31n(max)))
-			}
-			stop.Done()
-		}()
-	}
-	stop.Wait()
-}
-
-// issue 16548. Test that concurrent use of File.AddLine and FileSet.PositionFor
-// does not trigger a race in the FileSet position cache.
-func TestFileSetRace2(t *testing.T) {
-	const N = 1e3
-	var (
-		fset = NewFileSet()
-		file = fset.AddFile("", -1, N)
-		ch   = make(chan int, 2)
-	)
-
-	go func() {
-		for i := 0; i < N; i++ {
-			file.AddLine(i)
-		}
-		ch <- 1
-	}()
-
-	go func() {
-		pos := file.Pos(0)
-		for i := 0; i < N; i++ {
-			fset.PositionFor(pos, false)
-		}
-		ch <- 1
-	}()
-
-	<-ch
-	<-ch
-}
-
-func TestPositionFor(t *testing.T) {
-	src := []byte(`
-foo
-b
-ar
-//line :100
-foobar
-//line bar:3
-done
-`)
-
-	const filename = "foo"
-	fset := NewFileSet()
-	f := fset.AddFile(filename, fset.Base(), len(src))
-	f.SetLinesForContent(src)
-
-	// verify position info
-	for i, offs := range f.lines {
-		got1 := f.PositionFor(f.Pos(offs), false)
-		got2 := f.PositionFor(f.Pos(offs), true)
-		got3 := f.Position(f.Pos(offs))
-		want := Position{filename, offs, i + 1, 1}
-		checkPos(t, "1. PositionFor unadjusted", got1, want)
-		checkPos(t, "1. PositionFor adjusted", got2, want)
-		checkPos(t, "1. Position", got3, want)
-	}
-
-	// manually add //line info on lines l1, l2
-	const l1, l2 = 5, 7
-	f.AddLineInfo(f.lines[l1-1], "", 100)
-	f.AddLineInfo(f.lines[l2-1], "bar", 3)
-
-	// unadjusted position info must remain unchanged
-	for i, offs := range f.lines {
-		got1 := f.PositionFor(f.Pos(offs), false)
-		want := Position{filename, offs, i + 1, 1}
-		checkPos(t, "2. PositionFor unadjusted", got1, want)
-	}
-
-	// adjusted position info should have changed
-	for i, offs := range f.lines {
-		got2 := f.PositionFor(f.Pos(offs), true)
-		got3 := f.Position(f.Pos(offs))
-		want := Position{filename, offs, i + 1, 1}
-		// manually compute wanted filename and line
-		line := want.Line
-		if i+1 >= l1 {
-			want.Filename = ""
-			want.Line = line - l1 + 100
-		}
-		if i+1 >= l2 {
-			want.Filename = "bar"
-			want.Line = line - l2 + 3
-		}
-		checkPos(t, "3. PositionFor adjusted", got2, want)
-		checkPos(t, "3. Position", got3, want)
-	}
-}
-
-func TestLineStart(t *testing.T) {
-	const src = "one\ntwo\nthree\n"
-	fset := NewFileSet()
-	f := fset.AddFile("input", -1, len(src))
-	f.SetLinesForContent([]byte(src))
-
-	for line := 1; line <= 3; line++ {
-		pos := f.LineStart(line)
-		position := fset.Position(pos)
-		if position.Line != line || position.Column != 1 {
-			t.Errorf("LineStart(%d) returned wrong pos %d: %s", line, pos, position)
-		}
-	}
-}
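
The deleted position_test.go exercises adjusted versus unadjusted position
lookup; the standard go/token package behaves the same way. A small sketch
(the source text, file names, and the hand-written line-info offset are
invented for the example):

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		src := []byte("a\nb\nc\n")
		fset := token.NewFileSet()
		f := fset.AddFile("real.go", fset.Base(), len(src))
		f.SetLinesForContent(src)

		// Record what a line directive would: from byte offset 2 (start of
		// line 2) onward, positions report as gen.go starting at line 100.
		f.AddLineInfo(2, "gen.go", 100)

		p := f.Pos(4)                        // start of line 3
		fmt.Println(f.PositionFor(p, false)) // real.go:3:1 (unadjusted)
		fmt.Println(f.PositionFor(p, true))  // gen.go:101:1 (adjusted)
	}
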
diff --git a/internal/backport/go/token/serialize.go b/internal/backport/go/token/serialize.go
deleted file mode 100644
index d0ea345..0000000
--- a/internal/backport/go/token/serialize.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-type serializedFile struct {
-	// fields correspond 1:1 to fields with same (lower-case) name in File
-	Name  string
-	Base  int
-	Size  int
-	Lines []int
-	Infos []lineInfo
-}
-
-type serializedFileSet struct {
-	Base  int
-	Files []serializedFile
-}
-
-// Read calls decode to deserialize a file set into s; s must not be nil.
-func (s *FileSet) Read(decode func(interface{}) error) error {
-	var ss serializedFileSet
-	if err := decode(&ss); err != nil {
-		return err
-	}
-
-	s.mutex.Lock()
-	s.base = ss.Base
-	files := make([]*File, len(ss.Files))
-	for i := 0; i < len(ss.Files); i++ {
-		f := &ss.Files[i]
-		files[i] = &File{
-			set:   s,
-			name:  f.Name,
-			base:  f.Base,
-			size:  f.Size,
-			lines: f.Lines,
-			infos: f.Infos,
-		}
-	}
-	s.files = files
-	s.last = nil
-	s.mutex.Unlock()
-
-	return nil
-}
-
-// Write calls encode to serialize the file set s.
-func (s *FileSet) Write(encode func(interface{}) error) error {
-	var ss serializedFileSet
-
-	s.mutex.Lock()
-	ss.Base = s.base
-	files := make([]serializedFile, len(s.files))
-	for i, f := range s.files {
-		f.mutex.Lock()
-		files[i] = serializedFile{
-			Name:  f.name,
-			Base:  f.base,
-			Size:  f.size,
-			Lines: append([]int(nil), f.lines...),
-			Infos: append([]lineInfo(nil), f.infos...),
-		}
-		f.mutex.Unlock()
-	}
-	ss.Files = files
-	s.mutex.Unlock()
-
-	return encode(ss)
-}
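
The Read/Write hooks deleted here exist unchanged in the standard go/token
package, so file sets can still be carried across a gob boundary. An
illustrative sketch (file name and size are arbitrary):

	package main

	import (
		"bytes"
		"encoding/gob"
		"fmt"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		fset.AddFile("a.go", fset.Base(), 100)

		// Serialize the file set through a caller-supplied encoder.
		var buf bytes.Buffer
		if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
			panic(err)
		}

		// Deserialize into a fresh FileSet.
		restored := token.NewFileSet()
		if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
			panic(err)
		}
		fmt.Println(restored.Base() == fset.Base()) // true
	}
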
diff --git a/internal/backport/go/token/serialize_test.go b/internal/backport/go/token/serialize_test.go
deleted file mode 100644
index 4e925ad..0000000
--- a/internal/backport/go/token/serialize_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-import (
-	"bytes"
-	"encoding/gob"
-	"fmt"
-	"testing"
-)
-
-// equal returns nil if p and q describe the same file set;
-// otherwise it returns an error describing the discrepancy.
-func equal(p, q *FileSet) error {
-	if p == q {
-		// avoid deadlock if p == q
-		return nil
-	}
-
-	// not strictly needed for the test
-	p.mutex.Lock()
-	q.mutex.Lock()
-	defer q.mutex.Unlock()
-	defer p.mutex.Unlock()
-
-	if p.base != q.base {
-		return fmt.Errorf("different bases: %d != %d", p.base, q.base)
-	}
-
-	if len(p.files) != len(q.files) {
-		return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
-	}
-
-	for i, f := range p.files {
-		g := q.files[i]
-		if f.set != p {
-			return fmt.Errorf("wrong fileset for %q", f.name)
-		}
-		if g.set != q {
-			return fmt.Errorf("wrong fileset for %q", g.name)
-		}
-		if f.name != g.name {
-			return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
-		}
-		if f.base != g.base {
-			return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
-		}
-		if f.size != g.size {
-			return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
-		}
-		for j, l := range f.lines {
-			m := g.lines[j]
-			if l != m {
-				return fmt.Errorf("different offsets for %q", f.name)
-			}
-		}
-		for j, l := range f.infos {
-			m := g.infos[j]
-			if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
-				return fmt.Errorf("different infos for %q", f.name)
-			}
-		}
-	}
-
-	// we don't care about .last - it's just a cache
-	return nil
-}
-
-func checkSerialize(t *testing.T, p *FileSet) {
-	var buf bytes.Buffer
-	encode := func(x interface{}) error {
-		return gob.NewEncoder(&buf).Encode(x)
-	}
-	if err := p.Write(encode); err != nil {
-		t.Errorf("writing fileset failed: %s", err)
-		return
-	}
-	q := NewFileSet()
-	decode := func(x interface{}) error {
-		return gob.NewDecoder(&buf).Decode(x)
-	}
-	if err := q.Read(decode); err != nil {
-		t.Errorf("reading fileset failed: %s", err)
-		return
-	}
-	if err := equal(p, q); err != nil {
-		t.Errorf("filesets not identical: %s", err)
-	}
-}
-
-func TestSerialization(t *testing.T) {
-	p := NewFileSet()
-	checkSerialize(t, p)
-	// add some files
-	for i := 0; i < 10; i++ {
-		f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
-		checkSerialize(t, p)
-		// add some lines and alternative file infos
-		line := 1000
-		for offs := 0; offs < f.Size(); offs += 40 + i {
-			f.AddLine(offs)
-			if offs%7 == 0 {
-				f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
-				line += 33
-			}
-		}
-		checkSerialize(t, p)
-	}
-}
diff --git a/internal/backport/go/token/token.go b/internal/backport/go/token/token.go
deleted file mode 100644
index db4e955..0000000
--- a/internal/backport/go/token/token.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package token defines constants representing the lexical tokens of the Go
-// programming language and basic operations on tokens (printing, predicates).
-package token
-
-import (
-	"strconv"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Token is the set of lexical tokens of the Go programming language.
-type Token int
-
-// The list of tokens.
-const (
-	// Special tokens
-	ILLEGAL Token = iota
-	EOF
-	COMMENT
-
-	literal_beg
-	// Identifiers and basic type literals
-	// (these tokens stand for classes of literals)
-	IDENT  // main
-	INT    // 12345
-	FLOAT  // 123.45
-	IMAG   // 123.45i
-	CHAR   // 'a'
-	STRING // "abc"
-	literal_end
-
-	operator_beg
-	// Operators and delimiters
-	ADD // +
-	SUB // -
-	MUL // *
-	QUO // /
-	REM // %
-
-	AND     // &
-	OR      // |
-	XOR     // ^
-	SHL     // <<
-	SHR     // >>
-	AND_NOT // &^
-
-	ADD_ASSIGN // +=
-	SUB_ASSIGN // -=
-	MUL_ASSIGN // *=
-	QUO_ASSIGN // /=
-	REM_ASSIGN // %=
-
-	AND_ASSIGN     // &=
-	OR_ASSIGN      // |=
-	XOR_ASSIGN     // ^=
-	SHL_ASSIGN     // <<=
-	SHR_ASSIGN     // >>=
-	AND_NOT_ASSIGN // &^=
-
-	LAND  // &&
-	LOR   // ||
-	ARROW // <-
-	INC   // ++
-	DEC   // --
-
-	EQL    // ==
-	LSS    // <
-	GTR    // >
-	ASSIGN // =
-	NOT    // !
-
-	NEQ      // !=
-	LEQ      // <=
-	GEQ      // >=
-	DEFINE   // :=
-	ELLIPSIS // ...
-
-	LPAREN // (
-	LBRACK // [
-	LBRACE // {
-	COMMA  // ,
-	PERIOD // .
-
-	RPAREN    // )
-	RBRACK    // ]
-	RBRACE    // }
-	SEMICOLON // ;
-	COLON     // :
-	operator_end
-
-	keyword_beg
-	// Keywords
-	BREAK
-	CASE
-	CHAN
-	CONST
-	CONTINUE
-
-	DEFAULT
-	DEFER
-	ELSE
-	FALLTHROUGH
-	FOR
-
-	FUNC
-	GO
-	GOTO
-	IF
-	IMPORT
-
-	INTERFACE
-	MAP
-	PACKAGE
-	RANGE
-	RETURN
-
-	SELECT
-	STRUCT
-	SWITCH
-	TYPE
-	VAR
-	keyword_end
-
-	additional_beg
-	// additional tokens, handled in an ad-hoc manner
-	TILDE
-	additional_end
-)
-
-var tokens = [...]string{
-	ILLEGAL: "ILLEGAL",
-
-	EOF:     "EOF",
-	COMMENT: "COMMENT",
-
-	IDENT:  "IDENT",
-	INT:    "INT",
-	FLOAT:  "FLOAT",
-	IMAG:   "IMAG",
-	CHAR:   "CHAR",
-	STRING: "STRING",
-
-	ADD: "+",
-	SUB: "-",
-	MUL: "*",
-	QUO: "/",
-	REM: "%",
-
-	AND:     "&",
-	OR:      "|",
-	XOR:     "^",
-	SHL:     "<<",
-	SHR:     ">>",
-	AND_NOT: "&^",
-
-	ADD_ASSIGN: "+=",
-	SUB_ASSIGN: "-=",
-	MUL_ASSIGN: "*=",
-	QUO_ASSIGN: "/=",
-	REM_ASSIGN: "%=",
-
-	AND_ASSIGN:     "&=",
-	OR_ASSIGN:      "|=",
-	XOR_ASSIGN:     "^=",
-	SHL_ASSIGN:     "<<=",
-	SHR_ASSIGN:     ">>=",
-	AND_NOT_ASSIGN: "&^=",
-
-	LAND:  "&&",
-	LOR:   "||",
-	ARROW: "<-",
-	INC:   "++",
-	DEC:   "--",
-
-	EQL:    "==",
-	LSS:    "<",
-	GTR:    ">",
-	ASSIGN: "=",
-	NOT:    "!",
-
-	NEQ:      "!=",
-	LEQ:      "<=",
-	GEQ:      ">=",
-	DEFINE:   ":=",
-	ELLIPSIS: "...",
-
-	LPAREN: "(",
-	LBRACK: "[",
-	LBRACE: "{",
-	COMMA:  ",",
-	PERIOD: ".",
-
-	RPAREN:    ")",
-	RBRACK:    "]",
-	RBRACE:    "}",
-	SEMICOLON: ";",
-	COLON:     ":",
-
-	BREAK:    "break",
-	CASE:     "case",
-	CHAN:     "chan",
-	CONST:    "const",
-	CONTINUE: "continue",
-
-	DEFAULT:     "default",
-	DEFER:       "defer",
-	ELSE:        "else",
-	FALLTHROUGH: "fallthrough",
-	FOR:         "for",
-
-	FUNC:   "func",
-	GO:     "go",
-	GOTO:   "goto",
-	IF:     "if",
-	IMPORT: "import",
-
-	INTERFACE: "interface",
-	MAP:       "map",
-	PACKAGE:   "package",
-	RANGE:     "range",
-	RETURN:    "return",
-
-	SELECT: "select",
-	STRUCT: "struct",
-	SWITCH: "switch",
-	TYPE:   "type",
-	VAR:    "var",
-
-	TILDE: "~",
-}
-
-// String returns the string corresponding to the token tok.
-// For operators, delimiters, and keywords the string is the actual
-// token character sequence (e.g., for the token ADD, the string is
-// "+"). For all other tokens the string corresponds to the token
-// constant name (e.g. for the token IDENT, the string is "IDENT").
-func (tok Token) String() string {
-	s := ""
-	if 0 <= tok && tok < Token(len(tokens)) {
-		s = tokens[tok]
-	}
-	if s == "" {
-		s = "token(" + strconv.Itoa(int(tok)) + ")"
-	}
-	return s
-}
-
-// A set of constants for precedence-based expression parsing.
-// Non-operators have lowest precedence, followed by operators
-// starting with precedence 1 up to unary operators. The highest
-// precedence serves as "catch-all" precedence for selector,
-// indexing, and other operator and delimiter tokens.
-const (
-	LowestPrec  = 0 // non-operators
-	UnaryPrec   = 6
-	HighestPrec = 7
-)
-
-// Precedence returns the operator precedence of the binary
-// operator op. If op is not a binary operator, the result
-// is LowestPrec.
-func (op Token) Precedence() int {
-	switch op {
-	case LOR:
-		return 1
-	case LAND:
-		return 2
-	case EQL, NEQ, LSS, LEQ, GTR, GEQ:
-		return 3
-	case ADD, SUB, OR, XOR:
-		return 4
-	case MUL, QUO, REM, SHL, SHR, AND, AND_NOT:
-		return 5
-	}
-	return LowestPrec
-}
-
-var keywords map[string]Token
-
-func init() {
-	keywords = make(map[string]Token)
-	for i := keyword_beg + 1; i < keyword_end; i++ {
-		keywords[tokens[i]] = i
-	}
-}
-
-// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
-func Lookup(ident string) Token {
-	if tok, is_keyword := keywords[ident]; is_keyword {
-		return tok
-	}
-	return IDENT
-}
-
-// Predicates
-
-// IsLiteral returns true for tokens corresponding to identifiers
-// and basic type literals; it returns false otherwise.
-func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
-
-// IsOperator returns true for tokens corresponding to operators and
-// delimiters; it returns false otherwise.
-func (tok Token) IsOperator() bool {
-	return (operator_beg < tok && tok < operator_end) || tok == TILDE
-}
-
-// IsKeyword returns true for tokens corresponding to keywords;
-// it returns false otherwise.
-func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end }
-
-// IsExported reports whether name starts with an upper-case letter.
-func IsExported(name string) bool {
-	ch, _ := utf8.DecodeRuneInString(name)
-	return unicode.IsUpper(ch)
-}
-
-// IsKeyword reports whether name is a Go keyword, such as "func" or "return".
-func IsKeyword(name string) bool {
-	// TODO: opt: use a perfect hash function instead of a global map.
-	_, ok := keywords[name]
-	return ok
-}
-
-// IsIdentifier reports whether name is a Go identifier, that is, a non-empty
-// string made up of letters, digits, and underscores, where the first character
-// is not a digit. Keywords are not identifiers.
-func IsIdentifier(name string) bool {
-	for i, c := range name {
-		if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
-			return false
-		}
-	}
-	return name != "" && !IsKeyword(name)
-}
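
The lookup, predicate, and precedence helpers in the deleted token.go are
available unchanged from the standard go/token package. A small sketch:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		// Keywords map to their token; any other identifier maps to IDENT.
		fmt.Println(token.Lookup("func"))   // func
		fmt.Println(token.Lookup("foobar")) // IDENT

		// Name predicates.
		fmt.Println(token.IsKeyword("range"))    // true
		fmt.Println(token.IsIdentifier("x1"))    // true
		fmt.Println(token.IsExported("Println")) // true

		// Binary operator precedence, as used by expression parsing.
		fmt.Println(token.MUL.Precedence() > token.ADD.Precedence()) // true
	}
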
diff --git a/internal/backport/go/token/token_test.go b/internal/backport/go/token/token_test.go
deleted file mode 100644
index eff38cc..0000000
--- a/internal/backport/go/token/token_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-import "testing"
-
-func TestIsIdentifier(t *testing.T) {
-	tests := []struct {
-		name string
-		in   string
-		want bool
-	}{
-		{"Empty", "", false},
-		{"Space", " ", false},
-		{"SpaceSuffix", "foo ", false},
-		{"Number", "123", false},
-		{"Keyword", "func", false},
-
-		{"LettersASCII", "foo", true},
-		{"MixedASCII", "_bar123", true},
-		{"UppercaseKeyword", "Func", true},
-		{"LettersUnicode", "fóö", true},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			if got := IsIdentifier(test.in); got != test.want {
-				t.Fatalf("IsIdentifier(%q) = %t, want %v", test.in, got, test.want)
-			}
-		})
-	}
-}
diff --git a/internal/backport/html/template/attr.go b/internal/backport/html/template/attr.go
deleted file mode 100644
index 22922e6..0000000
--- a/internal/backport/html/template/attr.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"strings"
-)
-
-// attrTypeMap[n] describes the value of the given attribute.
-// If an attribute affects (or can mask) the encoding or interpretation of
-// other content, or affects the contents, idempotency, or credentials of a
-// network message, then the value in this map is contentTypeUnsafe.
-// This map is derived from HTML5, specifically
-// https://www.w3.org/TR/html5/Overview.html#attributes-1
-// as well as "%URI"-typed attributes from
-// https://www.w3.org/TR/html4/index/attributes.html
-var attrTypeMap = map[string]contentType{
-	"accept":          contentTypePlain,
-	"accept-charset":  contentTypeUnsafe,
-	"action":          contentTypeURL,
-	"alt":             contentTypePlain,
-	"archive":         contentTypeURL,
-	"async":           contentTypeUnsafe,
-	"autocomplete":    contentTypePlain,
-	"autofocus":       contentTypePlain,
-	"autoplay":        contentTypePlain,
-	"background":      contentTypeURL,
-	"border":          contentTypePlain,
-	"checked":         contentTypePlain,
-	"cite":            contentTypeURL,
-	"challenge":       contentTypeUnsafe,
-	"charset":         contentTypeUnsafe,
-	"class":           contentTypePlain,
-	"classid":         contentTypeURL,
-	"codebase":        contentTypeURL,
-	"cols":            contentTypePlain,
-	"colspan":         contentTypePlain,
-	"content":         contentTypeUnsafe,
-	"contenteditable": contentTypePlain,
-	"contextmenu":     contentTypePlain,
-	"controls":        contentTypePlain,
-	"coords":          contentTypePlain,
-	"crossorigin":     contentTypeUnsafe,
-	"data":            contentTypeURL,
-	"datetime":        contentTypePlain,
-	"default":         contentTypePlain,
-	"defer":           contentTypeUnsafe,
-	"dir":             contentTypePlain,
-	"dirname":         contentTypePlain,
-	"disabled":        contentTypePlain,
-	"draggable":       contentTypePlain,
-	"dropzone":        contentTypePlain,
-	"enctype":         contentTypeUnsafe,
-	"for":             contentTypePlain,
-	"form":            contentTypeUnsafe,
-	"formaction":      contentTypeURL,
-	"formenctype":     contentTypeUnsafe,
-	"formmethod":      contentTypeUnsafe,
-	"formnovalidate":  contentTypeUnsafe,
-	"formtarget":      contentTypePlain,
-	"headers":         contentTypePlain,
-	"height":          contentTypePlain,
-	"hidden":          contentTypePlain,
-	"high":            contentTypePlain,
-	"href":            contentTypeURL,
-	"hreflang":        contentTypePlain,
-	"http-equiv":      contentTypeUnsafe,
-	"icon":            contentTypeURL,
-	"id":              contentTypePlain,
-	"ismap":           contentTypePlain,
-	"keytype":         contentTypeUnsafe,
-	"kind":            contentTypePlain,
-	"label":           contentTypePlain,
-	"lang":            contentTypePlain,
-	"language":        contentTypeUnsafe,
-	"list":            contentTypePlain,
-	"longdesc":        contentTypeURL,
-	"loop":            contentTypePlain,
-	"low":             contentTypePlain,
-	"manifest":        contentTypeURL,
-	"max":             contentTypePlain,
-	"maxlength":       contentTypePlain,
-	"media":           contentTypePlain,
-	"mediagroup":      contentTypePlain,
-	"method":          contentTypeUnsafe,
-	"min":             contentTypePlain,
-	"multiple":        contentTypePlain,
-	"name":            contentTypePlain,
-	"novalidate":      contentTypeUnsafe,
-	// Skip handler names from
-	// https://www.w3.org/TR/html5/webappapis.html#event-handlers-on-elements,-document-objects,-and-window-objects
-	// since we have special handling in attrType.
-	"open":        contentTypePlain,
-	"optimum":     contentTypePlain,
-	"pattern":     contentTypeUnsafe,
-	"placeholder": contentTypePlain,
-	"poster":      contentTypeURL,
-	"profile":     contentTypeURL,
-	"preload":     contentTypePlain,
-	"pubdate":     contentTypePlain,
-	"radiogroup":  contentTypePlain,
-	"readonly":    contentTypePlain,
-	"rel":         contentTypeUnsafe,
-	"required":    contentTypePlain,
-	"reversed":    contentTypePlain,
-	"rows":        contentTypePlain,
-	"rowspan":     contentTypePlain,
-	"sandbox":     contentTypeUnsafe,
-	"spellcheck":  contentTypePlain,
-	"scope":       contentTypePlain,
-	"scoped":      contentTypePlain,
-	"seamless":    contentTypePlain,
-	"selected":    contentTypePlain,
-	"shape":       contentTypePlain,
-	"size":        contentTypePlain,
-	"sizes":       contentTypePlain,
-	"span":        contentTypePlain,
-	"src":         contentTypeURL,
-	"srcdoc":      contentTypeHTML,
-	"srclang":     contentTypePlain,
-	"srcset":      contentTypeSrcset,
-	"start":       contentTypePlain,
-	"step":        contentTypePlain,
-	"style":       contentTypeCSS,
-	"tabindex":    contentTypePlain,
-	"target":      contentTypePlain,
-	"title":       contentTypePlain,
-	"type":        contentTypeUnsafe,
-	"usemap":      contentTypeURL,
-	"value":       contentTypeUnsafe,
-	"width":       contentTypePlain,
-	"wrap":        contentTypePlain,
-	"xmlns":       contentTypeURL,
-}
-
-// attrType returns a conservative (upper-bound on authority) guess at the
-// type of the lowercase named attribute.
-func attrType(name string) contentType {
-	if strings.HasPrefix(name, "data-") {
-		// Strip data- so that custom attribute heuristics below are
-		// widely applied.
-		// Treat data-action as URL below.
-		name = name[5:]
-	} else if colon := strings.IndexRune(name, ':'); colon != -1 {
-		if name[:colon] == "xmlns" {
-			return contentTypeURL
-		}
-		// Treat svg:href and xlink:href as href below.
-		name = name[colon+1:]
-	}
-	if t, ok := attrTypeMap[name]; ok {
-		return t
-	}
-	// Treat partial event handler names as script.
-	if strings.HasPrefix(name, "on") {
-		return contentTypeJS
-	}
-
-	// Heuristics to prevent "javascript:..." injection in custom
-	// data attributes and custom attributes like g:tweetUrl.
-	// https://www.w3.org/TR/html5/dom.html#embedding-custom-non-visible-data-with-the-data-*-attributes
-	// "Custom data attributes are intended to store custom data
-	//  private to the page or application, for which there are no
-	//  more appropriate attributes or elements."
-	// Developers seem to store URL content in data URLs that start
-	// or end with "URI" or "URL".
-	if strings.Contains(name, "src") ||
-		strings.Contains(name, "uri") ||
-		strings.Contains(name, "url") {
-		return contentTypeURL
-	}
-	return contentTypePlain
-}
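
With the standard html/template package, the attribute classification deleted
above is applied automatically during contextual escaping. A hedged sketch
(the template and data are made up; exact escaping details can vary slightly
between Go versions):

	package main

	import (
		"html/template"
		"os"
	)

	func main() {
		// href is URL-typed, so an unsafe scheme should be neutralized
		// (to "#ZgotmplZ"); title is plain text and is only HTML-escaped.
		t := template.Must(template.New("a").Parse(
			`<a href="{{.URL}}" title="{{.Title}}">link</a>` + "\n"))

		_ = t.Execute(os.Stdout, map[string]string{
			"URL":   "javascript:alert(1)",
			"Title": `<b>"hi"</b>`,
		})
	}
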
diff --git a/internal/backport/html/template/attr_string.go b/internal/backport/html/template/attr_string.go
deleted file mode 100644
index babe70c..0000000
--- a/internal/backport/html/template/attr_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type attr"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _attr_name = "attrNoneattrScriptattrScriptTypeattrStyleattrURLattrSrcset"
-
-var _attr_index = [...]uint8{0, 8, 18, 32, 41, 48, 58}
-
-func (i attr) String() string {
-	if i >= attr(len(_attr_index)-1) {
-		return "attr(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _attr_name[_attr_index[i]:_attr_index[i+1]]
-}
diff --git a/internal/backport/html/template/clone_test.go b/internal/backport/html/template/clone_test.go
deleted file mode 100644
index 07e8ef3..0000000
--- a/internal/backport/html/template/clone_test.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"strings"
-	"sync"
-	"testing"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-func TestAddParseTreeHTML(t *testing.T) {
-	root := Must(New("root").Parse(`{{define "a"}} {{.}} {{template "b"}} {{.}} "></a>{{end}}`))
-	tree, err := parse.Parse("t", `{{define "b"}}<a href="{{end}}`, "", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	added := Must(root.AddParseTree("b", tree["b"]))
-	b := new(bytes.Buffer)
-	err = added.ExecuteTemplate(b, "a", "1>0")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if got, want := b.String(), ` 1&gt;0 <a href=" 1%3e0 "></a>`; got != want {
-		t.Errorf("got %q want %q", got, want)
-	}
-}
-
-func TestClone(t *testing.T) {
-	// The {{.}} will be executed with data "<i>*/" in different contexts.
-	// In the t0 template, it will be in a text context.
-	// In the t1 template, it will be in a URL context.
-	// In the t2 template, it will be in a JavaScript context.
-	// In the t3 template, it will be in a CSS context.
-	const tmpl = `{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}`
-	b := new(bytes.Buffer)
-
-	// Create an incomplete template t0.
-	t0 := Must(New("t0").Parse(tmpl))
-
-	// Clone t0 as t1.
-	t1 := Must(t0.Clone())
-	Must(t1.Parse(`{{define "lhs"}} <a href=" {{end}}`))
-	Must(t1.Parse(`{{define "rhs"}} "></a> {{end}}`))
-
-	// Execute t1.
-	b.Reset()
-	if err := t1.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
-		t.Fatal(err)
-	}
-	if got, want := b.String(), ` <a href=" %3ci%3e*/ "></a> `; got != want {
-		t.Errorf("t1: got %q want %q", got, want)
-	}
-
-	// Clone t0 as t2.
-	t2 := Must(t0.Clone())
-	Must(t2.Parse(`{{define "lhs"}} <p onclick="javascript: {{end}}`))
-	Must(t2.Parse(`{{define "rhs"}} "></p> {{end}}`))
-
-	// Execute t2.
-	b.Reset()
-	if err := t2.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
-		t.Fatal(err)
-	}
-	if got, want := b.String(), ` <p onclick="javascript: &#34;\u003ci\u003e*/&#34; "></p> `; got != want {
-		t.Errorf("t2: got %q want %q", got, want)
-	}
-
-	// Clone t0 as t3, but do not execute t3 yet.
-	t3 := Must(t0.Clone())
-	Must(t3.Parse(`{{define "lhs"}} <style> {{end}}`))
-	Must(t3.Parse(`{{define "rhs"}} </style> {{end}}`))
-
-	// Complete t0.
-	Must(t0.Parse(`{{define "lhs"}} ( {{end}}`))
-	Must(t0.Parse(`{{define "rhs"}} ) {{end}}`))
-
-	// Clone t0 as t4. Redefining the "lhs" template should not fail.
-	t4 := Must(t0.Clone())
-	if _, err := t4.Parse(`{{define "lhs"}} OK {{end}}`); err != nil {
-		t.Errorf(`redefine "lhs": got err %v want nil`, err)
-	}
-	// Cloning t1 should fail as it has been executed.
-	if _, err := t1.Clone(); err == nil {
-		t.Error("cloning t1: got nil err want non-nil")
-	}
-	// Redefining the "lhs" template in t1 should fail as it has been executed.
-	if _, err := t1.Parse(`{{define "lhs"}} OK {{end}}`); err == nil {
-		t.Error(`redefine "lhs": got nil err want non-nil`)
-	}
-
-	// Execute t0.
-	b.Reset()
-	if err := t0.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
-		t.Fatal(err)
-	}
-	if got, want := b.String(), ` ( &lt;i&gt;*/ ) `; got != want {
-		t.Errorf("t0: got %q want %q", got, want)
-	}
-
-	// Clone t0. This should fail, as t0 has already executed.
-	if _, err := t0.Clone(); err == nil {
-		t.Error(`t0.Clone(): got nil err want non-nil`)
-	}
-
-	// Similarly, cloning sub-templates should fail.
-	if _, err := t0.Lookup("a").Clone(); err == nil {
-		t.Error(`t0.Lookup("a").Clone(): got nil err want non-nil`)
-	}
-	if _, err := t0.Lookup("lhs").Clone(); err == nil {
-		t.Error(`t0.Lookup("lhs").Clone(): got nil err want non-nil`)
-	}
-
-	// Execute t3.
-	b.Reset()
-	if err := t3.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
-		t.Fatal(err)
-	}
-	if got, want := b.String(), ` <style> ZgotmplZ </style> `; got != want {
-		t.Errorf("t3: got %q want %q", got, want)
-	}
-}
-
-func TestTemplates(t *testing.T) {
-	names := []string{"t0", "a", "lhs", "rhs"}
-	// Some template definitions borrowed from TestClone.
-	const tmpl = `
-		{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}
-		{{define "lhs"}} <a href=" {{end}}
-		{{define "rhs"}} "></a> {{end}}`
-	t0 := Must(New("t0").Parse(tmpl))
-	templates := t0.Templates()
-	if len(templates) != len(names) {
-		t.Errorf("expected %d templates; got %d", len(names), len(templates))
-	}
-	for _, name := range names {
-		found := false
-		for _, tmpl := range templates {
-			if name == tmpl.text.Name() {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Error("could not find template", name)
-		}
-	}
-}
-
-// This used to crash; https://golang.org/issue/3281
-func TestCloneCrash(t *testing.T) {
-	t1 := New("all")
-	Must(t1.New("t1").Parse(`{{define "foo"}}foo{{end}}`))
-	t1.Clone()
-}
-
-// Ensure that this guarantee from the docs is upheld:
-// "Further calls to Parse in the copy will add templates
-// to the copy but not to the original."
-func TestCloneThenParse(t *testing.T) {
-	t0 := Must(New("t0").Parse(`{{define "a"}}{{template "embedded"}}{{end}}`))
-	t1 := Must(t0.Clone())
-	Must(t1.Parse(`{{define "embedded"}}t1{{end}}`))
-	if len(t0.Templates())+1 != len(t1.Templates()) {
-		t.Error("adding a template to a clone added it to the original")
-	}
-	// double check that the embedded template isn't available in the original
-	err := t0.ExecuteTemplate(ioutil.Discard, "a", nil)
-	if err == nil {
-		t.Error("expected 'no such template' error")
-	}
-}
-
-// https://golang.org/issue/5980
-func TestFuncMapWorksAfterClone(t *testing.T) {
-	funcs := FuncMap{"customFunc": func() (string, error) {
-		return "", errors.New("issue5980")
-	}}
-
-	// get the expected error output (no clone)
-	uncloned := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
-	wantErr := uncloned.Execute(ioutil.Discard, nil)
-
-	// toClone must be the same as uncloned. It has to be recreated from scratch,
-	// since cloning cannot occur after execution.
-	toClone := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
-	cloned := Must(toClone.Clone())
-	gotErr := cloned.Execute(ioutil.Discard, nil)
-
-	if wantErr.Error() != gotErr.Error() {
-		t.Errorf("clone error message mismatch want %q got %q", wantErr, gotErr)
-	}
-}
-
-// https://golang.org/issue/16101
-func TestTemplateCloneExecuteRace(t *testing.T) {
-	const (
-		input   = `<title>{{block "a" .}}a{{end}}</title><body>{{block "b" .}}b{{end}}<body>`
-		overlay = `{{define "b"}}A{{end}}`
-	)
-	outer := Must(New("outer").Parse(input))
-	tmpl := Must(Must(outer.Clone()).Parse(overlay))
-
-	var wg sync.WaitGroup
-	for i := 0; i < 10; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				if err := tmpl.Execute(ioutil.Discard, "data"); err != nil {
-					panic(err)
-				}
-			}
-		}()
-	}
-	wg.Wait()
-}
-
-func TestTemplateCloneLookup(t *testing.T) {
-	// Template.escape makes an assumption that the template associated
-	// with t.Name() is t. Check that this holds.
-	tmpl := Must(New("x").Parse("a"))
-	tmpl = Must(tmpl.Clone())
-	if tmpl.Lookup(tmpl.Name()) != tmpl {
-		t.Error("after Clone, tmpl.Lookup(tmpl.Name()) != tmpl")
-	}
-}
-
-func TestCloneGrowth(t *testing.T) {
-	tmpl := Must(New("root").Parse(`<title>{{block "B". }}Arg{{end}}</title>`))
-	tmpl = Must(tmpl.Clone())
-	Must(tmpl.Parse(`{{define "B"}}Text{{end}}`))
-	for i := 0; i < 10; i++ {
-		tmpl.Execute(ioutil.Discard, nil)
-	}
-	if len(tmpl.DefinedTemplates()) > 200 {
-		t.Fatalf("too many templates: %v", len(tmpl.DefinedTemplates()))
-	}
-}
-
-// https://golang.org/issue/17735
-func TestCloneRedefinedName(t *testing.T) {
-	const base = `
-{{ define "a" -}}<title>{{ template "b" . -}}</title>{{ end -}}
-{{ define "b" }}{{ end -}}
-`
-	const page = `{{ template "a" . }}`
-
-	t1 := Must(New("a").Parse(base))
-
-	for i := 0; i < 2; i++ {
-		t2 := Must(t1.Clone())
-		t2 = Must(t2.New(fmt.Sprintf("%d", i)).Parse(page))
-		err := t2.Execute(ioutil.Discard, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-// Issue 24791.
-func TestClonePipe(t *testing.T) {
-	a := Must(New("a").Parse(`{{define "a"}}{{range $v := .A}}{{$v}}{{end}}{{end}}`))
-	data := struct{ A []string }{A: []string{"hi"}}
-	b := Must(a.Clone())
-	var buf strings.Builder
-	if err := b.Execute(&buf, &data); err != nil {
-		t.Fatal(err)
-	}
-	if got, want := buf.String(), "hi"; got != want {
-		t.Errorf("got %q want %q", got, want)
-	}
-}
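
The Clone guarantee checked by the deleted clone_test.go ("further calls to
Parse in the copy will add templates to the copy but not to the original")
holds identically for the standard html/template package. A rough sketch of
the usual layout-plus-override pattern:

	package main

	import (
		"html/template"
		"os"
	)

	func main() {
		// Shared layout with an overridable block.
		base := template.Must(template.New("layout").Parse(
			`<title>{{block "title" .}}default{{end}}</title>` + "\n"))

		// Clone before executing, then override the block for one page.
		page := template.Must(base.Clone())
		template.Must(page.Parse(`{{define "title"}}About{{end}}`))

		_ = base.Execute(os.Stdout, nil) // <title>default</title>
		_ = page.Execute(os.Stdout, nil) // <title>About</title>
	}
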
diff --git a/internal/backport/html/template/content.go b/internal/backport/html/template/content.go
deleted file mode 100644
index 9cf3153..0000000
--- a/internal/backport/html/template/content.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"fmt"
-	"reflect"
-
-	stdtemplate "html/template"
-)
-
-// Strings of content from a trusted source.
-type (
-	// CSS encapsulates known safe content that matches any of:
-	//   1. The CSS3 stylesheet production, such as `p { color: purple }`.
-	//   2. The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
-	//   3. CSS3 declaration productions, such as `color: red; margin: 2px`.
-	//   4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
-	// See https://www.w3.org/TR/css3-syntax/#parsing and
-	// https://web.archive.org/web/20090211114933/http://w3.org/TR/css3-syntax#style
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	CSS = stdtemplate.CSS
-
-	// HTML encapsulates a known safe HTML document fragment.
-	// It should not be used for HTML from a third party, or HTML with
-	// unclosed tags or comments. The outputs of a sound HTML sanitizer
-	// and a template escaped by this package are fine for use with HTML.
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	HTML = stdtemplate.HTML
-
-	// HTMLAttr encapsulates an HTML attribute from a trusted source,
-	// for example, ` dir="ltr"`.
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	HTMLAttr = stdtemplate.HTMLAttr
-
-	// JS encapsulates a known safe EcmaScript5 Expression, for example,
-	// `(x + y * z())`.
-	// Template authors are responsible for ensuring that typed expressions
-	// do not break the intended precedence and that there is no
-	// statement/expression ambiguity as when passing an expression like
-	// "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
-	// valid Program with a very different meaning.
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	//
-	// Using JS to include valid but untrusted JSON is not safe.
-	// A safe alternative is to parse the JSON with json.Unmarshal and then
-	// pass the resultant object into the template, where it will be
-	// converted to sanitized JSON when presented in a JavaScript context.
-	JS = stdtemplate.JS
-
-	// JSStr encapsulates a sequence of characters meant to be embedded
-	// between quotes in a JavaScript expression.
-	// The string must match a series of StringCharacters:
-	//   StringCharacter :: SourceCharacter but not `\` or LineTerminator
-	//                    | EscapeSequence
-	// Note that LineContinuations are not allowed.
-	// JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	JSStr = stdtemplate.JSStr
-
-	// URL encapsulates a known safe URL or URL substring (see RFC 3986).
-	// A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
-	// from a trusted source should go in the page, but by default dynamic
-	// `javascript:` URLs are filtered out since they are a frequently
-	// exploited injection vector.
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	URL = stdtemplate.URL
-
-	// Srcset encapsulates a known safe srcset attribute
-	// (see https://w3c.github.io/html/semantics-embedded-content.html#element-attrdef-img-srcset).
-	//
-	// Use of this type presents a security risk:
-	// the encapsulated content should come from a trusted source,
-	// as it will be included verbatim in the template output.
-	Srcset = stdtemplate.Srcset
-)
-
-type contentType uint8
-
-const (
-	contentTypePlain contentType = iota
-	contentTypeCSS
-	contentTypeHTML
-	contentTypeHTMLAttr
-	contentTypeJS
-	contentTypeJSStr
-	contentTypeURL
-	contentTypeSrcset
-	// contentTypeUnsafe is used in attr.go for values that affect how
-	// embedded content and network messages are formed, vetted,
-	// or interpreted; or which credentials network messages carry.
-	contentTypeUnsafe
-)
-
-// indirect returns the value, after dereferencing as many times
-// as necessary to reach the base type (or nil).
-func indirect(a interface{}) interface{} {
-	if a == nil {
-		return nil
-	}
-	if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
-		// Avoid creating a reflect.Value if it's not a pointer.
-		return a
-	}
-	v := reflect.ValueOf(a)
-	for v.Kind() == reflect.Ptr && !v.IsNil() {
-		v = v.Elem()
-	}
-	return v.Interface()
-}
-
-var (
-	errorType       = reflect.TypeOf((*error)(nil)).Elem()
-	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// indirectToStringerOrError returns the value, after dereferencing as many times
-// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
-// or error.
-func indirectToStringerOrError(a interface{}) interface{} {
-	if a == nil {
-		return nil
-	}
-	v := reflect.ValueOf(a)
-	for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
-		v = v.Elem()
-	}
-	return v.Interface()
-}
-
-// stringify converts its arguments to a string and reports the type of the content.
-// All pointers are dereferenced, as in the text/template package.
-func stringify(args ...interface{}) (string, contentType) {
-	if len(args) == 1 {
-		switch s := indirect(args[0]).(type) {
-		case string:
-			return s, contentTypePlain
-		case CSS:
-			return string(s), contentTypeCSS
-		case HTML:
-			return string(s), contentTypeHTML
-		case HTMLAttr:
-			return string(s), contentTypeHTMLAttr
-		case JS:
-			return string(s), contentTypeJS
-		case JSStr:
-			return string(s), contentTypeJSStr
-		case URL:
-			return string(s), contentTypeURL
-		case Srcset:
-			return string(s), contentTypeSrcset
-		}
-	}
-	i := 0
-	for _, arg := range args {
-		// We skip untyped nil arguments for backward compatibility.
-		// Without this they would be output as <nil>, escaped.
-		// See issue 25875.
-		if arg == nil {
-			continue
-		}
-
-		args[i] = indirectToStringerOrError(arg)
-		i++
-	}
-	return fmt.Sprint(args[:i]...), contentTypePlain
-}
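
The stringify path above is what gives the typed strings their meaning: a single argument whose type is one of the aliases keeps its content type, while everything else is flattened to plain text and escaped by context. A small sketch using the standard html/template (whose types these aliases point at):

	package main

	import (
		"html/template"
		"os"
	)

	func main() {
		t := template.Must(template.New("x").Parse("<p>{{.}}</p>\n"))

		// A plain string is treated as untrusted text and escaped.
		t.Execute(os.Stdout, "<b>bold</b>") // <p>&lt;b&gt;bold&lt;/b&gt;</p>

		// template.HTML asserts the value is already safe HTML; it is copied through.
		t.Execute(os.Stdout, template.HTML("<b>bold</b>")) // <p><b>bold</b></p>
	}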
diff --git a/internal/backport/html/template/content_test.go b/internal/backport/html/template/content_test.go
deleted file mode 100644
index e6f47a2..0000000
--- a/internal/backport/html/template/content_test.go
+++ /dev/null
@@ -1,458 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"testing"
-)
-
-func TestTypedContent(t *testing.T) {
-	data := []interface{}{
-		`<b> "foo%" O'Reilly &bar;`,
-		CSS(`a[href =~ "//example.com"]#foo`),
-		HTML(`Hello, <b>World</b> &amp;tc!`),
-		HTMLAttr(` dir="ltr"`),
-		JS(`c && alert("Hello, World!");`),
-		JSStr(`Hello, World & O'Reilly\u0021`),
-		URL(`greeting=H%69,&addressee=(World)`),
-		Srcset(`greeting=H%69,&addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`),
-		URL(`,foo/,`),
-	}
-
-	// For each content sensitive escaper, see how it does on
-	// each of the typed strings above.
-	tests := []struct {
-		// A template containing a single {{.}}.
-		input string
-		want  []string
-	}{
-		{
-			`<style>{{.}} { color: blue }</style>`,
-			[]string{
-				`ZgotmplZ`,
-				// Allowed but not escaped.
-				`a[href =~ "//example.com"]#foo`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-			},
-		},
-		{
-			`<div style="{{.}}">`,
-			[]string{
-				`ZgotmplZ`,
-				// Allowed and HTML escaped.
-				`a[href =~ &#34;//example.com&#34;]#foo`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-			},
-		},
-		{
-			`{{.}}`,
-			[]string{
-				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
-				`a[href =~ &#34;//example.com&#34;]#foo`,
-				// Not escaped.
-				`Hello, <b>World</b> &amp;tc!`,
-				` dir=&#34;ltr&#34;`,
-				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
-				`Hello, World &amp; O&#39;Reilly\u0021`,
-				`greeting=H%69,&amp;addressee=(World)`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<a{{.}}>`,
-			[]string{
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				// Allowed and HTML escaped.
-				` dir="ltr"`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-				`ZgotmplZ`,
-			},
-		},
-		{
-			`<a title={{.}}>`,
-			[]string{
-				`&lt;b&gt;&#32;&#34;foo%&#34;&#32;O&#39;Reilly&#32;&amp;bar;`,
-				`a[href&#32;&#61;~&#32;&#34;//example.com&#34;]#foo`,
-				// Tags stripped, spaces escaped, entity not re-escaped.
-				`Hello,&#32;World&#32;&amp;tc!`,
-				`&#32;dir&#61;&#34;ltr&#34;`,
-				`c&#32;&amp;&amp;&#32;alert(&#34;Hello,&#32;World!&#34;);`,
-				`Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\u0021`,
-				`greeting&#61;H%69,&amp;addressee&#61;(World)`,
-				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<a title='{{.}}'>`,
-			[]string{
-				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
-				`a[href =~ &#34;//example.com&#34;]#foo`,
-				// Tags stripped, entity not re-escaped.
-				`Hello, World &amp;tc!`,
-				` dir=&#34;ltr&#34;`,
-				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
-				`Hello, World &amp; O&#39;Reilly\u0021`,
-				`greeting=H%69,&amp;addressee=(World)`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<textarea>{{.}}</textarea>`,
-			[]string{
-				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
-				`a[href =~ &#34;//example.com&#34;]#foo`,
-				// Angle brackets escaped to prevent injection of close tags, entity not re-escaped.
-				`Hello, &lt;b&gt;World&lt;/b&gt; &amp;tc!`,
-				` dir=&#34;ltr&#34;`,
-				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
-				`Hello, World &amp; O&#39;Reilly\u0021`,
-				`greeting=H%69,&amp;addressee=(World)`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<script>alert({{.}})</script>`,
-			[]string{
-				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
-				`"a[href =~ \"//example.com\"]#foo"`,
-				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
-				`" dir=\"ltr\""`,
-				// Not escaped.
-				`c && alert("Hello, World!");`,
-				// Escape sequence not over-escaped.
-				`"Hello, World & O'Reilly\u0021"`,
-				`"greeting=H%69,\u0026addressee=(World)"`,
-				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
-				`",foo/,"`,
-			},
-		},
-		{
-			`<button onclick="alert({{.}})">`,
-			[]string{
-				`&#34;\u003cb\u003e \&#34;foo%\&#34; O&#39;Reilly \u0026bar;&#34;`,
-				`&#34;a[href =~ \&#34;//example.com\&#34;]#foo&#34;`,
-				`&#34;Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!&#34;`,
-				`&#34; dir=\&#34;ltr\&#34;&#34;`,
-				// Not JS escaped but HTML escaped.
-				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
-				// Escape sequence not over-escaped.
-				`&#34;Hello, World &amp; O&#39;Reilly\u0021&#34;`,
-				`&#34;greeting=H%69,\u0026addressee=(World)&#34;`,
-				`&#34;greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w&#34;`,
-				`&#34;,foo/,&#34;`,
-			},
-		},
-		{
-			`<script>alert("{{.}}")</script>`,
-			[]string{
-				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
-				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
-				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
-				` dir=\u0022ltr\u0022`,
-				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
-				// Escape sequence not over-escaped.
-				`Hello, World \u0026 O\u0027Reilly\u0021`,
-				`greeting=H%69,\u0026addressee=(World)`,
-				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
-				`,foo\/,`,
-			},
-		},
-		{
-			`<script type="text/javascript">alert("{{.}}")</script>`,
-			[]string{
-				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
-				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
-				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
-				` dir=\u0022ltr\u0022`,
-				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
-				// Escape sequence not over-escaped.
-				`Hello, World \u0026 O\u0027Reilly\u0021`,
-				`greeting=H%69,\u0026addressee=(World)`,
-				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
-				`,foo\/,`,
-			},
-		},
-		{
-			`<script type="text/javascript">alert({{.}})</script>`,
-			[]string{
-				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
-				`"a[href =~ \"//example.com\"]#foo"`,
-				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
-				`" dir=\"ltr\""`,
-				// Not escaped.
-				`c && alert("Hello, World!");`,
-				// Escape sequence not over-escaped.
-				`"Hello, World & O'Reilly\u0021"`,
-				`"greeting=H%69,\u0026addressee=(World)"`,
-				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
-				`",foo/,"`,
-			},
-		},
-		{
-			// Not treated as JS. The output is the same as for <div>{{.}}</div>
-			`<script type="golang.org/x/website/internal/backport/text/template">{{.}}</script>`,
-			[]string{
-				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
-				`a[href =~ &#34;//example.com&#34;]#foo`,
-				// Not escaped.
-				`Hello, <b>World</b> &amp;tc!`,
-				` dir=&#34;ltr&#34;`,
-				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
-				`Hello, World &amp; O&#39;Reilly\u0021`,
-				`greeting=H%69,&amp;addressee=(World)`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<button onclick='alert("{{.}}")'>`,
-			[]string{
-				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
-				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
-				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
-				` dir=\u0022ltr\u0022`,
-				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
-				// Escape sequence not over-escaped.
-				`Hello, World \u0026 O\u0027Reilly\u0021`,
-				`greeting=H%69,\u0026addressee=(World)`,
-				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
-				`,foo\/,`,
-			},
-		},
-		{
-			`<a href="?q={{.}}">`,
-			[]string{
-				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
-				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
-				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
-				`%20dir%3d%22ltr%22`,
-				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
-				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
-				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
-				`greeting=H%69,&amp;addressee=%28World%29`,
-				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<style>body { background: url('?img={{.}}') }</style>`,
-			[]string{
-				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
-				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
-				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
-				`%20dir%3d%22ltr%22`,
-				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
-				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
-				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
-				`greeting=H%69,&addressee=%28World%29`,
-				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
-				`,foo/,`,
-			},
-		},
-		{
-			`<img srcset="{{.}}">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				// Commas are not escaped.
-				`Hello,#ZgotmplZ`,
-				// Leading spaces are not percent escaped.
-				` dir=%22ltr%22`,
-				// Spaces after commas are not percent escaped.
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				// Metadata is not escaped.
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset={{.}}>`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				// Spaces are HTML escaped, not %-escaped.
-				`&#32;dir&#61;%22ltr%22`,
-				`#ZgotmplZ,&#32;World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting&#61;H%69%2c&amp;addressee&#61;%28World%29`,
-				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
-				// Commas are escaped.
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset="{{.}} 2x, https://golang.org/ 500.5w">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				` dir=%22ltr%22`,
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset="http://godoc.org/ {{.}}, https://golang.org/ 500.5w">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				` dir=%22ltr%22`,
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset="http://godoc.org/?q={{.}} 2x, https://golang.org/ 500.5w">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				` dir=%22ltr%22`,
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset="http://godoc.org/ 2x, {{.}} 500.5w">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				` dir=%22ltr%22`,
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-		{
-			`<img srcset="http://godoc.org/ 2x, https://golang.org/ {{.}}">`,
-			[]string{
-				`#ZgotmplZ`,
-				`#ZgotmplZ`,
-				`Hello,#ZgotmplZ`,
-				` dir=%22ltr%22`,
-				`#ZgotmplZ, World!%22%29;`,
-				`Hello,#ZgotmplZ`,
-				`greeting=H%69%2c&amp;addressee=%28World%29`,
-				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
-				`%2cfoo/%2c`,
-			},
-		},
-	}
-
-	for _, test := range tests {
-		tmpl := Must(New("x").Parse(test.input))
-		pre := strings.Index(test.input, "{{.}}")
-		post := len(test.input) - (pre + 5)
-		var b bytes.Buffer
-		for i, x := range data {
-			b.Reset()
-			if err := tmpl.Execute(&b, x); err != nil {
-				t.Errorf("%q with %v: %s", test.input, x, err)
-				continue
-			}
-			if want, got := test.want[i], b.String()[pre:b.Len()-post]; want != got {
-				t.Errorf("%q with %v:\nwant\n\t%q,\ngot\n\t%q\n", test.input, x, want, got)
-				continue
-			}
-		}
-	}
-}
-
-// Test that we print using the String method. Was issue 3073.
-type myStringer struct {
-	v int
-}
-
-func (s *myStringer) String() string {
-	return fmt.Sprintf("string=%d", s.v)
-}
-
-type errorer struct {
-	v int
-}
-
-func (s *errorer) Error() string {
-	return fmt.Sprintf("error=%d", s.v)
-}
-
-func TestStringer(t *testing.T) {
-	s := &myStringer{3}
-	b := new(bytes.Buffer)
-	tmpl := Must(New("x").Parse("{{.}}"))
-	if err := tmpl.Execute(b, s); err != nil {
-		t.Fatal(err)
-	}
-	var expect = "string=3"
-	if b.String() != expect {
-		t.Errorf("expected %q got %q", expect, b.String())
-	}
-	e := &errorer{7}
-	b.Reset()
-	if err := tmpl.Execute(b, e); err != nil {
-		t.Fatal(err)
-	}
-	expect = "error=7"
-	if b.String() != expect {
-		t.Errorf("expected %q got %q", expect, b.String())
-	}
-}
-
-// https://golang.org/issue/5982
-func TestEscapingNilNonemptyInterfaces(t *testing.T) {
-	tmpl := Must(New("x").Parse("{{.E}}"))
-
-	got := new(bytes.Buffer)
-	testData := struct{ E error }{} // any non-empty interface here will do; error is just ready at hand
-	tmpl.Execute(got, testData)
-
-	// A non-empty interface should print like an empty interface.
-	want := new(bytes.Buffer)
-	data := struct{ E interface{} }{}
-	tmpl.Execute(want, data)
-
-	if !bytes.Equal(want.Bytes(), got.Bytes()) {
-		t.Errorf("expected %q got %q", string(want.Bytes()), string(got.Bytes()))
-	}
-}
diff --git a/internal/backport/html/template/context.go b/internal/backport/html/template/context.go
deleted file mode 100644
index ce21e95..0000000
--- a/internal/backport/html/template/context.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"fmt"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// context describes the state an HTML parser must be in when it reaches the
-// portion of HTML produced by evaluating a particular template node.
-//
-// The zero value of type context is the start context for a template that
-// produces an HTML fragment as defined at
-// https://www.w3.org/TR/html5/syntax.html#the-end
-// where the context element is null.
-type context struct {
-	state   state
-	delim   delim
-	urlPart urlPart
-	jsCtx   jsCtx
-	attr    attr
-	element element
-	n       parse.Node // for range break/continue
-	err     *Error
-}
-
-func (c context) String() string {
-	var err error
-	if c.err != nil {
-		err = c.err
-	}
-	return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, err)
-}
-
-// eq reports whether two contexts are equal.
-func (c context) eq(d context) bool {
-	return c.state == d.state &&
-		c.delim == d.delim &&
-		c.urlPart == d.urlPart &&
-		c.jsCtx == d.jsCtx &&
-		c.attr == d.attr &&
-		c.element == d.element &&
-		c.err == d.err
-}
-
-// mangle produces an identifier that includes a suffix that distinguishes it
-// from template names mangled with different contexts.
-func (c context) mangle(templateName string) string {
-	// The mangled name for the default context is the input templateName.
-	if c.state == stateText {
-		return templateName
-	}
-	s := templateName + "$htmltemplate_" + c.state.String()
-	if c.delim != delimNone {
-		s += "_" + c.delim.String()
-	}
-	if c.urlPart != urlPartNone {
-		s += "_" + c.urlPart.String()
-	}
-	if c.jsCtx != jsCtxRegexp {
-		s += "_" + c.jsCtx.String()
-	}
-	if c.attr != attrNone {
-		s += "_" + c.attr.String()
-	}
-	if c.element != elementNone {
-		s += "_" + c.element.String()
-	}
-	return s
-}
-
-// state describes a high-level HTML parser state.
-//
-// It bounds the top of the element stack, and by extension the HTML insertion
-// mode, but also contains state that does not correspond to anything in the
-// HTML5 parsing algorithm because a single token production in the HTML
-// grammar may contain embedded actions in a template. For instance, the quoted
-// HTML attribute produced by
-//
-//	<div title="Hello {{.World}}">
-//
-// is a single token in HTML's grammar but in a template spans several nodes.
-type state uint8
-
-//go:generate stringer -type state
-
-const (
-	// stateText is parsed character data. An HTML parser is in
-	// this state when its parse position is outside an HTML tag,
-	// directive, comment, and special element body.
-	stateText state = iota
-	// stateTag occurs before an HTML attribute or the end of a tag.
-	stateTag
-	// stateAttrName occurs inside an attribute name.
-	// It occurs between the ^'s in ` ^name^ = value`.
-	stateAttrName
-	// stateAfterName occurs after an attr name has ended but before any
-	// equals sign. It occurs between the ^'s in ` name^ ^= value`.
-	stateAfterName
-	// stateBeforeValue occurs after the equals sign but before the value.
-	// It occurs between the ^'s in ` name =^ ^value`.
-	stateBeforeValue
-	// stateHTMLCmt occurs inside an <!-- HTML comment -->.
-	stateHTMLCmt
-	// stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
-	// as described at https://www.w3.org/TR/html5/syntax.html#elements-0
-	stateRCDATA
-	// stateAttr occurs inside an HTML attribute whose content is text.
-	stateAttr
-	// stateURL occurs inside an HTML attribute whose content is a URL.
-	stateURL
-	// stateSrcset occurs inside an HTML srcset attribute.
-	stateSrcset
-	// stateJS occurs inside an event handler or script element.
-	stateJS
-	// stateJSDqStr occurs inside a JavaScript double quoted string.
-	stateJSDqStr
-	// stateJSSqStr occurs inside a JavaScript single quoted string.
-	stateJSSqStr
-	// stateJSRegexp occurs inside a JavaScript regexp literal.
-	stateJSRegexp
-	// stateJSBlockCmt occurs inside a JavaScript /* block comment */.
-	stateJSBlockCmt
-	// stateJSLineCmt occurs inside a JavaScript // line comment.
-	stateJSLineCmt
-	// stateCSS occurs inside a <style> element or style attribute.
-	stateCSS
-	// stateCSSDqStr occurs inside a CSS double quoted string.
-	stateCSSDqStr
-	// stateCSSSqStr occurs inside a CSS single quoted string.
-	stateCSSSqStr
-	// stateCSSDqURL occurs inside a CSS double quoted url("...").
-	stateCSSDqURL
-	// stateCSSSqURL occurs inside a CSS single quoted url('...').
-	stateCSSSqURL
-	// stateCSSURL occurs inside a CSS unquoted url(...).
-	stateCSSURL
-	// stateCSSBlockCmt occurs inside a CSS /* block comment */.
-	stateCSSBlockCmt
-	// stateCSSLineCmt occurs inside a CSS // line comment.
-	stateCSSLineCmt
-	// stateError is an infectious error state outside any valid
-	// HTML/CSS/JS construct.
-	stateError
-	// stateDead marks unreachable code after a {{break}} or {{continue}}.
-	stateDead
-)
-
-// isComment is true for any state that contains content meant for template
-// authors & maintainers, not for end-users or machines.
-func isComment(s state) bool {
-	switch s {
-	case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
-		return true
-	}
-	return false
-}
-
-// isInTag reports whether s occurs solely inside an HTML tag.
-func isInTag(s state) bool {
-	switch s {
-	case stateTag, stateAttrName, stateAfterName, stateBeforeValue, stateAttr:
-		return true
-	}
-	return false
-}
-
-// delim is the delimiter that will end the current HTML attribute.
-type delim uint8
-
-//go:generate stringer -type delim
-
-const (
-	// delimNone occurs outside any attribute.
-	delimNone delim = iota
-	// delimDoubleQuote occurs when a double quote (") closes the attribute.
-	delimDoubleQuote
-	// delimSingleQuote occurs when a single quote (') closes the attribute.
-	delimSingleQuote
-	// delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
-	// closes the attribute.
-	delimSpaceOrTagEnd
-)
-
-// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
-// encoding strategies.
-type urlPart uint8
-
-//go:generate stringer -type urlPart
-
-const (
-	// urlPartNone occurs when not in a URL, or possibly at the start:
-	// ^ in "^http://auth/path?k=v#frag".
-	urlPartNone urlPart = iota
-	// urlPartPreQuery occurs in the scheme, authority, or path; between the
-	// ^s in "h^ttp://auth/path^?k=v#frag".
-	urlPartPreQuery
-	// urlPartQueryOrFrag occurs in the query portion between the ^s in
-	// "http://auth/path?^k=v#frag^".
-	urlPartQueryOrFrag
-	// urlPartUnknown occurs due to joining of contexts both before and
-	// after the query separator.
-	urlPartUnknown
-)
-
-// jsCtx determines whether a '/' starts a regular expression literal or a
-// division operator.
-type jsCtx uint8
-
-//go:generate stringer -type jsCtx
-
-const (
-	// jsCtxRegexp occurs where a '/' would start a regexp literal.
-	jsCtxRegexp jsCtx = iota
-	// jsCtxDivOp occurs where a '/' would start a division operator.
-	jsCtxDivOp
-	// jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
-	jsCtxUnknown
-)
-
-// element identifies the HTML element when inside a start tag or special body.
-// Certain HTML elements (for example <script> and <style>) have bodies that are
-// treated differently from stateText so the element type is necessary to
-// transition into the correct context at the end of a tag and to identify the
-// end delimiter for the body.
-type element uint8
-
-//go:generate stringer -type element
-
-const (
-	// elementNone occurs outside a special tag or special element body.
-	elementNone element = iota
-	// elementScript corresponds to the raw text <script> element
-	// with JS MIME type or no type attribute.
-	elementScript
-	// elementStyle corresponds to the raw text <style> element.
-	elementStyle
-	// elementTextarea corresponds to the RCDATA <textarea> element.
-	elementTextarea
-	// elementTitle corresponds to the RCDATA <title> element.
-	elementTitle
-)
-
-//go:generate stringer -type attr
-
-// attr identifies the current HTML attribute when inside the attribute,
-// that is, starting from stateAttrName until stateTag/stateText (exclusive).
-type attr uint8
-
-const (
-	// attrNone corresponds to a normal attribute or no attribute.
-	attrNone attr = iota
-	// attrScript corresponds to an event handler attribute.
-	attrScript
-	// attrScriptType corresponds to the type attribute in a script HTML element.
-	attrScriptType
-	// attrStyle corresponds to the style attribute whose value is CSS.
-	attrStyle
-	// attrURL corresponds to an attribute whose value is a URL.
-	attrURL
-	// attrSrcset corresponds to a srcset attribute.
-	attrSrcset
-)
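
The mangle method above is how one template body can be escaped several different ways: each context it is reached in yields a distinct derived name, and the escaper installs a separately escaped copy under that name. A sketch of the naming scheme, written as if inside this package (exampleMangle is a hypothetical helper, not part of the original file):

	package template

	import "fmt"

	// exampleMangle shows the derived name for a template reached inside a
	// double-quoted URL attribute, in its query component.
	func exampleMangle() {
		c := context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag}
		fmt.Println(c.mangle("page"))
		// page$htmltemplate_stateURL_delimDoubleQuote_urlPartQueryOrFrag
	}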
diff --git a/internal/backport/html/template/css.go b/internal/backport/html/template/css.go
deleted file mode 100644
index eb92fc9..0000000
--- a/internal/backport/html/template/css.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// endsWithCSSKeyword reports whether b ends with an ident that
-// case-insensitively matches the lower-case kw.
-func endsWithCSSKeyword(b []byte, kw string) bool {
-	i := len(b) - len(kw)
-	if i < 0 {
-		// Too short.
-		return false
-	}
-	if i != 0 {
-		r, _ := utf8.DecodeLastRune(b[:i])
-		if isCSSNmchar(r) {
-			// Too long.
-			return false
-		}
-	}
-	// Many CSS keywords, such as "!important", can have characters encoded,
-	// but the URI production does not allow that according to
-	// https://www.w3.org/TR/css3-syntax/#TOK-URI
-	// This does not attempt to recognize encoded keywords. For example,
-	// given "\75\72\6c" and "url" this returns false.
-	return string(bytes.ToLower(b[i:])) == kw
-}
-
-// isCSSNmchar reports whether rune is allowed anywhere in a CSS identifier.
-func isCSSNmchar(r rune) bool {
-	// Based on the CSS3 nmchar production but ignores multi-rune escape
-	// sequences.
-	// https://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
-	return 'a' <= r && r <= 'z' ||
-		'A' <= r && r <= 'Z' ||
-		'0' <= r && r <= '9' ||
-		r == '-' ||
-		r == '_' ||
-		// Non-ASCII cases below.
-		0x80 <= r && r <= 0xd7ff ||
-		0xe000 <= r && r <= 0xfffd ||
-		0x10000 <= r && r <= 0x10ffff
-}
-
-// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
-// If there is no change, it returns the input, otherwise it returns a slice
-// backed by a new array.
-// https://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
-func decodeCSS(s []byte) []byte {
-	i := bytes.IndexByte(s, '\\')
-	if i == -1 {
-		return s
-	}
-	// The UTF-8 sequence for a codepoint is never longer than 1 + the
-	// number of hex digits needed to represent that codepoint, so len(s) is an
-	// upper bound on the output length.
-	b := make([]byte, 0, len(s))
-	for len(s) != 0 {
-		i := bytes.IndexByte(s, '\\')
-		if i == -1 {
-			i = len(s)
-		}
-		b, s = append(b, s[:i]...), s[i:]
-		if len(s) < 2 {
-			break
-		}
-		// https://www.w3.org/TR/css3-syntax/#SUBTOK-escape
-		// escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
-		if isHex(s[1]) {
-			// https://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
-			//   unicode ::= '\' [0-9a-fA-F]{1,6} wc?
-			j := 2
-			for j < len(s) && j < 7 && isHex(s[j]) {
-				j++
-			}
-			r := hexDecode(s[1:j])
-			if r > unicode.MaxRune {
-				r, j = r/16, j-1
-			}
-			n := utf8.EncodeRune(b[len(b):cap(b)], r)
-			// The optional space at the end allows a hex
-			// sequence to be followed by a literal hex digit.
-			// string(decodeCSS([]byte(`\A B`))) == "\nB"
-			b, s = b[:len(b)+n], skipCSSSpace(s[j:])
-		} else {
-			// `\\` decodes to `\` and `\"` to `"`.
-			_, n := utf8.DecodeRune(s[1:])
-			b, s = append(b, s[1:1+n]...), s[1+n:]
-		}
-	}
-	return b
-}
-
-// isHex reports whether the given character is a hex digit.
-func isHex(c byte) bool {
-	return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
-}
-
-// hexDecode decodes a short hex digit sequence: "10" -> 16.
-func hexDecode(s []byte) rune {
-	n := '\x00'
-	for _, c := range s {
-		n <<= 4
-		switch {
-		case '0' <= c && c <= '9':
-			n |= rune(c - '0')
-		case 'a' <= c && c <= 'f':
-			n |= rune(c-'a') + 10
-		case 'A' <= c && c <= 'F':
-			n |= rune(c-'A') + 10
-		default:
-			panic(fmt.Sprintf("Bad hex digit in %q", s))
-		}
-	}
-	return n
-}
-
-// skipCSSSpace returns a suffix of c, skipping over a single space.
-func skipCSSSpace(c []byte) []byte {
-	if len(c) == 0 {
-		return c
-	}
-	// wc ::= #x9 | #xA | #xC | #xD | #x20
-	switch c[0] {
-	case '\t', '\n', '\f', ' ':
-		return c[1:]
-	case '\r':
-		// This differs from CSS3's wc production because it contains a
-		// probable spec error whereby wc contains all the single byte
-		// sequences in nl (newline) but not CRLF.
-		if len(c) >= 2 && c[1] == '\n' {
-			return c[2:]
-		}
-		return c[1:]
-	}
-	return c
-}
-
-// isCSSSpace reports whether b is a CSS space char as defined in wc.
-func isCSSSpace(b byte) bool {
-	switch b {
-	case '\t', '\n', '\f', '\r', ' ':
-		return true
-	}
-	return false
-}
-
-// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
-func cssEscaper(args ...interface{}) string {
-	s, _ := stringify(args...)
-	var b strings.Builder
-	r, w, written := rune(0), 0, 0
-	for i := 0; i < len(s); i += w {
-		// See comment in htmlEscaper.
-		r, w = utf8.DecodeRuneInString(s[i:])
-		var repl string
-		switch {
-		case int(r) < len(cssReplacementTable) && cssReplacementTable[r] != "":
-			repl = cssReplacementTable[r]
-		default:
-			continue
-		}
-		if written == 0 {
-			b.Grow(len(s))
-		}
-		b.WriteString(s[written:i])
-		b.WriteString(repl)
-		written = i + w
-		if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
-			b.WriteByte(' ')
-		}
-	}
-	if written == 0 {
-		return s
-	}
-	b.WriteString(s[written:])
-	return b.String()
-}
-
-var cssReplacementTable = []string{
-	0:    `\0`,
-	'\t': `\9`,
-	'\n': `\a`,
-	'\f': `\c`,
-	'\r': `\d`,
-	// Encode HTML specials as hex so the output can be embedded
-	// in HTML attributes without further encoding.
-	'"':  `\22`,
-	'&':  `\26`,
-	'\'': `\27`,
-	'(':  `\28`,
-	')':  `\29`,
-	'+':  `\2b`,
-	'/':  `\2f`,
-	':':  `\3a`,
-	';':  `\3b`,
-	'<':  `\3c`,
-	'>':  `\3e`,
-	'\\': `\\`,
-	'{':  `\7b`,
-	'}':  `\7d`,
-}
-
-var expressionBytes = []byte("expression")
-var mozBindingBytes = []byte("mozbinding")
-
-// cssValueFilter allows innocuous CSS values in the output including CSS
-// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
-// (inherit, blue), and colors (#888).
-// It filters out unsafe values, such as those that affect token boundaries,
-// and anything that might execute scripts.
-func cssValueFilter(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeCSS {
-		return s
-	}
-	b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)
-
-	// CSS3 error handling is specified as honoring string boundaries per
-	// https://www.w3.org/TR/css3-syntax/#error-handling :
-	//     Malformed declarations. User agents must handle unexpected
-	//     tokens encountered while parsing a declaration by reading until
-	//     the end of the declaration, while observing the rules for
-	//     matching pairs of (), [], {}, "", and '', and correctly handling
-	//     escapes. For example, a malformed declaration may be missing a
-	//     property, colon (:) or value.
-	// So we need to make sure that values do not have mismatched bracket
-	// or quote characters to prevent the browser from restarting parsing
-	// inside a string that might embed JavaScript source.
-	for i, c := range b {
-		switch c {
-		case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
-			return filterFailsafe
-		case '-':
-			// Disallow <!-- or -->.
-			// -- should not appear in valid identifiers.
-			if i != 0 && b[i-1] == '-' {
-				return filterFailsafe
-			}
-		default:
-			if c < utf8.RuneSelf && isCSSNmchar(rune(c)) {
-				id = append(id, c)
-			}
-		}
-	}
-	id = bytes.ToLower(id)
-	if bytes.Contains(id, expressionBytes) || bytes.Contains(id, mozBindingBytes) {
-		return filterFailsafe
-	}
-	return string(b)
-}
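
cssValueFilter above is deliberately conservative: values that stay within CSS token boundaries pass through, while anything that could terminate the declaration or smuggle in script collapses to the "ZgotmplZ" failsafe. A sketch, again written as if inside the package (exampleCSSValueFilter is a hypothetical helper); the expected values match the table in css_test.go below:

	package template

	import "fmt"

	// exampleCSSValueFilter contrasts an innocuous declaration, a vendor keyword,
	// and an attempted expression() injection.
	func exampleCSSValueFilter() {
		fmt.Println(cssValueFilter("color: red"))              // color: red
		fmt.Println(cssValueFilter("-moz-corner-radius"))      // -moz-corner-radius
		fmt.Println(cssValueFilter("expression(alert(1337))")) // ZgotmplZ
	}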
diff --git a/internal/backport/html/template/css_test.go b/internal/backport/html/template/css_test.go
deleted file mode 100644
index a735638..0000000
--- a/internal/backport/html/template/css_test.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"strconv"
-	"strings"
-	"testing"
-)
-
-func TestEndsWithCSSKeyword(t *testing.T) {
-	tests := []struct {
-		css, kw string
-		want    bool
-	}{
-		{"", "url", false},
-		{"url", "url", true},
-		{"URL", "url", true},
-		{"Url", "url", true},
-		{"url", "important", false},
-		{"important", "important", true},
-		{"image-url", "url", false},
-		{"imageurl", "url", false},
-		{"image url", "url", true},
-	}
-	for _, test := range tests {
-		got := endsWithCSSKeyword([]byte(test.css), test.kw)
-		if got != test.want {
-			t.Errorf("want %t but got %t for css=%v, kw=%v", test.want, got, test.css, test.kw)
-		}
-	}
-}
-
-func TestIsCSSNmchar(t *testing.T) {
-	tests := []struct {
-		rune rune
-		want bool
-	}{
-		{0, false},
-		{'0', true},
-		{'9', true},
-		{'A', true},
-		{'Z', true},
-		{'a', true},
-		{'z', true},
-		{'_', true},
-		{'-', true},
-		{':', false},
-		{';', false},
-		{' ', false},
-		{0x7f, false},
-		{0x80, true},
-		{0x1234, true},
-		{0xd800, false},
-		{0xdc00, false},
-		{0xfffe, false},
-		{0x10000, true},
-		{0x110000, false},
-	}
-	for _, test := range tests {
-		got := isCSSNmchar(test.rune)
-		if got != test.want {
-			t.Errorf("%q: want %t but got %t", string(test.rune), test.want, got)
-		}
-	}
-}
-
-func TestDecodeCSS(t *testing.T) {
-	tests := []struct {
-		css, want string
-	}{
-		{``, ``},
-		{`foo`, `foo`},
-		{`foo\`, `foo`},
-		{`foo\\`, `foo\`},
-		{`\`, ``},
-		{`\A`, "\n"},
-		{`\a`, "\n"},
-		{`\0a`, "\n"},
-		{`\00000a`, "\n"},
-		{`\000000a`, "\u0000a"},
-		{`\1234 5`, "\u1234" + "5"},
-		{`\1234\20 5`, "\u1234" + " 5"},
-		{`\1234\A 5`, "\u1234" + "\n5"},
-		{"\\1234\t5", "\u1234" + "5"},
-		{"\\1234\n5", "\u1234" + "5"},
-		{"\\1234\r\n5", "\u1234" + "5"},
-		{`\12345`, "\U00012345"},
-		{`\\`, `\`},
-		{`\\ `, `\ `},
-		{`\"`, `"`},
-		{`\'`, `'`},
-		{`\.`, `.`},
-		{`\. .`, `. .`},
-		{
-			`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e  fox jumps\2028over the \3c canine class=\22lazy\22 \3e dog\3c/canine\3e`,
-			"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>",
-		},
-	}
-	for _, test := range tests {
-		got1 := string(decodeCSS([]byte(test.css)))
-		if got1 != test.want {
-			t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.css, test.want, got1)
-		}
-		recoded := cssEscaper(got1)
-		if got2 := string(decodeCSS([]byte(recoded))); got2 != test.want {
-			t.Errorf("%q: escape & decode not dual for %q", test.css, recoded)
-		}
-	}
-}
-
-func TestHexDecode(t *testing.T) {
-	for i := 0; i < 0x200000; i += 101 /* coprime with 16 */ {
-		s := strconv.FormatInt(int64(i), 16)
-		if got := int(hexDecode([]byte(s))); got != i {
-			t.Errorf("%s: want %d but got %d", s, i, got)
-		}
-		s = strings.ToUpper(s)
-		if got := int(hexDecode([]byte(s))); got != i {
-			t.Errorf("%s: want %d but got %d", s, i, got)
-		}
-	}
-}
-
-func TestSkipCSSSpace(t *testing.T) {
-	tests := []struct {
-		css, want string
-	}{
-		{"", ""},
-		{"foo", "foo"},
-		{"\n", ""},
-		{"\r\n", ""},
-		{"\r", ""},
-		{"\t", ""},
-		{" ", ""},
-		{"\f", ""},
-		{" foo", "foo"},
-		{"  foo", " foo"},
-		{`\20`, `\20`},
-	}
-	for _, test := range tests {
-		got := string(skipCSSSpace([]byte(test.css)))
-		if got != test.want {
-			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
-		}
-	}
-}
-
-func TestCSSEscaper(t *testing.T) {
-	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		` !"#$%&'()*+,-./` +
-		`0123456789:;<=>?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\]^_` +
-		"`abcdefghijklmno" +
-		"pqrstuvwxyz{|}~\x7f" +
-		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
-	want := ("\\0\x01\x02\x03\x04\x05\x06\x07" +
-		"\x08\\9 \\a\x0b\\c \\d\x0E\x0F" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17" +
-		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		` !\22#$%\26\27\28\29*\2b,-.\2f ` +
-		`0123456789\3a\3b\3c=\3e?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\\]^_` +
-		"`abcdefghijklmno" +
-		`pqrstuvwxyz\7b|\7d~` + "\u007f" +
-		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
-	got := cssEscaper(input)
-	if got != want {
-		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
-	}
-
-	got = string(decodeCSS([]byte(got)))
-	if input != got {
-		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", input, got)
-	}
-}
-
-func TestCSSValueFilter(t *testing.T) {
-	tests := []struct {
-		css, want string
-	}{
-		{"", ""},
-		{"foo", "foo"},
-		{"0", "0"},
-		{"0px", "0px"},
-		{"-5px", "-5px"},
-		{"1.25in", "1.25in"},
-		{"+.33em", "+.33em"},
-		{"100%", "100%"},
-		{"12.5%", "12.5%"},
-		{".foo", ".foo"},
-		{"#bar", "#bar"},
-		{"corner-radius", "corner-radius"},
-		{"-moz-corner-radius", "-moz-corner-radius"},
-		{"#000", "#000"},
-		{"#48f", "#48f"},
-		{"#123456", "#123456"},
-		{"U+00-FF, U+980-9FF", "U+00-FF, U+980-9FF"},
-		{"color: red", "color: red"},
-		{"<!--", "ZgotmplZ"},
-		{"-->", "ZgotmplZ"},
-		{"<![CDATA[", "ZgotmplZ"},
-		{"]]>", "ZgotmplZ"},
-		{"</style", "ZgotmplZ"},
-		{`"`, "ZgotmplZ"},
-		{`'`, "ZgotmplZ"},
-		{"`", "ZgotmplZ"},
-		{"\x00", "ZgotmplZ"},
-		{"/* foo */", "ZgotmplZ"},
-		{"//", "ZgotmplZ"},
-		{"[href=~", "ZgotmplZ"},
-		{"expression(alert(1337))", "ZgotmplZ"},
-		{"-expression(alert(1337))", "ZgotmplZ"},
-		{"expression", "ZgotmplZ"},
-		{"Expression", "ZgotmplZ"},
-		{"EXPRESSION", "ZgotmplZ"},
-		{"-moz-binding", "ZgotmplZ"},
-		{"-expr\x00ession(alert(1337))", "ZgotmplZ"},
-		{`-expr\0ession(alert(1337))`, "ZgotmplZ"},
-		{`-express\69on(alert(1337))`, "ZgotmplZ"},
-		{`-express\69 on(alert(1337))`, "ZgotmplZ"},
-		{`-exp\72 ession(alert(1337))`, "ZgotmplZ"},
-		{`-exp\52 ession(alert(1337))`, "ZgotmplZ"},
-		{`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
-		{`-expre\0000073sion`, "-expre\x073sion"},
-		{`@import url evil.css`, "ZgotmplZ"},
-	}
-	for _, test := range tests {
-		got := cssValueFilter(test.css)
-		if got != test.want {
-			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
-		}
-	}
-}
-
-func BenchmarkCSSEscaper(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		cssEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
-
-func BenchmarkCSSEscaperNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		cssEscaper("The quick, brown fox jumps over the lazy dog.")
-	}
-}
-
-func BenchmarkDecodeCSS(b *testing.B) {
-	s := []byte(`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3edog\3c/canine\3e`)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		decodeCSS(s)
-	}
-}
-
-func BenchmarkDecodeCSSNoSpecials(b *testing.B) {
-	s := []byte("The quick, brown fox jumps over the lazy dog.")
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		decodeCSS(s)
-	}
-}
-
-func BenchmarkCSSValueFilter(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		cssValueFilter(`  e\78preS\0Sio/**/n(alert(1337))`)
-	}
-}
-
-func BenchmarkCSSValueFilterOk(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		cssValueFilter(`Times New Roman`)
-	}
-}
diff --git a/internal/backport/html/template/delim_string.go b/internal/backport/html/template/delim_string.go
deleted file mode 100644
index 6d80e09..0000000
--- a/internal/backport/html/template/delim_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type delim"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _delim_name = "delimNonedelimDoubleQuotedelimSingleQuotedelimSpaceOrTagEnd"
-
-var _delim_index = [...]uint8{0, 9, 25, 41, 59}
-
-func (i delim) String() string {
-	if i >= delim(len(_delim_index)-1) {
-		return "delim(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _delim_name[_delim_index[i]:_delim_index[i+1]]
-}
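
The generated String method above avoids allocating one string per constant: every name is concatenated into _delim_name and _delim_index records where each one starts, so the name of value i is the slice between index i and i+1. A sketch of the lookup (exampleDelimString is a hypothetical helper):

	package template

	import "fmt"

	// exampleDelimString resolves delimSingleQuote (value 2) by hand and via String.
	func exampleDelimString() {
		i := delimSingleQuote
		fmt.Println(_delim_name[_delim_index[i]:_delim_index[i+1]]) // delimSingleQuote
		fmt.Println(delimSpaceOrTagEnd.String())                    // delimSpaceOrTagEnd
	}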
diff --git a/internal/backport/html/template/doc.go b/internal/backport/html/template/doc.go
deleted file mode 100644
index cb5bcbe..0000000
--- a/internal/backport/html/template/doc.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template (html/template) implements data-driven templates for
-generating HTML output safe against code injection. It provides the
-same interface as package text/template and should be used instead of
-text/template whenever the output is HTML.
-
-The documentation here focuses on the security features of the package.
-For information about how to program the templates themselves, see the
-documentation for text/template.
-
-# Introduction
-
-This package wraps package text/template so you can share its template API
-to parse and execute HTML templates safely.
-
-	tmpl, err := template.New("name").Parse(...)
-	// Error checking elided
-	err = tmpl.Execute(out, data)
-
-If successful, tmpl will now be injection-safe. Otherwise, err is an error
-defined in the docs for ErrorCode.
-
-HTML templates treat data values as plain text which should be encoded so they
-can be safely embedded in an HTML document. The escaping is contextual, so
-actions can appear within JavaScript, CSS, and URI contexts.
-
-The security model used by this package assumes that template authors are
-trusted, while Execute's data parameter is not. More details are
-provided below.
-
-Example
-
-	import "golang.org/x/website/internal/backport/text/template"
-	...
-	t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
-	err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
-
-produces
-
-	Hello, <script>alert('you have been pwned')</script>!
-
-but the contextual autoescaping in html/template
-
-	import "html/template"
-	...
-	t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
-	err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
-
-produces safe, escaped HTML output
-
-	Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
-
-# Contexts
-
-This package understands HTML, CSS, JavaScript, and URIs. It adds sanitizing
-functions to each simple action pipeline, so given the excerpt
-
-	<a href="/search?q={{.}}">{{.}}</a>
-
-At parse time each {{.}} is overwritten to add escaping functions as necessary.
-In this case it becomes
-
-	<a href="/search?q={{. | urlescaper | attrescaper}}">{{. | htmlescaper}}</a>
-
-where urlescaper, attrescaper, and htmlescaper are aliases for internal escaping
-functions.
-
-For these internal escaping functions, if an action pipeline evaluates to
-a nil interface value, it is treated as though it were an empty string.
-
-# Namespaced and data- attributes
-
-Attributes with a namespace are treated as if they had no namespace.
-Given the excerpt
-
-	<a my:href="{{.}}"></a>
-
-At parse time the attribute will be treated as if it were just "href".
-So at parse time the template becomes:
-
-	<a my:href="{{. | urlescaper | attrescaper}}"></a>
-
-Similarly to attributes with namespaces, attributes with a "data-" prefix are
-treated as if they had no "data-" prefix. So given
-
-	<a data-href="{{.}}"></a>
-
-At parse time this becomes
-
-	<a data-href="{{. | urlescaper | attrescaper}}"></a>
-
-If an attribute has both a namespace and a "data-" prefix, only the namespace
-will be removed when determining the context. For example
-
-	<a my:data-href="{{.}}"></a>
-
-This is handled as if "my:data-href" was just "data-href" and not "href" as
-it would be if the "data-" prefix were to be ignored too. Thus at parse
-time this becomes just
-
-	<a my:data-href="{{. | attrescaper}}"></a>
-
-As a special case, attributes with the namespace "xmlns" are always treated
-as containing URLs. Given the excerpts
-
-	<a xmlns:title="{{.}}"></a>
-	<a xmlns:href="{{.}}"></a>
-	<a xmlns:onclick="{{.}}"></a>
-
-At parse time they become:
-
-	<a xmlns:title="{{. | urlescaper | attrescaper}}"></a>
-	<a xmlns:href="{{. | urlescaper | attrescaper}}"></a>
-	<a xmlns:onclick="{{. | urlescaper | attrescaper}}"></a>
-
-# Errors
-
-See the documentation of ErrorCode for details.
-
-# A fuller picture
-
-The rest of this package comment may be skipped on first reading; it includes
-details necessary to understand escaping contexts and error messages. Most users
-will not need to understand these details.
-
-# Contexts
-
-Assuming {{.}} is `O'Reilly: How are <i>you</i>?`, the table below shows
-how {{.}} appears when used in the context to the left.
-
-	Context                          {{.}} After
-	{{.}}                            O'Reilly: How are &lt;i&gt;you&lt;/i&gt;?
-	<a title='{{.}}'>                O&#39;Reilly: How are you?
-	<a href="/{{.}}">                O&#39;Reilly: How are %3ci%3eyou%3c/i%3e?
-	<a href="?q={{.}}">              O&#39;Reilly%3a%20How%20are%3ci%3e...%3f
-	<a onx='f("{{.}}")'>             O\x27Reilly: How are \x3ci\x3eyou...?
-	<a onx='f({{.}})'>               "O\x27Reilly: How are \x3ci\x3eyou...?"
-	<a onx='pattern = /{{.}}/;'>     O\x27Reilly: How are \x3ci\x3eyou...\x3f
-
-If used in an unsafe context, then the value might be filtered out:
-
-	Context                          {{.}} After
-	<a href="{{.}}">                 #ZgotmplZ
-
-since "O'Reilly:" is not an allowed protocol like "http:".
-
-If {{.}} is the innocuous word, `left`, then it can appear more widely,
-
-	Context                              {{.}} After
-	{{.}}                                left
-	<a title='{{.}}'>                    left
-	<a href='{{.}}'>                     left
-	<a href='/{{.}}'>                    left
-	<a href='?dir={{.}}'>                left
-	<a style="border-{{.}}: 4px">        left
-	<a style="align: {{.}}">             left
-	<a style="background: '{{.}}'>       left
-	<a style="background: url('{{.}}')>  left
-	<style>p.{{.}} {color:red}</style>   left
-
-Non-string values can be used in JavaScript contexts.
-If {{.}} is
-
-	struct{A,B string}{ "foo", "bar" }
-
-in the escaped template
-
-	<script>var pair = {{.}};</script>
-
-then the template output is
-
-	<script>var pair = {"A": "foo", "B": "bar"};</script>
-
-See package json to understand how non-string content is marshaled for
-embedding in JavaScript contexts.
-
-# Typed Strings
-
-By default, this package assumes that all pipelines produce a plain text string.
-It adds escaping pipeline stages necessary to correctly and safely embed that
-plain text string in the appropriate context.
-
-When a data value is not plain text, you can make sure it is not over-escaped
-by marking it with its type.
-
-Types HTML, JS, URL, and others from content.go can carry safe content that is
-exempted from escaping.
-
-The template
-
-	Hello, {{.}}!
-
-can be invoked with
-
-	tmpl.Execute(out, template.HTML(`<b>World</b>`))
-
-to produce
-
-	Hello, <b>World</b>!
-
-instead of the
-
-	Hello, &lt;b&gt;World&lt;/b&gt;!
-
-that would have been produced if {{.}} was a regular string.
-
-# Security Model
-
-https://rawgit.com/mikesamuel/sanitized-jquery-templates/trunk/safetemplate.html#problem_definition defines "safe" as used by this package.
-
-This package assumes that template authors are trusted, that Execute's data
-parameter is not, and seeks to preserve the properties below in the face
-of untrusted data:
-
-Structure Preservation Property:
-"... when a template author writes an HTML tag in a safe templating language,
-the browser will interpret the corresponding portion of the output as a tag
-regardless of the values of untrusted data, and similarly for other structures
-such as attribute boundaries and JS and CSS string boundaries."
-
-Code Effect Property:
-"... only code specified by the template author should run as a result of
-injecting the template output into a page and all code specified by the
-template author should run as a result of the same."
-
-Least Surprise Property:
-"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript, who
-knows that contextual autoescaping happens should be able to look at a {{.}}
-and correctly infer what sanitization happens."
-*/
-package template
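
The package documentation above is easiest to see in action: the same untrusted value picks up a different escaper in each context it reaches. A minimal runnable sketch using the standard library html/template (which this backport mirrors):

	package main

	import (
		"html/template"
		"os"
	)

	func main() {
		const src = `<p>{{.}}</p> <a href="/search?q={{.}}">search</a> <script>var v = {{.}};</script>`

		// One value, three contexts: HTML text, a URL query parameter, and a
		// JavaScript expression. Each gets its own escaping, per the table above.
		t := template.Must(template.New("ctx").Parse(src))
		t.Execute(os.Stdout, `O'Reilly: How are <i>you</i>?`)
	}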
diff --git a/internal/backport/html/template/element_string.go b/internal/backport/html/template/element_string.go
deleted file mode 100644
index 4573e08..0000000
--- a/internal/backport/html/template/element_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type element"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _element_name = "elementNoneelementScriptelementStyleelementTextareaelementTitle"
-
-var _element_index = [...]uint8{0, 11, 24, 36, 51, 63}
-
-func (i element) String() string {
-	if i >= element(len(_element_index)-1) {
-		return "element(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _element_name[_element_index[i]:_element_index[i+1]]
-}
diff --git a/internal/backport/html/template/error.go b/internal/backport/html/template/error.go
deleted file mode 100644
index 69b0756..0000000
--- a/internal/backport/html/template/error.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"fmt"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// Error describes a problem encountered during template Escaping.
-type Error struct {
-	// ErrorCode describes the kind of error.
-	ErrorCode ErrorCode
-	// Node is the node that caused the problem, if known.
-	// If not nil, it overrides Name and Line.
-	Node parse.Node
-	// Name is the name of the template in which the error was encountered.
-	Name string
-	// Line is the line number of the error in the template source or 0.
-	Line int
-	// Description is a human-readable description of the problem.
-	Description string
-}
-
-// ErrorCode is a code for a kind of error.
-type ErrorCode int
-
-// We define codes for each error that manifests while escaping templates, but
-// escaped templates may also fail at runtime.
-//
-// Output: "ZgotmplZ"
-// Example:
-//
-//	<img src="{{.X}}">
-//	where {{.X}} evaluates to `javascript:...`
-//
-// Discussion:
-//
-//	"ZgotmplZ" is a special value that indicates that unsafe content reached a
-//	CSS or URL context at runtime. The output of the example will be
-//	  <img src="#ZgotmplZ">
-//	If the data comes from a trusted source, use content types to exempt it
-//	from filtering: URL(`javascript:...`).
-const (
-	// OK indicates the lack of an error.
-	OK ErrorCode = iota
-
-	// ErrAmbigContext: "... appears in an ambiguous context within a URL"
-	// Example:
-	//   <a href="
-	//      {{if .C}}
-	//        /path/
-	//      {{else}}
-	//        /search?q=
-	//      {{end}}
-	//      {{.X}}
-	//   ">
-	// Discussion:
-	//   {{.X}} is in an ambiguous URL context since, depending on {{.C}},
-	//   it may be either a URL suffix or a query parameter.
-	//   Moving {{.X}} into the condition removes the ambiguity:
-	//   <a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
-	ErrAmbigContext
-
-	// ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
-	//   "... in unquoted attr", "... in attribute name"
-	// Example:
-	//   <a href = /search?q=foo>
-	//   <href=foo>
-	//   <form na<e=...>
-	//   <option selected<
-	// Discussion:
-	//   This is often due to a typo in an HTML element, but some runes
-	//   are banned in tag names, attribute names, and unquoted attribute
-	//   values because they can tickle parser ambiguities.
-	//   Quoting all attributes is the best policy.
-	ErrBadHTML
-
-	// ErrBranchEnd: "{{if}} branches end in different contexts"
-	// Example:
-	//   {{if .C}}<a href="{{end}}{{.X}}
-	// Discussion:
-	//   Package html/template statically examines each path through an
-	//   {{if}}, {{range}}, or {{with}} to escape any following pipelines.
-	//   The example is ambiguous since {{.X}} might be an HTML text node,
-	//   or a URL prefix in an HTML attribute. The context of {{.X}} is
-	//   used to figure out how to escape it, but that context depends on
-	//   the run-time value of {{.C}} which is not statically known.
-	//
-	//   The problem is usually something like missing quotes or angle
-	//   brackets, or can be avoided by refactoring to put the two contexts
-	//   into different branches of an if, range or with. If the problem
-	//   is in a {{range}} over a collection that should never be empty,
-	//   adding a dummy {{else}} can help.
-	ErrBranchEnd
-
-	// ErrEndContext: "... ends in a non-text context: ..."
-	// Examples:
-	//   <div
-	//   <div title="no close quote>
-	//   <script>f()
-	// Discussion:
-	//   Executed templates should produce a DocumentFragment of HTML.
-	//   Templates that end without closing tags will trigger this error.
-	//   Templates that should not be used in an HTML context or that
-	//   produce incomplete Fragments should not be executed directly.
-	//
-	//   {{define "main"}} <script>{{template "helper"}}</script> {{end}}
-	//   {{define "helper"}} document.write(' <div title=" ') {{end}}
-	//
-	//   "helper" does not produce a valid document fragment, so should
-	//   not be Executed directly.
-	ErrEndContext
-
-	// ErrNoSuchTemplate: "no such template ..."
-	// Examples:
-	//   {{define "main"}}<div {{template "attrs"}}>{{end}}
-	//   {{define "attrs"}}href="{{.URL}}"{{end}}
-	// Discussion:
-	//   Package html/template looks through template calls to compute the
-	//   context.
-	//   Here the {{.URL}} in "attrs" must be treated as a URL when called
-	//   from "main", but you will get this error if "attrs" is not defined
-	//   when "main" is parsed.
-	ErrNoSuchTemplate
-
-	// ErrOutputContext: "cannot compute output context for template ..."
-	// Examples:
-	//   {{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
-	// Discussion:
-	//   A recursive template does not end in the same context in which it
-	//   starts, and a reliable output context cannot be computed.
-	//   Look for typos in the named template.
-	//   If the template should not be called in the named start context,
-	//   look for calls to that template in unexpected contexts.
-	//   Maybe refactor recursive templates to not be recursive.
-	ErrOutputContext
-
-	// ErrPartialCharset: "unfinished JS regexp charset in ..."
-	// Example:
-	//     <script>var pattern = /foo[{{.Chars}}]/</script>
-	// Discussion:
-	//   Package html/template does not support interpolation into regular
-	//   expression literal character sets.
-	ErrPartialCharset
-
-	// ErrPartialEscape: "unfinished escape sequence in ..."
-	// Example:
-	//   <script>alert("\{{.X}}")</script>
-	// Discussion:
-	//   Package html/template does not support actions following a
-	//   backslash.
-	//   This is usually an error and there are better solutions; for
-	//   example
-	//     <script>alert("{{.X}}")</script>
-	//   should work, and if {{.X}} is a partial escape sequence such as
-	//   "xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
-	ErrPartialEscape
-
-	// ErrRangeLoopReentry: "on range loop re-entry: ..."
-	// Example:
-	//   <script>var x = [{{range .}}'{{.}},{{end}}]</script>
-	// Discussion:
-	//   If an iteration through a range would cause it to end in a
-	//   different context than an earlier pass, there is no single context.
-	//   In the example, a quote is missing, so it is not clear
-	//   whether {{.}} is meant to be inside a JS string or in a JS value
-	//   context. The second iteration would produce something like
-	//
-	//     <script>var x = ['firstValue,'secondValue]</script>
-	ErrRangeLoopReentry
-
-	// ErrSlashAmbig: '/' could start a division or regexp.
-	// Example:
-	//   <script>
-	//     {{if .C}}var x = 1{{end}}
-	//     /-{{.N}}/i.test(x) ? doThis : doThat();
-	//   </script>
-	// Discussion:
-	//   The example above could produce `var x = 1/-2/i.test(s)...`
-	//   in which the first '/' is a mathematical division operator or it
-	//   could produce `/-2/i.test(s)` in which the first '/' starts a
-	//   regexp literal.
-	//   Look for missing semicolons inside branches, and maybe add
-	//   parentheses to make it clear which interpretation you intend.
-	ErrSlashAmbig
-
-	// ErrPredefinedEscaper: "predefined escaper ... disallowed in template"
-	// Example:
-	//   <div class={{. | html}}>Hello<div>
-	// Discussion:
-	//   Package html/template already contextually escapes all pipelines to
-	//   produce HTML output safe against code injection. Manually escaping
-	//   pipeline output using the predefined escapers "html" or "urlquery" is
-	//   unnecessary, and may affect the correctness or safety of the escaped
-	//   pipeline output in Go 1.8 and earlier.
-	//
-	//   In most cases, such as the given example, this error can be resolved by
-	//   simply removing the predefined escaper from the pipeline and letting the
-	//   contextual autoescaper handle the escaping of the pipeline. In other
-	//   instances, where the predefined escaper occurs in the middle of a
-	//   pipeline where subsequent commands expect escaped input, e.g.
-	//     {{.X | html | makeALink}}
-	//   where makeALink does
-	//     return `<a href="`+input+`">link</a>`
-	//   consider refactoring the surrounding template to make use of the
-	//   contextual autoescaper, i.e.
-	//     <a href="{{.X}}">link</a>
-	//
-	//   To ease migration to Go 1.9 and beyond, "html" and "urlquery" will
-	//   continue to be allowed as the last command in a pipeline. However, if the
-	//   pipeline occurs in an unquoted attribute value context, "html" is
-	//   disallowed. Avoid using "html" and "urlquery" entirely in new templates.
-	ErrPredefinedEscaper
-)
-
-func (e *Error) Error() string {
-	switch {
-	case e.Node != nil:
-		loc, _ := (*parse.Tree)(nil).ErrorContext(e.Node)
-		return fmt.Sprintf("html/template:%s: %s", loc, e.Description)
-	case e.Line != 0:
-		return fmt.Sprintf("html/template:%s:%d: %s", e.Name, e.Line, e.Description)
-	case e.Name != "":
-		return fmt.Sprintf("html/template:%s: %s", e.Name, e.Description)
-	}
-	return "html/template: " + e.Description
-}
-
-// errorf creates an error given a format string f and args.
-// The template Name still needs to be supplied.
-func errorf(k ErrorCode, node parse.Node, line int, f string, args ...interface{}) *Error {
-	return &Error{k, node, "", line, fmt.Sprintf(f, args...)}
-}
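Since the standard library mirrors the Error and ErrorCode API deleted here, the behaviors documented above can be exercised directly against html/template. A minimal sketch (template text and data are illustrative; the exact error message may differ between Go versions):

package main

import (
	"fmt"
	"html/template"
	"io"
	"os"
)

func main() {
	// Runtime filtering: an unsafe URL reaching a URL context is replaced
	// with the "ZgotmplZ" failsafe described above.
	img := template.Must(template.New("img").Parse(`<img src="{{.}}">`))
	img.Execute(os.Stdout, "javascript:alert(1)") // <img src="#ZgotmplZ">
	fmt.Println()

	// Escape-time errors: a template that ends inside an attribute fails on
	// first execution, and the error carries ErrEndContext.
	bad := template.Must(template.New("bad").Parse(`<div title="{{.}}`))
	if err := bad.Execute(io.Discard, "x"); err != nil {
		if terr, ok := err.(*template.Error); ok {
			fmt.Println(terr.ErrorCode == template.ErrEndContext, terr)
		}
	}
}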
diff --git a/internal/backport/html/template/escape.go b/internal/backport/html/template/escape.go
deleted file mode 100644
index e7f3122..0000000
--- a/internal/backport/html/template/escape.go
+++ /dev/null
@@ -1,968 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"fmt"
-	"html"
-	"io"
-
-	"golang.org/x/website/internal/backport/text/template"
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// escapeTemplate rewrites the named template, which must be
-// associated with t, to guarantee that the output of any of the named
-// templates is properly escaped. If no error is returned, then the named templates have
-// been modified. Otherwise the named templates have been rendered
-// unusable.
-func escapeTemplate(tmpl *Template, node parse.Node, name string) error {
-	c, _ := tmpl.esc.escapeTree(context{}, node, name, 0)
-	var err error
-	if c.err != nil {
-		err, c.err.Name = c.err, name
-	} else if c.state != stateText {
-		err = &Error{ErrEndContext, nil, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
-	}
-	if err != nil {
-		// Prevent execution of unsafe templates.
-		if t := tmpl.set[name]; t != nil {
-			t.escapeErr = err
-			t.text.Tree = nil
-			t.Tree = nil
-		}
-		return err
-	}
-	tmpl.esc.commit()
-	if t := tmpl.set[name]; t != nil {
-		t.escapeErr = escapeOK
-		t.Tree = t.text.Tree
-	}
-	return nil
-}
-
-// evalArgs formats the list of arguments into a string. It is equivalent to
-// fmt.Sprint(args...), except that it dereferences all pointers.
-func evalArgs(args ...interface{}) string {
-	// Optimization for simple common case of a single string argument.
-	if len(args) == 1 {
-		if s, ok := args[0].(string); ok {
-			return s
-		}
-	}
-	for i, arg := range args {
-		args[i] = indirectToStringerOrError(arg)
-	}
-	return fmt.Sprint(args...)
-}
-
-// funcMap maps command names to functions that render their inputs safe.
-var funcMap = template.FuncMap{
-	"_html_template_attrescaper":     attrEscaper,
-	"_html_template_commentescaper":  commentEscaper,
-	"_html_template_cssescaper":      cssEscaper,
-	"_html_template_cssvaluefilter":  cssValueFilter,
-	"_html_template_htmlnamefilter":  htmlNameFilter,
-	"_html_template_htmlescaper":     htmlEscaper,
-	"_html_template_jsregexpescaper": jsRegexpEscaper,
-	"_html_template_jsstrescaper":    jsStrEscaper,
-	"_html_template_jsvalescaper":    jsValEscaper,
-	"_html_template_nospaceescaper":  htmlNospaceEscaper,
-	"_html_template_rcdataescaper":   rcdataEscaper,
-	"_html_template_srcsetescaper":   srcsetFilterAndEscaper,
-	"_html_template_urlescaper":      urlEscaper,
-	"_html_template_urlfilter":       urlFilter,
-	"_html_template_urlnormalizer":   urlNormalizer,
-	"_eval_args_":                    evalArgs,
-}
-
-// escaper collects type inferences about templates and changes needed to make
-// templates injection safe.
-type escaper struct {
-	// ns is the nameSpace that this escaper is associated with.
-	ns *nameSpace
-	// output[templateName] is the output context for a templateName that
-	// has been mangled to include its input context.
-	output map[string]context
-	// derived[c.mangle(name)] maps to a template derived from the template
-	// named name templateName for the start context c.
-	derived map[string]*template.Template
-	// called[templateName] is a set of called mangled template names.
-	called map[string]bool
-	// xxxNodeEdits are the accumulated edits to apply during commit.
-	// Such edits are not applied immediately in case a template set
-	// executes a given template in different escaping contexts.
-	actionNodeEdits   map[*parse.ActionNode][]string
-	templateNodeEdits map[*parse.TemplateNode]string
-	textNodeEdits     map[*parse.TextNode][]byte
-	// rangeContext holds context about the current range loop.
-	rangeContext *rangeContext
-}
-
-// rangeContext holds information about the current range loop.
-type rangeContext struct {
-	outer     *rangeContext // outer loop
-	breaks    []context     // context at each break action
-	continues []context     // context at each continue action
-}
-
-// makeEscaper creates a blank escaper for the given set.
-func makeEscaper(n *nameSpace) escaper {
-	return escaper{
-		n,
-		map[string]context{},
-		map[string]*template.Template{},
-		map[string]bool{},
-		map[*parse.ActionNode][]string{},
-		map[*parse.TemplateNode]string{},
-		map[*parse.TextNode][]byte{},
-		nil,
-	}
-}
-
-// filterFailsafe is an innocuous word that is emitted in place of unsafe values
-// by sanitizer functions. It is not a keyword in any programming language,
-// contains no special characters, is not empty, and when it appears in output
-// it is distinct enough that a developer can find the source of the problem
-// via a search engine.
-const filterFailsafe = "ZgotmplZ"
-
-// escape escapes a template node.
-func (e *escaper) escape(c context, n parse.Node) context {
-	switch n := n.(type) {
-	case *parse.ActionNode:
-		return e.escapeAction(c, n)
-	case *parse.BreakNode:
-		c.n = n
-		e.rangeContext.breaks = append(e.rangeContext.breaks, c)
-		return context{state: stateDead}
-	case *parse.CommentNode:
-		return c
-	case *parse.ContinueNode:
-		c.n = n
-		e.rangeContext.continues = append(e.rangeContext.continues, c)
-		return context{state: stateDead}
-	case *parse.IfNode:
-		return e.escapeBranch(c, &n.BranchNode, "if")
-	case *parse.ListNode:
-		return e.escapeList(c, n)
-	case *parse.RangeNode:
-		return e.escapeBranch(c, &n.BranchNode, "range")
-	case *parse.TemplateNode:
-		return e.escapeTemplate(c, n)
-	case *parse.TextNode:
-		return e.escapeText(c, n)
-	case *parse.WithNode:
-		return e.escapeBranch(c, &n.BranchNode, "with")
-	}
-	panic("escaping " + n.String() + " is unimplemented")
-}
-
-// escapeAction escapes an action template node.
-func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
-	if len(n.Pipe.Decl) != 0 {
-		// A local variable assignment, not an interpolation.
-		return c
-	}
-	c = nudge(c)
-	// Check for disallowed use of predefined escapers in the pipeline.
-	for pos, idNode := range n.Pipe.Cmds {
-		node, ok := idNode.Args[0].(*parse.IdentifierNode)
-		if !ok {
-			// A predefined escaper "esc" will never be found as an identifier in a
-			// Chain or Field node, since:
-			// - "esc.x ..." is invalid, since predefined escapers return strings, and
-			//   strings do not have methods, keys or fields.
-			// - "... .esc" is invalid, since predefined escapers are global functions,
-			//   not methods or fields of any types.
-			// Therefore, it is safe to ignore these two node types.
-			continue
-		}
-		ident := node.Ident
-		if _, ok := predefinedEscapers[ident]; ok {
-			if pos < len(n.Pipe.Cmds)-1 ||
-				c.state == stateAttr && c.delim == delimSpaceOrTagEnd && ident == "html" {
-				return context{
-					state: stateError,
-					err:   errorf(ErrPredefinedEscaper, n, n.Line, "predefined escaper %q disallowed in template", ident),
-				}
-			}
-		}
-	}
-	s := make([]string, 0, 3)
-	switch c.state {
-	case stateError:
-		return c
-	case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
-		switch c.urlPart {
-		case urlPartNone:
-			s = append(s, "_html_template_urlfilter")
-			fallthrough
-		case urlPartPreQuery:
-			switch c.state {
-			case stateCSSDqStr, stateCSSSqStr:
-				s = append(s, "_html_template_cssescaper")
-			default:
-				s = append(s, "_html_template_urlnormalizer")
-			}
-		case urlPartQueryOrFrag:
-			s = append(s, "_html_template_urlescaper")
-		case urlPartUnknown:
-			return context{
-				state: stateError,
-				err:   errorf(ErrAmbigContext, n, n.Line, "%s appears in an ambiguous context within a URL", n),
-			}
-		default:
-			panic(c.urlPart.String())
-		}
-	case stateJS:
-		s = append(s, "_html_template_jsvalescaper")
-		// A slash after a value starts a div operator.
-		c.jsCtx = jsCtxDivOp
-	case stateJSDqStr, stateJSSqStr:
-		s = append(s, "_html_template_jsstrescaper")
-	case stateJSRegexp:
-		s = append(s, "_html_template_jsregexpescaper")
-	case stateCSS:
-		s = append(s, "_html_template_cssvaluefilter")
-	case stateText:
-		s = append(s, "_html_template_htmlescaper")
-	case stateRCDATA:
-		s = append(s, "_html_template_rcdataescaper")
-	case stateAttr:
-		// Handled below in delim check.
-	case stateAttrName, stateTag:
-		c.state = stateAttrName
-		s = append(s, "_html_template_htmlnamefilter")
-	case stateSrcset:
-		s = append(s, "_html_template_srcsetescaper")
-	default:
-		if isComment(c.state) {
-			s = append(s, "_html_template_commentescaper")
-		} else {
-			panic("unexpected state " + c.state.String())
-		}
-	}
-	switch c.delim {
-	case delimNone:
-		// No extra-escaping needed for raw text content.
-	case delimSpaceOrTagEnd:
-		s = append(s, "_html_template_nospaceescaper")
-	default:
-		s = append(s, "_html_template_attrescaper")
-	}
-	e.editActionNode(n, s)
-	return c
-}
-
-// ensurePipelineContains ensures that the pipeline ends with the commands with
-// the identifiers in s in order. If the pipeline ends with a predefined escaper
-// (i.e. "html" or "urlquery"), merge it with the identifiers in s.
-func ensurePipelineContains(p *parse.PipeNode, s []string) {
-	if len(s) == 0 {
-		// Do not rewrite pipeline if we have no escapers to insert.
-		return
-	}
-	// Precondition: p.Cmds contains at most one predefined escaper and the
-	// escaper will be present at p.Cmds[len(p.Cmds)-1]. This precondition is
-	// always true because of the checks in escapeAction.
-	pipelineLen := len(p.Cmds)
-	if pipelineLen > 0 {
-		lastCmd := p.Cmds[pipelineLen-1]
-		if idNode, ok := lastCmd.Args[0].(*parse.IdentifierNode); ok {
-			if esc := idNode.Ident; predefinedEscapers[esc] {
-				// Pipeline ends with a predefined escaper.
-				if len(p.Cmds) == 1 && len(lastCmd.Args) > 1 {
-					// Special case: pipeline is of the form {{ esc arg1 arg2 ... argN }},
-					// where esc is the predefined escaper, and arg1...argN are its arguments.
-					// Convert this into the equivalent form
-					// {{ _eval_args_ arg1 arg2 ... argN | esc }}, so that esc can be easily
-					// merged with the escapers in s.
-					lastCmd.Args[0] = parse.NewIdentifier("_eval_args_").SetTree(nil).SetPos(lastCmd.Args[0].Position())
-					p.Cmds = appendCmd(p.Cmds, newIdentCmd(esc, p.Position()))
-					pipelineLen++
-				}
-				// If any of the commands in s that we are about to insert is equivalent
-				// to the predefined escaper, use the predefined escaper instead.
-				dup := false
-				for i, escaper := range s {
-					if escFnsEq(esc, escaper) {
-						s[i] = idNode.Ident
-						dup = true
-					}
-				}
-				if dup {
-					// The predefined escaper will already be inserted along with the
-					// escapers in s, so do not copy it to the rewritten pipeline.
-					pipelineLen--
-				}
-			}
-		}
-	}
-	// Rewrite the pipeline, creating the escapers in s at the end of the pipeline.
-	newCmds := make([]*parse.CommandNode, pipelineLen, pipelineLen+len(s))
-	insertedIdents := make(map[string]bool)
-	for i := 0; i < pipelineLen; i++ {
-		cmd := p.Cmds[i]
-		newCmds[i] = cmd
-		if idNode, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
-			insertedIdents[normalizeEscFn(idNode.Ident)] = true
-		}
-	}
-	for _, name := range s {
-		if !insertedIdents[normalizeEscFn(name)] {
-			// When two templates share an underlying parse tree via the use of
-			// AddParseTree and one template is executed after the other, this check
-			// ensures that escapers that were already inserted into the pipeline on
-			// the first escaping pass do not get inserted again.
-			newCmds = appendCmd(newCmds, newIdentCmd(name, p.Position()))
-		}
-	}
-	p.Cmds = newCmds
-}
-
-// predefinedEscapers contains template predefined escapers that are equivalent
-// to some contextual escapers. Keep in sync with equivEscapers.
-var predefinedEscapers = map[string]bool{
-	"html":     true,
-	"urlquery": true,
-}
-
-// equivEscapers matches contextual escapers to equivalent predefined
-// template escapers.
-var equivEscapers = map[string]string{
-	// The following pairs of HTML escapers provide equivalent security
-	// guarantees, since they all escape '\000', '\'', '"', '&', '<', and '>'.
-	"_html_template_attrescaper":   "html",
-	"_html_template_htmlescaper":   "html",
-	"_html_template_rcdataescaper": "html",
-	// These two URL escapers produce URLs safe for embedding in a URL query by
-	// percent-encoding all the reserved characters specified in RFC 3986 Section
-	// 2.2
-	"_html_template_urlescaper": "urlquery",
-	// These two functions are not actually equivalent; urlquery is stricter as it
-	// escapes reserved characters (e.g. '#'), while _html_template_urlnormalizer
-	// does not. It is therefore only safe to replace _html_template_urlnormalizer
-// with urlquery (this happens in ensurePipelineContains), but not the other
-	// way around. We keep this entry around to preserve the behavior of templates
-	// written before Go 1.9, which might depend on this substitution taking place.
-	"_html_template_urlnormalizer": "urlquery",
-}
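One practical consequence of the escaper merging above: a predefined escaper written by the template author, such as "html", is folded into the contextual escaper instead of being applied on top of it, so values are not double-escaped. A minimal sketch (the data value mirrors the "overescaping" cases in escape_test.go below):

package main

import (
	"html/template"
	"os"
)

func main() {
	// "html" at the end of the pipeline is merged with the contextual
	// HTML escaper, so the output is escaped exactly once.
	t := template.Must(template.New("x").Parse(`Hello, {{. | html}}!`))
	t.Execute(os.Stdout, "<Cincinnati>") // Hello, &lt;Cincinnati&gt;!
}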
-
-// escFnsEq reports whether the two escaping functions are equivalent.
-func escFnsEq(a, b string) bool {
-	return normalizeEscFn(a) == normalizeEscFn(b)
-}
-
-// normalizeEscFn(a) is equal to normalizeEscFn(b) for any pair of names of
-// escaper functions a and b that are equivalent.
-func normalizeEscFn(e string) string {
-	if norm := equivEscapers[e]; norm != "" {
-		return norm
-	}
-	return e
-}
-
-// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
-// for all x.
-var redundantFuncs = map[string]map[string]bool{
-	"_html_template_commentescaper": {
-		"_html_template_attrescaper":    true,
-		"_html_template_nospaceescaper": true,
-		"_html_template_htmlescaper":    true,
-	},
-	"_html_template_cssescaper": {
-		"_html_template_attrescaper": true,
-	},
-	"_html_template_jsregexpescaper": {
-		"_html_template_attrescaper": true,
-	},
-	"_html_template_jsstrescaper": {
-		"_html_template_attrescaper": true,
-	},
-	"_html_template_urlescaper": {
-		"_html_template_urlnormalizer": true,
-	},
-}
-
-// appendCmd appends the given command to the end of the command pipeline
-// unless it is redundant with the last command.
-func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
-	if n := len(cmds); n != 0 {
-		last, okLast := cmds[n-1].Args[0].(*parse.IdentifierNode)
-		next, okNext := cmd.Args[0].(*parse.IdentifierNode)
-		if okLast && okNext && redundantFuncs[last.Ident][next.Ident] {
-			return cmds
-		}
-	}
-	return append(cmds, cmd)
-}
-
-// newIdentCmd produces a command containing a single identifier node.
-func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode {
-	return &parse.CommandNode{
-		NodeType: parse.NodeCommand,
-		Args:     []parse.Node{parse.NewIdentifier(identifier).SetTree(nil).SetPos(pos)}, // TODO: SetTree.
-	}
-}
-
-// nudge returns the context that would result from following empty string
-// transitions from the input context.
-// For example, parsing:
-//
-//	`<a href=`
-//
-// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
-//
-//	`<a href=x`
-//
-// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
-// There are two transitions that happen when the 'x' is seen:
-// (1) Transition from a before-value state to a start-of-value state without
-//     consuming any character.
-// (2) Consume 'x' and transition past the first value character.
-// In this case, nudging produces the context after (1) happens.
-func nudge(c context) context {
-	switch c.state {
-	case stateTag:
-		// In `<foo {{.}}`, the action should emit an attribute.
-		c.state = stateAttrName
-	case stateBeforeValue:
-		// In `<foo bar={{.}}`, the action is an undelimited value.
-		c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
-	case stateAfterName:
-		// In `<foo bar {{.}}`, the action is an attribute name.
-		c.state, c.attr = stateAttrName, attrNone
-	}
-	return c
-}
-
-// join joins the two contexts of a branch template node. The result is an
-// error context if either of the input contexts are error contexts, or if the
-// input contexts differ.
-func join(a, b context, node parse.Node, nodeName string) context {
-	if a.state == stateError {
-		return a
-	}
-	if b.state == stateError {
-		return b
-	}
-	if a.state == stateDead {
-		return b
-	}
-	if b.state == stateDead {
-		return a
-	}
-	if a.eq(b) {
-		return a
-	}
-
-	c := a
-	c.urlPart = b.urlPart
-	if c.eq(b) {
-		// The contexts differ only by urlPart.
-		c.urlPart = urlPartUnknown
-		return c
-	}
-
-	c = a
-	c.jsCtx = b.jsCtx
-	if c.eq(b) {
-		// The contexts differ only by jsCtx.
-		c.jsCtx = jsCtxUnknown
-		return c
-	}
-
-	// Allow a nudged context to join with an unnudged one.
-	// This means that
-	//   <p title={{if .C}}{{.}}{{end}}
-	// ends in an unquoted value state even though the else branch
-	// ends in stateBeforeValue.
-	if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
-		if e := join(c, d, node, nodeName); e.state != stateError {
-			return e
-		}
-	}
-
-	return context{
-		state: stateError,
-		err:   errorf(ErrBranchEnd, node, 0, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
-	}
-}
-
-// escapeBranch escapes a branch template node: "if", "range" and "with".
-func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
-	if nodeName == "range" {
-		e.rangeContext = &rangeContext{outer: e.rangeContext}
-	}
-	c0 := e.escapeList(c, n.List)
-	if nodeName == "range" {
-		if c0.state != stateError {
-			c0 = joinRange(c0, e.rangeContext)
-		}
-		e.rangeContext = e.rangeContext.outer
-		if c0.state == stateError {
-			return c0
-		}
-
-		// The "true" branch of a "range" node can execute multiple times.
-		// We check that executing n.List once results in the same context
-		// as executing n.List twice.
-		e.rangeContext = &rangeContext{outer: e.rangeContext}
-		c1, _ := e.escapeListConditionally(c0, n.List, nil)
-		c0 = join(c0, c1, n, nodeName)
-		if c0.state == stateError {
-			e.rangeContext = e.rangeContext.outer
-			// Make clear that this is a problem on loop re-entry
-			// since developers tend to overlook that branch when
-			// debugging templates.
-			c0.err.Line = n.Line
-			c0.err.Description = "on range loop re-entry: " + c0.err.Description
-			return c0
-		}
-		c0 = joinRange(c0, e.rangeContext)
-		e.rangeContext = e.rangeContext.outer
-		if c0.state == stateError {
-			return c0
-		}
-	}
-	c1 := e.escapeList(c, n.ElseList)
-	return join(c0, c1, n, nodeName)
-}
-
-func joinRange(c0 context, rc *rangeContext) context {
-	// Merge contexts at break and continue statements into overall body context.
-	// In theory we could treat breaks differently from continues, but for now it is
-	// enough to treat them both as going back to the start of the loop (which may then stop).
-	for _, c := range rc.breaks {
-		c0 = join(c0, c, c.n, "range")
-		if c0.state == stateError {
-			c0.err.Line = c.n.(*parse.BreakNode).Line
-			c0.err.Description = "at range loop break: " + c0.err.Description
-			return c0
-		}
-	}
-	for _, c := range rc.continues {
-		c0 = join(c0, c, c.n, "range")
-		if c0.state == stateError {
-			c0.err.Line = c.n.(*parse.ContinueNode).Line
-			c0.err.Description = "at range loop continue: " + c0.err.Description
-			return c0
-		}
-	}
-	return c0
-}
-
-// escapeList escapes a list template node.
-func (e *escaper) escapeList(c context, n *parse.ListNode) context {
-	if n == nil {
-		return c
-	}
-	for _, m := range n.Nodes {
-		c = e.escape(c, m)
-		if c.state == stateDead {
-			break
-		}
-	}
-	return c
-}
-
-// escapeListConditionally escapes a list node but only preserves edits and
-// inferences in e if the inferences and output context satisfy filter.
-// It returns the best guess at an output context, and the result of the filter
-// which is the same as whether e was updated.
-func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
-	e1 := makeEscaper(e.ns)
-	e1.rangeContext = e.rangeContext
-	// Make type inferences available to filter.
-	for k, v := range e.output {
-		e1.output[k] = v
-	}
-	c = e1.escapeList(c, n)
-	ok := filter != nil && filter(&e1, c)
-	if ok {
-		// Copy inferences and edits from e1 back into e.
-		for k, v := range e1.output {
-			e.output[k] = v
-		}
-		for k, v := range e1.derived {
-			e.derived[k] = v
-		}
-		for k, v := range e1.called {
-			e.called[k] = v
-		}
-		for k, v := range e1.actionNodeEdits {
-			e.editActionNode(k, v)
-		}
-		for k, v := range e1.templateNodeEdits {
-			e.editTemplateNode(k, v)
-		}
-		for k, v := range e1.textNodeEdits {
-			e.editTextNode(k, v)
-		}
-	}
-	return c, ok
-}
-
-// escapeTemplate escapes a {{template}} call node.
-func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
-	c, name := e.escapeTree(c, n, n.Name, n.Line)
-	if name != n.Name {
-		e.editTemplateNode(n, name)
-	}
-	return c
-}
-
-// escapeTree escapes the named template starting in the given context as
-// necessary and returns its output context.
-func (e *escaper) escapeTree(c context, node parse.Node, name string, line int) (context, string) {
-	// Mangle the template name with the input context to produce a reliable
-	// identifier.
-	dname := c.mangle(name)
-	e.called[dname] = true
-	if out, ok := e.output[dname]; ok {
-		// Already escaped.
-		return out, dname
-	}
-	t := e.template(name)
-	if t == nil {
-		// Two cases: The template exists but is empty, or has never been mentioned at
-		// all. Distinguish the cases in the error messages.
-		if e.ns.set[name] != nil {
-			return context{
-				state: stateError,
-				err:   errorf(ErrNoSuchTemplate, node, line, "%q is an incomplete or empty template", name),
-			}, dname
-		}
-		return context{
-			state: stateError,
-			err:   errorf(ErrNoSuchTemplate, node, line, "no such template %q", name),
-		}, dname
-	}
-	if dname != name {
-		// Use any template derived during an earlier call to escapeTemplate
-		// with different top level templates, or clone if necessary.
-		dt := e.template(dname)
-		if dt == nil {
-			dt = template.New(dname)
-			dt.Tree = &parse.Tree{Name: dname, Root: t.Root.CopyList()}
-			e.derived[dname] = dt
-		}
-		t = dt
-	}
-	return e.computeOutCtx(c, t), dname
-}
-
-// computeOutCtx takes a template and its start context and computes the output
-// context while storing any inferences in e.
-func (e *escaper) computeOutCtx(c context, t *template.Template) context {
-	// Propagate context over the body.
-	c1, ok := e.escapeTemplateBody(c, t)
-	if !ok {
-		// Look for a fixed point by assuming c1 as the output context.
-		if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
-			c1, ok = c2, true
-		}
-		// Use c1 as the error context if neither assumption worked.
-	}
-	if !ok && c1.state != stateError {
-		return context{
-			state: stateError,
-			err:   errorf(ErrOutputContext, t.Tree.Root, 0, "cannot compute output context for template %s", t.Name()),
-		}
-	}
-	return c1
-}
-
-// escapeTemplateBody escapes the given template assuming the given output
-// context, and returns the best guess at the output context and whether the
-// assumption was correct.
-func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
-	filter := func(e1 *escaper, c1 context) bool {
-		if c1.state == stateError {
-			// Do not update the input escaper, e.
-			return false
-		}
-		if !e1.called[t.Name()] {
-			// If t is not recursively called, then c1 is an
-			// accurate output context.
-			return true
-		}
-		// c1 is accurate if it matches our assumed output context.
-		return c.eq(c1)
-	}
-	// We need to assume an output context so that recursive template calls
-	// take the fast path out of escapeTree instead of infinitely recursing.
-	// Naively assuming that the input context is the same as the output
-	// works >90% of the time.
-	e.output[t.Name()] = c
-	return e.escapeListConditionally(c, t.Tree.Root, filter)
-}
-
-// delimEnds maps each delim to a string of characters that terminate it.
-var delimEnds = [...]string{
-	delimDoubleQuote: `"`,
-	delimSingleQuote: "'",
-	// Determined empirically by running the below in various browsers.
-	// var div = document.createElement("DIV");
-	// for (var i = 0; i < 0x10000; ++i) {
-	//   div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
-	//   if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
-	//     document.write("<p>U+" + i.toString(16));
-	// }
-	delimSpaceOrTagEnd: " \t\n\f\r>",
-}
-
-var doctypeBytes = []byte("<!DOCTYPE")
-
-// escapeText escapes a text template node.
-func (e *escaper) escapeText(c context, n *parse.TextNode) context {
-	s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
-	for i != len(s) {
-		c1, nread := contextAfterText(c, s[i:])
-		i1 := i + nread
-		if c.state == stateText || c.state == stateRCDATA {
-			end := i1
-			if c1.state != c.state {
-				for j := end - 1; j >= i; j-- {
-					if s[j] == '<' {
-						end = j
-						break
-					}
-				}
-			}
-			for j := i; j < end; j++ {
-				if s[j] == '<' && !bytes.HasPrefix(bytes.ToUpper(s[j:]), doctypeBytes) {
-					b.Write(s[written:j])
-					b.WriteString("&lt;")
-					written = j + 1
-				}
-			}
-		} else if isComment(c.state) && c.delim == delimNone {
-			switch c.state {
-			case stateJSBlockCmt:
-				// https://es5.github.com/#x7.4:
-				// "Comments behave like white space and are
-				// discarded except that, if a MultiLineComment
-				// contains a line terminator character, then
-				// the entire comment is considered to be a
-				// LineTerminator for purposes of parsing by
-				// the syntactic grammar."
-				if bytes.ContainsAny(s[written:i1], "\n\r\u2028\u2029") {
-					b.WriteByte('\n')
-				} else {
-					b.WriteByte(' ')
-				}
-			case stateCSSBlockCmt:
-				b.WriteByte(' ')
-			}
-			written = i1
-		}
-		if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
-			// Preserve the portion between written and the comment start.
-			cs := i1 - 2
-			if c1.state == stateHTMLCmt {
-				// "<!--" instead of "/*" or "//"
-				cs -= 2
-			}
-			b.Write(s[written:cs])
-			written = i1
-		}
-		if i == i1 && c.state == c1.state {
-			panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
-		}
-		c, i = c1, i1
-	}
-
-	if written != 0 && c.state != stateError {
-		if !isComment(c.state) || c.delim != delimNone {
-			b.Write(n.Text[written:])
-		}
-		e.editTextNode(n, b.Bytes())
-	}
-	return c
-}
-
-// contextAfterText starts in context c, consumes some tokens from the front of
-// s, then returns the context after those tokens and the number of bytes consumed.
-func contextAfterText(c context, s []byte) (context, int) {
-	if c.delim == delimNone {
-		c1, i := tSpecialTagEnd(c, s)
-		if i == 0 {
-			// A special end tag (`</script>`) has been seen and
-			// all content preceding it has been consumed.
-			return c1, 0
-		}
-		// Consider all content up to any end tag.
-		return transitionFunc[c.state](c, s[:i])
-	}
-
-	// We are at the beginning of an attribute value.
-
-	i := bytes.IndexAny(s, delimEnds[c.delim])
-	if i == -1 {
-		i = len(s)
-	}
-	if c.delim == delimSpaceOrTagEnd {
-		// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
-		// lists the runes below as error characters.
-		// Error out because HTML parsers may differ on whether
-		// "<a id= onclick=f("     ends inside id's or onclick's value,
-		// "<a class=`foo "        ends inside a value,
-		// "<a style=font:'Arial'" needs open-quote fixup.
-		// IE treats '`' as a quotation character.
-		if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
-			return context{
-				state: stateError,
-				err:   errorf(ErrBadHTML, nil, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
-			}, len(s)
-		}
-	}
-	if i == len(s) {
-		// Remain inside the attribute.
-		// Decode the value so non-HTML rules can easily handle
-		//     <button onclick="alert(&quot;Hi!&quot;)">
-		// without having to entity decode token boundaries.
-		for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
-			c1, i1 := transitionFunc[c.state](c, u)
-			c, u = c1, u[i1:]
-		}
-		return c, len(s)
-	}
-
-	element := c.element
-
-	// If this is a non-JS "type" attribute inside "script" tag, do not treat the contents as JS.
-	if c.state == stateAttr && c.element == elementScript && c.attr == attrScriptType && !isJSType(string(s[:i])) {
-		element = elementNone
-	}
-
-	if c.delim != delimSpaceOrTagEnd {
-		// Consume any quote.
-		i++
-	}
-	// On exiting an attribute, we discard all state information
-	// except the state and element.
-	return context{state: stateTag, element: element}, i
-}
-
-// editActionNode records a change to an action pipeline for later commit.
-func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
-	if _, ok := e.actionNodeEdits[n]; ok {
-		panic(fmt.Sprintf("node %s shared between templates", n))
-	}
-	e.actionNodeEdits[n] = cmds
-}
-
-// editTemplateNode records a change to a {{template}} callee for later commit.
-func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
-	if _, ok := e.templateNodeEdits[n]; ok {
-		panic(fmt.Sprintf("node %s shared between templates", n))
-	}
-	e.templateNodeEdits[n] = callee
-}
-
-// editTextNode records a change to a text node for later commit.
-func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
-	if _, ok := e.textNodeEdits[n]; ok {
-		panic(fmt.Sprintf("node %s shared between templates", n))
-	}
-	e.textNodeEdits[n] = text
-}
-
-// commit applies changes to actions and template calls needed to contextually
-// autoescape content and adds any derived templates to the set.
-func (e *escaper) commit() {
-	for name := range e.output {
-		e.template(name).Funcs(funcMap)
-	}
-	// Any template from the name space associated with this escaper can be used
-	// to add derived templates to the underlying text/template name space.
-	tmpl := e.arbitraryTemplate()
-	for _, t := range e.derived {
-		if _, err := tmpl.text.AddParseTree(t.Name(), t.Tree); err != nil {
-			panic("error adding derived template")
-		}
-	}
-	for n, s := range e.actionNodeEdits {
-		ensurePipelineContains(n.Pipe, s)
-	}
-	for n, name := range e.templateNodeEdits {
-		n.Name = name
-	}
-	for n, s := range e.textNodeEdits {
-		n.Text = s
-	}
-	// Reset state that is specific to this commit so that the same changes are
-	// not re-applied to the template on subsequent calls to commit.
-	e.called = make(map[string]bool)
-	e.actionNodeEdits = make(map[*parse.ActionNode][]string)
-	e.templateNodeEdits = make(map[*parse.TemplateNode]string)
-	e.textNodeEdits = make(map[*parse.TextNode][]byte)
-}
-
-// template returns the named template given a mangled template name.
-func (e *escaper) template(name string) *template.Template {
-	// Any template from the name space associated with this escaper can be used
-	// to look up templates in the underlying text/template name space.
-	t := e.arbitraryTemplate().text.Lookup(name)
-	if t == nil {
-		t = e.derived[name]
-	}
-	return t
-}
-
-// arbitraryTemplate returns an arbitrary template from the name space
-// associated with e and panics if no templates are found.
-func (e *escaper) arbitraryTemplate() *Template {
-	for _, t := range e.ns.set {
-		return t
-	}
-	panic("no templates in name space")
-}
-
-// Forwarding functions so that clients need only import this package
-// to reach the general escaping functions of text/template.
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
-	template.HTMLEscape(w, b)
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
-	return template.HTMLEscapeString(s)
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
-	return template.HTMLEscaper(args...)
-}
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
-	template.JSEscape(w, b)
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
-	return template.JSEscapeString(s)
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
-	return template.JSEscaper(args...)
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
-	return template.URLQueryEscaper(args...)
-}
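For completeness, the forwarding functions above remain available from the standard library package, so one-off escaping without a template keeps working after this deletion. A short usage sketch (inputs are illustrative; outputs elided):

package main

import (
	"fmt"
	"html/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">O'Reilly</a>`))
	fmt.Println(template.JSEscapeString(`alert("hi") && <script>`))
	fmt.Println(template.URLQueryEscaper("a b&c=d"))
}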
diff --git a/internal/backport/html/template/escape_test.go b/internal/backport/html/template/escape_test.go
deleted file mode 100644
index 8145269..0000000
--- a/internal/backport/html/template/escape_test.go
+++ /dev/null
@@ -1,1994 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"os"
-	"strings"
-	"testing"
-
-	"golang.org/x/website/internal/backport/text/template"
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-type badMarshaler struct{}
-
-func (x *badMarshaler) MarshalJSON() ([]byte, error) {
-	// Keys in valid JSON must be double quoted as must all strings.
-	return []byte("{ foo: 'not quite valid JSON' }"), nil
-}
-
-type goodMarshaler struct{}
-
-func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
-	return []byte(`{ "<foo>": "O'Reilly" }`), nil
-}
-
-func TestEscape(t *testing.T) {
-	data := struct {
-		F, T    bool
-		C, G, H string
-		A, E    []string
-		B, M    json.Marshaler
-		N       int
-		U       interface{} // untyped nil
-		Z       *int        // typed nil
-		W       HTML
-	}{
-		F: false,
-		T: true,
-		C: "<Cincinnati>",
-		G: "<Goodbye>",
-		H: "<Hello>",
-		A: []string{"<a>", "<b>"},
-		E: []string{},
-		N: 42,
-		B: &badMarshaler{},
-		M: &goodMarshaler{},
-		U: nil,
-		Z: nil,
-		W: HTML(`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
-	}
-	pdata := &data
-
-	tests := []struct {
-		name   string
-		input  string
-		output string
-	}{
-		{
-			"if",
-			"{{if .T}}Hello{{end}}, {{.C}}!",
-			"Hello, &lt;Cincinnati&gt;!",
-		},
-		{
-			"else",
-			"{{if .F}}{{.H}}{{else}}{{.G}}{{end}}!",
-			"&lt;Goodbye&gt;!",
-		},
-		{
-			"overescaping1",
-			"Hello, {{.C | html}}!",
-			"Hello, &lt;Cincinnati&gt;!",
-		},
-		{
-			"overescaping2",
-			"Hello, {{html .C}}!",
-			"Hello, &lt;Cincinnati&gt;!",
-		},
-		{
-			"overescaping3",
-			"{{with .C}}{{$msg := .}}Hello, {{$msg}}!{{end}}",
-			"Hello, &lt;Cincinnati&gt;!",
-		},
-		{
-			"assignment",
-			"{{if $x := .H}}{{$x}}{{end}}",
-			"&lt;Hello&gt;",
-		},
-		{
-			"withBody",
-			"{{with .H}}{{.}}{{end}}",
-			"&lt;Hello&gt;",
-		},
-		{
-			"withElse",
-			"{{with .E}}{{.}}{{else}}{{.H}}{{end}}",
-			"&lt;Hello&gt;",
-		},
-		{
-			"rangeBody",
-			"{{range .A}}{{.}}{{end}}",
-			"&lt;a&gt;&lt;b&gt;",
-		},
-		{
-			"rangeElse",
-			"{{range .E}}{{.}}{{else}}{{.H}}{{end}}",
-			"&lt;Hello&gt;",
-		},
-		{
-			"nonStringValue",
-			"{{.T}}",
-			"true",
-		},
-		{
-			"untypedNilValue",
-			"{{.U}}",
-			"",
-		},
-		{
-			"typedNilValue",
-			"{{.Z}}",
-			"&lt;nil&gt;",
-		},
-		{
-			"constant",
-			`<a href="/search?q={{"'a<b'"}}">`,
-			`<a href="/search?q=%27a%3cb%27">`,
-		},
-		{
-			"multipleAttrs",
-			"<a b=1 c={{.H}}>",
-			"<a b=1 c=&lt;Hello&gt;>",
-		},
-		{
-			"urlStartRel",
-			`<a href='{{"/foo/bar?a=b&c=d"}}'>`,
-			`<a href='/foo/bar?a=b&amp;c=d'>`,
-		},
-		{
-			"urlStartAbsOk",
-			`<a href='{{"http://example.com/foo/bar?a=b&c=d"}}'>`,
-			`<a href='http://example.com/foo/bar?a=b&amp;c=d'>`,
-		},
-		{
-			"protocolRelativeURLStart",
-			`<a href='{{"//example.com:8000/foo/bar?a=b&c=d"}}'>`,
-			`<a href='//example.com:8000/foo/bar?a=b&amp;c=d'>`,
-		},
-		{
-			"pathRelativeURLStart",
-			`<a href="{{"/javascript:80/foo/bar"}}">`,
-			`<a href="/javascript:80/foo/bar">`,
-		},
-		{
-			"dangerousURLStart",
-			`<a href='{{"javascript:alert(%22pwned%22)"}}'>`,
-			`<a href='#ZgotmplZ'>`,
-		},
-		{
-			"dangerousURLStart2",
-			`<a href='  {{"javascript:alert(%22pwned%22)"}}'>`,
-			`<a href='  #ZgotmplZ'>`,
-		},
-		{
-			"nonHierURL",
-			`<a href={{"mailto:Muhammed \"The Greatest\" Ali <m.ali@example.com>"}}>`,
-			`<a href=mailto:Muhammed%20%22The%20Greatest%22%20Ali%20%3cm.ali@example.com%3e>`,
-		},
-		{
-			"urlPath",
-			`<a href='http://{{"javascript:80"}}/foo'>`,
-			`<a href='http://javascript:80/foo'>`,
-		},
-		{
-			"urlQuery",
-			`<a href='/search?q={{.H}}'>`,
-			`<a href='/search?q=%3cHello%3e'>`,
-		},
-		{
-			"urlFragment",
-			`<a href='/faq#{{.H}}'>`,
-			`<a href='/faq#%3cHello%3e'>`,
-		},
-		{
-			"urlBranch",
-			`<a href="{{if .F}}/foo?a=b{{else}}/bar{{end}}">`,
-			`<a href="/bar">`,
-		},
-		{
-			"urlBranchConflictMoot",
-			`<a href="{{if .T}}/foo?a={{else}}/bar#{{end}}{{.C}}">`,
-			`<a href="/foo?a=%3cCincinnati%3e">`,
-		},
-		{
-			"jsStrValue",
-			"<button onclick='alert({{.H}})'>",
-			`<button onclick='alert(&#34;\u003cHello\u003e&#34;)'>`,
-		},
-		{
-			"jsNumericValue",
-			"<button onclick='alert({{.N}})'>",
-			`<button onclick='alert( 42 )'>`,
-		},
-		{
-			"jsBoolValue",
-			"<button onclick='alert({{.T}})'>",
-			`<button onclick='alert( true )'>`,
-		},
-		{
-			"jsNilValueTyped",
-			"<button onclick='alert(typeof{{.Z}})'>",
-			`<button onclick='alert(typeof null )'>`,
-		},
-		{
-			"jsNilValueUntyped",
-			"<button onclick='alert(typeof{{.U}})'>",
-			`<button onclick='alert(typeof null )'>`,
-		},
-		{
-			"jsObjValue",
-			"<button onclick='alert({{.A}})'>",
-			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
-		},
-		{
-			"jsObjValueScript",
-			"<script>alert({{.A}})</script>",
-			`<script>alert(["\u003ca\u003e","\u003cb\u003e"])</script>`,
-		},
-		{
-			"jsObjValueNotOverEscaped",
-			"<button onclick='alert({{.A | html}})'>",
-			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
-		},
-		{
-			"jsStr",
-			"<button onclick='alert(&quot;{{.H}}&quot;)'>",
-			`<button onclick='alert(&quot;\u003cHello\u003e&quot;)'>`,
-		},
-		{
-			"badMarshaler",
-			`<button onclick='alert(1/{{.B}}in numbers)'>`,
-			`<button onclick='alert(1/ /* json: error calling MarshalJSON for type *template.badMarshaler: invalid character &#39;f&#39; looking for beginning of object key string */null in numbers)'>`,
-		},
-		{
-			"jsMarshaler",
-			`<button onclick='alert({{.M}})'>`,
-			`<button onclick='alert({&#34;\u003cfoo\u003e&#34;:&#34;O&#39;Reilly&#34;})'>`,
-		},
-		{
-			"jsStrNotUnderEscaped",
-			"<button onclick='alert({{.C | urlquery}})'>",
-			// URL escaped, then quoted for JS.
-			`<button onclick='alert(&#34;%3CCincinnati%3E&#34;)'>`,
-		},
-		{
-			"jsRe",
-			`<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
-			`<button onclick='alert(/foo\u002bbar/.test(""))'>`,
-		},
-		{
-			"jsReBlank",
-			`<script>alert(/{{""}}/.test(""));</script>`,
-			`<script>alert(/(?:)/.test(""));</script>`,
-		},
-		{
-			"jsReAmbigOk",
-			`<script>{{if true}}var x = 1{{end}}</script>`,
-			// The {if} ends in an ambiguous jsCtx but there is
-			// no slash following so we shouldn't care.
-			`<script>var x = 1</script>`,
-		},
-		{
-			"styleBidiKeywordPassed",
-			`<p style="dir: {{"ltr"}}">`,
-			`<p style="dir: ltr">`,
-		},
-		{
-			"styleBidiPropNamePassed",
-			`<p style="border-{{"left"}}: 0; border-{{"right"}}: 1in">`,
-			`<p style="border-left: 0; border-right: 1in">`,
-		},
-		{
-			"styleExpressionBlocked",
-			`<p style="width: {{"expression(alert(1337))"}}">`,
-			`<p style="width: ZgotmplZ">`,
-		},
-		{
-			"styleTagSelectorPassed",
-			`<style>{{"p"}} { color: pink }</style>`,
-			`<style>p { color: pink }</style>`,
-		},
-		{
-			"styleIDPassed",
-			`<style>p{{"#my-ID"}} { font: Arial }</style>`,
-			`<style>p#my-ID { font: Arial }</style>`,
-		},
-		{
-			"styleClassPassed",
-			`<style>p{{".my_class"}} { font: Arial }</style>`,
-			`<style>p.my_class { font: Arial }</style>`,
-		},
-		{
-			"styleQuantityPassed",
-			`<a style="left: {{"2em"}}; top: {{0}}">`,
-			`<a style="left: 2em; top: 0">`,
-		},
-		{
-			"stylePctPassed",
-			`<table style=width:{{"100%"}}>`,
-			`<table style=width:100%>`,
-		},
-		{
-			"styleColorPassed",
-			`<p style="color: {{"#8ff"}}; background: {{"#000"}}">`,
-			`<p style="color: #8ff; background: #000">`,
-		},
-		{
-			"styleObfuscatedExpressionBlocked",
-			`<p style="width: {{"  e\\78preS\x00Sio/**/n(alert(1337))"}}">`,
-			`<p style="width: ZgotmplZ">`,
-		},
-		{
-			"styleMozBindingBlocked",
-			`<p style="{{"-moz-binding(alert(1337))"}}: ...">`,
-			`<p style="ZgotmplZ: ...">`,
-		},
-		{
-			"styleObfuscatedMozBindingBlocked",
-			`<p style="{{"  -mo\\7a-B\x00I/**/nding(alert(1337))"}}: ...">`,
-			`<p style="ZgotmplZ: ...">`,
-		},
-		{
-			"styleFontNameString",
-			`<p style='font-family: "{{"Times New Roman"}}"'>`,
-			`<p style='font-family: "Times New Roman"'>`,
-		},
-		{
-			"styleFontNameString",
-			`<p style='font-family: "{{"Times New Roman"}}", "{{"sans-serif"}}"'>`,
-			`<p style='font-family: "Times New Roman", "sans-serif"'>`,
-		},
-		{
-			"styleFontNameUnquoted",
-			`<p style='font-family: {{"Times New Roman"}}'>`,
-			`<p style='font-family: Times New Roman'>`,
-		},
-		{
-			"styleURLQueryEncoded",
-			`<p style="background: url(/img?name={{"O'Reilly Animal(1)<2>.png"}})">`,
-			`<p style="background: url(/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png)">`,
-		},
-		{
-			"styleQuotedURLQueryEncoded",
-			`<p style="background: url('/img?name={{"O'Reilly Animal(1)<2>.png"}}')">`,
-			`<p style="background: url('/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png')">`,
-		},
-		{
-			"styleStrQueryEncoded",
-			`<p style="background: '/img?name={{"O'Reilly Animal(1)<2>.png"}}'">`,
-			`<p style="background: '/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png'">`,
-		},
-		{
-			"styleURLBadProtocolBlocked",
-			`<a style="background: url('{{"javascript:alert(1337)"}}')">`,
-			`<a style="background: url('#ZgotmplZ')">`,
-		},
-		{
-			"styleStrBadProtocolBlocked",
-			`<a style="background: '{{"vbscript:alert(1337)"}}'">`,
-			`<a style="background: '#ZgotmplZ'">`,
-		},
-		{
-			"styleStrEncodedProtocolEncoded",
-			`<a style="background: '{{"javascript\\3a alert(1337)"}}'">`,
-			// The CSS string 'javascript\\3a alert(1337)' does not contain a colon.
-			`<a style="background: 'javascript\\3a alert\28 1337\29 '">`,
-		},
-		{
-			"styleURLGoodProtocolPassed",
-			`<a style="background: url('{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}')">`,
-			`<a style="background: url('http://oreilly.com/O%27Reilly%20Animals%281%29%3c2%3e;%7b%7d.html')">`,
-		},
-		{
-			"styleStrGoodProtocolPassed",
-			`<a style="background: '{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}'">`,
-			`<a style="background: 'http\3a\2f\2foreilly.com\2fO\27Reilly Animals\28 1\29\3c 2\3e\3b\7b\7d.html'">`,
-		},
-		{
-			"styleURLEncodedForHTMLInAttr",
-			`<a style="background: url('{{"/search?img=foo&size=icon"}}')">`,
-			`<a style="background: url('/search?img=foo&amp;size=icon')">`,
-		},
-		{
-			"styleURLNotEncodedForHTMLInCdata",
-			`<style>body { background: url('{{"/search?img=foo&size=icon"}}') }</style>`,
-			`<style>body { background: url('/search?img=foo&size=icon') }</style>`,
-		},
-		{
-			"styleURLMixedCase",
-			`<p style="background: URL(#{{.H}})">`,
-			`<p style="background: URL(#%3cHello%3e)">`,
-		},
-		{
-			"stylePropertyPairPassed",
-			`<a style='{{"color: red"}}'>`,
-			`<a style='color: red'>`,
-		},
-		{
-			"styleStrSpecialsEncoded",
-			`<a style="font-family: '{{"/**/'\";:// \\"}}', &quot;{{"/**/'\";:// \\"}}&quot;">`,
-			`<a style="font-family: '\2f**\2f\27\22\3b\3a\2f\2f  \\', &quot;\2f**\2f\27\22\3b\3a\2f\2f  \\&quot;">`,
-		},
-		{
-			"styleURLSpecialsEncoded",
-			`<a style="border-image: url({{"/**/'\";:// \\"}}), url(&quot;{{"/**/'\";:// \\"}}&quot;), url('{{"/**/'\";:// \\"}}'), 'http://www.example.com/?q={{"/**/'\";:// \\"}}''">`,
-			`<a style="border-image: url(/**/%27%22;://%20%5c), url(&quot;/**/%27%22;://%20%5c&quot;), url('/**/%27%22;://%20%5c'), 'http://www.example.com/?q=%2f%2a%2a%2f%27%22%3b%3a%2f%2f%20%5c''">`,
-		},
-		{
-			"HTML comment",
-			"<b>Hello, <!-- name of world -->{{.C}}</b>",
-			"<b>Hello, &lt;Cincinnati&gt;</b>",
-		},
-		{
-			"HTML comment not first < in text node.",
-			"<<!-- -->!--",
-			"&lt;!--",
-		},
-		{
-			"HTML normalization 1",
-			"a < b",
-			"a &lt; b",
-		},
-		{
-			"HTML normalization 2",
-			"a << b",
-			"a &lt;&lt; b",
-		},
-		{
-			"HTML normalization 3",
-			"a<<!-- --><!-- -->b",
-			"a&lt;b",
-		},
-		{
-			"HTML doctype not normalized",
-			"<!DOCTYPE html>Hello, World!",
-			"<!DOCTYPE html>Hello, World!",
-		},
-		{
-			"HTML doctype not case-insensitive",
-			"<!doCtYPE htMl>Hello, World!",
-			"<!doCtYPE htMl>Hello, World!",
-		},
-		{
-			"No doctype injection",
-			`<!{{"DOCTYPE"}}`,
-			"&lt;!DOCTYPE",
-		},
-		{
-			"Split HTML comment",
-			"<b>Hello, <!-- name of {{if .T}}city -->{{.C}}{{else}}world -->{{.W}}{{end}}</b>",
-			"<b>Hello, &lt;Cincinnati&gt;</b>",
-		},
-		{
-			"JS line comment",
-			"<script>for (;;) { if (c()) break// foo not a label\n" +
-				"foo({{.T}});}</script>",
-			"<script>for (;;) { if (c()) break\n" +
-				"foo( true );}</script>",
-		},
-		{
-			"JS multiline block comment",
-			"<script>for (;;) { if (c()) break/* foo not a label\n" +
-				" */foo({{.T}});}</script>",
-			// Newline separates break from call. If newline
-			// removed, then break will consume label leaving
-			// code invalid.
-			"<script>for (;;) { if (c()) break\n" +
-				"foo( true );}</script>",
-		},
-		{
-			"JS single-line block comment",
-			"<script>for (;;) {\n" +
-				"if (c()) break/* foo a label */foo;" +
-				"x({{.T}});}</script>",
-			// Newline separates break from call. If newline
-			// removed, then break will consume label leaving
-			// code invalid.
-			"<script>for (;;) {\n" +
-				"if (c()) break foo;" +
-				"x( true );}</script>",
-		},
-		{
-			"JS block comment flush with mathematical division",
-			"<script>var a/*b*//c\nd</script>",
-			"<script>var a /c\nd</script>",
-		},
-		{
-			"JS mixed comments",
-			"<script>var a/*b*///c\nd</script>",
-			"<script>var a \nd</script>",
-		},
-		{
-			"CSS comments",
-			"<style>p// paragraph\n" +
-				`{border: 1px/* color */{{"#00f"}}}</style>`,
-			"<style>p\n" +
-				"{border: 1px #00f}</style>",
-		},
-		{
-			"JS attr block comment",
-			`<a onclick="f(&quot;&quot;); /* alert({{.H}}) */">`,
-			// Attribute comment tests should pass if the comments
-			// are successfully elided.
-			`<a onclick="f(&quot;&quot;); /* alert() */">`,
-		},
-		{
-			"JS attr line comment",
-			`<a onclick="// alert({{.G}})">`,
-			`<a onclick="// alert()">`,
-		},
-		{
-			"CSS attr block comment",
-			`<a style="/* color: {{.H}} */">`,
-			`<a style="/* color:  */">`,
-		},
-		{
-			"CSS attr line comment",
-			`<a style="// color: {{.G}}">`,
-			`<a style="// color: ">`,
-		},
-		{
-			"HTML substitution commented out",
-			"<p><!-- {{.H}} --></p>",
-			"<p></p>",
-		},
-		{
-			"Comment ends flush with start",
-			"<!--{{.}}--><script>/*{{.}}*///{{.}}\n</script><style>/*{{.}}*///{{.}}\n</style><a onclick='/*{{.}}*///{{.}}' style='/*{{.}}*///{{.}}'>",
-			"<script> \n</script><style> \n</style><a onclick='/**///' style='/**///'>",
-		},
-		{
-			"typed HTML in text",
-			`{{.W}}`,
-			`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`,
-		},
-		{
-			"typed HTML in attribute",
-			`<div title="{{.W}}">`,
-			`<div title="&iexcl;Hello, O&#39;World!">`,
-		},
-		{
-			"typed HTML in script",
-			`<button onclick="alert({{.W}})">`,
-			`<button onclick="alert(&#34;\u0026iexcl;\u003cb class=\&#34;foo\&#34;\u003eHello\u003c/b\u003e, \u003ctextarea\u003eO&#39;World\u003c/textarea\u003e!&#34;)">`,
-		},
-		{
-			"typed HTML in RCDATA",
-			`<textarea>{{.W}}</textarea>`,
-			`<textarea>&iexcl;&lt;b class=&#34;foo&#34;&gt;Hello&lt;/b&gt;, &lt;textarea&gt;O&#39;World&lt;/textarea&gt;!</textarea>`,
-		},
-		{
-			"range in textarea",
-			"<textarea>{{range .A}}{{.}}{{end}}</textarea>",
-			"<textarea>&lt;a&gt;&lt;b&gt;</textarea>",
-		},
-		{
-			"No tag injection",
-			`{{"10$"}}<{{"script src,evil.org/pwnd.js"}}...`,
-			`10$&lt;script src,evil.org/pwnd.js...`,
-		},
-		{
-			"No comment injection",
-			`<{{"!--"}}`,
-			`&lt;!--`,
-		},
-		{
-			"No RCDATA end tag injection",
-			`<textarea><{{"/textarea "}}...</textarea>`,
-			`<textarea>&lt;/textarea ...</textarea>`,
-		},
-		{
-			"optional attrs",
-			`<img class="{{"iconClass"}}"` +
-				`{{if .T}} id="{{"<iconId>"}}"{{end}}` +
-				// Double quotes inside if/else.
-				` src=` +
-				`{{if .T}}"?{{"<iconPath>"}}"` +
-				`{{else}}"images/cleardot.gif"{{end}}` +
-				// Missing space before title, but it is not a
-				// part of the src attribute.
-				`{{if .T}}title="{{"<title>"}}"{{end}}` +
-				// Quotes outside if/else.
-				` alt="` +
-				`{{if .T}}{{"<alt>"}}` +
-				`{{else}}{{if .F}}{{"<title>"}}{{end}}` +
-				`{{end}}"` +
-				`>`,
-			`<img class="iconClass" id="&lt;iconId&gt;" src="?%3ciconPath%3e"title="&lt;title&gt;" alt="&lt;alt&gt;">`,
-		},
-		{
-			"conditional valueless attr name",
-			`<input{{if .T}} checked{{end}} name=n>`,
-			`<input checked name=n>`,
-		},
-		{
-			"conditional dynamic valueless attr name 1",
-			`<input{{if .T}} {{"checked"}}{{end}} name=n>`,
-			`<input checked name=n>`,
-		},
-		{
-			"conditional dynamic valueless attr name 2",
-			`<input {{if .T}}{{"checked"}} {{end}}name=n>`,
-			`<input checked name=n>`,
-		},
-		{
-			"dynamic attribute name",
-			`<img on{{"load"}}="alert({{"loaded"}})">`,
-			// Treated as JS since quotes are inserted.
-			`<img onload="alert(&#34;loaded&#34;)">`,
-		},
-		{
-			"bad dynamic attribute name 1",
-			// Allow checked, selected, disabled, but not JS or
-			// CSS attributes.
-			`<input {{"onchange"}}="{{"doEvil()"}}">`,
-			`<input ZgotmplZ="doEvil()">`,
-		},
-		{
-			"bad dynamic attribute name 2",
-			`<div {{"sTyle"}}="{{"color: expression(alert(1337))"}}">`,
-			`<div ZgotmplZ="color: expression(alert(1337))">`,
-		},
-		{
-			"bad dynamic attribute name 3",
-			// Allow title or alt, but not a URL.
-			`<img {{"src"}}="{{"javascript:doEvil()"}}">`,
-			`<img ZgotmplZ="javascript:doEvil()">`,
-		},
-		{
-			"bad dynamic attribute name 4",
-			// Structure preservation requires values to associate
-			// with a consistent attribute.
-			`<input checked {{""}}="Whose value am I?">`,
-			`<input checked ZgotmplZ="Whose value am I?">`,
-		},
-		{
-			"dynamic element name",
-			`<h{{3}}><table><t{{"head"}}>...</h{{3}}>`,
-			`<h3><table><thead>...</h3>`,
-		},
-		{
-			"bad dynamic element name",
-			// Dynamic element names are typically used to switch
-			// between (thead, tfoot, tbody), (ul, ol), (th, td),
-			// and other replaceable sets.
-			// We do not currently easily support (ul, ol).
-			// If we do change to support that, this test should
-			// catch failures to filter out special tag names which
-			// would violate the structure preservation property --
-			// if any special tag name could be substituted, then
-			// the content could be raw text/RCDATA for some inputs
-			// and regular HTML content for others.
-			`<{{"script"}}>{{"doEvil()"}}</{{"script"}}>`,
-			`&lt;script>doEvil()&lt;/script>`,
-		},
-		{
-			"srcset bad URL in second position",
-			`<img srcset="{{"/not-an-image#,javascript:alert(1)"}}">`,
-			// The second URL is also filtered.
-			`<img srcset="/not-an-image#,#ZgotmplZ">`,
-		},
-		{
-			"srcset buffer growth",
-			`<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
-			`<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
-		},
-	}
-
-	for _, test := range tests {
-		tmpl := New(test.name)
-		tmpl = Must(tmpl.Parse(test.input))
-		// Check for bug 6459: Tree field was not set in Parse.
-		if tmpl.Tree != tmpl.text.Tree {
-			t.Errorf("%s: tree not set properly", test.name)
-			continue
-		}
-		b := new(bytes.Buffer)
-		if err := tmpl.Execute(b, data); err != nil {
-			t.Errorf("%s: template execution failed: %s", test.name, err)
-			continue
-		}
-		if w, g := test.output, b.String(); w != g {
-			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
-			continue
-		}
-		b.Reset()
-		if err := tmpl.Execute(b, pdata); err != nil {
-			t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
-			continue
-		}
-		if w, g := test.output, b.String(); w != g {
-			t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
-			continue
-		}
-		if tmpl.Tree != tmpl.text.Tree {
-			t.Errorf("%s: tree mismatch", test.name)
-			continue
-		}
-	}
-}
-
-func TestEscapeMap(t *testing.T) {
-	data := map[string]string{
-		"html":     `<h1>Hi!</h1>`,
-		"urlquery": `http://www.foo.com/index.html?title=main`,
-	}
-	for _, test := range [...]struct {
-		desc, input, output string
-	}{
-		// covering issue 20323
-		{
-			"field with predefined escaper name 1",
-			`{{.html | print}}`,
-			`&lt;h1&gt;Hi!&lt;/h1&gt;`,
-		},
-		// covering issue 20323
-		{
-			"field with predefined escaper name 2",
-			`{{.urlquery | print}}`,
-			`http://www.foo.com/index.html?title=main`,
-		},
-	} {
-		tmpl := Must(New("").Parse(test.input))
-		b := new(bytes.Buffer)
-		if err := tmpl.Execute(b, data); err != nil {
-			t.Errorf("%s: template execution failed: %s", test.desc, err)
-			continue
-		}
-		if w, g := test.output, b.String(); w != g {
-			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.desc, w, g)
-			continue
-		}
-	}
-}
-
-func TestEscapeSet(t *testing.T) {
-	type dataItem struct {
-		Children []*dataItem
-		X        string
-	}
-
-	data := dataItem{
-		Children: []*dataItem{
-			{X: "foo"},
-			{X: "<bar>"},
-			{
-				Children: []*dataItem{
-					{X: "baz"},
-				},
-			},
-		},
-	}
-
-	tests := []struct {
-		inputs map[string]string
-		want   string
-	}{
-		// The trivial set.
-		{
-			map[string]string{
-				"main": ``,
-			},
-			``,
-		},
-		// A template called in the start context.
-		{
-			map[string]string{
-				"main": `Hello, {{template "helper"}}!`,
-				// Not a valid top level HTML template.
-				// "<b" is not a full tag.
-				"helper": `{{"<World>"}}`,
-			},
-			`Hello, &lt;World&gt;!`,
-		},
-		// A template called in a context other than the start.
-		{
-			map[string]string{
-				"main": `<a onclick='a = {{template "helper"}};'>`,
-				// Not a valid top level HTML template.
-				// "<b" is not a full tag.
-				"helper": `{{"<a>"}}<b`,
-			},
-			`<a onclick='a = &#34;\u003ca\u003e&#34;<b;'>`,
-		},
-		// A recursive template that ends in its start context.
-		{
-			map[string]string{
-				"main": `{{range .Children}}{{template "main" .}}{{else}}{{.X}} {{end}}`,
-			},
-			`foo &lt;bar&gt; baz `,
-		},
-		// A recursive helper template that ends in its start context.
-		{
-			map[string]string{
-				"main":   `{{template "helper" .}}`,
-				"helper": `{{if .Children}}<ul>{{range .Children}}<li>{{template "main" .}}</li>{{end}}</ul>{{else}}{{.X}}{{end}}`,
-			},
-			`<ul><li>foo</li><li>&lt;bar&gt;</li><li><ul><li>baz</li></ul></li></ul>`,
-		},
-		// Co-recursive templates that end in their start contexts.
-		{
-			map[string]string{
-				"main":   `<blockquote>{{range .Children}}{{template "helper" .}}{{end}}</blockquote>`,
-				"helper": `{{if .Children}}{{template "main" .}}{{else}}{{.X}}<br>{{end}}`,
-			},
-			`<blockquote>foo<br>&lt;bar&gt;<br><blockquote>baz<br></blockquote></blockquote>`,
-		},
-		// A template that is called in two different contexts.
-		{
-			map[string]string{
-				"main":   `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
-				"helper": `{{11}} of {{"<100>"}}`,
-			},
-			`<button onclick="title='11 of \u003c100\u003e'; ...">11 of &lt;100&gt;</button>`,
-		},
-		// A non-recursive template that ends in a different context.
-		// helper starts in jsCtxRegexp and ends in jsCtxDivOp.
-		{
-			map[string]string{
-				"main":   `<script>var x={{template "helper"}}/{{"42"}};</script>`,
-				"helper": "{{126}}",
-			},
-			`<script>var x= 126 /"42";</script>`,
-		},
-		// A recursive template that ends in a similar context.
-		{
-			map[string]string{
-				"main":      `<script>var x=[{{template "countdown" 4}}];</script>`,
-				"countdown": `{{.}}{{if .}},{{template "countdown" . | pred}}{{end}}`,
-			},
-			`<script>var x=[ 4 , 3 , 2 , 1 , 0 ];</script>`,
-		},
-		// A recursive template that ends in a different context.
-		/*
-			{
-				map[string]string{
-					"main":   `<a href="/foo{{template "helper" .}}">`,
-					"helper": `{{if .Children}}{{range .Children}}{{template "helper" .}}{{end}}{{else}}?x={{.X}}{{end}}`,
-				},
-				`<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
-			},
-		*/
-	}
-
-	// pred is a template function that returns the predecessor of a
-	// natural number for testing recursive templates.
-	fns := FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
-		if len(a) == 1 {
-			if i, _ := a[0].(int); i > 0 {
-				return i - 1, nil
-			}
-		}
-		return nil, fmt.Errorf("undefined pred(%v)", a)
-	}}
-
-	for _, test := range tests {
-		source := ""
-		for name, body := range test.inputs {
-			source += fmt.Sprintf("{{define %q}}%s{{end}} ", name, body)
-		}
-		tmpl, err := New("root").Funcs(fns).Parse(source)
-		if err != nil {
-			t.Errorf("error parsing %q: %v", source, err)
-			continue
-		}
-		var b bytes.Buffer
-
-		if err := tmpl.ExecuteTemplate(&b, "main", data); err != nil {
-			t.Errorf("%q executing %v", err.Error(), tmpl.Lookup("main"))
-			continue
-		}
-		if got := b.String(); test.want != got {
-			t.Errorf("want\n\t%q\ngot\n\t%q", test.want, got)
-		}
-	}
-
-}
-
-func TestErrors(t *testing.T) {
-	tests := []struct {
-		input string
-		err   string
-	}{
-		// Non-error cases.
-		{
-			"{{if .Cond}}<a>{{else}}<b>{{end}}",
-			"",
-		},
-		{
-			"{{if .Cond}}<a>{{end}}",
-			"",
-		},
-		{
-			"{{if .Cond}}{{else}}<b>{{end}}",
-			"",
-		},
-		{
-			"{{with .Cond}}<div>{{end}}",
-			"",
-		},
-		{
-			"{{range .Items}}<a>{{end}}",
-			"",
-		},
-		{
-			"<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
-			"",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
-			"",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
-			"",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
-			"",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
-			"",
-		},
-		// Error cases.
-		{
-			"{{if .Cond}}<a{{end}}",
-			"z:1:5: {{if}} branches",
-		},
-		{
-			"{{if .Cond}}\n{{else}}\n<a{{end}}",
-			"z:1:5: {{if}} branches",
-		},
-		{
-			// Missing quote in the else branch.
-			`{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`,
-			"z:1:5: {{if}} branches",
-		},
-		{
-			// Different kind of attribute: href implies a URL.
-			"<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>",
-			"z:1:8: {{if}} branches",
-		},
-		{
-			"\n{{with .X}}<a{{end}}",
-			"z:2:7: {{with}} branches",
-		},
-		{
-			"\n{{with .X}}<a>{{else}}<a{{end}}",
-			"z:2:7: {{with}} branches",
-		},
-		{
-			"{{range .Items}}<a{{end}}",
-			`z:1: on range loop re-entry: "<" in attribute name: "<a"`,
-		},
-		{
-			"\n{{range .Items}} x='<a{{end}}",
-			"z:2:8: on range loop re-entry: {{range}} branches",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
-			"z:1:29: at range loop break: {{range}} branches end in different contexts",
-		},
-		{
-			"{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
-			"z:1:29: at range loop continue: {{range}} branches end in different contexts",
-		},
-		{
-			"<a b=1 c={{.H}}",
-			"z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
-		},
-		{
-			"<script>foo();",
-			"z: ends in a non-text context: {stateJS",
-		},
-		{
-			`<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
-			"z:1:47: {{.H}} appears in an ambiguous context within a URL",
-		},
-		{
-			`<a onclick="alert('Hello \`,
-			`unfinished escape sequence in JS string: "Hello \\"`,
-		},
-		{
-			`<a onclick='alert("Hello\, World\`,
-			`unfinished escape sequence in JS string: "Hello\\, World\\"`,
-		},
-		{
-			`<a onclick='alert(/x+\`,
-			`unfinished escape sequence in JS string: "x+\\"`,
-		},
-		{
-			`<a onclick="/foo[\]/`,
-			`unfinished JS regexp charset: "foo[\\]/"`,
-		},
-		{
-			// It is ambiguous whether 1.5 should be 1\.5 or 1.5.
-			// Either `var x = 1/- 1.5 /i.test(x)`
-			// where `i.test(x)` is a method call of reference i,
-			// or `/-1\.5/i.test(x)` which is a method call on a
-			// case insensitive regular expression.
-			`<script>{{if false}}var x = 1{{end}}/-{{"1.5"}}/i.test(x)</script>`,
-			`'/' could start a division or regexp: "/-"`,
-		},
-		{
-			`{{template "foo"}}`,
-			"z:1:11: no such template \"foo\"",
-		},
-		{
-			`<div{{template "y"}}>` +
-				// Illegal starting in stateTag but not in stateText.
-				`{{define "y"}} foo<b{{end}}`,
-			`"<" in attribute name: " foo<b"`,
-		},
-		{
-			`<script>reverseList = [{{template "t"}}]</script>` +
-				// Missing " after recursive call.
-				`{{define "t"}}{{if .Tail}}{{template "t" .Tail}}{{end}}{{.Head}}",{{end}}`,
-			`: cannot compute output context for template t$htmltemplate_stateJS_elementScript`,
-		},
-		{
-			`<input type=button value=onclick=>`,
-			`html/template:z: "=" in unquoted attr: "onclick="`,
-		},
-		{
-			`<input type=button value= onclick=>`,
-			`html/template:z: "=" in unquoted attr: "onclick="`,
-		},
-		{
-			`<input type=button value= 1+1=2>`,
-			`html/template:z: "=" in unquoted attr: "1+1=2"`,
-		},
-		{
-			"<a class=`foo>",
-			"html/template:z: \"`\" in unquoted attr: \"`foo\"",
-		},
-		{
-			`<a style=font:'Arial'>`,
-			`html/template:z: "'" in unquoted attr: "font:'Arial'"`,
-		},
-		{
-			`<a=foo>`,
-			`: expected space, attr name, or end of tag, but got "=foo>"`,
-		},
-		{
-			`Hello, {{. | urlquery | print}}!`,
-			// urlquery is disallowed if it is not the last command in the pipeline.
-			`predefined escaper "urlquery" disallowed in template`,
-		},
-		{
-			`Hello, {{. | html | print}}!`,
-			// html is disallowed if it is not the last command in the pipeline.
-			`predefined escaper "html" disallowed in template`,
-		},
-		{
-			`Hello, {{html . | print}}!`,
-			// A direct call to html is disallowed if it is not the last command in the pipeline.
-			`predefined escaper "html" disallowed in template`,
-		},
-		{
-			`<div class={{. | html}}>Hello<div>`,
-			// html is disallowed in a pipeline that is in an unquoted attribute context,
-			// even if it is the last command in the pipeline.
-			`predefined escaper "html" disallowed in template`,
-		},
-		{
-			`Hello, {{. | urlquery | html}}!`,
-			// html is allowed since it is the last command in the pipeline, but urlquery is not.
-			`predefined escaper "urlquery" disallowed in template`,
-		},
-	}
-	for _, test := range tests {
-		buf := new(bytes.Buffer)
-		tmpl, err := New("z").Parse(test.input)
-		if err != nil {
-			t.Errorf("input=%q: unexpected parse error %s\n", test.input, err)
-			continue
-		}
-		err = tmpl.Execute(buf, nil)
-		var got string
-		if err != nil {
-			got = err.Error()
-		}
-		if test.err == "" {
-			if got != "" {
-				t.Errorf("input=%q: unexpected error %q", test.input, got)
-			}
-			continue
-		}
-		if !strings.Contains(got, test.err) {
-			t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err)
-			continue
-		}
-		// Check that we get the same error if we call Execute again.
-		if err := tmpl.Execute(buf, nil); err == nil || err.Error() != got {
-			t.Errorf("input=%q: unexpected error on second call %q", test.input, err)
-
-		}
-	}
-}
-
-func TestEscapeText(t *testing.T) {
-	tests := []struct {
-		input  string
-		output context
-	}{
-		{
-			``,
-			context{},
-		},
-		{
-			`Hello, World!`,
-			context{},
-		},
-		{
-			// An orphaned "<" is OK.
-			`I <3 Ponies!`,
-			context{},
-		},
-		{
-			`<a`,
-			context{state: stateTag},
-		},
-		{
-			`<a `,
-			context{state: stateTag},
-		},
-		{
-			`<a>`,
-			context{state: stateText},
-		},
-		{
-			`<a href`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a on`,
-			context{state: stateAttrName, attr: attrScript},
-		},
-		{
-			`<a href `,
-			context{state: stateAfterName, attr: attrURL},
-		},
-		{
-			`<a style  =  `,
-			context{state: stateBeforeValue, attr: attrStyle},
-		},
-		{
-			`<a href=`,
-			context{state: stateBeforeValue, attr: attrURL},
-		},
-		{
-			`<a href=x`,
-			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href=x `,
-			context{state: stateTag},
-		},
-		{
-			`<a href=>`,
-			context{state: stateText},
-		},
-		{
-			`<a href=x>`,
-			context{state: stateText},
-		},
-		{
-			`<a href ='`,
-			context{state: stateURL, delim: delimSingleQuote, attr: attrURL},
-		},
-		{
-			`<a href=''`,
-			context{state: stateTag},
-		},
-		{
-			`<a href= "`,
-			context{state: stateURL, delim: delimDoubleQuote, attr: attrURL},
-		},
-		{
-			`<a href=""`,
-			context{state: stateTag},
-		},
-		{
-			`<a title="`,
-			context{state: stateAttr, delim: delimDoubleQuote},
-		},
-		{
-			`<a HREF='http:`,
-			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a Href='/`,
-			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href='"`,
-			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href="'`,
-			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href='&apos;`,
-			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href="&quot;`,
-			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href="&#34;`,
-			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<a href=&quot;`,
-			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
-		},
-		{
-			`<img alt="1">`,
-			context{state: stateText},
-		},
-		{
-			`<img alt="1>"`,
-			context{state: stateTag},
-		},
-		{
-			`<img alt="1>">`,
-			context{state: stateText},
-		},
-		{
-			`<input checked type="checkbox"`,
-			context{state: stateTag},
-		},
-		{
-			`<a onclick="`,
-			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="//foo`,
-			context{state: stateJSLineCmt, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			"<a onclick='//\n",
-			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
-		},
-		{
-			"<a onclick='//\r\n",
-			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
-		},
-		{
-			"<a onclick='//\u2028",
-			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/*`,
-			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/*/`,
-			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/**/`,
-			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onkeypress="&quot;`,
-			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick='&quot;foo&quot;`,
-			context{state: stateJS, delim: delimSingleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<a onclick=&#39;foo&#39;`,
-			context{state: stateJS, delim: delimSpaceOrTagEnd, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<a onclick=&#39;foo`,
-			context{state: stateJSSqStr, delim: delimSpaceOrTagEnd, attr: attrScript},
-		},
-		{
-			`<a onclick="&quot;foo'`,
-			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="'foo&quot;`,
-			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<A ONCLICK="'`,
-			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/`,
-			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="'foo'`,
-			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<a onclick="'foo\'`,
-			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="'foo\'`,
-			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/foo/`,
-			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<script>/foo/ /=`,
-			context{state: stateJS, element: elementScript},
-		},
-		{
-			`<a onclick="1 /foo`,
-			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<a onclick="1 /*c*/ /foo`,
-			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<a onclick="/foo[/]`,
-			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/foo\/`,
-			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<a onclick="/foo/`,
-			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
-		},
-		{
-			`<input checked style="`,
-			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="//`,
-			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="//</script>`,
-			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			"<a style='//\n",
-			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
-		},
-		{
-			"<a style='//\r",
-			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="/*`,
-			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="/*/`,
-			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="/**/`,
-			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: '`,
-			context{state: stateCSSSqStr, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: &quot;`,
-			context{state: stateCSSDqStr, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: '/foo?img=`,
-			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
-		},
-		{
-			`<a style="background: '/`,
-			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url(&#x22;/`,
-			context{state: stateCSSDqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url('/`,
-			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url('/)`,
-			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url('/ `,
-			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url(/`,
-			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
-		},
-		{
-			`<a style="background: url( `,
-			context{state: stateCSSURL, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: url( /image?name=`,
-			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
-		},
-		{
-			`<a style="background: url(x)`,
-			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: url('x'`,
-			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<a style="background: url( x `,
-			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
-		},
-		{
-			`<!-- foo`,
-			context{state: stateHTMLCmt},
-		},
-		{
-			`<!-->`,
-			context{state: stateHTMLCmt},
-		},
-		{
-			`<!--->`,
-			context{state: stateHTMLCmt},
-		},
-		{
-			`<!-- foo -->`,
-			context{state: stateText},
-		},
-		{
-			`<script`,
-			context{state: stateTag, element: elementScript},
-		},
-		{
-			`<script `,
-			context{state: stateTag, element: elementScript},
-		},
-		{
-			`<script src="foo.js" `,
-			context{state: stateTag, element: elementScript},
-		},
-		{
-			`<script src='foo.js' `,
-			context{state: stateTag, element: elementScript},
-		},
-		{
-			`<script type=text/javascript `,
-			context{state: stateTag, element: elementScript},
-		},
-		{
-			`<script>`,
-			context{state: stateJS, jsCtx: jsCtxRegexp, element: elementScript},
-		},
-		{
-			`<script>foo`,
-			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
-		},
-		{
-			`<script>foo</script>`,
-			context{state: stateText},
-		},
-		{
-			`<script>foo</script><!--`,
-			context{state: stateHTMLCmt},
-		},
-		{
-			`<script>document.write("<p>foo</p>");`,
-			context{state: stateJS, element: elementScript},
-		},
-		{
-			`<script>document.write("<p>foo<\/script>");`,
-			context{state: stateJS, element: elementScript},
-		},
-		{
-			`<script>document.write("<script>alert(1)</script>");`,
-			context{state: stateText},
-		},
-		{
-			`<script type="golang.org/x/website/internal/backport/text/template">`,
-			context{state: stateText},
-		},
-		// covering issue 19968
-		{
-			`<script type="TEXT/JAVASCRIPT">`,
-			context{state: stateJS, element: elementScript},
-		},
-		// covering issue 19965
-		{
-			`<script TYPE="golang.org/x/website/internal/backport/text/template">`,
-			context{state: stateText},
-		},
-		{
-			`<script type="notjs">`,
-			context{state: stateText},
-		},
-		{
-			`<Script>`,
-			context{state: stateJS, element: elementScript},
-		},
-		{
-			`<SCRIPT>foo`,
-			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
-		},
-		{
-			`<textarea>value`,
-			context{state: stateRCDATA, element: elementTextarea},
-		},
-		{
-			`<textarea>value</TEXTAREA>`,
-			context{state: stateText},
-		},
-		{
-			`<textarea name=html><b`,
-			context{state: stateRCDATA, element: elementTextarea},
-		},
-		{
-			`<title>value`,
-			context{state: stateRCDATA, element: elementTitle},
-		},
-		{
-			`<style>value`,
-			context{state: stateCSS, element: elementStyle},
-		},
-		{
-			`<a xlink:href`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a xmlns`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a xmlns:foo`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a xmlnsxyz`,
-			context{state: stateAttrName},
-		},
-		{
-			`<a data-url`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a data-iconUri`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a data-urlItem`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a g:`,
-			context{state: stateAttrName},
-		},
-		{
-			`<a g:url`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a g:iconUri`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a g:urlItem`,
-			context{state: stateAttrName, attr: attrURL},
-		},
-		{
-			`<a g:value`,
-			context{state: stateAttrName},
-		},
-		{
-			`<a svg:style='`,
-			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
-		},
-		{
-			`<svg:font-face`,
-			context{state: stateTag},
-		},
-		{
-			`<svg:a svg:onclick="`,
-			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
-		},
-		{
-			`<svg:a svg:onclick="x()">`,
-			context{},
-		},
-	}
-
-	for _, test := range tests {
-		b, e := []byte(test.input), makeEscaper(nil)
-		c := e.escapeText(context{}, &parse.TextNode{NodeType: parse.NodeText, Text: b})
-		if !test.output.eq(c) {
-			t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
-			continue
-		}
-		if test.input != string(b) {
-			t.Errorf("input %q: text node was modified: want %q got %q", test.input, test.input, b)
-			continue
-		}
-	}
-}
-
-func TestEnsurePipelineContains(t *testing.T) {
-	tests := []struct {
-		input, output string
-		ids           []string
-	}{
-		{
-			"{{.X}}",
-			".X",
-			[]string{},
-		},
-		{
-			"{{.X | html}}",
-			".X | html",
-			[]string{},
-		},
-		{
-			"{{.X}}",
-			".X | html",
-			[]string{"html"},
-		},
-		{
-			"{{html .X}}",
-			"_eval_args_ .X | html | urlquery",
-			[]string{"html", "urlquery"},
-		},
-		{
-			"{{html .X .Y .Z}}",
-			"_eval_args_ .X .Y .Z | html | urlquery",
-			[]string{"html", "urlquery"},
-		},
-		{
-			"{{.X | print}}",
-			".X | print | urlquery",
-			[]string{"urlquery"},
-		},
-		{
-			"{{.X | print | urlquery}}",
-			".X | print | urlquery",
-			[]string{"urlquery"},
-		},
-		{
-			"{{.X | urlquery}}",
-			".X | html | urlquery",
-			[]string{"html", "urlquery"},
-		},
-		{
-			"{{.X | print 2 | .f 3}}",
-			".X | print 2 | .f 3 | urlquery | html",
-			[]string{"urlquery", "html"},
-		},
-		{
-			// covering issue 10801
-			"{{.X | println.x }}",
-			".X | println.x | urlquery | html",
-			[]string{"urlquery", "html"},
-		},
-		{
-			// covering issue 10801
-			"{{.X | (print 12 | println).x }}",
-			".X | (print 12 | println).x | urlquery | html",
-			[]string{"urlquery", "html"},
-		},
-		// The following test cases ensure that the merging of internal escapers
-		// with the predefined "html" and "urlquery" escapers is correct.
-		{
-			"{{.X | urlquery}}",
-			".X | _html_template_urlfilter | urlquery",
-			[]string{"_html_template_urlfilter", "_html_template_urlnormalizer"},
-		},
-		{
-			"{{.X | urlquery}}",
-			".X | urlquery | _html_template_urlfilter | _html_template_cssescaper",
-			[]string{"_html_template_urlfilter", "_html_template_cssescaper"},
-		},
-		{
-			"{{.X | urlquery}}",
-			".X | urlquery",
-			[]string{"_html_template_urlnormalizer"},
-		},
-		{
-			"{{.X | urlquery}}",
-			".X | urlquery",
-			[]string{"_html_template_urlescaper"},
-		},
-		{
-			"{{.X | html}}",
-			".X | html",
-			[]string{"_html_template_htmlescaper"},
-		},
-		{
-			"{{.X | html}}",
-			".X | html",
-			[]string{"_html_template_rcdataescaper"},
-		},
-	}
-	for i, test := range tests {
-		tmpl := template.Must(template.New("test").Parse(test.input))
-		action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
-		if !ok {
-			t.Errorf("First node is not an action: %s", test.input)
-			continue
-		}
-		pipe := action.Pipe
-		originalIDs := make([]string, len(test.ids))
-		copy(originalIDs, test.ids)
-		ensurePipelineContains(pipe, test.ids)
-		got := pipe.String()
-		if got != test.output {
-			t.Errorf("#%d: %s, %v: want\n\t%s\ngot\n\t%s", i, test.input, originalIDs, test.output, got)
-		}
-	}
-}
-
-func TestEscapeMalformedPipelines(t *testing.T) {
-	tests := []string{
-		"{{ 0 | $ }}",
-		"{{ 0 | $ | urlquery }}",
-		"{{ 0 | (nil) }}",
-		"{{ 0 | (nil) | html }}",
-	}
-	for _, test := range tests {
-		var b bytes.Buffer
-		tmpl, err := New("test").Parse(test)
-		if err != nil {
-			t.Errorf("failed to parse set: %q", err)
-		}
-		err = tmpl.Execute(&b, nil)
-		if err == nil {
-			t.Errorf("Expected error for %q", test)
-		}
-	}
-}
-
-func TestEscapeErrorsNotIgnorable(t *testing.T) {
-	var b bytes.Buffer
-	tmpl, _ := New("dangerous").Parse("<a")
-	err := tmpl.Execute(&b, nil)
-	if err == nil {
-		t.Errorf("Expected error")
-	} else if b.Len() != 0 {
-		t.Errorf("Emitted output despite escaping failure")
-	}
-}
-
-func TestEscapeSetErrorsNotIgnorable(t *testing.T) {
-	var b bytes.Buffer
-	tmpl, err := New("root").Parse(`{{define "t"}}<a{{end}}`)
-	if err != nil {
-		t.Errorf("failed to parse set: %q", err)
-	}
-	err = tmpl.ExecuteTemplate(&b, "t", nil)
-	if err == nil {
-		t.Errorf("Expected error")
-	} else if b.Len() != 0 {
-		t.Errorf("Emitted output despite escaping failure")
-	}
-}
-
-func TestRedundantFuncs(t *testing.T) {
-	inputs := []interface{}{
-		"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
-			"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-			` !"#$%&'()*+,-./` +
-			`0123456789:;<=>?` +
-			`@ABCDEFGHIJKLMNO` +
-			`PQRSTUVWXYZ[\]^_` +
-			"`abcdefghijklmno" +
-			"pqrstuvwxyz{|}~\x7f" +
-			"\u00A0\u0100\u2028\u2029\ufeff\ufdec\ufffd\uffff\U0001D11E" +
-			"&amp;%22\\",
-		CSS(`a[href =~ "//example.com"]#foo`),
-		HTML(`Hello, <b>World</b> &amp;tc!`),
-		HTMLAttr(` dir="ltr"`),
-		JS(`c && alert("Hello, World!");`),
-		JSStr(`Hello, World & O'Reilly\x21`),
-		URL(`greeting=H%69&addressee=(World)`),
-	}
-
-	for n0, m := range redundantFuncs {
-		f0 := funcMap[n0].(func(...interface{}) string)
-		for n1 := range m {
-			f1 := funcMap[n1].(func(...interface{}) string)
-			for _, input := range inputs {
-				want := f0(input)
-				if got := f1(want); want != got {
-					t.Errorf("%s %s with %T %q: want\n\t%q,\ngot\n\t%q", n0, n1, input, input, want, got)
-				}
-			}
-		}
-	}
-}
-
-func TestIndirectPrint(t *testing.T) {
-	a := 3
-	ap := &a
-	b := "hello"
-	bp := &b
-	bpp := &bp
-	tmpl := Must(New("t").Parse(`{{.}}`))
-	var buf bytes.Buffer
-	err := tmpl.Execute(&buf, ap)
-	if err != nil {
-		t.Errorf("Unexpected error: %s", err)
-	} else if buf.String() != "3" {
-		t.Errorf(`Expected "3"; got %q`, buf.String())
-	}
-	buf.Reset()
-	err = tmpl.Execute(&buf, bpp)
-	if err != nil {
-		t.Errorf("Unexpected error: %s", err)
-	} else if buf.String() != "hello" {
-		t.Errorf(`Expected "hello"; got %q`, buf.String())
-	}
-}
-
-// This is a test for issue 3272.
-func TestEmptyTemplateHTML(t *testing.T) {
-	page := Must(New("page").ParseFiles(os.DevNull))
-	if err := page.ExecuteTemplate(os.Stdout, "page", "nothing"); err == nil {
-		t.Fatal("expected error")
-	}
-}
-
-type Issue7379 int
-
-func (Issue7379) SomeMethod(x int) string {
-	return fmt.Sprintf("<%d>", x)
-}
-
-// This is a test for issue 7379: type assertion error caused panic, and then
-// the code to handle the panic breaks escaping. It's hard to see the second
-// problem once the first is fixed, but its fix is trivial so we let that go. See
-// the discussion for issue 7379.
-func TestPipeToMethodIsEscaped(t *testing.T) {
-	tmpl := Must(New("x").Parse("<html>{{0 | .SomeMethod}}</html>\n"))
-	tryExec := func() string {
-		defer func() {
-			panicValue := recover()
-			if panicValue != nil {
-				t.Errorf("panicked: %v\n", panicValue)
-			}
-		}()
-		var b bytes.Buffer
-		tmpl.Execute(&b, Issue7379(0))
-		return b.String()
-	}
-	for i := 0; i < 3; i++ {
-		str := tryExec()
-		const expect = "<html>&lt;0&gt;</html>\n"
-		if str != expect {
-			t.Errorf("expected %q got %q", expect, str)
-		}
-	}
-}
-
-// Unlike text/template, html/template crashed if given an incomplete
-// template, that is, a template that had been named but not given any content.
-// This is issue #10204.
-func TestErrorOnUndefined(t *testing.T) {
-	tmpl := New("undefined")
-
-	err := tmpl.Execute(nil, nil)
-	if err == nil {
-		t.Error("expected error")
-	} else if !strings.Contains(err.Error(), "incomplete") {
-		t.Errorf("expected error about incomplete template; got %s", err)
-	}
-}
-
-// This covers issue #20842.
-func TestIdempotentExecute(t *testing.T) {
-	tmpl := Must(New("").
-		Parse(`{{define "main"}}<body>{{template "hello"}}</body>{{end}}`))
-	Must(tmpl.
-		Parse(`{{define "hello"}}Hello, {{"Ladies & Gentlemen!"}}{{end}}`))
-	got := new(bytes.Buffer)
-	var err error
-	// Ensure that "hello" produces the same output when executed twice.
-	want := "Hello, Ladies &amp; Gentlemen!"
-	for i := 0; i < 2; i++ {
-		err = tmpl.ExecuteTemplate(got, "hello", nil)
-		if err != nil {
-			t.Errorf("unexpected error: %s", err)
-		}
-		if got.String() != want {
-			t.Errorf("after executing template \"hello\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
-		}
-		got.Reset()
-	}
-	// Ensure that the implicit re-execution of "hello" during the execution of
-	// "main" does not cause the output of "hello" to change.
-	err = tmpl.ExecuteTemplate(got, "main", nil)
-	if err != nil {
-		t.Errorf("unexpected error: %s", err)
-	}
-	// If the HTML escaper is added again to the action {{"Ladies & Gentlemen!"}},
-	// we would expect to see the ampersand overescaped to "&amp;amp;".
-	want = "<body>Hello, Ladies &amp; Gentlemen!</body>"
-	if got.String() != want {
-		t.Errorf("after executing template \"main\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
-	}
-}
-
-func BenchmarkEscapedExecute(b *testing.B) {
-	tmpl := Must(New("t").Parse(`<a onclick="alert('{{.}}')">{{.}}</a>`))
-	var buf bytes.Buffer
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		tmpl.Execute(&buf, "foo & 'bar' & baz")
-		buf.Reset()
-	}
-}
-
-// Covers issue 22780.
-func TestOrphanedTemplate(t *testing.T) {
-	t1 := Must(New("foo").Parse(`<a href="{{.}}">link1</a>`))
-	t2 := Must(t1.New("foo").Parse(`bar`))
-
-	var b bytes.Buffer
-	const wantError = `template: "foo" is an incomplete or empty template`
-	if err := t1.Execute(&b, "javascript:alert(1)"); err == nil {
-		t.Fatal("expected error executing t1")
-	} else if gotError := err.Error(); gotError != wantError {
-		t.Fatalf("got t1 execution error:\n\t%s\nwant:\n\t%s", gotError, wantError)
-	}
-	b.Reset()
-	if err := t2.Execute(&b, nil); err != nil {
-		t.Fatalf("error executing t2: %s", err)
-	}
-	const want = "bar"
-	if got := b.String(); got != want {
-		t.Fatalf("t2 rendered %q, want %q", got, want)
-	}
-}
-
-// Covers issue 21844.
-func TestAliasedParseTreeDoesNotOverescape(t *testing.T) {
-	const (
-		tmplText = `{{.}}`
-		data     = `<baz>`
-		want     = `&lt;baz&gt;`
-	)
-	// Templates "foo" and "bar" both alias the same underlying parse tree.
-	tpl := Must(New("foo").Parse(tmplText))
-	if _, err := tpl.AddParseTree("bar", tpl.Tree); err != nil {
-		t.Fatalf("AddParseTree error: %v", err)
-	}
-	var b1, b2 bytes.Buffer
-	if err := tpl.ExecuteTemplate(&b1, "foo", data); err != nil {
-		t.Fatalf(`ExecuteTemplate failed for "foo": %v`, err)
-	}
-	if err := tpl.ExecuteTemplate(&b2, "bar", data); err != nil {
-		t.Fatalf(`ExecuteTemplate failed for "foo": %v`, err)
-	}
-	got1, got2 := b1.String(), b2.String()
-	if got1 != want {
-		t.Fatalf(`Template "foo" rendered %q, want %q`, got1, want)
-	}
-	if got1 != got2 {
-		t.Fatalf(`Template "foo" and "bar" rendered %q and %q respectively, expected equal values`, got1, got2)
-	}
-}
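
The deleted escape_test.go above exercised, for the backport copy, the same contextual escaping that the standard library's html/template performs, including the ZgotmplZ substitution for unsafe dynamic attribute names. A minimal sketch of that behavior against the standard html/template (a sketch only, not part of this change), reusing one of the deleted test inputs; the expected output is copied from the test case above:

	package main

	import (
		"html/template"
		"os"
	)

	func main() {
		// Unsafe dynamic attribute names are replaced with "ZgotmplZ",
		// as in the "bad dynamic attribute name" cases deleted above.
		t := template.Must(template.New("z").Parse(`<input {{"onchange"}}="{{"doEvil()"}}">`))
		if err := t.Execute(os.Stdout, nil); err != nil {
			panic(err)
		}
		// Expected output (per the deleted test): <input ZgotmplZ="doEvil()">
	}
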
diff --git a/internal/backport/html/template/example_test.go b/internal/backport/html/template/example_test.go
deleted file mode 100644
index dbb6511..0000000
--- a/internal/backport/html/template/example_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"strings"
-
-	"golang.org/x/website/internal/backport/html/template"
-)
-
-func Example() {
-	const tpl = `
-<!DOCTYPE html>
-<html>
-	<head>
-		<meta charset="UTF-8">
-		<title>{{.Title}}</title>
-	</head>
-	<body>
-		{{range .Items}}<div>{{ . }}</div>{{else}}<div><strong>no rows</strong></div>{{end}}
-	</body>
-</html>`
-
-	check := func(err error) {
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	t, err := template.New("webpage").Parse(tpl)
-	check(err)
-
-	data := struct {
-		Title string
-		Items []string
-	}{
-		Title: "My page",
-		Items: []string{
-			"My photos",
-			"My blog",
-		},
-	}
-
-	err = t.Execute(os.Stdout, data)
-	check(err)
-
-	noItems := struct {
-		Title string
-		Items []string
-	}{
-		Title: "My another page",
-		Items: []string{},
-	}
-
-	err = t.Execute(os.Stdout, noItems)
-	check(err)
-
-	// Output:
-	// <!DOCTYPE html>
-	// <html>
-	// 	<head>
-	// 		<meta charset="UTF-8">
-	// 		<title>My page</title>
-	// 	</head>
-	// 	<body>
-	// 		<div>My photos</div><div>My blog</div>
-	// 	</body>
-	// </html>
-	// <!DOCTYPE html>
-	// <html>
-	// 	<head>
-	// 		<meta charset="UTF-8">
-	// 		<title>My another page</title>
-	// 	</head>
-	// 	<body>
-	// 		<div><strong>no rows</strong></div>
-	// 	</body>
-	// </html>
-
-}
-
-func Example_autoescaping() {
-	check := func(err error) {
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
-	check(err)
-	err = t.ExecuteTemplate(os.Stdout, "T", "<script>alert('you have been pwned')</script>")
-	check(err)
-	// Output:
-	// Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
-}
-
-func Example_escape() {
-	const s = `"Fran & Freddie's Diner" <tasty@example.com>`
-	v := []interface{}{`"Fran & Freddie's Diner"`, ' ', `<tasty@example.com>`}
-
-	fmt.Println(template.HTMLEscapeString(s))
-	template.HTMLEscape(os.Stdout, []byte(s))
-	fmt.Fprintln(os.Stdout, "")
-	fmt.Println(template.HTMLEscaper(v...))
-
-	fmt.Println(template.JSEscapeString(s))
-	template.JSEscape(os.Stdout, []byte(s))
-	fmt.Fprintln(os.Stdout, "")
-	fmt.Println(template.JSEscaper(v...))
-
-	fmt.Println(template.URLQueryEscaper(v...))
-
-	// Output:
-	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
-	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
-	// &#34;Fran &amp; Freddie&#39;s Diner&#34;32&lt;tasty@example.com&gt;
-	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
-	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
-	// \"Fran \u0026 Freddie\'s Diner\"32\u003Ctasty@example.com\u003E
-	// %22Fran+%26+Freddie%27s+Diner%2232%3Ctasty%40example.com%3E
-
-}
-
-func ExampleTemplate_Delims() {
-	const text = "<<.Greeting>> {{.Name}}"
-
-	data := struct {
-		Greeting string
-		Name     string
-	}{
-		Greeting: "Hello",
-		Name:     "Joe",
-	}
-
-	t := template.Must(template.New("tpl").Delims("<<", ">>").Parse(text))
-
-	err := t.Execute(os.Stdout, data)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Output:
-	// Hello {{.Name}}
-}
-
-// The following example is duplicated in text/template; keep them in sync.
-
-func ExampleTemplate_block() {
-	const (
-		master  = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
-		overlay = `{{define "list"}} {{join . ", "}}{{end}} `
-	)
-	var (
-		funcs     = template.FuncMap{"join": strings.Join}
-		guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
-	)
-	masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
-	if err != nil {
-		log.Fatal(err)
-	}
-	overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
-	if err != nil {
-		log.Fatal(err)
-	}
-	if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
-		log.Fatal(err)
-	}
-	if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
-		log.Fatal(err)
-	}
-	// Output:
-	// Names:
-	// - Gamora
-	// - Groot
-	// - Nebula
-	// - Rocket
-	// - Star-Lord
-	// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
-}
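
The deleted example_test.go above differs from the standard library examples only in its import path (golang.org/x/website/internal/backport/html/template instead of html/template). A minimal sketch of the autoescaping example rewritten against the standard import, for illustration; the output comment is copied from the deleted example:

	package main

	import (
		"html/template"
		"log"
		"os"
	)

	func main() {
		// Identical to the deleted Example_autoescaping, but importing the
		// standard library html/template instead of the backport copy.
		t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
		if err != nil {
			log.Fatal(err)
		}
		if err := t.ExecuteTemplate(os.Stdout, "T", "<script>alert('you have been pwned')</script>"); err != nil {
			log.Fatal(err)
		}
		// Prints: Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
	}
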
diff --git a/internal/backport/html/template/examplefiles_test.go b/internal/backport/html/template/examplefiles_test.go
deleted file mode 100644
index 2efc8e5..0000000
--- a/internal/backport/html/template/examplefiles_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-
-	"golang.org/x/website/internal/backport/text/template"
-)
-
-// templateFile defines the contents of a template to be stored in a file, for testing.
-type templateFile struct {
-	name     string
-	contents string
-}
-
-func createTestDir(files []templateFile) string {
-	dir, err := ioutil.TempDir("", "template")
-	if err != nil {
-		log.Fatal(err)
-	}
-	for _, file := range files {
-		f, err := os.Create(filepath.Join(dir, file.name))
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer f.Close()
-		_, err = io.WriteString(f, file.contents)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	return dir
-}
-
-// The following example is duplicated in text/template; keep them in sync.
-
-// Here we demonstrate loading a set of templates from a directory.
-func ExampleTemplate_glob() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T0.tmpl is a plain template file that just invokes T1.
-		{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
-		// T1.tmpl defines a template, T1 that invokes T2.
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-		// T2.tmpl defines a template T2.
-		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// T0.tmpl is the first name matched, so it becomes the starting template,
-	// the value returned by ParseGlob.
-	tmpl := template.Must(template.ParseGlob(pattern))
-
-	err := tmpl.Execute(os.Stdout, nil)
-	if err != nil {
-		log.Fatalf("template execution: %s", err)
-	}
-	// Output:
-	// T0 invokes T1: (T1 invokes T2: (This is T2))
-}
-
-// Here we demonstrate loading a set of templates from files in different directories
-func ExampleTemplate_parsefiles() {
-	// Here we create different temporary directories and populate them with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir1 := createTestDir([]templateFile{
-		// T1.tmpl is a plain template file that just invokes T2.
-		{"T1.tmpl", `T1 invokes T2: ({{template "T2"}})`},
-	})
-
-	dir2 := createTestDir([]templateFile{
-		// T2.tmpl defines a template T2.
-		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
-	})
-
-	// Clean up after the test; another quirk of running as an example.
-	defer func(dirs ...string) {
-		for _, dir := range dirs {
-			os.RemoveAll(dir)
-		}
-	}(dir1, dir2)
-
-	// Here starts the example proper.
-	// Let's just parse only dir1/T0 and dir2/T2
-	paths := []string{
-		filepath.Join(dir1, "T1.tmpl"),
-		filepath.Join(dir2, "T2.tmpl"),
-	}
-	tmpl := template.Must(template.ParseFiles(paths...))
-
-	err := tmpl.Execute(os.Stdout, nil)
-	if err != nil {
-		log.Fatalf("template execution: %s", err)
-	}
-	// Output:
-	// T1 invokes T2: (This is T2)
-}
-
-// The following example is duplicated in text/template; keep them in sync.
-
-// This example demonstrates one way to share some templates
-// and use them in different contexts. In this variant we add multiple driver
-// templates by hand to an existing bundle of templates.
-func ExampleTemplate_helpers() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T1.tmpl defines a template, T1 that invokes T2.
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-		// T2.tmpl defines a template T2.
-		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// Load the helpers.
-	templates := template.Must(template.ParseGlob(pattern))
-	// Add one driver template to the bunch; we do this with an explicit template definition.
-	_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
-	if err != nil {
-		log.Fatal("parsing driver1: ", err)
-	}
-	// Add another driver template.
-	_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
-	if err != nil {
-		log.Fatal("parsing driver2: ", err)
-	}
-	// We load all the templates before execution. This package does not require
-	// that behavior but html/template's escaping does, so it's a good habit.
-	err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
-	if err != nil {
-		log.Fatalf("driver1 execution: %s", err)
-	}
-	err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
-	if err != nil {
-		log.Fatalf("driver2 execution: %s", err)
-	}
-	// Output:
-	// Driver 1 calls T1: (T1 invokes T2: (This is T2))
-	// Driver 2 calls T2: (This is T2)
-}
-
-// The following example is duplicated in text/template; keep them in sync.
-
-// This example demonstrates how to use one group of driver
-// templates with distinct sets of helper templates.
-func ExampleTemplate_share() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T0.tmpl is a plain template file that just invokes T1.
-		{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
-		// T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// Load the drivers.
-	drivers := template.Must(template.ParseGlob(pattern))
-
-	// We must define an implementation of the T2 template. First we clone
-	// the drivers, then add a definition of T2 to the template name space.
-
-	// 1. Clone the helper set to create a new name space from which to run them.
-	first, err := drivers.Clone()
-	if err != nil {
-		log.Fatal("cloning helpers: ", err)
-	}
-	// 2. Define T2, version A, and parse it.
-	_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
-	if err != nil {
-		log.Fatal("parsing T2: ", err)
-	}
-
-	// Now repeat the whole thing, using a different version of T2.
-	// 1. Clone the drivers.
-	second, err := drivers.Clone()
-	if err != nil {
-		log.Fatal("cloning drivers: ", err)
-	}
-	// 2. Define T2, version B, and parse it.
-	_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
-	if err != nil {
-		log.Fatal("parsing T2: ", err)
-	}
-
-	// Execute the templates in the reverse order to verify the
-	// first is unaffected by the second.
-	err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
-	if err != nil {
-		log.Fatalf("second execution: %s", err)
-	}
-	err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
-	if err != nil {
-		log.Fatalf("first: execution: %s", err)
-	}
-
-	// Output:
-	// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
-	// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
-}
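
The deleted examplefiles_test.go above imports the backported text/template and stages its template files with ioutil.TempDir. A sketch of the same ParseGlob flow, not part of this change, using the standard text/template together with os.MkdirTemp and os.WriteFile (both available since Go 1.16); template names and output mirror the deleted ExampleTemplate_glob:

	package main

	import (
		"log"
		"os"
		"path/filepath"
		"text/template"
	)

	func main() {
		// Stage the sample template files in a temporary directory.
		dir, err := os.MkdirTemp("", "template")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(dir)

		files := map[string]string{
			"T0.tmpl": `T0 invokes T1: ({{template "T1"}})`,
			"T1.tmpl": `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`,
			"T2.tmpl": `{{define "T2"}}This is T2{{end}}`,
		}
		for name, contents := range files {
			if err := os.WriteFile(filepath.Join(dir, name), []byte(contents), 0o644); err != nil {
				log.Fatal(err)
			}
		}

		// T0.tmpl is the first name matched by the glob, so it becomes the
		// starting template returned by ParseGlob.
		tmpl := template.Must(template.ParseGlob(filepath.Join(dir, "*.tmpl")))
		if err := tmpl.Execute(os.Stdout, nil); err != nil {
			log.Fatalf("template execution: %s", err)
		}
		// Prints: T0 invokes T1: (T1 invokes T2: (This is T2))
	}
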
diff --git a/internal/backport/html/template/exec_test.go b/internal/backport/html/template/exec_test.go
deleted file mode 100644
index 4342aa5..0000000
--- a/internal/backport/html/template/exec_test.go
+++ /dev/null
@@ -1,1834 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for template execution, copied from text/template.
-
-package template
-
-import (
-	"bytes"
-	"errors"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"reflect"
-	"strings"
-	"sync"
-	"testing"
-
-	"golang.org/x/website/internal/backport/text/template"
-)
-
-var debug = flag.Bool("debug", false, "show the errors produced by the tests")
-
-// T has lots of interesting pieces to use to test execution.
-type T struct {
-	// Basics
-	True        bool
-	I           int
-	U16         uint16
-	X, S        string
-	FloatZero   float64
-	ComplexZero complex128
-	// Nested structs.
-	U *U
-	// Struct with String method.
-	V0     V
-	V1, V2 *V
-	// Struct with Error method.
-	W0     W
-	W1, W2 *W
-	// Slices
-	SI      []int
-	SICap   []int
-	SIEmpty []int
-	SB      []bool
-	// Arrays
-	AI [3]int
-	// Maps
-	MSI      map[string]int
-	MSIone   map[string]int // one element, for deterministic output
-	MSIEmpty map[string]int
-	MXI      map[interface{}]int
-	MII      map[int]int
-	MI32S    map[int32]string
-	MI64S    map[int64]string
-	MUI32S   map[uint32]string
-	MUI64S   map[uint64]string
-	MI8S     map[int8]string
-	MUI8S    map[uint8]string
-	SMSI     []map[string]int
-	// Empty interfaces; used to see if we can dig inside one.
-	Empty0 interface{} // nil
-	Empty1 interface{}
-	Empty2 interface{}
-	Empty3 interface{}
-	Empty4 interface{}
-	// Non-empty interfaces.
-	NonEmptyInterface         I
-	NonEmptyInterfacePtS      *I
-	NonEmptyInterfaceNil      I
-	NonEmptyInterfaceTypedNil I
-	// Stringer.
-	Str fmt.Stringer
-	Err error
-	// Pointers
-	PI  *int
-	PS  *string
-	PSI *[]int
-	NIL *int
-	// Function (not method)
-	BinaryFunc      func(string, string) string
-	VariadicFunc    func(...string) string
-	VariadicFuncInt func(int, ...string) string
-	NilOKFunc       func(*int) bool
-	ErrFunc         func() (string, error)
-	PanicFunc       func() string
-	// Template to test evaluation of templates.
-	Tmpl *Template
-	// Unexported field; cannot be accessed by template.
-	unexported int
-}
-
-type S []string
-
-func (S) Method0() string {
-	return "M0"
-}
-
-type U struct {
-	V string
-}
-
-type V struct {
-	j int
-}
-
-func (v *V) String() string {
-	if v == nil {
-		return "nilV"
-	}
-	return fmt.Sprintf("<%d>", v.j)
-}
-
-type W struct {
-	k int
-}
-
-func (w *W) Error() string {
-	if w == nil {
-		return "nilW"
-	}
-	return fmt.Sprintf("[%d]", w.k)
-}
-
-var siVal = I(S{"a", "b"})
-
-var tVal = &T{
-	True:   true,
-	I:      17,
-	U16:    16,
-	X:      "x",
-	S:      "xyz",
-	U:      &U{"v"},
-	V0:     V{6666},
-	V1:     &V{7777}, // leave V2 as nil
-	W0:     W{888},
-	W1:     &W{999}, // leave W2 as nil
-	SI:     []int{3, 4, 5},
-	SICap:  make([]int, 5, 10),
-	AI:     [3]int{3, 4, 5},
-	SB:     []bool{true, false},
-	MSI:    map[string]int{"one": 1, "two": 2, "three": 3},
-	MSIone: map[string]int{"one": 1},
-	MXI:    map[interface{}]int{"one": 1},
-	MII:    map[int]int{1: 1},
-	MI32S:  map[int32]string{1: "one", 2: "two"},
-	MI64S:  map[int64]string{2: "i642", 3: "i643"},
-	MUI32S: map[uint32]string{2: "u322", 3: "u323"},
-	MUI64S: map[uint64]string{2: "ui642", 3: "ui643"},
-	MI8S:   map[int8]string{2: "i82", 3: "i83"},
-	MUI8S:  map[uint8]string{2: "u82", 3: "u83"},
-	SMSI: []map[string]int{
-		{"one": 1, "two": 2},
-		{"eleven": 11, "twelve": 12},
-	},
-	Empty1:                    3,
-	Empty2:                    "empty2",
-	Empty3:                    []int{7, 8},
-	Empty4:                    &U{"UinEmpty"},
-	NonEmptyInterface:         &T{X: "x"},
-	NonEmptyInterfacePtS:      &siVal,
-	NonEmptyInterfaceTypedNil: (*T)(nil),
-	Str:                       bytes.NewBuffer([]byte("foozle")),
-	Err:                       errors.New("erroozle"),
-	PI:                        newInt(23),
-	PS:                        newString("a string"),
-	PSI:                       newIntSlice(21, 22, 23),
-	BinaryFunc:                func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
-	VariadicFunc:              func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
-	VariadicFuncInt:           func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
-	NilOKFunc:                 func(s *int) bool { return s == nil },
-	ErrFunc:                   func() (string, error) { return "bla", nil },
-	PanicFunc:                 func() string { panic("test panic") },
-	Tmpl:                      Must(New("x").Parse("test template")), // "x" is the value of .X
-}
-
-var tSliceOfNil = []*T{nil}
-
-// A non-empty interface.
-type I interface {
-	Method0() string
-}
-
-var iVal I = tVal
-
-// Helpers for creation.
-func newInt(n int) *int {
-	return &n
-}
-
-func newString(s string) *string {
-	return &s
-}
-
-func newIntSlice(n ...int) *[]int {
-	p := new([]int)
-	*p = make([]int, len(n))
-	copy(*p, n)
-	return p
-}
-
-// Simple methods with and without arguments.
-func (t *T) Method0() string {
-	return "M0"
-}
-
-func (t *T) Method1(a int) int {
-	return a
-}
-
-func (t *T) Method2(a uint16, b string) string {
-	return fmt.Sprintf("Method2: %d %s", a, b)
-}
-
-func (t *T) Method3(v interface{}) string {
-	return fmt.Sprintf("Method3: %v", v)
-}
-
-func (t *T) Copy() *T {
-	n := new(T)
-	*n = *t
-	return n
-}
-
-func (t *T) MAdd(a int, b []int) []int {
-	v := make([]int, len(b))
-	for i, x := range b {
-		v[i] = x + a
-	}
-	return v
-}
-
-var myError = errors.New("my error")
-
-// MyError returns a value and an error according to its argument.
-func (t *T) MyError(error bool) (bool, error) {
-	if error {
-		return true, myError
-	}
-	return false, nil
-}
-
-// A few methods to test chaining.
-func (t *T) GetU() *U {
-	return t.U
-}
-
-func (u *U) TrueFalse(b bool) string {
-	if b {
-		return "true"
-	}
-	return ""
-}
-
-func typeOf(arg interface{}) string {
-	return fmt.Sprintf("%T", arg)
-}
-
-type execTest struct {
-	name   string
-	input  string
-	output string
-	data   interface{}
-	ok     bool
-}
-
-// bigInt and bigUint are hex string representing numbers either side
-// of the max int boundary.
-// We do it this way so the test doesn't depend on ints being 32 bits.
-var (
-	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
-	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
-)
-
-var execTests = []execTest{
-	// Trivial cases.
-	{"empty", "", "", nil, true},
-	{"text", "some text", "some text", nil, true},
-	{"nil action", "{{nil}}", "", nil, false},
-
-	// Ideal constants.
-	{"ideal int", "{{typeOf 3}}", "int", 0, true},
-	{"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
-	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
-	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
-	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
-	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
-	{"ideal nil without type", "{{nil}}", "", 0, false},
-
-	// Fields of structs.
-	{".X", "-{{.X}}-", "-x-", tVal, true},
-	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
-	{".unexported", "{{.unexported}}", "", tVal, false},
-
-	// Fields on maps.
-	{"map .one", "{{.MSI.one}}", "1", tVal, true},
-	{"map .two", "{{.MSI.two}}", "2", tVal, true},
-	{"map .NO", "{{.MSI.NO}}", "", tVal, true}, // NOTE: <no value> in text/template
-	{"map .one interface", "{{.MXI.one}}", "1", tVal, true},
-	{"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
-	{"map .WRONG type", "{{.MII.one}}", "", tVal, false},
-
-	// Dots of all kinds to test basic evaluation.
-	{"dot int", "<{{.}}>", "&lt;13>", 13, true},
-	{"dot uint", "<{{.}}>", "&lt;14>", uint(14), true},
-	{"dot float", "<{{.}}>", "&lt;15.1>", 15.1, true},
-	{"dot bool", "<{{.}}>", "&lt;true>", true, true},
-	{"dot complex", "<{{.}}>", "&lt;(16.2-17i)>", 16.2 - 17i, true},
-	{"dot string", "<{{.}}>", "&lt;hello>", "hello", true},
-	{"dot slice", "<{{.}}>", "&lt;[-1 -2 -3]>", []int{-1, -2, -3}, true},
-	{"dot map", "<{{.}}>", "&lt;map[two:22]>", map[string]int{"two": 22}, true},
-	{"dot struct", "<{{.}}>", "&lt;{7 seven}>", struct {
-		a int
-		b string
-	}{7, "seven"}, true},
-
-	// Variables.
-	{"$ int", "{{$}}", "123", 123, true},
-	{"$.I", "{{$.I}}", "17", tVal, true},
-	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
-	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
-	{"simple assignment", "{{$x := 2}}{{$x = 3}}{{$x}}", "3", tVal, true},
-	{"nested assignment",
-		"{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{$x}}",
-		"3", tVal, true},
-	{"nested assignment changes the last declaration",
-		"{{$x := 1}}{{if true}}{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{end}}{{$x}}",
-		"1", tVal, true},
-
-	// Type with String method.
-	{"V{6666}.String()", "-{{.V0}}-", "-{6666}-", tVal, true}, //  NOTE: -<6666>- in text/template
-	{"&V{7777}.String()", "-{{.V1}}-", "-&lt;7777&gt;-", tVal, true},
-	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
-
-	// Type with Error method.
-	{"W{888}.Error()", "-{{.W0}}-", "-{888}-", tVal, true}, // NOTE: -[888] in text/template
-	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
-	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
-
-	// Pointers.
-	{"*int", "{{.PI}}", "23", tVal, true},
-	{"*string", "{{.PS}}", "a string", tVal, true},
-	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
-	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
-	{"NIL", "{{.NIL}}", "&lt;nil&gt;", tVal, true},
-
-	// Empty interfaces holding values.
-	{"empty nil", "{{.Empty0}}", "", tVal, true}, // NOTE: <no value> in text/template
-	{"empty with int", "{{.Empty1}}", "3", tVal, true},
-	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
-	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
-	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
-	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
-
-	// Edge cases with <no value> with an interface value
-	{"field on interface", "{{.foo}}", "", nil, true},                  // NOTE: <no value> in text/template
-	{"field on parenthesized interface", "{{(.).foo}}", "", nil, true}, // NOTE: <no value> in text/template
-
-	// Issue 31810: Parenthesized first element of pipeline with arguments.
-	// See also TestIssue31810.
-	{"unparenthesized non-function", "{{1 2}}", "", nil, false},
-	{"parenthesized non-function", "{{(1) 2}}", "", nil, false},
-	{"parenthesized non-function with no args", "{{(1)}}", "1", nil, true}, // This is fine.
-
-	// Method calls.
-	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
-	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
-	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
-	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
-	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
-	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: &lt;nil&gt;-", tVal, true},
-	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: &lt;nil&gt;-", tVal, true},
-	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{"method on chained var",
-		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method",
-		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method on variable",
-		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
-		"true", tVal, true},
-	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
-	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
-	{"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
-	{"method on typed nil interface value", "{{.NonEmptyInterfaceTypedNil.Method0}}", "M0", tVal, true},
-
-	// Function call builtin.
-	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
-	{".VariadicFunc0", "{{call .VariadicFunc}}", "&lt;&gt;", tVal, true},
-	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "&lt;he&#43;llo&gt;", tVal, true},
-	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=&lt;he&#43;llo&gt;", tVal, true},
-	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
-	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
-	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
-	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
-	{"call nil", "{{call nil}}", "", tVal, false},
-
-	// Erroneous function calls (check args).
-	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
-	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
-	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
-	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
-	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
-	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
-	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
-	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
-
-	// Pipelines.
-	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
-	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-&lt;he&#43;&lt;llo&gt;&gt;-", tVal, true},
-
-	// Nil values aren't missing arguments.
-	{"nil pipeline", "{{ .Empty0 | call .NilOKFunc }}", "true", tVal, true},
-	{"nil call arg", "{{ call .NilOKFunc .Empty0 }}", "true", tVal, true},
-	{"bad nil pipeline", "{{ .Empty0 | .VariadicFunc }}", "", tVal, false},
-
-	// Parenthesized expressions
-	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
-
-	// Parenthesized expressions with field accesses
-	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
-	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
-	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
-	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
-
-	// If.
-	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
-	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
-	{"if on typed nil interface value", "{{if .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
-	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
-	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
-	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
-	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
-
-	// Print etc.
-	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
-	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
-	{"print nil", `{{print nil}}`, "&lt;nil&gt;", tVal, true},
-	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
-	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
-	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
-	{"printf complex", `{{printf "%g" 1+7i}}`, "(1&#43;7i)", tVal, true},
-	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
-	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
-	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
-	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
-	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
-	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
-	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
-
-	// HTML.
-	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html", `{{html .PS}}`, "a string", tVal, true},
-	{"html typed nil", `{{html .NIL}}`, "&lt;nil&gt;", tVal, true},
-	{"html untyped nil", `{{html .Empty0}}`, "&lt;nil&gt;", tVal, true}, // NOTE: "&lt;no value&gt;" in text/template
-
-	// JavaScript.
-	{"js", `{{js .}}`, `It\&#39;d be nice.`, `It'd be nice.`, true},
-
-	// URL query.
-	{"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
-
-	// Booleans
-	{"not", "{{not true}} {{not false}}", "false true", nil, true},
-	{"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
-	{"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
-	{"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
-	{"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
-
-	// Indexing.
-	{"slice[0]", "{{index .SI 0}}", "3", tVal, true},
-	{"slice[1]", "{{index .SI 1}}", "4", tVal, true},
-	{"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
-	{"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
-	{"slice[nil]", "{{index .SI nil}}", "", tVal, false},
-	{"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
-	{"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
-	{"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
-	{"map[nil]", "{{index .MSI nil}}", "", tVal, false},
-	{"map[``]", "{{index .MSI ``}}", "0", tVal, true},
-	{"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
-	{"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
-	{"nil[1]", "{{index nil 1}}", "", tVal, false},
-	{"map MI64S", "{{index .MI64S 2}}", "i642", tVal, true},
-	{"map MI32S", "{{index .MI32S 2}}", "two", tVal, true},
-	{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
-	{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
-	{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
-	{"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
-
-	// Slicing.
-	{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
-	{"slice[1:]", "{{slice .SI 1}}", "[4 5]", tVal, true},
-	{"slice[1:2]", "{{slice .SI 1 2}}", "[4]", tVal, true},
-	{"slice[-1:]", "{{slice .SI -1}}", "", tVal, false},
-	{"slice[1:-2]", "{{slice .SI 1 -2}}", "", tVal, false},
-	{"slice[1:2:-1]", "{{slice .SI 1 2 -1}}", "", tVal, false},
-	{"slice[2:1]", "{{slice .SI 2 1}}", "", tVal, false},
-	{"slice[2:2:1]", "{{slice .SI 2 2 1}}", "", tVal, false},
-	{"out of range", "{{slice .SI 4 5}}", "", tVal, false},
-	{"out of range", "{{slice .SI 2 2 5}}", "", tVal, false},
-	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10}}", "[0 0 0 0]", tVal, true},
-	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10 10}}", "[0 0 0 0]", tVal, true},
-	{"indexes > cap(s)", "{{slice .SICap 10 11}}", "", tVal, false},
-	{"indexes > cap(s)", "{{slice .SICap 6 10 11}}", "", tVal, false},
-	{"array[:]", "{{slice .AI}}", "[3 4 5]", tVal, true},
-	{"array[1:]", "{{slice .AI 1}}", "[4 5]", tVal, true},
-	{"array[1:2]", "{{slice .AI 1 2}}", "[4]", tVal, true},
-	{"string[:]", "{{slice .S}}", "xyz", tVal, true},
-	{"string[0:1]", "{{slice .S 0 1}}", "x", tVal, true},
-	{"string[1:]", "{{slice .S 1}}", "yz", tVal, true},
-	{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
-	{"out of range", "{{slice .S 1 5}}", "", tVal, false},
-	{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
-	{"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
-
-	// Len.
-	{"slice", "{{len .SI}}", "3", tVal, true},
-	{"map", "{{len .MSI }}", "3", tVal, true},
-	{"len of int", "{{len 3}}", "", tVal, false},
-	{"len of nothing", "{{len .Empty0}}", "", tVal, false},
-	{"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
-
-	// With.
-	{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
-	{"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
-	{"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
-	{"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
-	{"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0&#43;1.5i)", tVal, true},
-	{"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
-	{"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
-	{"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
-	{"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
-	{"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
-	{"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
-	{"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
-	{"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
-
-	// Range.
-	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
-	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
-	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
-	{"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
-	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
-	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
-	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
-	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
-	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "&lt;3>&lt;4>&lt;5>", tVal, true},
-	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "&lt;0=3>&lt;1=4>&lt;2=5>", tVal, true},
-	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "&lt;1>", tVal, true},
-	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "&lt;one=1>", tVal, true},
-	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
-	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
-	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
-	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
-
-	// Cute examples.
-	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
-	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
-
-	// Error handling.
-	{"error method, error", "{{.MyError true}}", "", tVal, false},
-	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
-
-	// Numbers
-	{"decimal", "{{print 1234}}", "1234", tVal, true},
-	{"decimal _", "{{print 12_34}}", "1234", tVal, true},
-	{"binary", "{{print 0b101}}", "5", tVal, true},
-	{"binary _", "{{print 0b_1_0_1}}", "5", tVal, true},
-	{"BINARY", "{{print 0B101}}", "5", tVal, true},
-	{"octal0", "{{print 0377}}", "255", tVal, true},
-	{"octal", "{{print 0o377}}", "255", tVal, true},
-	{"octal _", "{{print 0o_3_7_7}}", "255", tVal, true},
-	{"OCTAL", "{{print 0O377}}", "255", tVal, true},
-	{"hex", "{{print 0x123}}", "291", tVal, true},
-	{"hex _", "{{print 0x1_23}}", "291", tVal, true},
-	{"HEX", "{{print 0X123ABC}}", "1194684", tVal, true},
-	{"float", "{{print 123.4}}", "123.4", tVal, true},
-	{"float _", "{{print 0_0_1_2_3.4}}", "123.4", tVal, true},
-	{"hex float", "{{print +0x1.ep+2}}", "7.5", tVal, true},
-	{"hex float _", "{{print +0x_1.e_0p+0_2}}", "7.5", tVal, true},
-	{"HEX float", "{{print +0X1.EP+2}}", "7.5", tVal, true},
-	{"print multi", "{{print 1_2_3_4 7.5_00_00_00}}", "1234 7.5", tVal, true},
-	{"print multi2", "{{print 1234 0x0_1.e_0p+02}}", "1234 7.5", tVal, true},
-
-	// Fixed bugs.
-	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
-	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
-	// Do not loop endlessly in indirect for non-empty interfaces.
-	// The bug appears with *interface only; looped forever.
-	{"bug1", "{{.Method0}}", "M0", &iVal, true},
-	// Was taking address of interface field, so method set was empty.
-	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
-	// Struct values were not legal in with - mere oversight.
-	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
-	// Nil interface values in if.
-	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
-	// Stringer.
-	{"bug5", "{{.Str}}", "foozle", tVal, true},
-	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
-	// Args need to be indirected and dereferenced sometimes.
-	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
-	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
-	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
-	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
-	// Legal parse but illegal execution: non-function should have no arguments.
-	{"bug7a", "{{3 2}}", "", tVal, false},
-	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
-	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
-	// Pipelined arg was not being type-checked.
-	{"bug8a", "{{3|oneArg}}", "", tVal, false},
-	{"bug8b", "{{4|dddArg 3}}", "", tVal, false},
-	// A bug was introduced that broke map lookups for lower-case names.
-	{"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
-	// Field chain starting with function did not work.
-	{"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
-	// Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
-	{"bug11", "{{valueString .PS}}", "", T{}, false},
-	// 0xef gave constant type float64. Issue 8622.
-	{"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
-	{"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
-	{"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
-	{"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
-	// Chained nodes did not work as arguments. Issue 8473.
-	{"bug13", "{{print (.Copy).I}}", "17", tVal, true},
-	// Didn't protect against nil or literal values in field chains.
-	{"bug14a", "{{(nil).True}}", "", tVal, false},
-	{"bug14b", "{{$x := nil}}{{$x.anything}}", "", tVal, false},
-	{"bug14c", `{{$x := (1.0)}}{{$y := ("hello")}}{{$x.anything}}{{$y.true}}`, "", tVal, false},
-	// Didn't call validateType on function results. Issue 10800.
-	{"bug15", "{{valueString returnInt}}", "", tVal, false},
-	// Variadic function corner cases. Issue 10946.
-	{"bug16a", "{{true|printf}}", "", tVal, false},
-	{"bug16b", "{{1|printf}}", "", tVal, false},
-	{"bug16c", "{{1.1|printf}}", "", tVal, false},
-	{"bug16d", "{{'x'|printf}}", "", tVal, false},
-	{"bug16e", "{{0i|printf}}", "", tVal, false},
-	{"bug16f", "{{true|twoArgs \"xxx\"}}", "", tVal, false},
-	{"bug16g", "{{\"aaa\" |twoArgs \"bbb\"}}", "twoArgs=bbbaaa", tVal, true},
-	{"bug16h", "{{1|oneArg}}", "", tVal, false},
-	{"bug16i", "{{\"aaa\"|oneArg}}", "oneArg=aaa", tVal, true},
-	{"bug16j", "{{1+2i|printf \"%v\"}}", "(1&#43;2i)", tVal, true},
-	{"bug16k", "{{\"aaa\"|printf }}", "aaa", tVal, true},
-	{"bug17a", "{{.NonEmptyInterface.X}}", "x", tVal, true},
-	{"bug17b", "-{{.NonEmptyInterface.Method1 1234}}-", "-1234-", tVal, true},
-	{"bug17c", "{{len .NonEmptyInterfacePtS}}", "2", tVal, true},
-	{"bug17d", "{{index .NonEmptyInterfacePtS 0}}", "a", tVal, true},
-	{"bug17e", "{{range .NonEmptyInterfacePtS}}-{{.}}-{{end}}", "-a--b-", tVal, true},
-
-	// More variadic function corner cases. Some runes would get evaluated
-	// as constant floats instead of ints. Issue 34483.
-	{"bug18a", "{{eq . '.'}}", "true", '.', true},
-	{"bug18b", "{{eq . 'e'}}", "true", 'e', true},
-	{"bug18c", "{{eq . 'P'}}", "true", 'P', true},
-}
-
-func zeroArgs() string {
-	return "zeroArgs"
-}
-
-func oneArg(a string) string {
-	return "oneArg=" + a
-}
-
-func twoArgs(a, b string) string {
-	return "twoArgs=" + a + b
-}
-
-func dddArg(a int, b ...string) string {
-	return fmt.Sprintln(a, b)
-}
-
-// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
-func count(n int) chan string {
-	if n == 0 {
-		return nil
-	}
-	c := make(chan string)
-	go func() {
-		for i := 0; i < n; i++ {
-			c <- "abcdefghijklmnop"[i : i+1]
-		}
-		close(c)
-	}()
-	return c
-}
-
-// vfunc takes a *V and a V
-func vfunc(V, *V) string {
-	return "vfunc"
-}
-
-// valueString takes a string, not a pointer.
-func valueString(v string) string {
-	return "value is ignored"
-}
-
-// returnInt returns an int
-func returnInt() int {
-	return 7
-}
-
-func add(args ...int) int {
-	sum := 0
-	for _, x := range args {
-		sum += x
-	}
-	return sum
-}
-
-func echo(arg interface{}) interface{} {
-	return arg
-}
-
-func makemap(arg ...string) map[string]string {
-	if len(arg)%2 != 0 {
-		panic("bad makemap")
-	}
-	m := make(map[string]string)
-	for i := 0; i < len(arg); i += 2 {
-		m[arg[i]] = arg[i+1]
-	}
-	return m
-}
-
-func stringer(s fmt.Stringer) string {
-	return s.String()
-}
-
-func mapOfThree() interface{} {
-	return map[string]int{"three": 3}
-}
-
-func testExecute(execTests []execTest, template *Template, t *testing.T) {
-	b := new(bytes.Buffer)
-	funcs := FuncMap{
-		"add":         add,
-		"count":       count,
-		"dddArg":      dddArg,
-		"echo":        echo,
-		"makemap":     makemap,
-		"mapOfThree":  mapOfThree,
-		"oneArg":      oneArg,
-		"returnInt":   returnInt,
-		"stringer":    stringer,
-		"twoArgs":     twoArgs,
-		"typeOf":      typeOf,
-		"valueString": valueString,
-		"vfunc":       vfunc,
-		"zeroArgs":    zeroArgs,
-	}
-	for _, test := range execTests {
-		var tmpl *Template
-		var err error
-		if template == nil {
-			tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
-		} else {
-			tmpl, err = template.Clone()
-			if err != nil {
-				t.Errorf("%s: clone error: %s", test.name, err)
-				continue
-			}
-			tmpl, err = tmpl.New(test.name).Funcs(funcs).Parse(test.input)
-		}
-		if err != nil {
-			t.Errorf("%s: parse error: %s", test.name, err)
-			continue
-		}
-		b.Reset()
-		err = tmpl.Execute(b, test.data)
-		switch {
-		case !test.ok && err == nil:
-			t.Errorf("%s: expected error; got none", test.name)
-			continue
-		case test.ok && err != nil:
-			t.Errorf("%s: unexpected execute error: %s", test.name, err)
-			continue
-		case !test.ok && err != nil:
-			// expected error, got one
-			if *debug {
-				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
-			}
-		}
-		result := b.String()
-		if result != test.output {
-			t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
-		}
-	}
-}
-
-func TestExecute(t *testing.T) {
-	testExecute(execTests, nil, t)
-}
-
-var delimPairs = []string{
-	"", "", // default
-	"{{", "}}", // same as default
-	"|", "|", // same
-	"(日)", "(本)", // peculiar
-}
-
-func TestDelims(t *testing.T) {
-	const hello = "Hello, world"
-	var value = struct{ Str string }{hello}
-	for i := 0; i < len(delimPairs); i += 2 {
-		text := ".Str"
-		left := delimPairs[i+0]
-		trueLeft := left
-		right := delimPairs[i+1]
-		trueRight := right
-		if left == "" { // default case
-			trueLeft = "{{"
-		}
-		if right == "" { // default case
-			trueRight = "}}"
-		}
-		text = trueLeft + text + trueRight
-		// Now add a comment
-		text += trueLeft + "/*comment*/" + trueRight
-		// Now add  an action containing a string.
-		text += trueLeft + `"` + trueLeft + `"` + trueRight
-		// At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
-		tmpl, err := New("delims").Delims(left, right).Parse(text)
-		if err != nil {
-			t.Fatalf("delim %q text %q parse err %s", left, text, err)
-		}
-		var b = new(bytes.Buffer)
-		err = tmpl.Execute(b, value)
-		if err != nil {
-			t.Fatalf("delim %q exec err %s", left, err)
-		}
-		if b.String() != hello+trueLeft {
-			t.Errorf("expected %q got %q", hello+trueLeft, b.String())
-		}
-	}
-}
-
-// Check that an error from a method flows back to the top.
-func TestExecuteError(t *testing.T) {
-	b := new(bytes.Buffer)
-	tmpl := New("error")
-	_, err := tmpl.Parse("{{.MyError true}}")
-	if err != nil {
-		t.Fatalf("parse error: %s", err)
-	}
-	err = tmpl.Execute(b, tVal)
-	if err == nil {
-		t.Errorf("expected error; got none")
-	} else if !strings.Contains(err.Error(), myError.Error()) {
-		if *debug {
-			fmt.Printf("test execute error: %s\n", err)
-		}
-		t.Errorf("expected myError; got %s", err)
-	}
-}
-
-const execErrorText = `line 1
-line 2
-line 3
-{{template "one" .}}
-{{define "one"}}{{template "two" .}}{{end}}
-{{define "two"}}{{template "three" .}}{{end}}
-{{define "three"}}{{index "hi" $}}{{end}}`
-
-// Check that an error from a nested template contains all the relevant information.
-func TestExecError(t *testing.T) {
-	tmpl, err := New("top").Parse(execErrorText)
-	if err != nil {
-		t.Fatal("parse error:", err)
-	}
-	var b bytes.Buffer
-	err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
-	if err == nil {
-		t.Fatal("expected error")
-	}
-	const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
-	got := err.Error()
-	if got != want {
-		t.Errorf("expected\n%q\ngot\n%q", want, got)
-	}
-}
-
-func TestJSEscaping(t *testing.T) {
-	testCases := []struct {
-		in, exp string
-	}{
-		{`a`, `a`},
-		{`'foo`, `\'foo`},
-		{`Go "jump" \`, `Go \"jump\" \\`},
-		{`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
-		{"unprintable \uFDFF", `unprintable \uFDFF`},
-		{`<html>`, `\u003Chtml\u003E`},
-		{`no = in attributes`, `no \u003D in attributes`},
-		{`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
-	}
-	for _, tc := range testCases {
-		s := JSEscapeString(tc.in)
-		if s != tc.exp {
-			t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
-		}
-	}
-}
-
-// A nice example: walk a binary tree.
-
-type Tree struct {
-	Val         int
-	Left, Right *Tree
-}
-
-// Use different delimiters to test Set.Delims.
-// Also test the trimming of leading and trailing spaces.
-const treeTemplate = `
-	(- define "tree" -)
-	[
-		(- .Val -)
-		(- with .Left -)
-			(template "tree" . -)
-		(- end -)
-		(- with .Right -)
-			(- template "tree" . -)
-		(- end -)
-	]
-	(- end -)
-`
-
-func TestTree(t *testing.T) {
-	var tree = &Tree{
-		1,
-		&Tree{
-			2, &Tree{
-				3,
-				&Tree{
-					4, nil, nil,
-				},
-				nil,
-			},
-			&Tree{
-				5,
-				&Tree{
-					6, nil, nil,
-				},
-				nil,
-			},
-		},
-		&Tree{
-			7,
-			&Tree{
-				8,
-				&Tree{
-					9, nil, nil,
-				},
-				nil,
-			},
-			&Tree{
-				10,
-				&Tree{
-					11, nil, nil,
-				},
-				nil,
-			},
-		},
-	}
-	tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
-	if err != nil {
-		t.Fatal("parse error:", err)
-	}
-	var b bytes.Buffer
-	const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
-	// First by looking up the template.
-	err = tmpl.Lookup("tree").Execute(&b, tree)
-	if err != nil {
-		t.Fatal("exec error:", err)
-	}
-	result := b.String()
-	if result != expect {
-		t.Errorf("expected %q got %q", expect, result)
-	}
-	// Then direct to execution.
-	b.Reset()
-	err = tmpl.ExecuteTemplate(&b, "tree", tree)
-	if err != nil {
-		t.Fatal("exec error:", err)
-	}
-	result = b.String()
-	if result != expect {
-		t.Errorf("expected %q got %q", expect, result)
-	}
-}
-
-func TestExecuteOnNewTemplate(t *testing.T) {
-	// This is issue 3872.
-	New("Name").Templates()
-	// This is issue 11379.
-	// new(Template).Templates() // TODO: crashes
-	// new(Template).Parse("") // TODO: crashes
-	// new(Template).New("abc").Parse("") // TODO: crashes
-	// new(Template).Execute(nil, nil)                // TODO: crashes; returns an error (but does not crash)
-	// new(Template).ExecuteTemplate(nil, "XXX", nil) // TODO: crashes; returns an error (but does not crash)
-}
-
-const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
-
-func TestMessageForExecuteEmpty(t *testing.T) {
-	// Test a truly empty template.
-	tmpl := New("empty")
-	var b bytes.Buffer
-	err := tmpl.Execute(&b, 0)
-	if err == nil {
-		t.Fatal("expected initial error")
-	}
-	got := err.Error()
-	want := `template: "empty" is an incomplete or empty template` // NOTE: text/template has extra "empty: " in message
-	if got != want {
-		t.Errorf("expected error %s got %s", want, got)
-	}
-
-	// Add a non-empty template to check that the error is helpful.
-	tmpl = New("empty")
-	tests, err := New("").Parse(testTemplates)
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmpl.AddParseTree("secondary", tests.Tree)
-	err = tmpl.Execute(&b, 0)
-	if err == nil {
-		t.Fatal("expected second error")
-	}
-	got = err.Error()
-	if got != want {
-		t.Errorf("expected error %s got %s", want, got)
-	}
-	// Make sure we can execute the secondary.
-	err = tmpl.ExecuteTemplate(&b, "secondary", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestFinalForPrintf(t *testing.T) {
-	tmpl, err := New("").Parse(`{{"x" | printf}}`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	var b bytes.Buffer
-	err = tmpl.Execute(&b, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-type cmpTest struct {
-	expr  string
-	truth string
-	ok    bool
-}
-
-var cmpTests = []cmpTest{
-	{"eq true true", "true", true},
-	{"eq true false", "false", true},
-	{"eq 1+2i 1+2i", "true", true},
-	{"eq 1+2i 1+3i", "false", true},
-	{"eq 1.5 1.5", "true", true},
-	{"eq 1.5 2.5", "false", true},
-	{"eq 1 1", "true", true},
-	{"eq 1 2", "false", true},
-	{"eq `xy` `xy`", "true", true},
-	{"eq `xy` `xyz`", "false", true},
-	{"eq .Uthree .Uthree", "true", true},
-	{"eq .Uthree .Ufour", "false", true},
-	{"eq 3 4 5 6 3", "true", true},
-	{"eq 3 4 5 6 7", "false", true},
-	{"ne true true", "false", true},
-	{"ne true false", "true", true},
-	{"ne 1+2i 1+2i", "false", true},
-	{"ne 1+2i 1+3i", "true", true},
-	{"ne 1.5 1.5", "false", true},
-	{"ne 1.5 2.5", "true", true},
-	{"ne 1 1", "false", true},
-	{"ne 1 2", "true", true},
-	{"ne `xy` `xy`", "false", true},
-	{"ne `xy` `xyz`", "true", true},
-	{"ne .Uthree .Uthree", "false", true},
-	{"ne .Uthree .Ufour", "true", true},
-	{"lt 1.5 1.5", "false", true},
-	{"lt 1.5 2.5", "true", true},
-	{"lt 1 1", "false", true},
-	{"lt 1 2", "true", true},
-	{"lt `xy` `xy`", "false", true},
-	{"lt `xy` `xyz`", "true", true},
-	{"lt .Uthree .Uthree", "false", true},
-	{"lt .Uthree .Ufour", "true", true},
-	{"le 1.5 1.5", "true", true},
-	{"le 1.5 2.5", "true", true},
-	{"le 2.5 1.5", "false", true},
-	{"le 1 1", "true", true},
-	{"le 1 2", "true", true},
-	{"le 2 1", "false", true},
-	{"le `xy` `xy`", "true", true},
-	{"le `xy` `xyz`", "true", true},
-	{"le `xyz` `xy`", "false", true},
-	{"le .Uthree .Uthree", "true", true},
-	{"le .Uthree .Ufour", "true", true},
-	{"le .Ufour .Uthree", "false", true},
-	{"gt 1.5 1.5", "false", true},
-	{"gt 1.5 2.5", "false", true},
-	{"gt 1 1", "false", true},
-	{"gt 2 1", "true", true},
-	{"gt 1 2", "false", true},
-	{"gt `xy` `xy`", "false", true},
-	{"gt `xy` `xyz`", "false", true},
-	{"gt .Uthree .Uthree", "false", true},
-	{"gt .Uthree .Ufour", "false", true},
-	{"gt .Ufour .Uthree", "true", true},
-	{"ge 1.5 1.5", "true", true},
-	{"ge 1.5 2.5", "false", true},
-	{"ge 2.5 1.5", "true", true},
-	{"ge 1 1", "true", true},
-	{"ge 1 2", "false", true},
-	{"ge 2 1", "true", true},
-	{"ge `xy` `xy`", "true", true},
-	{"ge `xy` `xyz`", "false", true},
-	{"ge `xyz` `xy`", "true", true},
-	{"ge .Uthree .Uthree", "true", true},
-	{"ge .Uthree .Ufour", "false", true},
-	{"ge .Ufour .Uthree", "true", true},
-	// Mixing signed and unsigned integers.
-	{"eq .Uthree .Three", "true", true},
-	{"eq .Three .Uthree", "true", true},
-	{"le .Uthree .Three", "true", true},
-	{"le .Three .Uthree", "true", true},
-	{"ge .Uthree .Three", "true", true},
-	{"ge .Three .Uthree", "true", true},
-	{"lt .Uthree .Three", "false", true},
-	{"lt .Three .Uthree", "false", true},
-	{"gt .Uthree .Three", "false", true},
-	{"gt .Three .Uthree", "false", true},
-	{"eq .Ufour .Three", "false", true},
-	{"lt .Ufour .Three", "false", true},
-	{"gt .Ufour .Three", "true", true},
-	{"eq .NegOne .Uthree", "false", true},
-	{"eq .Uthree .NegOne", "false", true},
-	{"ne .NegOne .Uthree", "true", true},
-	{"ne .Uthree .NegOne", "true", true},
-	{"lt .NegOne .Uthree", "true", true},
-	{"lt .Uthree .NegOne", "false", true},
-	{"le .NegOne .Uthree", "true", true},
-	{"le .Uthree .NegOne", "false", true},
-	{"gt .NegOne .Uthree", "false", true},
-	{"gt .Uthree .NegOne", "true", true},
-	{"ge .NegOne .Uthree", "false", true},
-	{"ge .Uthree .NegOne", "true", true},
-	{"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule.
-	{"eq (index `x` 0) 'y'", "false", true},
-	{"eq .V1 .V2", "true", true},
-	{"eq .Ptr .Ptr", "true", true},
-	{"eq .Ptr .NilPtr", "false", true},
-	{"eq .NilPtr .NilPtr", "true", true},
-	{"eq .Iface1 .Iface1", "true", true},
-	{"eq .Iface1 .Iface2", "false", true},
-	{"eq .Iface2 .Iface2", "true", true},
-	// Errors
-	{"eq `xy` 1", "", false},       // Different types.
-	{"eq 2 2.0", "", false},        // Different types.
-	{"lt true true", "", false},    // Unordered types.
-	{"lt 1+0i 1+0i", "", false},    // Unordered types.
-	{"eq .Ptr 1", "", false},       // Incompatible types.
-	{"eq .Ptr .NegOne", "", false}, // Incompatible types.
-	{"eq .Map .Map", "", false},    // Uncomparable types.
-	{"eq .Map .V1", "", false},     // Uncomparable types.
-}
-
-func TestComparison(t *testing.T) {
-	b := new(bytes.Buffer)
-	var cmpStruct = struct {
-		Uthree, Ufour  uint
-		NegOne, Three  int
-		Ptr, NilPtr    *int
-		Map            map[int]int
-		V1, V2         V
-		Iface1, Iface2 fmt.Stringer
-	}{
-		Uthree: 3,
-		Ufour:  4,
-		NegOne: -1,
-		Three:  3,
-		Ptr:    new(int),
-		Iface1: b,
-	}
-	for _, test := range cmpTests {
-		text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
-		tmpl, err := New("empty").Parse(text)
-		if err != nil {
-			t.Fatalf("%q: %s", test.expr, err)
-		}
-		b.Reset()
-		err = tmpl.Execute(b, &cmpStruct)
-		if test.ok && err != nil {
-			t.Errorf("%s errored incorrectly: %s", test.expr, err)
-			continue
-		}
-		if !test.ok && err == nil {
-			t.Errorf("%s did not error", test.expr)
-			continue
-		}
-		if b.String() != test.truth {
-			t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
-		}
-	}
-}
-
-func TestMissingMapKey(t *testing.T) {
-	data := map[string]int{
-		"x": 99,
-	}
-	tmpl, err := New("t1").Parse("{{.x}} {{.y}}")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var b bytes.Buffer
-	// By default, just get "<no value>" // NOTE: not in html/template, get empty string
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal(err)
-	}
-	want := "99 "
-	got := b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Same if we set the option explicitly to the default.
-	tmpl.Option("missingkey=default")
-	b.Reset()
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal("default:", err)
-	}
-	got = b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Next we ask for a zero value
-	tmpl.Option("missingkey=zero")
-	b.Reset()
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal("zero:", err)
-	}
-	want = "99 0"
-	got = b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Now we ask for an error.
-	tmpl.Option("missingkey=error")
-	err = tmpl.Execute(&b, data)
-	if err == nil {
-		t.Errorf("expected error; got none")
-	}
-	// same Option, but now a nil interface: ask for an error
-	err = tmpl.Execute(&b, nil)
-	t.Log(err)
-	if err == nil {
-		t.Errorf("expected error for nil-interface; got none")
-	}
-}
-
-// Test that the error message for multiline unterminated string
-// refers to the line number of the opening quote.
-func TestUnterminatedStringError(t *testing.T) {
-	_, err := New("X").Parse("hello\n\n{{`unterminated\n\n\n\n}}\n some more\n\n")
-	if err == nil {
-		t.Fatal("expected error")
-	}
-	str := err.Error()
-	if !strings.Contains(str, "X:3: unterminated raw quoted string") {
-		t.Fatalf("unexpected error: %s", str)
-	}
-}
-
-const alwaysErrorText = "always be failing"
-
-var alwaysError = errors.New(alwaysErrorText)
-
-type ErrorWriter int
-
-func (e ErrorWriter) Write(p []byte) (int, error) {
-	return 0, alwaysError
-}
-
-func TestExecuteGivesExecError(t *testing.T) {
-	// First, a non-execution error shouldn't be an ExecError.
-	tmpl, err := New("X").Parse("hello")
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = tmpl.Execute(ErrorWriter(0), 0)
-	if err == nil {
-		t.Fatal("expected error; got none")
-	}
-	if err.Error() != alwaysErrorText {
-		t.Errorf("expected %q error; got %q", alwaysErrorText, err)
-	}
-	// This one should be an ExecError.
-	tmpl, err = New("X").Parse("hello, {{.X.Y}}")
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = tmpl.Execute(ioutil.Discard, 0)
-	if err == nil {
-		t.Fatal("expected error; got none")
-	}
-	eerr, ok := err.(template.ExecError)
-	if !ok {
-		t.Fatalf("did not expect ExecError %s", eerr)
-	}
-	expect := "field X in type int"
-	if !strings.Contains(err.Error(), expect) {
-		t.Errorf("expected %q; got %q", expect, err)
-	}
-}
-
-func funcNameTestFunc() int {
-	return 0
-}
-
-func TestGoodFuncNames(t *testing.T) {
-	names := []string{
-		"_",
-		"a",
-		"a1",
-		"a1",
-		"Ӵ",
-	}
-	for _, name := range names {
-		tmpl := New("X").Funcs(
-			FuncMap{
-				name: funcNameTestFunc,
-			},
-		)
-		if tmpl == nil {
-			t.Fatalf("nil result for %q", name)
-		}
-	}
-}
-
-func TestBadFuncNames(t *testing.T) {
-	names := []string{
-		"",
-		"2",
-		"a-b",
-	}
-	for _, name := range names {
-		testBadFuncName(name, t)
-	}
-}
-
-func testBadFuncName(name string, t *testing.T) {
-	t.Helper()
-	defer func() {
-		recover()
-	}()
-	New("X").Funcs(
-		FuncMap{
-			name: funcNameTestFunc,
-		},
-	)
-	// If we get here, the name did not cause a panic, which is how Funcs
-	// reports an error.
-	t.Errorf("%q succeeded incorrectly as function name", name)
-}
-
-func TestBlock(t *testing.T) {
-	const (
-		input   = `a({{block "inner" .}}bar({{.}})baz{{end}})b`
-		want    = `a(bar(hello)baz)b`
-		overlay = `{{define "inner"}}foo({{.}})bar{{end}}`
-		want2   = `a(foo(goodbye)bar)b`
-	)
-	tmpl, err := New("outer").Parse(input)
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmpl2, err := Must(tmpl.Clone()).Parse(overlay)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var buf bytes.Buffer
-	if err := tmpl.Execute(&buf, "hello"); err != nil {
-		t.Fatal(err)
-	}
-	if got := buf.String(); got != want {
-		t.Errorf("got %q, want %q", got, want)
-	}
-
-	buf.Reset()
-	if err := tmpl2.Execute(&buf, "goodbye"); err != nil {
-		t.Fatal(err)
-	}
-	if got := buf.String(); got != want2 {
-		t.Errorf("got %q, want %q", got, want2)
-	}
-}
-
-func TestEvalFieldErrors(t *testing.T) {
-	tests := []struct {
-		name, src string
-		value     interface{}
-		want      string
-	}{
-		{
-			// Check that calling an invalid field on nil pointer
-			// prints a field error instead of a distracting nil
-			// pointer error. https://golang.org/issue/15125
-			"MissingFieldOnNil",
-			"{{.MissingField}}",
-			(*T)(nil),
-			"can't evaluate field MissingField in type *template.T",
-		},
-		{
-			"MissingFieldOnNonNil",
-			"{{.MissingField}}",
-			&T{},
-			"can't evaluate field MissingField in type *template.T",
-		},
-		{
-			"ExistingFieldOnNil",
-			"{{.X}}",
-			(*T)(nil),
-			"nil pointer evaluating *template.T.X",
-		},
-		{
-			"MissingKeyOnNilMap",
-			"{{.MissingKey}}",
-			(*map[string]string)(nil),
-			"nil pointer evaluating *map[string]string.MissingKey",
-		},
-		{
-			"MissingKeyOnNilMapPtr",
-			"{{.MissingKey}}",
-			(*map[string]string)(nil),
-			"nil pointer evaluating *map[string]string.MissingKey",
-		},
-		{
-			"MissingKeyOnMapPtrToNil",
-			"{{.MissingKey}}",
-			&map[string]string{},
-			"<nil>",
-		},
-	}
-	for _, tc := range tests {
-		t.Run(tc.name, func(t *testing.T) {
-			tmpl := Must(New("tmpl").Parse(tc.src))
-			err := tmpl.Execute(ioutil.Discard, tc.value)
-			got := "<nil>"
-			if err != nil {
-				got = err.Error()
-			}
-			if !strings.HasSuffix(got, tc.want) {
-				t.Fatalf("got error %q, want %q", got, tc.want)
-			}
-		})
-	}
-}
-
-func TestMaxExecDepth(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping in -short mode")
-	}
-	tmpl := Must(New("tmpl").Parse(`{{template "tmpl" .}}`))
-	err := tmpl.Execute(ioutil.Discard, nil)
-	got := "<nil>"
-	if err != nil {
-		got = err.Error()
-	}
-	const want = "exceeded maximum template depth"
-	if !strings.Contains(got, want) {
-		t.Errorf("got error %q; want %q", got, want)
-	}
-}
-
-func TestAddrOfIndex(t *testing.T) {
-	// golang.org/issue/14916.
-	// Before index worked on reflect.Values, the .String could not be
-	// found on the (incorrectly unaddressable) V value,
-	// in contrast to range, which worked fine.
-	// Also testing that passing a reflect.Value to tmpl.Execute works.
-	texts := []string{
-		`{{range .}}{{.String}}{{end}}`,
-		`{{with index . 0}}{{.String}}{{end}}`,
-	}
-	for _, text := range texts {
-		tmpl := Must(New("tmpl").Parse(text))
-		var buf bytes.Buffer
-		err := tmpl.Execute(&buf, reflect.ValueOf([]V{{1}}))
-		if err != nil {
-			t.Fatalf("%s: Execute: %v", text, err)
-		}
-		if buf.String() != "&lt;1&gt;" {
-			t.Fatalf("%s: template output = %q, want %q", text, &buf, "&lt;1&gt;")
-		}
-	}
-}
-
-func TestInterfaceValues(t *testing.T) {
-	// golang.org/issue/17714.
-	// Before index worked on reflect.Values, interface values
-	// were always implicitly promoted to the underlying value,
-	// except that nil interfaces were promoted to the zero reflect.Value.
-	// Eliminating a round trip to interface{} and back to reflect.Value
-	// eliminated this promotion, breaking these cases.
-	tests := []struct {
-		text string
-		out  string
-	}{
-		{`{{index .Nil 1}}`, "ERROR: index of untyped nil"},
-		{`{{index .Slice 2}}`, "2"},
-		{`{{index .Slice .Two}}`, "2"},
-		{`{{call .Nil 1}}`, "ERROR: call of nil"},
-		{`{{call .PlusOne 1}}`, "2"},
-		{`{{call .PlusOne .One}}`, "2"},
-		{`{{and (index .Slice 0) true}}`, "0"},
-		{`{{and .Zero true}}`, "0"},
-		{`{{and (index .Slice 1) false}}`, "false"},
-		{`{{and .One false}}`, "false"},
-		{`{{or (index .Slice 0) false}}`, "false"},
-		{`{{or .Zero false}}`, "false"},
-		{`{{or (index .Slice 1) true}}`, "1"},
-		{`{{or .One true}}`, "1"},
-		{`{{not (index .Slice 0)}}`, "true"},
-		{`{{not .Zero}}`, "true"},
-		{`{{not (index .Slice 1)}}`, "false"},
-		{`{{not .One}}`, "false"},
-		{`{{eq (index .Slice 0) .Zero}}`, "true"},
-		{`{{eq (index .Slice 1) .One}}`, "true"},
-		{`{{ne (index .Slice 0) .Zero}}`, "false"},
-		{`{{ne (index .Slice 1) .One}}`, "false"},
-		{`{{ge (index .Slice 0) .One}}`, "false"},
-		{`{{ge (index .Slice 1) .Zero}}`, "true"},
-		{`{{gt (index .Slice 0) .One}}`, "false"},
-		{`{{gt (index .Slice 1) .Zero}}`, "true"},
-		{`{{le (index .Slice 0) .One}}`, "true"},
-		{`{{le (index .Slice 1) .Zero}}`, "false"},
-		{`{{lt (index .Slice 0) .One}}`, "true"},
-		{`{{lt (index .Slice 1) .Zero}}`, "false"},
-	}
-
-	for _, tt := range tests {
-		tmpl := Must(New("tmpl").Parse(tt.text))
-		var buf bytes.Buffer
-		err := tmpl.Execute(&buf, map[string]interface{}{
-			"PlusOne": func(n int) int {
-				return n + 1
-			},
-			"Slice": []int{0, 1, 2, 3},
-			"One":   1,
-			"Two":   2,
-			"Nil":   nil,
-			"Zero":  0,
-		})
-		if strings.HasPrefix(tt.out, "ERROR:") {
-			e := strings.TrimSpace(strings.TrimPrefix(tt.out, "ERROR:"))
-			if err == nil || !strings.Contains(err.Error(), e) {
-				t.Errorf("%s: Execute: %v, want error %q", tt.text, err, e)
-			}
-			continue
-		}
-		if err != nil {
-			t.Errorf("%s: Execute: %v", tt.text, err)
-			continue
-		}
-		if buf.String() != tt.out {
-			t.Errorf("%s: template output = %q, want %q", tt.text, &buf, tt.out)
-		}
-	}
-}
-
-// Check that panics during calls are recovered and returned as errors.
-func TestExecutePanicDuringCall(t *testing.T) {
-	funcs := map[string]interface{}{
-		"doPanic": func() string {
-			panic("custom panic string")
-		},
-	}
-	tests := []struct {
-		name    string
-		input   string
-		data    interface{}
-		wantErr string
-	}{
-		{
-			"direct func call panics",
-			"{{doPanic}}", (*T)(nil),
-			`template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
-		},
-		{
-			"indirect func call panics",
-			"{{call doPanic}}", (*T)(nil),
-			`template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
-		},
-		{
-			"direct method call panics",
-			"{{.GetU}}", (*T)(nil),
-			`template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
-		},
-		{
-			"indirect method call panics",
-			"{{call .GetU}}", (*T)(nil),
-			`template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
-		},
-		{
-			"func field call panics",
-			"{{call .PanicFunc}}", tVal,
-			`template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
-		},
-		{
-			"method call on nil interface",
-			"{{.NonEmptyInterfaceNil.Method0}}", tVal,
-			`template: t:1:23: executing "t" at <.NonEmptyInterfaceNil.Method0>: nil pointer evaluating template.I.Method0`,
-		},
-	}
-	for _, tc := range tests {
-		b := new(bytes.Buffer)
-		tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
-		if err != nil {
-			t.Fatalf("parse error: %s", err)
-		}
-		err = tmpl.Execute(b, tc.data)
-		if err == nil {
-			t.Errorf("%s: expected error; got none", tc.name)
-		} else if !strings.Contains(err.Error(), tc.wantErr) {
-			if *debug {
-				fmt.Printf("%s: test execute error: %s\n", tc.name, err)
-			}
-			t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
-		}
-	}
-}
-
-// Issue 31810. Check that a parenthesized first argument behaves properly.
-func TestIssue31810(t *testing.T) {
-	t.Skip("broken in html/template")
-
-	// A simple value with no arguments is fine.
-	var b bytes.Buffer
-	const text = "{{ (.)  }}"
-	tmpl, err := New("").Parse(text)
-	if err != nil {
-		t.Error(err)
-	}
-	err = tmpl.Execute(&b, "result")
-	if err != nil {
-		t.Error(err)
-	}
-	if b.String() != "result" {
-		t.Errorf("%s got %q, expected %q", text, b.String(), "result")
-	}
-
-	// Even a plain function fails - need to use call.
-	f := func() string { return "result" }
-	b.Reset()
-	err = tmpl.Execute(&b, f)
-	if err == nil {
-		t.Error("expected error with no call, got none")
-	}
-
-	// Works if the function is explicitly called.
-	const textCall = "{{ (call .)  }}"
-	tmpl, err = New("").Parse(textCall)
-	b.Reset()
-	err = tmpl.Execute(&b, f)
-	if err != nil {
-		t.Error(err)
-	}
-	if b.String() != "result" {
-		t.Errorf("%s got %q, expected %q", textCall, b.String(), "result")
-	}
-}
-
-// Issue 39807. There was a race applying escapeTemplate.
-
-const raceText = `
-{{- define "jstempl" -}}
-var v = "v";
-{{- end -}}
-<script type="application/javascript">
-{{ template "jstempl" $ }}
-</script>
-`
-
-func TestEscapeRace(t *testing.T) {
-	tmpl := New("")
-	_, err := tmpl.New("templ.html").Parse(raceText)
-	if err != nil {
-		t.Fatal(err)
-	}
-	const count = 20
-	for i := 0; i < count; i++ {
-		_, err := tmpl.New(fmt.Sprintf("x%d.html", i)).Parse(`{{ template "templ.html" .}}`)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	var wg sync.WaitGroup
-	for i := 0; i < 10; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for j := 0; j < count; j++ {
-				sub := tmpl.Lookup(fmt.Sprintf("x%d.html", j))
-				if err := sub.Execute(ioutil.Discard, nil); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-	}
-	wg.Wait()
-}
-
-func TestRecursiveExecute(t *testing.T) {
-	tmpl := New("")
-
-	recur := func() (HTML, error) {
-		var sb strings.Builder
-		if err := tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
-			t.Fatal(err)
-		}
-		return HTML(sb.String()), nil
-	}
-
-	m := FuncMap{
-		"recur": recur,
-	}
-
-	top, err := tmpl.New("x.html").Funcs(m).Parse(`{{recur}}`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := top.Execute(ioutil.Discard, nil); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// recursiveInvoker is for TestRecursiveExecuteViaMethod.
-type recursiveInvoker struct {
-	t    *testing.T
-	tmpl *Template
-}
-
-func (r *recursiveInvoker) Recur() (string, error) {
-	var sb strings.Builder
-	if err := r.tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
-		r.t.Fatal(err)
-	}
-	return sb.String(), nil
-}
-
-func TestRecursiveExecuteViaMethod(t *testing.T) {
-	tmpl := New("")
-	top, err := tmpl.New("x.html").Parse(`{{.Recur}}`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	r := &recursiveInvoker{
-		t:    t,
-		tmpl: tmpl,
-	}
-	if err := top.Execute(ioutil.Discard, r); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// Issue 43295.
-func TestTemplateFuncsAfterClone(t *testing.T) {
-	s := `{{ f . }}`
-	want := "test"
-	orig := New("orig").Funcs(map[string]interface{}{
-		"f": func(in string) string {
-			return in
-		},
-	}).New("child")
-
-	overviewTmpl := Must(Must(orig.Clone()).Parse(s))
-	var out strings.Builder
-	if err := overviewTmpl.Execute(&out, want); err != nil {
-		t.Fatal(err)
-	}
-	if got := out.String(); got != want {
-		t.Fatalf("got %q; want %q", got, want)
-	}
-}
diff --git a/internal/backport/html/template/html.go b/internal/backport/html/template/html.go
deleted file mode 100644
index 46e9b44..0000000
--- a/internal/backport/html/template/html.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"unicode/utf8"
-)
-
-// htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
-func htmlNospaceEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeHTML {
-		return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
-	}
-	return htmlReplacer(s, htmlNospaceReplacementTable, false)
-}
-
-// attrEscaper escapes for inclusion in quoted attribute values.
-func attrEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeHTML {
-		return htmlReplacer(stripTags(s), htmlNormReplacementTable, true)
-	}
-	return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// rcdataEscaper escapes for inclusion in an RCDATA element body.
-func rcdataEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeHTML {
-		return htmlReplacer(s, htmlNormReplacementTable, true)
-	}
-	return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// htmlEscaper escapes for inclusion in HTML text.
-func htmlEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeHTML {
-		return s
-	}
-	return htmlReplacer(s, htmlReplacementTable, true)
-}
-
-// htmlReplacementTable contains the runes that need to be escaped
-// inside a quoted attribute value or in a text node.
-var htmlReplacementTable = []string{
-	// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
-	// U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
-	// CHARACTER character to the current attribute's value.
-	// "
-	// and similarly
-	// https://www.w3.org/TR/html5/syntax.html#before-attribute-value-state
-	0:    "\uFFFD",
-	'"':  "&#34;",
-	'&':  "&amp;",
-	'\'': "&#39;",
-	'+':  "&#43;",
-	'<':  "&lt;",
-	'>':  "&gt;",
-}
-
-// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
-// avoid over-encoding existing entities.
-var htmlNormReplacementTable = []string{
-	0:    "\uFFFD",
-	'"':  "&#34;",
-	'\'': "&#39;",
-	'+':  "&#43;",
-	'<':  "&lt;",
-	'>':  "&gt;",
-}
-
-// htmlNospaceReplacementTable contains the runes that need to be escaped
-// inside an unquoted attribute value.
-// The set of runes escaped is the union of the HTML specials and
-// those determined by running the JS below in browsers:
-// <div id=d></div>
-// <script>(function () {
-// var a = [], d = document.getElementById("d"), i, c, s;
-// for (i = 0; i < 0x10000; ++i) {
-//
-//	c = String.fromCharCode(i);
-//	d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
-//	s = d.getElementsByTagName("SPAN")[0];
-//	if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
-//
-// }
-// document.write(a.join(", "));
-// })()</script>
-var htmlNospaceReplacementTable = []string{
-	0:    "&#xfffd;",
-	'\t': "&#9;",
-	'\n': "&#10;",
-	'\v': "&#11;",
-	'\f': "&#12;",
-	'\r': "&#13;",
-	' ':  "&#32;",
-	'"':  "&#34;",
-	'&':  "&amp;",
-	'\'': "&#39;",
-	'+':  "&#43;",
-	'<':  "&lt;",
-	'=':  "&#61;",
-	'>':  "&gt;",
-	// A parse error in the attribute value (unquoted) and
-	// before attribute value states.
-	// Treated as a quoting character by IE.
-	'`': "&#96;",
-}
-
-// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
-// without '&' to avoid over-encoding existing entities.
-var htmlNospaceNormReplacementTable = []string{
-	0:    "&#xfffd;",
-	'\t': "&#9;",
-	'\n': "&#10;",
-	'\v': "&#11;",
-	'\f': "&#12;",
-	'\r': "&#13;",
-	' ':  "&#32;",
-	'"':  "&#34;",
-	'\'': "&#39;",
-	'+':  "&#43;",
-	'<':  "&lt;",
-	'=':  "&#61;",
-	'>':  "&gt;",
-	// A parse error in the attribute value (unquoted) and
-	// before attribute value states.
-	// Treated as a quoting character by IE.
-	'`': "&#96;",
-}
-
-// htmlReplacer returns s with runes replaced according to replacementTable
-// and, when badRunes is true, certain bad runes are allowed through unescaped.
-func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
-	written, b := 0, new(strings.Builder)
-	r, w := rune(0), 0
-	for i := 0; i < len(s); i += w {
-		// Cannot use 'for range s' because we need to preserve the width
-		// of the runes in the input. If we see a decoding error, the input
-	// width will not be utf8.RuneLen(r) and we will overrun the buffer.
-		r, w = utf8.DecodeRuneInString(s[i:])
-		if int(r) < len(replacementTable) {
-			if repl := replacementTable[r]; len(repl) != 0 {
-				if written == 0 {
-					b.Grow(len(s))
-				}
-				b.WriteString(s[written:i])
-				b.WriteString(repl)
-				written = i + w
-			}
-		} else if badRunes {
-			// No-op.
-			// IE does not allow these ranges in unquoted attrs.
-		} else if 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff {
-			if written == 0 {
-				b.Grow(len(s))
-			}
-			fmt.Fprintf(b, "%s&#x%x;", s[written:i], r)
-			written = i + w
-		}
-	}
-	if written == 0 {
-		return s
-	}
-	b.WriteString(s[written:])
-	return b.String()
-}
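-
-// For illustration, a minimal sketch of htmlReplacer with the table above
-// (hypothetical input; escaping per htmlReplacementTable):
-//
-//	htmlReplacer(`a < b & "c"`, htmlReplacementTable, true)
-//	// returns `a &lt; b &amp; &#34;c&#34;`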
-
-// stripTags takes a snippet of HTML and returns only the text content.
-// For example, `<b>&iexcl;Hi!</b> <script>...</script>` -> `&iexcl;Hi! `.
-func stripTags(html string) string {
-	var b bytes.Buffer
-	s, c, i, allText := []byte(html), context{}, 0, true
-	// Using the transition funcs helps us avoid mangling
-	// `<div title="1>2">` or `I <3 Ponies!`.
-	for i != len(s) {
-		if c.delim == delimNone {
-			st := c.state
-			// Use RCDATA instead of parsing into JS or CSS styles.
-			if c.element != elementNone && !isInTag(st) {
-				st = stateRCDATA
-			}
-			d, nread := transitionFunc[st](c, s[i:])
-			i1 := i + nread
-			if c.state == stateText || c.state == stateRCDATA {
-				// Emit text up to the start of the tag or comment.
-				j := i1
-				if d.state != c.state {
-					for j1 := j - 1; j1 >= i; j1-- {
-						if s[j1] == '<' {
-							j = j1
-							break
-						}
-					}
-				}
-				b.Write(s[i:j])
-			} else {
-				allText = false
-			}
-			c, i = d, i1
-			continue
-		}
-		i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
-		if i1 < i {
-			break
-		}
-		if c.delim != delimSpaceOrTagEnd {
-			// Consume any quote.
-			i1++
-		}
-		c, i = context{state: stateTag, element: c.element}, i1
-	}
-	if allText {
-		return html
-	} else if c.state == stateText || c.state == stateRCDATA {
-		b.Write(s[i:])
-	}
-	return b.String()
-}
-
-// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
-// a known-safe HTML attribute.
-func htmlNameFilter(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeHTMLAttr {
-		return s
-	}
-	if len(s) == 0 {
-		// Avoid violation of structure preservation.
-		// <input checked {{.K}}={{.V}}>.
-		// Without this, if .K is empty then .V is the value of
-		// checked, but otherwise .V is the value of the attribute
-		// named .K.
-		return filterFailsafe
-	}
-	s = strings.ToLower(s)
-	if t := attrType(s); t != contentTypePlain {
-		// TODO: Split attr and element name part filters so we can recognize known attributes.
-		return filterFailsafe
-	}
-	for _, r := range s {
-		switch {
-		case '0' <= r && r <= '9':
-		case 'a' <= r && r <= 'z':
-		default:
-			return filterFailsafe
-		}
-	}
-	return s
-}
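-
-// For illustration, a sketch of htmlNameFilter on hypothetical attribute names:
-//
-//	htmlNameFilter("Title")   // "title": lowercased, plain attribute, safe characters only
-//	htmlNameFilter("onclick") // filterFailsafe: event-handler attributes are not plain content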
-
-// commentEscaper returns the empty string regardless of input.
-// Comment content does not correspond to any parsed structure or
-// human-readable content, so the simplest and most secure policy is to drop
-// content interpolated into comments.
-// This approach is equally valid whether or not static comment content is
-// removed from the template.
-func commentEscaper(args ...interface{}) string {
-	return ""
-}
diff --git a/internal/backport/html/template/html_test.go b/internal/backport/html/template/html_test.go
deleted file mode 100644
index f04ee04..0000000
--- a/internal/backport/html/template/html_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"html"
-	"strings"
-	"testing"
-)
-
-func TestHTMLNospaceEscaper(t *testing.T) {
-	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		` !"#$%&'()*+,-./` +
-		`0123456789:;<=>?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\]^_` +
-		"`abcdefghijklmno" +
-		"pqrstuvwxyz{|}~\x7f" +
-		"\u00A0\u0100\u2028\u2029\ufeff\ufdec\U0001D11E" +
-		"erroneous\x960") // keep at the end
-
-	want := ("&#xfffd;\x01\x02\x03\x04\x05\x06\x07" +
-		"\x08&#9;&#10;&#11;&#12;&#13;\x0E\x0F" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17" +
-		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		`&#32;!&#34;#$%&amp;&#39;()*&#43;,-./` +
-		`0123456789:;&lt;&#61;&gt;?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\]^_` +
-		`&#96;abcdefghijklmno` +
-		`pqrstuvwxyz{|}~` + "\u007f" +
-		"\u00A0\u0100\u2028\u2029\ufeff&#xfdec;\U0001D11E" +
-		"erroneous&#xfffd;0") // keep at the end
-
-	got := htmlNospaceEscaper(input)
-	if got != want {
-		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
-	}
-
-	r := strings.NewReplacer("\x00", "\ufffd", "\x96", "\ufffd")
-	got, want = html.UnescapeString(got), r.Replace(input)
-	if want != got {
-		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", want, got)
-	}
-}
-
-func TestStripTags(t *testing.T) {
-	tests := []struct {
-		input, want string
-	}{
-		{"", ""},
-		{"Hello, World!", "Hello, World!"},
-		{"foo&amp;bar", "foo&amp;bar"},
-		{`Hello <a href="www.example.com/">World</a>!`, "Hello World!"},
-		{"Foo <textarea>Bar</textarea> Baz", "Foo Bar Baz"},
-		{"Foo <!-- Bar --> Baz", "Foo  Baz"},
-		{"<", "<"},
-		{"foo < bar", "foo < bar"},
-		{`Foo<script type="text/javascript">alert(1337)</script>Bar`, "FooBar"},
-		{`Foo<div title="1>2">Bar`, "FooBar"},
-		{`I <3 Ponies!`, `I <3 Ponies!`},
-		{`<script>foo()</script>`, ``},
-	}
-
-	for _, test := range tests {
-		if got := stripTags(test.input); got != test.want {
-			t.Errorf("%q: want %q, got %q", test.input, test.want, got)
-		}
-	}
-}
-
-func BenchmarkHTMLNospaceEscaper(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		htmlNospaceEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
-
-func BenchmarkHTMLNospaceEscaperNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		htmlNospaceEscaper("The_quick,_brown_fox_jumps_over_the_lazy_dog.")
-	}
-}
-
-func BenchmarkStripTags(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		stripTags("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
-
-func BenchmarkStripTagsNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		stripTags("The quick, brown fox jumps over the lazy dog.")
-	}
-}
diff --git a/internal/backport/html/template/js.go b/internal/backport/html/template/js.go
deleted file mode 100644
index ea9c183..0000000
--- a/internal/backport/html/template/js.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strings"
-	"unicode/utf8"
-)
-
-// nextJSCtx returns the context that determines whether a slash after the
-// given run of tokens starts a regular expression instead of a division
-// operator: / or /=.
-//
-// This assumes that the token run does not include any string tokens, comment
-// tokens, regular expression literal tokens, or division operators.
-//
-// This fails on some valid but nonsensical JavaScript programs like
-// "x = ++/foo/i" which is quite different than "x++/foo/i", but is not known to
-// fail on any known useful programs. It is based on the draft
-// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
-// https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
-func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
-	s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
-	if len(s) == 0 {
-		return preceding
-	}
-
-	// All cases below are in the single-byte UTF-8 group.
-	switch c, n := s[len(s)-1], len(s); c {
-	case '+', '-':
-		// ++ and -- are not regexp preceders, but + and - are whether
-		// they are used as infix or prefix operators.
-		start := n - 1
-		// Count the number of adjacent dashes or pluses.
-		for start > 0 && s[start-1] == c {
-			start--
-		}
-		if (n-start)&1 == 1 {
-			// Reached for trailing minus signs since "---" is the
-			// same as "-- -".
-			return jsCtxRegexp
-		}
-		return jsCtxDivOp
-	case '.':
-		// Handle "42."
-		if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
-			return jsCtxDivOp
-		}
-		return jsCtxRegexp
-	// Suffixes for all punctuators from section 7.7 of the language spec
-	// that only end binary operators not handled above.
-	case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':
-		return jsCtxRegexp
-	// Suffixes for all punctuators from section 7.7 of the language spec
-	// that are prefix operators not handled above.
-	case '!', '~':
-		return jsCtxRegexp
-	// Matches all the punctuators from section 7.7 of the language spec
-	// that are open brackets not handled above.
-	case '(', '[':
-		return jsCtxRegexp
-	// Matches all the punctuators from section 7.7 of the language spec
-	// that precede expression starts.
-	case ':', ';', '{':
-		return jsCtxRegexp
-	// CAVEAT: the close punctuators ('}', ']', ')') precede div ops and
-	// are handled in the default except for '}' which can precede a
-	// division op as in
-	//    ({ valueOf: function () { return 42 } } / 2)
-	// which is valid, but, in practice, developers don't divide object
-	// literals, so our heuristic works well for code like
-	//    function () { ... }  /foo/.test(x) && sideEffect();
-	// The ')' punctuator can precede a regular expression as in
-	//     if (b) /foo/.test(x) && ...
-	// but this is much less likely than
-	//     (a + b) / c
-	case '}':
-		return jsCtxRegexp
-	default:
-		// Look for an IdentifierName and see if it is a keyword that
-		// can precede a regular expression.
-		j := n
-		for j > 0 && isJSIdentPart(rune(s[j-1])) {
-			j--
-		}
-		if regexpPrecederKeywords[string(s[j:])] {
-			return jsCtxRegexp
-		}
-	}
-	// Otherwise is a punctuator not listed above, or
-	// a string which precedes a div op, or an identifier
-	// which precedes a div op.
-	return jsCtxDivOp
-}
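-
-// For illustration, a sketch of the heuristic above on hypothetical token runs:
-//
-//	nextJSCtx([]byte("return"), jsCtxDivOp) // jsCtxRegexp: a following slash starts a regexp
-//	nextJSCtx([]byte("x"), jsCtxRegexp)     // jsCtxDivOp: a following slash is division
-//	nextJSCtx([]byte("   "), jsCtxDivOp)    // jsCtxDivOp: blanks keep the preceding context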
-
-// regexpPrecederKeywords is a set of reserved JS keywords that can precede a
-// regular expression in JS source.
-var regexpPrecederKeywords = map[string]bool{
-	"break":      true,
-	"case":       true,
-	"continue":   true,
-	"delete":     true,
-	"do":         true,
-	"else":       true,
-	"finally":    true,
-	"in":         true,
-	"instanceof": true,
-	"return":     true,
-	"throw":      true,
-	"try":        true,
-	"typeof":     true,
-	"void":       true,
-}
-
-var jsonMarshalType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
-
-// indirectToJSONMarshaler returns the value, after dereferencing as many times
-// as necessary to reach the base type (or nil) or an implementation of json.Marshaler.
-func indirectToJSONMarshaler(a interface{}) interface{} {
-	// text/template now supports passing untyped nil as a func call
-	// argument, so we must support it. Otherwise we'd panic below, as one
-	// cannot call the Type or Interface methods on an invalid
-	// reflect.Value. See golang.org/issue/18716.
-	if a == nil {
-		return nil
-	}
-
-	v := reflect.ValueOf(a)
-	for !v.Type().Implements(jsonMarshalType) && v.Kind() == reflect.Ptr && !v.IsNil() {
-		v = v.Elem()
-	}
-	return v.Interface()
-}
-
-// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
-// neither side effects nor free variables other than NaN and Infinity.
-func jsValEscaper(args ...interface{}) string {
-	var a interface{}
-	if len(args) == 1 {
-		a = indirectToJSONMarshaler(args[0])
-		switch t := a.(type) {
-		case JS:
-			return string(t)
-		case JSStr:
-			// TODO: normalize quotes.
-			return `"` + string(t) + `"`
-		case json.Marshaler:
-			// Do not treat as a Stringer.
-		case fmt.Stringer:
-			a = t.String()
-		}
-	} else {
-		for i, arg := range args {
-			args[i] = indirectToJSONMarshaler(arg)
-		}
-		a = fmt.Sprint(args...)
-	}
-	// TODO: detect cycles before calling Marshal which loops infinitely on
-	// cyclic data. This may be an unacceptable DoS risk.
-	b, err := json.Marshal(a)
-	if err != nil {
-		// Put a space before comment so that if it is flush against
-		// a division operator it is not turned into a line comment:
-		//     x/{{y}}
-		// turning into
-		//     x//* error marshaling y:
-		//          second line of error message */null
-		return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
-	}
-
-	// TODO: maybe post-process output to prevent it from containing
-	// "<!--", "-->", "<![CDATA[", "]]>", or "</script"
-	// in case custom marshalers produce output containing those.
-	// Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
-	// supports ld+json content-type.
-	if len(b) == 0 {
-		// In `x=y/{{.}}*z`, a json.Marshaler that produces "" should
-		// not cause the output `x=y/*z`.
-		return " null "
-	}
-	first, _ := utf8.DecodeRune(b)
-	last, _ := utf8.DecodeLastRune(b)
-	var buf strings.Builder
-	// Prevent IdentifierNames and NumericLiterals from running into
-	// keywords: in, instanceof, typeof, void
-	pad := isJSIdentPart(first) || isJSIdentPart(last)
-	if pad {
-		buf.WriteByte(' ')
-	}
-	written := 0
-	// Make sure that json.Marshal escapes codepoints U+2028 & U+2029
-	// so it falls within the subset of JSON which is valid JS.
-	for i := 0; i < len(b); {
-		rune, n := utf8.DecodeRune(b[i:])
-		repl := ""
-		if rune == 0x2028 {
-			repl = `\u2028`
-		} else if rune == 0x2029 {
-			repl = `\u2029`
-		}
-		if repl != "" {
-			buf.Write(b[written:i])
-			buf.WriteString(repl)
-			written = i + n
-		}
-		i += n
-	}
-	if buf.Len() != 0 {
-		buf.Write(b[written:])
-		if pad {
-			buf.WriteByte(' ')
-		}
-		return buf.String()
-	}
-	return string(b)
-}
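-
-// For illustration, a sketch of jsValEscaper on hypothetical values
-// (the same cases appear in js_test.go):
-//
-//	jsValEscaper(42)     // " 42 ", padded so it cannot run into a keyword or identifier
-//	jsValEscaper("<!--") // `"\u003c!--"`, via json.Marshal's HTML-aware escaping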
-
-// jsStrEscaper produces a string that can be included between quotes in
-// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
-// or in an HTML5 event handler attribute such as onclick.
-func jsStrEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeJSStr {
-		return replace(s, jsStrNormReplacementTable)
-	}
-	return replace(s, jsStrReplacementTable)
-}
-
-// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
-// specials so the result is treated literally when included in a regular
-// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
-// the literal text of {{.X}} followed by the string "bar".
-func jsRegexpEscaper(args ...interface{}) string {
-	s, _ := stringify(args...)
-	s = replace(s, jsRegexpReplacementTable)
-	if s == "" {
-		// /{{.X}}/ should not produce a line comment when .X == "".
-		return "(?:)"
-	}
-	return s
-}
-
-// replace replaces each rune r of s with replacementTable[r], provided that
-// r < len(replacementTable). If replacementTable[r] is the empty string then
-// no replacement is made.
-// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
-// `\u2029`.
-func replace(s string, replacementTable []string) string {
-	var b strings.Builder
-	r, w, written := rune(0), 0, 0
-	for i := 0; i < len(s); i += w {
-		// See comment in htmlEscaper.
-		r, w = utf8.DecodeRuneInString(s[i:])
-		var repl string
-		switch {
-		case int(r) < len(lowUnicodeReplacementTable):
-			repl = lowUnicodeReplacementTable[r]
-		case int(r) < len(replacementTable) && replacementTable[r] != "":
-			repl = replacementTable[r]
-		case r == '\u2028':
-			repl = `\u2028`
-		case r == '\u2029':
-			repl = `\u2029`
-		default:
-			continue
-		}
-		if written == 0 {
-			b.Grow(len(s))
-		}
-		b.WriteString(s[written:i])
-		b.WriteString(repl)
-		written = i + w
-	}
-	if written == 0 {
-		return s
-	}
-	b.WriteString(s[written:])
-	return b.String()
-}
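-
-// For illustration, a sketch of replace with jsStrReplacementTable (defined below),
-// on a hypothetical input:
-//
-//	replace("a\tb\u2028", jsStrReplacementTable) // `a\tb\u2028` (escapes written out literally)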
-
-var lowUnicodeReplacementTable = []string{
-	0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
-	'\a': `\u0007`,
-	'\b': `\u0008`,
-	'\t': `\t`,
-	'\n': `\n`,
-	'\v': `\u000b`, // "\v" == "v" on IE 6.
-	'\f': `\f`,
-	'\r': `\r`,
-	0xe:  `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
-	0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
-	0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
-}
-
-var jsStrReplacementTable = []string{
-	0:    `\u0000`,
-	'\t': `\t`,
-	'\n': `\n`,
-	'\v': `\u000b`, // "\v" == "v" on IE 6.
-	'\f': `\f`,
-	'\r': `\r`,
-	// Encode HTML specials as hex so the output can be embedded
-	// in HTML attributes without further encoding.
-	'"':  `\u0022`,
-	'&':  `\u0026`,
-	'\'': `\u0027`,
-	'+':  `\u002b`,
-	'/':  `\/`,
-	'<':  `\u003c`,
-	'>':  `\u003e`,
-	'\\': `\\`,
-}
-
-// jsStrNormReplacementTable is like jsStrReplacementTable but does not
-// overencode existing escapes since this table has no entry for `\`.
-var jsStrNormReplacementTable = []string{
-	0:    `\u0000`,
-	'\t': `\t`,
-	'\n': `\n`,
-	'\v': `\u000b`, // "\v" == "v" on IE 6.
-	'\f': `\f`,
-	'\r': `\r`,
-	// Encode HTML specials as hex so the output can be embedded
-	// in HTML attributes without further encoding.
-	'"':  `\u0022`,
-	'&':  `\u0026`,
-	'\'': `\u0027`,
-	'+':  `\u002b`,
-	'/':  `\/`,
-	'<':  `\u003c`,
-	'>':  `\u003e`,
-}
-var jsRegexpReplacementTable = []string{
-	0:    `\u0000`,
-	'\t': `\t`,
-	'\n': `\n`,
-	'\v': `\u000b`, // "\v" == "v" on IE 6.
-	'\f': `\f`,
-	'\r': `\r`,
-	// Encode HTML specials as hex so the output can be embedded
-	// in HTML attributes without further encoding.
-	'"':  `\u0022`,
-	'$':  `\$`,
-	'&':  `\u0026`,
-	'\'': `\u0027`,
-	'(':  `\(`,
-	')':  `\)`,
-	'*':  `\*`,
-	'+':  `\u002b`,
-	'-':  `\-`,
-	'.':  `\.`,
-	'/':  `\/`,
-	'<':  `\u003c`,
-	'>':  `\u003e`,
-	'?':  `\?`,
-	'[':  `\[`,
-	'\\': `\\`,
-	']':  `\]`,
-	'^':  `\^`,
-	'{':  `\{`,
-	'|':  `\|`,
-	'}':  `\}`,
-}
-
-// isJSIdentPart reports whether the given rune is a JS identifier part.
-// It does not handle all the non-Latin letters, joiners, and combining marks,
-// but it does handle every codepoint that can occur in a numeric literal or
-// a keyword.
-func isJSIdentPart(r rune) bool {
-	switch {
-	case r == '$':
-		return true
-	case '0' <= r && r <= '9':
-		return true
-	case 'A' <= r && r <= 'Z':
-		return true
-	case r == '_':
-		return true
-	case 'a' <= r && r <= 'z':
-		return true
-	}
-	return false
-}
-
-// isJSType reports whether the given MIME type should be considered JavaScript.
-//
-// It is used to determine whether a script tag with a type attribute is a JavaScript container.
-func isJSType(mimeType string) bool {
-	// per
-	//   https://www.w3.org/TR/html5/scripting-1.html#attr-script-type
-	//   https://tools.ietf.org/html/rfc7231#section-3.1.1
-	//   https://tools.ietf.org/html/rfc4329#section-3
-	//   https://www.ietf.org/rfc/rfc4627.txt
-	// discard parameters
-	if i := strings.Index(mimeType, ";"); i >= 0 {
-		mimeType = mimeType[:i]
-	}
-	mimeType = strings.ToLower(mimeType)
-	mimeType = strings.TrimSpace(mimeType)
-	switch mimeType {
-	case
-		"application/ecmascript",
-		"application/javascript",
-		"application/json",
-		"application/ld+json",
-		"application/x-ecmascript",
-		"application/x-javascript",
-		"module",
-		"text/ecmascript",
-		"text/javascript",
-		"text/javascript1.0",
-		"text/javascript1.1",
-		"text/javascript1.2",
-		"text/javascript1.3",
-		"text/javascript1.4",
-		"text/javascript1.5",
-		"text/jscript",
-		"text/livescript",
-		"text/x-ecmascript",
-		"text/x-javascript":
-		return true
-	default:
-		return false
-	}
-}
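-
-// For illustration, a sketch of isJSType on hypothetical media types
-// (parameters after ";" are discarded before matching):
-//
-//	isJSType("text/javascript; charset=utf-8") // true
-//	isJSType("text/html")                      // false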
diff --git a/internal/backport/html/template/js_test.go b/internal/backport/html/template/js_test.go
deleted file mode 100644
index d7ee47b..0000000
--- a/internal/backport/html/template/js_test.go
+++ /dev/null
@@ -1,423 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"math"
-	"strings"
-	"testing"
-)
-
-func TestNextJsCtx(t *testing.T) {
-	tests := []struct {
-		jsCtx jsCtx
-		s     string
-	}{
-		// Statement terminators precede regexps.
-		{jsCtxRegexp, ";"},
-		// This is not airtight.
-		//     ({ valueOf: function () { return 1 } } / 2)
-		// is valid JavaScript but in practice, devs do not do this.
-		// A block followed by a statement starting with a RegExp is
-		// much more common:
-		//     while (x) {...} /foo/.test(x) || panic()
-		{jsCtxRegexp, "}"},
-		// But member, call, grouping, and array expression terminators
-		// precede div ops.
-		{jsCtxDivOp, ")"},
-		{jsCtxDivOp, "]"},
-		// At the start of a primary expression, array, or expression
-		// statement, expect a regexp.
-		{jsCtxRegexp, "("},
-		{jsCtxRegexp, "["},
-		{jsCtxRegexp, "{"},
-		// Assignment operators precede regexps as do all exclusively
-		// prefix and binary operators.
-		{jsCtxRegexp, "="},
-		{jsCtxRegexp, "+="},
-		{jsCtxRegexp, "*="},
-		{jsCtxRegexp, "*"},
-		{jsCtxRegexp, "!"},
-		// Whether the + or - is infix or prefix, it cannot precede a
-		// div op.
-		{jsCtxRegexp, "+"},
-		{jsCtxRegexp, "-"},
-		// An incr/decr op precedes a div operator.
-		// This is not airtight. In (g = ++/h/i) a regexp follows a
-		// pre-increment operator, but in practice devs do not try to
-		// increment or decrement regular expressions.
-		// (g++/h/i) where ++ is a postfix operator on g is much more
-		// common.
-		{jsCtxDivOp, "--"},
-		{jsCtxDivOp, "++"},
-		{jsCtxDivOp, "x--"},
-		// When we have many dashes or pluses, then they are grouped
-		// left to right.
-		{jsCtxRegexp, "x---"}, // A postfix -- then a -.
-		// return followed by a slash returns the regexp literal or the
-		// slash starts a regexp literal in an expression statement that
-		// is dead code.
-		{jsCtxRegexp, "return"},
-		{jsCtxRegexp, "return "},
-		{jsCtxRegexp, "return\t"},
-		{jsCtxRegexp, "return\n"},
-		{jsCtxRegexp, "return\u2028"},
-		// Identifiers can be divided and cannot validly be preceded by
-		// a regular expressions. Semicolon insertion cannot happen
-		// between an identifier and a regular expression on a new line
-		// because the one token lookahead for semicolon insertion has
-		// to conclude that it could be a div binary op and treat it as
-		// such.
-		{jsCtxDivOp, "x"},
-		{jsCtxDivOp, "x "},
-		{jsCtxDivOp, "x\t"},
-		{jsCtxDivOp, "x\n"},
-		{jsCtxDivOp, "x\u2028"},
-		{jsCtxDivOp, "preturn"},
-		// Numbers precede div ops.
-		{jsCtxDivOp, "0"},
-		// Dots that are part of a number are div preceders.
-		{jsCtxDivOp, "0."},
-	}
-
-	for _, test := range tests {
-		if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
-			t.Errorf("want %s got %q", test.jsCtx, test.s)
-		}
-		if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
-			t.Errorf("want %s got %q", test.jsCtx, test.s)
-		}
-	}
-
-	if nextJSCtx([]byte("   "), jsCtxRegexp) != jsCtxRegexp {
-		t.Error("Blank tokens")
-	}
-
-	if nextJSCtx([]byte("   "), jsCtxDivOp) != jsCtxDivOp {
-		t.Error("Blank tokens")
-	}
-}
-
-func TestJSValEscaper(t *testing.T) {
-	tests := []struct {
-		x  interface{}
-		js string
-	}{
-		{int(42), " 42 "},
-		{uint(42), " 42 "},
-		{int16(42), " 42 "},
-		{uint16(42), " 42 "},
-		{int32(-42), " -42 "},
-		{uint32(42), " 42 "},
-		{int16(-42), " -42 "},
-		{uint16(42), " 42 "},
-		{int64(-42), " -42 "},
-		{uint64(42), " 42 "},
-		{uint64(1) << 53, " 9007199254740992 "},
-		// ulp(1 << 53) > 1 so this loses precision in JS
-		// but it is still a representable integer literal.
-		{uint64(1)<<53 + 1, " 9007199254740993 "},
-		{float32(1.0), " 1 "},
-		{float32(-1.0), " -1 "},
-		{float32(0.5), " 0.5 "},
-		{float32(-0.5), " -0.5 "},
-		{float32(1.0) / float32(256), " 0.00390625 "},
-		{float32(0), " 0 "},
-		{math.Copysign(0, -1), " -0 "},
-		{float64(1.0), " 1 "},
-		{float64(-1.0), " -1 "},
-		{float64(0.5), " 0.5 "},
-		{float64(-0.5), " -0.5 "},
-		{float64(0), " 0 "},
-		{math.Copysign(0, -1), " -0 "},
-		{"", `""`},
-		{"foo", `"foo"`},
-		// Newlines.
-		{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
-		// "\v" == "v" on IE 6 so use "\u000b" instead.
-		{"\t\x0b", `"\t\u000b"`},
-		{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
-		{[]interface{}{}, "[]"},
-		{[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
-		{[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
-		{"<!--", `"\u003c!--"`},
-		{"-->", `"--\u003e"`},
-		{"<![CDATA[", `"\u003c![CDATA["`},
-		{"]]>", `"]]\u003e"`},
-		{"</script", `"\u003c/script"`},
-		{"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
-		{nil, " null "},
-	}
-
-	for _, test := range tests {
-		if js := jsValEscaper(test.x); js != test.js {
-			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
-		}
-		// Make sure that escaping corner cases are not broken
-		// by nesting.
-		a := []interface{}{test.x}
-		want := "[" + strings.TrimSpace(test.js) + "]"
-		if js := jsValEscaper(a); js != want {
-			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", a, want, js)
-		}
-	}
-}
-
-func TestJSStrEscaper(t *testing.T) {
-	tests := []struct {
-		x   interface{}
-		esc string
-	}{
-		{"", ``},
-		{"foo", `foo`},
-		{"\u0000", `\u0000`},
-		{"\t", `\t`},
-		{"\n", `\n`},
-		{"\r", `\r`},
-		{"\u2028", `\u2028`},
-		{"\u2029", `\u2029`},
-		{"\\", `\\`},
-		{"\\n", `\\n`},
-		{"foo\r\nbar", `foo\r\nbar`},
-		// Preserve attribute boundaries.
-		{`"`, `\u0022`},
-		{`'`, `\u0027`},
-		// Allow embedding in HTML without further escaping.
-		{`&amp;`, `\u0026amp;`},
-		// Prevent breaking out of text node and element boundaries.
-		{"</script>", `\u003c\/script\u003e`},
-		{"<![CDATA[", `\u003c![CDATA[`},
-		{"]]>", `]]\u003e`},
-		// https://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
-		//   "The text in style, script, title, and textarea elements
-		//   must not have an escaping text span start that is not
-		//   followed by an escaping text span end."
-		// Furthermore, spoofing an escaping text span end could lead
-		// to different interpretation of a </script> sequence otherwise
-		// masked by the escaping text span, and spoofing a start could
-		// allow regular text content to be interpreted as script
-		// allowing script execution via a combination of a JS string
-		// injection followed by an HTML text injection.
-		{"<!--", `\u003c!--`},
-		{"-->", `--\u003e`},
-		// From https://code.google.com/p/doctype/wiki/ArticleUtf7
-		{"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
-			`\u002bADw-script\u002bAD4-alert(1)\u002bADw-\/script\u002bAD4-`,
-		},
-		// Invalid UTF-8 sequence
-		{"foo\xA0bar", "foo\xA0bar"},
-		// Invalid unicode scalar value.
-		{"foo\xed\xa0\x80bar", "foo\xed\xa0\x80bar"},
-	}
-
-	for _, test := range tests {
-		esc := jsStrEscaper(test.x)
-		if esc != test.esc {
-			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
-		}
-	}
-}
-
-func TestJSRegexpEscaper(t *testing.T) {
-	tests := []struct {
-		x   interface{}
-		esc string
-	}{
-		{"", `(?:)`},
-		{"foo", `foo`},
-		{"\u0000", `\u0000`},
-		{"\t", `\t`},
-		{"\n", `\n`},
-		{"\r", `\r`},
-		{"\u2028", `\u2028`},
-		{"\u2029", `\u2029`},
-		{"\\", `\\`},
-		{"\\n", `\\n`},
-		{"foo\r\nbar", `foo\r\nbar`},
-		// Preserve attribute boundaries.
-		{`"`, `\u0022`},
-		{`'`, `\u0027`},
-		// Allow embedding in HTML without further escaping.
-		{`&amp;`, `\u0026amp;`},
-		// Prevent breaking out of text node and element boundaries.
-		{"</script>", `\u003c\/script\u003e`},
-		{"<![CDATA[", `\u003c!\[CDATA\[`},
-		{"]]>", `\]\]\u003e`},
-		// Escaping text spans.
-		{"<!--", `\u003c!\-\-`},
-		{"-->", `\-\-\u003e`},
-		{"*", `\*`},
-		{"+", `\u002b`},
-		{"?", `\?`},
-		{"[](){}", `\[\]\(\)\{\}`},
-		{"$foo|x.y", `\$foo\|x\.y`},
-		{"x^y", `x\^y`},
-	}
-
-	for _, test := range tests {
-		esc := jsRegexpEscaper(test.x)
-		if esc != test.esc {
-			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
-		}
-	}
-}
-
-func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
-	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		` !"#$%&'()*+,-./` +
-		`0123456789:;<=>?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\]^_` +
-		"`abcdefghijklmno" +
-		"pqrstuvwxyz{|}~\x7f" +
-		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
-	tests := []struct {
-		name    string
-		escaper func(...interface{}) string
-		escaped string
-	}{
-		{
-			"jsStrEscaper",
-			jsStrEscaper,
-			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
-				`\u0008\t\n\u000b\f\r\u000e\u000f` +
-				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
-				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
-				` !\u0022#$%\u0026\u0027()*\u002b,-.\/` +
-				`0123456789:;\u003c=\u003e?` +
-				`@ABCDEFGHIJKLMNO` +
-				`PQRSTUVWXYZ[\\]^_` +
-				"`abcdefghijklmno" +
-				"pqrstuvwxyz{|}~\u007f" +
-				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
-		},
-		{
-			"jsRegexpEscaper",
-			jsRegexpEscaper,
-			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
-				`\u0008\t\n\u000b\f\r\u000e\u000f` +
-				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
-				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
-				` !\u0022#\$%\u0026\u0027\(\)\*\u002b,\-\.\/` +
-				`0123456789:;\u003c=\u003e\?` +
-				`@ABCDEFGHIJKLMNO` +
-				`PQRSTUVWXYZ\[\\\]\^_` +
-				"`abcdefghijklmno" +
-				`pqrstuvwxyz\{\|\}~` + "\u007f" +
-				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
-		},
-	}
-
-	for _, test := range tests {
-		if s := test.escaper(input); s != test.escaped {
-			t.Errorf("%s once: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
-			continue
-		}
-
-		// Escape it rune by rune to make sure that any
-		// fast-path checking does not break escaping.
-		var buf bytes.Buffer
-		for _, c := range input {
-			buf.WriteString(test.escaper(string(c)))
-		}
-
-		if s := buf.String(); s != test.escaped {
-			t.Errorf("%s rune-wise: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
-			continue
-		}
-	}
-}
-
-func TestIsJsMimeType(t *testing.T) {
-	tests := []struct {
-		in  string
-		out bool
-	}{
-		{"application/javascript;version=1.8", true},
-		{"application/javascript;version=1.8;foo=bar", true},
-		{"application/javascript/version=1.8", false},
-		{"text/javascript", true},
-		{"application/json", true},
-		{"application/ld+json", true},
-		{"module", true},
-	}
-
-	for _, test := range tests {
-		if isJSType(test.in) != test.out {
-			t.Errorf("isJSType(%q) = %v, want %v", test.in, !test.out, test.out)
-		}
-	}
-}
-
-func BenchmarkJSValEscaperWithNum(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsValEscaper(3.141592654)
-	}
-}
-
-func BenchmarkJSValEscaperWithStr(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsValEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
-
-func BenchmarkJSValEscaperWithStrNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsValEscaper("The quick, brown fox jumps over the lazy dog")
-	}
-}
-
-func BenchmarkJSValEscaperWithObj(b *testing.B) {
-	o := struct {
-		S string
-		N int
-	}{
-		"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>\u2028",
-		42,
-	}
-	for i := 0; i < b.N; i++ {
-		jsValEscaper(o)
-	}
-}
-
-func BenchmarkJSValEscaperWithObjNoSpecials(b *testing.B) {
-	o := struct {
-		S string
-		N int
-	}{
-		"The quick, brown fox jumps over the lazy dog",
-		42,
-	}
-	for i := 0; i < b.N; i++ {
-		jsValEscaper(o)
-	}
-}
-
-func BenchmarkJSStrEscaperNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsStrEscaper("The quick, brown fox jumps over the lazy dog.")
-	}
-}
-
-func BenchmarkJSStrEscaper(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsStrEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
-
-func BenchmarkJSRegexpEscaperNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsRegexpEscaper("The quick, brown fox jumps over the lazy dog")
-	}
-}
-
-func BenchmarkJSRegexpEscaper(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		jsRegexpEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
-	}
-}
diff --git a/internal/backport/html/template/jsctx_string.go b/internal/backport/html/template/jsctx_string.go
deleted file mode 100644
index dd1d87e..0000000
--- a/internal/backport/html/template/jsctx_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type jsCtx"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
-
-var _jsCtx_index = [...]uint8{0, 11, 21, 33}
-
-func (i jsCtx) String() string {
-	if i >= jsCtx(len(_jsCtx_index)-1) {
-		return "jsCtx(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _jsCtx_name[_jsCtx_index[i]:_jsCtx_index[i+1]]
-}
diff --git a/internal/backport/html/template/multi_test.go b/internal/backport/html/template/multi_test.go
deleted file mode 100644
index 275441f..0000000
--- a/internal/backport/html/template/multi_test.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests for multiple-template execution, copied from text/template.
-
-package template
-
-import (
-	"bytes"
-	"os"
-	"testing"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-var multiExecTests = []execTest{
-	{"empty", "", "", nil, true},
-	{"text", "some text", "some text", nil, true},
-	{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
-	{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
-	{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
-	{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
-	{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
-	{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
-	{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
-
-	// User-defined function: test argument evaluator.
-	{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
-	{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
-}
-
-// These strings are also in testdata/*.
-const multiText1 = `
-	{{define "x"}}TEXT{{end}}
-	{{define "dotV"}}{{.V}}{{end}}
-`
-
-const multiText2 = `
-	{{define "dot"}}{{.}}{{end}}
-	{{define "nested"}}{{template "dot" .}}{{end}}
-`
-
-func TestMultiExecute(t *testing.T) {
-	// Declare a couple of templates first.
-	template, err := New("root").Parse(multiText1)
-	if err != nil {
-		t.Fatalf("parse error for 1: %s", err)
-	}
-	_, err = template.Parse(multiText2)
-	if err != nil {
-		t.Fatalf("parse error for 2: %s", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseFiles(t *testing.T) {
-	_, err := ParseFiles("DOES NOT EXIST")
-	if err == nil {
-		t.Error("expected error for non-existent file; got none")
-	}
-	template := New("root")
-	_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseGlob(t *testing.T) {
-	_, err := ParseGlob("DOES NOT EXIST")
-	if err == nil {
-		t.Error("expected error for non-existent file; got none")
-	}
-	_, err = New("error").ParseGlob("[x")
-	if err == nil {
-		t.Error("expected error for bad pattern; got none")
-	}
-	template := New("root")
-	_, err = template.ParseGlob("testdata/file*.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseFS(t *testing.T) {
-	fs := os.DirFS("testdata")
-
-	{
-		_, err := ParseFS(fs, "DOES NOT EXIST")
-		if err == nil {
-			t.Error("expected error for non-existent file; got none")
-		}
-	}
-
-	{
-		template := New("root")
-		_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
-		if err != nil {
-			t.Fatalf("error parsing files: %v", err)
-		}
-		testExecute(multiExecTests, template, t)
-	}
-
-	{
-		template := New("root")
-		_, err := template.ParseFS(fs, "file*.tmpl")
-		if err != nil {
-			t.Fatalf("error parsing files: %v", err)
-		}
-		testExecute(multiExecTests, template, t)
-	}
-}
-
-// In these tests, actual content (not just template definitions) comes from the parsed files.
-
-var templateFileExecTests = []execTest{
-	{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
-}
-
-func TestParseFilesWithData(t *testing.T) {
-	template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(templateFileExecTests, template, t)
-}
-
-func TestParseGlobWithData(t *testing.T) {
-	template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(templateFileExecTests, template, t)
-}
-
-const (
-	cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
-	cloneText2 = `{{define "b"}}b{{end}}`
-	cloneText3 = `{{define "c"}}root{{end}}`
-	cloneText4 = `{{define "c"}}clone{{end}}`
-)
-
-// Issue 7032
-func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
-	master := "{{define \"master\"}}{{end}}"
-	tmpl := New("master")
-	tree, err := parse.Parse("master", master, "", "", nil)
-	if err != nil {
-		t.Fatalf("unexpected parse err: %v", err)
-	}
-	masterTree := tree["master"]
-	tmpl.AddParseTree("master", masterTree) // used to panic
-}
-
-func TestRedefinition(t *testing.T) {
-	var tmpl *Template
-	var err error
-	if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
-		t.Fatalf("parse 1: %v", err)
-	}
-	if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
-		t.Fatalf("got error %v, expected nil", err)
-	}
-	if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
-		t.Fatalf("got error %v, expected nil", err)
-	}
-}
-
-// Issue 10879
-func TestEmptyTemplateCloneCrash(t *testing.T) {
-	t1 := New("base")
-	t1.Clone() // used to panic
-}
-
-// Issue 10910, 10926
-func TestTemplateLookUp(t *testing.T) {
-	t.Skip("broken on html/template") // TODO
-	t1 := New("foo")
-	if t1.Lookup("foo") != nil {
-		t.Error("Lookup returned non-nil value for undefined template foo")
-	}
-	t1.New("bar")
-	if t1.Lookup("bar") != nil {
-		t.Error("Lookup returned non-nil value for undefined template bar")
-	}
-	t1.Parse(`{{define "foo"}}test{{end}}`)
-	if t1.Lookup("foo") == nil {
-		t.Error("Lookup returned nil value for defined template")
-	}
-}
-
-func TestParse(t *testing.T) {
-	// In multiple calls to Parse with the same receiver template, only one call
-	// can contain text other than space, comments, and template definitions
-	t1 := New("test")
-	if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-	if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-	if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-}
-
-func TestEmptyTemplate(t *testing.T) {
-	cases := []struct {
-		defn []string
-		in   string
-		want string
-	}{
-		{[]string{"x", "y"}, "", "y"},
-		{[]string{""}, "once", ""},
-		{[]string{"", ""}, "twice", ""},
-		{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
-		{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
-		{[]string{"{{.}}", ""}, "twice", "twice"}, // TODO: should want "" not "twice"
-	}
-
-	for i, c := range cases {
-		root := New("root")
-
-		var (
-			m   *Template
-			err error
-		)
-		for _, d := range c.defn {
-			m, err = root.New(c.in).Parse(d)
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-		buf := &bytes.Buffer{}
-		if err := m.Execute(buf, c.in); err != nil {
-			t.Error(i, err)
-			continue
-		}
-		if buf.String() != c.want {
-			t.Errorf("expected string %q: got %q", c.want, buf.String())
-		}
-	}
-}
-
-// Issue 19249 was a regression in 1.8 caused by the handling of empty
-// templates added in that release, which got different answers depending
-// on the order templates appeared in the internal map.
-func TestIssue19294(t *testing.T) {
-	// The empty block in "xhtml" should be replaced during execution
-	// by the contents of "stylesheet", but if the internal map associating
-	// names with templates is built in the wrong order, the empty block
-	// looks non-empty and this doesn't happen.
-	var inlined = map[string]string{
-		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
-		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
-	}
-	all := []string{"stylesheet", "xhtml"}
-	for i := 0; i < 100; i++ {
-		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
-		if err != nil {
-			t.Fatal(err)
-		}
-		for _, name := range all {
-			_, err := res.New(name).Parse(inlined[name])
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-		var buf bytes.Buffer
-		res.Execute(&buf, 0)
-		if buf.String() != "stylesheet" {
-			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
-		}
-	}
-}
diff --git a/internal/backport/html/template/state_string.go b/internal/backport/html/template/state_string.go
deleted file mode 100644
index 05104be..0000000
--- a/internal/backport/html/template/state_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type state"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
-
-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
-
-func (i state) String() string {
-	if i >= state(len(_state_index)-1) {
-		return "state(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _state_name[_state_index[i]:_state_index[i+1]]
-}
diff --git a/internal/backport/html/template/template.go b/internal/backport/html/template/template.go
deleted file mode 100644
index 6cc709e..0000000
--- a/internal/backport/html/template/template.go
+++ /dev/null
@@ -1,538 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"fmt"
-	"io"
-	"io/fs"
-	"io/ioutil"
-	"path"
-	"path/filepath"
-	"sync"
-
-	"golang.org/x/website/internal/backport/text/template"
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// Template is a specialized Template from "golang.org/x/website/internal/backport/text/template" that produces a safe
-// HTML document fragment.
-type Template struct {
-	// Sticky error if escaping fails, or escapeOK if succeeded.
-	escapeErr error
-	// We could embed the text/template field, but it's safer not to because
-	// we need to keep our version of the name space and the underlying
-	// template's in sync.
-	text *template.Template
-	// The underlying template's parse tree, updated to be HTML-safe.
-	Tree       *parse.Tree
-	*nameSpace // common to all associated templates
-}
-
-// escapeOK is a sentinel value used to indicate valid escaping.
-var escapeOK = fmt.Errorf("template escaped correctly")
-
-// nameSpace is the data structure shared by all templates in an association.
-type nameSpace struct {
-	mu      sync.Mutex
-	set     map[string]*Template
-	escaped bool
-	esc     escaper
-}
-
-// Templates returns a slice of the templates associated with t, including t
-// itself.
-func (t *Template) Templates() []*Template {
-	ns := t.nameSpace
-	ns.mu.Lock()
-	defer ns.mu.Unlock()
-	// Return a slice so we don't expose the map.
-	m := make([]*Template, 0, len(ns.set))
-	for _, v := range ns.set {
-		m = append(m, v)
-	}
-	return m
-}
-
-// Option sets options for the template. Options are described by
-// strings, either a simple string or "key=value". There can be at
-// most one equals sign in an option string. If the option string
-// is unrecognized or otherwise invalid, Option panics.
-//
-// Known options:
-//
-// missingkey: Control the behavior during execution if a map is
-// indexed with a key that is not present in the map.
-//
-//	"missingkey=default" or "missingkey=invalid"
-//		The default behavior: Do nothing and continue execution.
-//		If printed, the result of the index operation is the string
-//		"<no value>".
-//	"missingkey=zero"
-//		The operation returns the zero value for the map type's element.
-//	"missingkey=error"
-//		Execution stops immediately with an error.
-func (t *Template) Option(opt ...string) *Template {
-	t.text.Option(opt...)
-	return t
-}
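-
-// For illustration, a sketch of setting an option on a hypothetical template:
-//
-//	t := Must(New("page").Parse(`{{.Title}}`))
-//	t.Option("missingkey=error") // executing with a map that lacks "Title" now fails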
-
-// checkCanParse checks whether it is OK to parse templates.
-// If not, it returns an error.
-func (t *Template) checkCanParse() error {
-	if t == nil {
-		return nil
-	}
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	if t.nameSpace.escaped {
-		return fmt.Errorf("html/template: cannot Parse after Execute")
-	}
-	return nil
-}
-
-// escape escapes all associated templates.
-func (t *Template) escape() error {
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	t.nameSpace.escaped = true
-	if t.escapeErr == nil {
-		if t.Tree == nil {
-			return fmt.Errorf("template: %q is an incomplete or empty template", t.Name())
-		}
-		if err := escapeTemplate(t, t.text.Root, t.Name()); err != nil {
-			return err
-		}
-	} else if t.escapeErr != escapeOK {
-		return t.escapeErr
-	}
-	return nil
-}
-
-// Execute applies a parsed template to the specified data object,
-// writing the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel, although if parallel
-// executions share a Writer the output may be interleaved.
-func (t *Template) Execute(wr io.Writer, data interface{}) error {
-	if err := t.escape(); err != nil {
-		return err
-	}
-	return t.text.Execute(wr, data)
-}
-
-// ExecuteTemplate applies the template associated with t that has the given
-// name to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel, although if parallel
-// executions share a Writer the output may be interleaved.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
-	tmpl, err := t.lookupAndEscapeTemplate(name)
-	if err != nil {
-		return err
-	}
-	return tmpl.text.Execute(wr, data)
-}
-
-// lookupAndEscapeTemplate guarantees that the template with the given name
-// is escaped, or returns an error if it cannot be. It returns the named
-// template.
-func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err error) {
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	t.nameSpace.escaped = true
-	tmpl = t.set[name]
-	if tmpl == nil {
-		return nil, fmt.Errorf("html/template: %q is undefined", name)
-	}
-	if tmpl.escapeErr != nil && tmpl.escapeErr != escapeOK {
-		return nil, tmpl.escapeErr
-	}
-	if tmpl.text.Tree == nil || tmpl.text.Root == nil {
-		return nil, fmt.Errorf("html/template: %q is an incomplete template", name)
-	}
-	if t.text.Lookup(name) == nil {
-		panic("html/template internal error: template escaping out of sync")
-	}
-	if tmpl.escapeErr == nil {
-		err = escapeTemplate(tmpl, tmpl.text.Root, name)
-	}
-	return tmpl, err
-}
-
-// DefinedTemplates returns a string listing the defined templates,
-// prefixed by the string "; defined templates are: ". If there are none,
-// it returns the empty string. Used to generate an error message.
-func (t *Template) DefinedTemplates() string {
-	return t.text.DefinedTemplates()
-}
-
-// Parse parses text as a template body for t.
-// Named template definitions ({{define ...}} or {{block ...}} statements) in text
-// define additional templates associated with t and are removed from the
-// definition of t itself.
-//
-// Templates can be redefined in successive calls to Parse,
-// before the first use of Execute on t or any associated template.
-// A template definition with a body containing only white space and comments
-// is considered empty and will not replace an existing template's body.
-// This allows using Parse to add new named template definitions without
-// overwriting the main template body.
-func (t *Template) Parse(text string) (*Template, error) {
-	if err := t.checkCanParse(); err != nil {
-		return nil, err
-	}
-
-	ret, err := t.text.Parse(text)
-	if err != nil {
-		return nil, err
-	}
-
-	// In general, all the named templates might have changed underfoot.
-	// Regardless, some new ones may have been defined.
-	// The template.Template set has been updated; update ours.
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	for _, v := range ret.Templates() {
-		name := v.Name()
-		tmpl := t.set[name]
-		if tmpl == nil {
-			tmpl = t.new(name)
-		}
-		tmpl.text = v
-		tmpl.Tree = v.Tree
-	}
-	return t, nil
-}
-
-// AddParseTree creates a new template with the name and parse tree
-// and associates it with t.
-//
-// It returns an error if t or any associated template has already been executed.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
-	if err := t.checkCanParse(); err != nil {
-		return nil, err
-	}
-
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	text, err := t.text.AddParseTree(name, tree)
-	if err != nil {
-		return nil, err
-	}
-	ret := &Template{
-		nil,
-		text,
-		text.Tree,
-		t.nameSpace,
-	}
-	t.set[name] = ret
-	return ret, nil
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-//
-// It returns an error if t has already been executed.
-func (t *Template) Clone() (*Template, error) {
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	if t.escapeErr != nil {
-		return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
-	}
-	textClone, err := t.text.Clone()
-	if err != nil {
-		return nil, err
-	}
-	ns := &nameSpace{set: make(map[string]*Template)}
-	ns.esc = makeEscaper(ns)
-	ret := &Template{
-		nil,
-		textClone,
-		textClone.Tree,
-		ns,
-	}
-	ret.set[ret.Name()] = ret
-	for _, x := range textClone.Templates() {
-		name := x.Name()
-		src := t.set[name]
-		if src == nil || src.escapeErr != nil {
-			return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
-		}
-		x.Tree = x.Tree.Copy()
-		ret.set[name] = &Template{
-			nil,
-			x,
-			x.Tree,
-			ret.nameSpace,
-		}
-	}
-	// Return the template associated with the name of this template.
-	return ret.set[ret.Name()], nil
-}
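-
-// For illustration, a sketch of the clone-then-specialize pattern described above
-// (hypothetical names and bodies):
-//
-//	base := Must(New("base").Parse(`{{define "page"}}{{template "body"}}{{end}}`))
-//	v1 := Must(Must(base.Clone()).Parse(`{{define "body"}}variant one{{end}}`))
-//	v2 := Must(Must(base.Clone()).Parse(`{{define "body"}}variant two{{end}}`))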
-
-// New allocates a new HTML template with the given name.
-func New(name string) *Template {
-	ns := &nameSpace{set: make(map[string]*Template)}
-	ns.esc = makeEscaper(ns)
-	tmpl := &Template{
-		nil,
-		template.New(name),
-		nil,
-		ns,
-	}
-	tmpl.set[name] = tmpl
-	return tmpl
-}
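-
-// For illustration, a minimal sketch of creating and executing a template
-// (hypothetical name and data; {{.}} is escaped contextually):
-//
-//	t := Must(New("greeting").Parse(`<p>Hello, {{.}}!</p>`))
-//	_ = t.Execute(os.Stdout, "<b>world</b>")
-//	// writes: <p>Hello, &lt;b&gt;world&lt;/b&gt;!</p>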
-
-// New allocates a new HTML template associated with the given one
-// and with the same delimiters. The association, which is transitive,
-// allows one template to invoke another with a {{template}} action.
-//
-// If a template with the given name already exists, the new HTML template
-// will replace it. The existing template will be reset and disassociated with
-// t.
-func (t *Template) New(name string) *Template {
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	return t.new(name)
-}
-
-// new is the implementation of New, without the lock.
-func (t *Template) new(name string) *Template {
-	tmpl := &Template{
-		nil,
-		t.text.New(name),
-		nil,
-		t.nameSpace,
-	}
-	if existing, ok := tmpl.set[name]; ok {
-		emptyTmpl := New(existing.Name())
-		*existing = *emptyTmpl
-	}
-	tmpl.set[name] = tmpl
-	return tmpl
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
-	return t.text.Name()
-}
-
-// FuncMap is the type of the map defining the mapping from names to
-// functions. Each function must have either a single return value, or two
-// return values of which the second has type error. In that case, if the
-// second (error) argument evaluates to non-nil during execution, execution
-// terminates and Execute returns that error. FuncMap has the same base type
-// as FuncMap in "golang.org/x/website/internal/backport/text/template", copied here so clients need not import
-// "golang.org/x/website/internal/backport/text/template".
-type FuncMap map[string]interface{}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It must be called before the template is parsed.
-// It panics if a value in the map is not a function with appropriate return
-// type. However, it is legal to overwrite elements of the map. The return
-// value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
-	t.text.Funcs(template.FuncMap(funcMap))
-	return t
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
-	t.text.Delims(left, right)
-	return t
-}
-
-// Lookup returns the template with the given name that is associated with t,
-// or nil if there is no such template.
-func (t *Template) Lookup(name string) *Template {
-	t.nameSpace.mu.Lock()
-	defer t.nameSpace.mu.Unlock()
-	return t.set[name]
-}
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable initializations
-// such as
-//
-//	var t = template.Must(template.New("name").Parse("html"))
-func Must(t *Template, err error) *Template {
-	if err != nil {
-		panic(err)
-	}
-	return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the (base) name and
-// (parsed) contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
-// named "foo", while "a/foo" is unavailable.
-func ParseFiles(filenames ...string) (*Template, error) {
-	return parseFiles(nil, readFileOS, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-//
-// ParseFiles returns an error if t or any associated template has already been executed.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
-	return parseFiles(t, readFileOS, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
-	if err := t.checkCanParse(); err != nil {
-		return nil, err
-	}
-
-	if len(filenames) == 0 {
-		// Not really a problem, but be consistent.
-		return nil, fmt.Errorf("html/template: no files named in call to ParseFiles")
-	}
-	for _, filename := range filenames {
-		name, b, err := readFile(filename)
-		if err != nil {
-			return nil, err
-		}
-		s := string(b)
-		// First template becomes return value if not already defined,
-		// and we use that one for subsequent New calls to associate
-		// all the templates together. Also, if this file has the same name
-		// as t, this file becomes the contents of t, so
-		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
-		// works. Otherwise we create a new template associated with t.
-		var tmpl *Template
-		if t == nil {
-			t = New(name)
-		}
-		if name == t.Name() {
-			tmpl = t
-		} else {
-			tmpl = t.New(name)
-		}
-		_, err = tmpl.Parse(s)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from
-// the files identified by the pattern. The files are matched according to the
-// semantics of filepath.Match, and the pattern must match at least one file.
-// The returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func ParseGlob(pattern string) (*Template, error) {
-	return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The files are matched
-// according to the semantics of filepath.Match, and the pattern must match at
-// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
-// list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-//
-// ParseGlob returns an error if t or any associated template has already been executed.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
-	return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
-	if err := t.checkCanParse(); err != nil {
-		return nil, err
-	}
-	filenames, err := filepath.Glob(pattern)
-	if err != nil {
-		return nil, err
-	}
-	if len(filenames) == 0 {
-		return nil, fmt.Errorf("html/template: pattern matches no files: %#q", pattern)
-	}
-	return parseFiles(t, readFileOS, filenames...)
-}
-
-// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value. This is the definition of
-// truth used by if and other such actions.
-func IsTrue(val interface{}) (truth, ok bool) {
-	return template.IsTrue(val)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
-	return parseFS(nil, fs, patterns)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func (t *Template) ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
-	return parseFS(t, fs, patterns)
-}
-
-func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
-	var filenames []string
-	for _, pattern := range patterns {
-		list, err := fs.Glob(fsys, pattern)
-		if err != nil {
-			return nil, err
-		}
-		if len(list) == 0 {
-			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
-		}
-		filenames = append(filenames, list...)
-	}
-	return parseFiles(t, readFileFS(fsys), filenames...)
-}
-
-func readFileOS(file string) (name string, b []byte, err error) {
-	name = filepath.Base(file)
-	b, err = ioutil.ReadFile(file)
-	return
-}
-
-func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
-	return func(file string) (name string, b []byte, err error) {
-		name = path.Base(file)
-		b, err = fs.ReadFile(fsys, file)
-		return
-	}
-}
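
With the backport deleted, callers switch to the identical entry points in the standard library's html/template (New, Must, ParseFS, ExecuteTemplate, and so on). A minimal sketch of the replacement usage, assuming a hypothetical embedded templates/ directory containing page.tmpl:

package main

import (
	"embed"
	"html/template"
	"os"
)

//go:embed templates/*.tmpl
var tmplFS embed.FS // hypothetical embedded template directory, for illustration only

func main() {
	// ParseFS works exactly like the deleted backport's ParseFS.
	t := template.Must(template.ParseFS(tmplFS, "templates/*.tmpl"))

	// Contextual autoescaping is applied automatically on execution.
	if err := t.ExecuteTemplate(os.Stdout, "page.tmpl", map[string]string{"Title": "<b>hi</b>"}); err != nil {
		panic(err)
	}
}
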
diff --git a/internal/backport/html/template/template_test.go b/internal/backport/html/template/template_test.go
deleted file mode 100644
index 0f68e72..0000000
--- a/internal/backport/html/template/template_test.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"bytes"
-	"encoding/json"
-	"strings"
-	"testing"
-
-	. "golang.org/x/website/internal/backport/html/template"
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-func TestTemplateClone(t *testing.T) {
-	// https://golang.org/issue/12996
-	orig := New("name")
-	clone, err := orig.Clone()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(clone.Templates()) != len(orig.Templates()) {
-		t.Fatalf("Invalid length of t.Clone().Templates()")
-	}
-
-	const want = "stuff"
-	parsed := Must(clone.Parse(want))
-	var buf bytes.Buffer
-	err = parsed.Execute(&buf, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if got := buf.String(); got != want {
-		t.Fatalf("got %q; want %q", got, want)
-	}
-}
-
-func TestRedefineNonEmptyAfterExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `foo`)
-	c.mustExecute(c.root, nil, "foo")
-	c.mustNotParse(c.root, `bar`)
-}
-
-func TestRedefineEmptyAfterExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, ``)
-	c.mustExecute(c.root, nil, "")
-	c.mustNotParse(c.root, `foo`)
-	c.mustExecute(c.root, nil, "")
-}
-
-func TestRedefineAfterNonExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `{{if .}}<{{template "X"}}>{{end}}{{define "X"}}foo{{end}}`)
-	c.mustExecute(c.root, 0, "")
-	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
-	c.mustExecute(c.root, 1, "&lt;foo>")
-}
-
-func TestRedefineAfterNamedExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `<{{template "X" .}}>{{define "X"}}foo{{end}}`)
-	c.mustExecute(c.root, nil, "&lt;foo>")
-	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
-	c.mustExecute(c.root, nil, "&lt;foo>")
-}
-
-func TestRedefineNestedByNameAfterExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
-	c.mustExecute(c.lookup("X"), nil, "foo")
-	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
-	c.mustExecute(c.lookup("X"), nil, "foo")
-}
-
-func TestRedefineNestedByTemplateAfterExecution(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
-	c.mustExecute(c.lookup("X"), nil, "foo")
-	c.mustNotParse(c.lookup("X"), `bar`)
-	c.mustExecute(c.lookup("X"), nil, "foo")
-}
-
-func TestRedefineSafety(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `<html><a href="{{template "X"}}">{{define "X"}}{{end}}`)
-	c.mustExecute(c.root, nil, `<html><a href="">`)
-	// Note: Every version of Go prior to Go 1.8 accepted the redefinition of "X"
-	// on the next line, but luckily kept it from being used in the outer template.
-	// Now we reject it, which makes clearer that we're not going to use it.
-	c.mustNotParse(c.root, `{{define "X"}}" bar="baz{{end}}`)
-	c.mustExecute(c.root, nil, `<html><a href="">`)
-}
-
-func TestRedefineTopUse(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `{{template "X"}}{{.}}{{define "X"}}{{end}}`)
-	c.mustExecute(c.root, 42, `42`)
-	c.mustNotParse(c.root, `{{define "X"}}<script>{{end}}`)
-	c.mustExecute(c.root, 42, `42`)
-}
-
-func TestRedefineOtherParsers(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, ``)
-	c.mustExecute(c.root, nil, ``)
-	if _, err := c.root.ParseFiles("no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
-		t.Errorf("ParseFiles: %v\nwanted error about already having Executed", err)
-	}
-	if _, err := c.root.ParseGlob("*.no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
-		t.Errorf("ParseGlob: %v\nwanted error about already having Executed", err)
-	}
-	if _, err := c.root.AddParseTree("t1", c.root.Tree); err == nil || !strings.Contains(err.Error(), "Execute") {
-		t.Errorf("AddParseTree: %v\nwanted error about already having Executed", err)
-	}
-}
-
-func TestNumbers(t *testing.T) {
-	c := newTestCase(t)
-	c.mustParse(c.root, `{{print 1_2.3_4}} {{print 0x0_1.e_0p+02}}`)
-	c.mustExecute(c.root, nil, "12.34 7.5")
-}
-
-func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
-	// See #33671 and #37634 for more context on this.
-	tests := []struct{ name, in string }{
-		{"empty", ""},
-		{"invalid", string(rune(-1))},
-		{"null", "\u0000"},
-		{"unit separator", "\u001F"},
-		{"tab", "\t"},
-		{"gt and lt", "<>"},
-		{"quotes", `'"`},
-		{"ASCII letters", "ASCII letters"},
-		{"Unicode", "ʕ⊙ϖ⊙ʔ"},
-		{"Pizza", "🍕"},
-	}
-	const (
-		prefix = `<script type="application/ld+json">`
-		suffix = `</script>`
-		templ  = prefix + `"{{.}}"` + suffix
-	)
-	tpl := Must(New("JS string is JSON string").Parse(templ))
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			var buf bytes.Buffer
-			if err := tpl.Execute(&buf, tt.in); err != nil {
-				t.Fatalf("Cannot render template: %v", err)
-			}
-			trimmed := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
-			var got string
-			if err := json.Unmarshal(trimmed, &got); err != nil {
-				t.Fatalf("Cannot parse JS string %q as JSON: %v", trimmed[1:len(trimmed)-1], err)
-			}
-			if got != tt.in {
-				t.Errorf("Serialization changed the string value: got %q want %q", got, tt.in)
-			}
-		})
-	}
-}
-
-func TestSkipEscapeComments(t *testing.T) {
-	c := newTestCase(t)
-	tr := parse.New("root")
-	tr.Mode = parse.ParseComments
-	newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree))
-	if err != nil {
-		t.Fatalf("Cannot parse template text: %v", err)
-	}
-	c.root, err = c.root.AddParseTree("root", newT)
-	if err != nil {
-		t.Fatalf("Cannot add parse tree to template: %v", err)
-	}
-	c.mustExecute(c.root, nil, "1")
-}
-
-type testCase struct {
-	t    *testing.T
-	root *Template
-}
-
-func newTestCase(t *testing.T) *testCase {
-	return &testCase{
-		t:    t,
-		root: New("root"),
-	}
-}
-
-func (c *testCase) lookup(name string) *Template {
-	return c.root.Lookup(name)
-}
-
-func (c *testCase) mustParse(t *Template, text string) {
-	_, err := t.Parse(text)
-	if err != nil {
-		c.t.Fatalf("parse: %v", err)
-	}
-}
-
-func (c *testCase) mustNotParse(t *Template, text string) {
-	_, err := t.Parse(text)
-	if err == nil {
-		c.t.Fatalf("parse: unexpected success")
-	}
-}
-
-func (c *testCase) mustExecute(t *Template, val interface{}, want string) {
-	var buf bytes.Buffer
-	err := t.Execute(&buf, val)
-	if err != nil {
-		c.t.Fatalf("execute: %v", err)
-	}
-	if buf.String() != want {
-		c.t.Fatalf("template output:\n%s\nwant:\n%s", buf.String(), want)
-	}
-}
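
The deleted tests pin down the package's "no redefinition after execution" rule, which the standard library enforces the same way. A standalone sketch of that invariant, using only public API:

package main

import (
	"fmt"
	"html/template"
	"io"
)

func main() {
	t := template.Must(template.New("root").Parse(`hello`))

	// The first execution succeeds and freezes the template namespace.
	_ = t.Execute(io.Discard, nil)

	// Any later Parse (that is, a redefinition) is rejected, mirroring what
	// the TestRedefine* cases above assert for the backported copy.
	if _, err := t.Parse(`goodbye`); err != nil {
		fmt.Println("redefinition rejected:", err)
	}
}
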
diff --git a/internal/backport/html/template/testdata/file1.tmpl b/internal/backport/html/template/testdata/file1.tmpl
deleted file mode 100644
index febf9d9..0000000
--- a/internal/backport/html/template/testdata/file1.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{{define "x"}}TEXT{{end}}
-{{define "dotV"}}{{.V}}{{end}}
diff --git a/internal/backport/html/template/testdata/file2.tmpl b/internal/backport/html/template/testdata/file2.tmpl
deleted file mode 100644
index 39bf6fb..0000000
--- a/internal/backport/html/template/testdata/file2.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{{define "dot"}}{{.}}{{end}}
-{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/internal/backport/html/template/testdata/fs.zip b/internal/backport/html/template/testdata/fs.zip
deleted file mode 100644
index 8581313..0000000
--- a/internal/backport/html/template/testdata/fs.zip
+++ /dev/null
Binary files differ
diff --git a/internal/backport/html/template/testdata/tmpl1.tmpl b/internal/backport/html/template/testdata/tmpl1.tmpl
deleted file mode 100644
index b72b3a3..0000000
--- a/internal/backport/html/template/testdata/tmpl1.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-template1
-{{define "x"}}x{{end}}
-{{template "y"}}
diff --git a/internal/backport/html/template/testdata/tmpl2.tmpl b/internal/backport/html/template/testdata/tmpl2.tmpl
deleted file mode 100644
index 16beba6..0000000
--- a/internal/backport/html/template/testdata/tmpl2.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-template2
-{{define "y"}}y{{end}}
-{{template "x"}}
diff --git a/internal/backport/html/template/transition.go b/internal/backport/html/template/transition.go
deleted file mode 100644
index 06df679..0000000
--- a/internal/backport/html/template/transition.go
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"strings"
-)
-
-// transitionFunc is the array of context transition functions for text nodes.
-// A transition function takes a context and template text input, and returns
-// the updated context and the number of bytes consumed from the front of the
-// input.
-var transitionFunc = [...]func(context, []byte) (context, int){
-	stateText:        tText,
-	stateTag:         tTag,
-	stateAttrName:    tAttrName,
-	stateAfterName:   tAfterName,
-	stateBeforeValue: tBeforeValue,
-	stateHTMLCmt:     tHTMLCmt,
-	stateRCDATA:      tSpecialTagEnd,
-	stateAttr:        tAttr,
-	stateURL:         tURL,
-	stateSrcset:      tURL,
-	stateJS:          tJS,
-	stateJSDqStr:     tJSDelimited,
-	stateJSSqStr:     tJSDelimited,
-	stateJSRegexp:    tJSDelimited,
-	stateJSBlockCmt:  tBlockCmt,
-	stateJSLineCmt:   tLineCmt,
-	stateCSS:         tCSS,
-	stateCSSDqStr:    tCSSStr,
-	stateCSSSqStr:    tCSSStr,
-	stateCSSDqURL:    tCSSStr,
-	stateCSSSqURL:    tCSSStr,
-	stateCSSURL:      tCSSStr,
-	stateCSSBlockCmt: tBlockCmt,
-	stateCSSLineCmt:  tLineCmt,
-	stateError:       tError,
-}
-
-var commentStart = []byte("<!--")
-var commentEnd = []byte("-->")
-
-// tText is the context transition function for the text state.
-func tText(c context, s []byte) (context, int) {
-	k := 0
-	for {
-		i := k + bytes.IndexByte(s[k:], '<')
-		if i < k || i+1 == len(s) {
-			return c, len(s)
-		} else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) {
-			return context{state: stateHTMLCmt}, i + 4
-		}
-		i++
-		end := false
-		if s[i] == '/' {
-			if i+1 == len(s) {
-				return c, len(s)
-			}
-			end, i = true, i+1
-		}
-		j, e := eatTagName(s, i)
-		if j != i {
-			if end {
-				e = elementNone
-			}
-			// We've found an HTML tag.
-			return context{state: stateTag, element: e}, j
-		}
-		k = j
-	}
-}
-
-var elementContentType = [...]state{
-	elementNone:     stateText,
-	elementScript:   stateJS,
-	elementStyle:    stateCSS,
-	elementTextarea: stateRCDATA,
-	elementTitle:    stateRCDATA,
-}
-
-// tTag is the context transition function for the tag state.
-func tTag(c context, s []byte) (context, int) {
-	// Find the attribute name.
-	i := eatWhiteSpace(s, 0)
-	if i == len(s) {
-		return c, len(s)
-	}
-	if s[i] == '>' {
-		return context{
-			state:   elementContentType[c.element],
-			element: c.element,
-		}, i + 1
-	}
-	j, err := eatAttrName(s, i)
-	if err != nil {
-		return context{state: stateError, err: err}, len(s)
-	}
-	state, attr := stateTag, attrNone
-	if i == j {
-		return context{
-			state: stateError,
-			err:   errorf(ErrBadHTML, nil, 0, "expected space, attr name, or end of tag, but got %q", s[i:]),
-		}, len(s)
-	}
-
-	attrName := strings.ToLower(string(s[i:j]))
-	if c.element == elementScript && attrName == "type" {
-		attr = attrScriptType
-	} else {
-		switch attrType(attrName) {
-		case contentTypeURL:
-			attr = attrURL
-		case contentTypeCSS:
-			attr = attrStyle
-		case contentTypeJS:
-			attr = attrScript
-		case contentTypeSrcset:
-			attr = attrSrcset
-		}
-	}
-
-	if j == len(s) {
-		state = stateAttrName
-	} else {
-		state = stateAfterName
-	}
-	return context{state: state, element: c.element, attr: attr}, j
-}
-
-// tAttrName is the context transition function for stateAttrName.
-func tAttrName(c context, s []byte) (context, int) {
-	i, err := eatAttrName(s, 0)
-	if err != nil {
-		return context{state: stateError, err: err}, len(s)
-	} else if i != len(s) {
-		c.state = stateAfterName
-	}
-	return c, i
-}
-
-// tAfterName is the context transition function for stateAfterName.
-func tAfterName(c context, s []byte) (context, int) {
-	// Look for the start of the value.
-	i := eatWhiteSpace(s, 0)
-	if i == len(s) {
-		return c, len(s)
-	} else if s[i] != '=' {
-		// Occurs due to tag ending '>', and valueless attribute.
-		c.state = stateTag
-		return c, i
-	}
-	c.state = stateBeforeValue
-	// Consume the "=".
-	return c, i + 1
-}
-
-var attrStartStates = [...]state{
-	attrNone:       stateAttr,
-	attrScript:     stateJS,
-	attrScriptType: stateAttr,
-	attrStyle:      stateCSS,
-	attrURL:        stateURL,
-	attrSrcset:     stateSrcset,
-}
-
-// tBeforeValue is the context transition function for stateBeforeValue.
-func tBeforeValue(c context, s []byte) (context, int) {
-	i := eatWhiteSpace(s, 0)
-	if i == len(s) {
-		return c, len(s)
-	}
-	// Find the attribute delimiter.
-	delim := delimSpaceOrTagEnd
-	switch s[i] {
-	case '\'':
-		delim, i = delimSingleQuote, i+1
-	case '"':
-		delim, i = delimDoubleQuote, i+1
-	}
-	c.state, c.delim = attrStartStates[c.attr], delim
-	return c, i
-}
-
-// tHTMLCmt is the context transition function for stateHTMLCmt.
-func tHTMLCmt(c context, s []byte) (context, int) {
-	if i := bytes.Index(s, commentEnd); i != -1 {
-		return context{}, i + 3
-	}
-	return c, len(s)
-}
-
-// specialTagEndMarkers maps element types to the character sequence that
-// case-insensitively signals the end of the special tag body.
-var specialTagEndMarkers = [...][]byte{
-	elementScript:   []byte("script"),
-	elementStyle:    []byte("style"),
-	elementTextarea: []byte("textarea"),
-	elementTitle:    []byte("title"),
-}
-
-var (
-	specialTagEndPrefix = []byte("</")
-	tagEndSeparators    = []byte("> \t\n\f/")
-)
-
-// tSpecialTagEnd is the context transition function for raw text and RCDATA
-// element states.
-func tSpecialTagEnd(c context, s []byte) (context, int) {
-	if c.element != elementNone {
-		if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
-			return context{}, i
-		}
-	}
-	return c, len(s)
-}
-
-// indexTagEnd finds the index of a special tag end in a case insensitive way, or returns -1
-func indexTagEnd(s []byte, tag []byte) int {
-	res := 0
-	plen := len(specialTagEndPrefix)
-	for len(s) > 0 {
-		// Try to find the tag end prefix first
-		i := bytes.Index(s, specialTagEndPrefix)
-		if i == -1 {
-			return i
-		}
-		s = s[i+plen:]
-		// Try to match the actual tag if there is still space for it
-		if len(tag) <= len(s) && bytes.EqualFold(tag, s[:len(tag)]) {
-			s = s[len(tag):]
-			// Check the tag is followed by a proper separator
-			if len(s) > 0 && bytes.IndexByte(tagEndSeparators, s[0]) != -1 {
-				return res + i
-			}
-			res += len(tag)
-		}
-		res += i + plen
-	}
-	return -1
-}
-
-// tAttr is the context transition function for the attribute state.
-func tAttr(c context, s []byte) (context, int) {
-	return c, len(s)
-}
-
-// tURL is the context transition function for the URL state.
-func tURL(c context, s []byte) (context, int) {
-	if bytes.ContainsAny(s, "#?") {
-		c.urlPart = urlPartQueryOrFrag
-	} else if len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone {
-		// HTML5 uses "Valid URL potentially surrounded by spaces" for
-		// attrs: https://www.w3.org/TR/html5/index.html#attributes-1
-		c.urlPart = urlPartPreQuery
-	}
-	return c, len(s)
-}
-
-// tJS is the context transition function for the JS state.
-func tJS(c context, s []byte) (context, int) {
-	i := bytes.IndexAny(s, `"'/`)
-	if i == -1 {
-		// Entire input is non string, comment, regexp tokens.
-		c.jsCtx = nextJSCtx(s, c.jsCtx)
-		return c, len(s)
-	}
-	c.jsCtx = nextJSCtx(s[:i], c.jsCtx)
-	switch s[i] {
-	case '"':
-		c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
-	case '\'':
-		c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
-	case '/':
-		switch {
-		case i+1 < len(s) && s[i+1] == '/':
-			c.state, i = stateJSLineCmt, i+1
-		case i+1 < len(s) && s[i+1] == '*':
-			c.state, i = stateJSBlockCmt, i+1
-		case c.jsCtx == jsCtxRegexp:
-			c.state = stateJSRegexp
-		case c.jsCtx == jsCtxDivOp:
-			c.jsCtx = jsCtxRegexp
-		default:
-			return context{
-				state: stateError,
-				err:   errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
-			}, len(s)
-		}
-	default:
-		panic("unreachable")
-	}
-	return c, i + 1
-}
-
-// tJSDelimited is the context transition function for the JS string and regexp
-// states.
-func tJSDelimited(c context, s []byte) (context, int) {
-	specials := `\"`
-	switch c.state {
-	case stateJSSqStr:
-		specials = `\'`
-	case stateJSRegexp:
-		specials = `\/[]`
-	}
-
-	k, inCharset := 0, false
-	for {
-		i := k + bytes.IndexAny(s[k:], specials)
-		if i < k {
-			break
-		}
-		switch s[i] {
-		case '\\':
-			i++
-			if i == len(s) {
-				return context{
-					state: stateError,
-					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s),
-				}, len(s)
-			}
-		case '[':
-			inCharset = true
-		case ']':
-			inCharset = false
-		default:
-			// end delimiter
-			if !inCharset {
-				c.state, c.jsCtx = stateJS, jsCtxDivOp
-				return c, i + 1
-			}
-		}
-		k = i + 1
-	}
-
-	if inCharset {
-		// This can be fixed by making context richer if interpolation
-		// into charsets is desired.
-		return context{
-			state: stateError,
-			err:   errorf(ErrPartialCharset, nil, 0, "unfinished JS regexp charset: %q", s),
-		}, len(s)
-	}
-
-	return c, len(s)
-}
-
-var blockCommentEnd = []byte("*/")
-
-// tBlockCmt is the context transition function for /*comment*/ states.
-func tBlockCmt(c context, s []byte) (context, int) {
-	i := bytes.Index(s, blockCommentEnd)
-	if i == -1 {
-		return c, len(s)
-	}
-	switch c.state {
-	case stateJSBlockCmt:
-		c.state = stateJS
-	case stateCSSBlockCmt:
-		c.state = stateCSS
-	default:
-		panic(c.state.String())
-	}
-	return c, i + 2
-}
-
-// tLineCmt is the context transition function for //comment states.
-func tLineCmt(c context, s []byte) (context, int) {
-	var lineTerminators string
-	var endState state
-	switch c.state {
-	case stateJSLineCmt:
-		lineTerminators, endState = "\n\r\u2028\u2029", stateJS
-	case stateCSSLineCmt:
-		lineTerminators, endState = "\n\f\r", stateCSS
-		// Line comments are not part of any published CSS standard but
-		// are supported by the 4 major browsers.
-		// This defines line comments as
-		//     LINECOMMENT ::= "//" [^\n\f\r]*
-		// since https://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines
-		// newlines:
-		//     nl ::= #xA | #xD #xA | #xD | #xC
-	default:
-		panic(c.state.String())
-	}
-
-	i := bytes.IndexAny(s, lineTerminators)
-	if i == -1 {
-		return c, len(s)
-	}
-	c.state = endState
-	// Per section 7.4 of EcmaScript 5 : https://es5.github.com/#x7.4
-	// "However, the LineTerminator at the end of the line is not
-	// considered to be part of the single-line comment; it is
-	// recognized separately by the lexical grammar and becomes part
-	// of the stream of input elements for the syntactic grammar."
-	return c, i
-}
-
-// tCSS is the context transition function for the CSS state.
-func tCSS(c context, s []byte) (context, int) {
-	// CSS quoted strings are almost never used except for:
-	// (1) URLs as in background: "/foo.png"
-	// (2) Multiword font-names as in font-family: "Times New Roman"
-	// (3) List separators in content values as in inline-lists:
-	//    <style>
-	//    ul.inlineList { list-style: none; padding:0 }
-	//    ul.inlineList > li { display: inline }
-	//    ul.inlineList > li:before { content: ", " }
-	//    ul.inlineList > li:first-child:before { content: "" }
-	//    </style>
-	//    <ul class=inlineList><li>One<li>Two<li>Three</ul>
-	// (4) Attribute value selectors as in a[href="http://example.com/"]
-	//
-	// We conservatively treat all strings as URLs, but make some
-	// allowances to avoid confusion.
-	//
-	// In (1), our conservative assumption is justified.
-	// In (2), valid font names do not contain ':', '?', or '#', so our
-	// conservative assumption is fine since we will never transition past
-	// urlPartPreQuery.
-	// In (3), our protocol heuristic should not be tripped, and there
-	// should not be non-space content after a '?' or '#', so as long as
-	// we only %-encode RFC 3986 reserved characters we are ok.
-	// In (4), we should URL escape for URL attributes, and for others we
-	// have the attribute name available if our conservative assumption
-	// proves problematic for real code.
-
-	k := 0
-	for {
-		i := k + bytes.IndexAny(s[k:], `("'/`)
-		if i < k {
-			return c, len(s)
-		}
-		switch s[i] {
-		case '(':
-			// Look for url to the left.
-			p := bytes.TrimRight(s[:i], "\t\n\f\r ")
-			if endsWithCSSKeyword(p, "url") {
-				j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r "))
-				switch {
-				case j != len(s) && s[j] == '"':
-					c.state, j = stateCSSDqURL, j+1
-				case j != len(s) && s[j] == '\'':
-					c.state, j = stateCSSSqURL, j+1
-				default:
-					c.state = stateCSSURL
-				}
-				return c, j
-			}
-		case '/':
-			if i+1 < len(s) {
-				switch s[i+1] {
-				case '/':
-					c.state = stateCSSLineCmt
-					return c, i + 2
-				case '*':
-					c.state = stateCSSBlockCmt
-					return c, i + 2
-				}
-			}
-		case '"':
-			c.state = stateCSSDqStr
-			return c, i + 1
-		case '\'':
-			c.state = stateCSSSqStr
-			return c, i + 1
-		}
-		k = i + 1
-	}
-}
-
-// tCSSStr is the context transition function for the CSS string and URL states.
-func tCSSStr(c context, s []byte) (context, int) {
-	var endAndEsc string
-	switch c.state {
-	case stateCSSDqStr, stateCSSDqURL:
-		endAndEsc = `\"`
-	case stateCSSSqStr, stateCSSSqURL:
-		endAndEsc = `\'`
-	case stateCSSURL:
-		// Unquoted URLs end with a newline or close parenthesis.
-		// The below includes the wc (whitespace character) and nl.
-		endAndEsc = "\\\t\n\f\r )"
-	default:
-		panic(c.state.String())
-	}
-
-	k := 0
-	for {
-		i := k + bytes.IndexAny(s[k:], endAndEsc)
-		if i < k {
-			c, nread := tURL(c, decodeCSS(s[k:]))
-			return c, k + nread
-		}
-		if s[i] == '\\' {
-			i++
-			if i == len(s) {
-				return context{
-					state: stateError,
-					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in CSS string: %q", s),
-				}, len(s)
-			}
-		} else {
-			c.state = stateCSS
-			return c, i + 1
-		}
-		c, _ = tURL(c, decodeCSS(s[:i+1]))
-		k = i + 1
-	}
-}
-
-// tError is the context transition function for the error state.
-func tError(c context, s []byte) (context, int) {
-	return c, len(s)
-}
-
-// eatAttrName returns the largest j such that s[i:j] is an attribute name.
-// It returns an error if s[i:] does not look like it begins with an
-// attribute name, such as encountering a quote mark without a preceding
-// equals sign.
-func eatAttrName(s []byte, i int) (int, *Error) {
-	for j := i; j < len(s); j++ {
-		switch s[j] {
-		case ' ', '\t', '\n', '\f', '\r', '=', '>':
-			return j, nil
-		case '\'', '"', '<':
-			// These result in a parse warning in HTML5 and are
-			// indicative of serious problems if seen in an attr
-			// name in a template.
-			return -1, errorf(ErrBadHTML, nil, 0, "%q in attribute name: %.32q", s[j:j+1], s)
-		default:
-			// No-op.
-		}
-	}
-	return len(s), nil
-}
-
-var elementNameMap = map[string]element{
-	"script":   elementScript,
-	"style":    elementStyle,
-	"textarea": elementTextarea,
-	"title":    elementTitle,
-}
-
-// asciiAlpha reports whether c is an ASCII letter.
-func asciiAlpha(c byte) bool {
-	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
-}
-
-// asciiAlphaNum reports whether c is an ASCII letter or digit.
-func asciiAlphaNum(c byte) bool {
-	return asciiAlpha(c) || '0' <= c && c <= '9'
-}
-
-// eatTagName returns the largest j such that s[i:j] is a tag name and the tag type.
-func eatTagName(s []byte, i int) (int, element) {
-	if i == len(s) || !asciiAlpha(s[i]) {
-		return i, elementNone
-	}
-	j := i + 1
-	for j < len(s) {
-		x := s[j]
-		if asciiAlphaNum(x) {
-			j++
-			continue
-		}
-		// Allow "x-y" or "x:y" but not "x-", "-y", or "x--y".
-		if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) {
-			j += 2
-			continue
-		}
-		break
-	}
-	return j, elementNameMap[strings.ToLower(string(s[i:j]))]
-}
-
-// eatWhiteSpace returns the largest j such that s[i:j] is white space.
-func eatWhiteSpace(s []byte, i int) int {
-	for j := i; j < len(s); j++ {
-		switch s[j] {
-		case ' ', '\t', '\n', '\f', '\r':
-			// No-op.
-		default:
-			return j
-		}
-	}
-	return len(s)
-}
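
transition.go was the state machine behind contextual autoescaping; its effect is visible through the public API, where the same value is escaped differently depending on the context it lands in. A small sketch against the standard library:

package main

import (
	"html/template"
	"os"
)

func main() {
	// The transition table decides, position by position, which escaper
	// applies: HTML text, a URL inside an attribute, a JavaScript string, ...
	const src = `<p>{{.}}</p>
<a href="/q?s={{.}}">search</a>
<script>var s = {{.}};</script>
`
	t := template.Must(template.New("ctx").Parse(src))

	// One input value, three different context-sensitive escapings.
	_ = t.Execute(os.Stdout, `a "quote" & <tag>`)
}
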
diff --git a/internal/backport/html/template/transition_test.go b/internal/backport/html/template/transition_test.go
deleted file mode 100644
index 412a4c7..0000000
--- a/internal/backport/html/template/transition_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-)
-
-func TestFindEndTag(t *testing.T) {
-	tests := []struct {
-		s, tag string
-		want   int
-	}{
-		{"", "tag", -1},
-		{"hello </textarea> hello", "textarea", 6},
-		{"hello </TEXTarea> hello", "textarea", 6},
-		{"hello </textAREA>", "textarea", 6},
-		{"hello </textarea", "textareax", -1},
-		{"hello </textarea>", "tag", -1},
-		{"hello tag </textarea", "tag", -1},
-		{"hello </tag> </other> </textarea> <other>", "textarea", 22},
-		{"</textarea> <other>", "textarea", 0},
-		{"<div> </div> </TEXTAREA>", "textarea", 13},
-		{"<div> </div> </TEXTAREA\t>", "textarea", 13},
-		{"<div> </div> </TEXTAREA >", "textarea", 13},
-		{"<div> </div> </TEXTAREAfoo", "textarea", -1},
-		{"</TEXTAREAfoo </textarea>", "textarea", 14},
-		{"<</script >", "script", 1},
-		{"</script>", "textarea", -1},
-	}
-	for _, test := range tests {
-		if got := indexTagEnd([]byte(test.s), []byte(test.tag)); test.want != got {
-			t.Errorf("%q/%q: want\n\t%d\nbut got\n\t%d", test.s, test.tag, test.want, got)
-		}
-	}
-}
-
-func BenchmarkTemplateSpecialTags(b *testing.B) {
-
-	r := struct {
-		Name, Gift string
-	}{"Aunt Mildred", "bone china tea set"}
-
-	h1 := "<textarea> Hello Hello Hello </textarea> "
-	h2 := "<textarea> <p> Dear {{.Name}},\n{{with .Gift}}Thank you for the lovely {{.}}. {{end}}\nBest wishes. </p>\n</textarea>"
-	html := strings.Repeat(h1, 100) + h2 + strings.Repeat(h1, 100) + h2
-
-	var buf bytes.Buffer
-	for i := 0; i < b.N; i++ {
-		tmpl := Must(New("foo").Parse(html))
-		if err := tmpl.Execute(&buf, r); err != nil {
-			b.Fatal(err)
-		}
-		buf.Reset()
-	}
-}
diff --git a/internal/backport/html/template/url.go b/internal/backport/html/template/url.go
deleted file mode 100644
index a0c0012..0000000
--- a/internal/backport/html/template/url.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-// urlFilter returns its input unless it contains an unsafe scheme in which
-// case it defangs the entire URL.
-//
-// Schemes that cause unintended side effects that are irreversible without user
-// interaction are considered unsafe. For example, clicking on a "javascript:"
-// link can immediately trigger JavaScript code execution.
-//
-// This filter conservatively assumes that all schemes other than the following
-// are unsafe:
-//   - http:   Navigates to a new website, and may open a new window or tab.
-//     These side effects can be reversed by navigating back to the
-//     previous website, or closing the window or tab. No irreversible
-//     changes will take place without further user interaction with
-//     the new website.
-//   - https:  Same as http.
-//   - mailto: Opens an email program and starts a new draft. This side effect
-//     is not irreversible until the user explicitly clicks send; it
-//     can be undone by closing the email program.
-//
-// To allow URLs containing other schemes to bypass this filter, developers must
-// explicitly indicate that such a URL is expected and safe by encapsulating it
-// in a template.URL value.
-func urlFilter(args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeURL {
-		return s
-	}
-	if !isSafeURL(s) {
-		return "#" + filterFailsafe
-	}
-	return s
-}
-
-// isSafeURL is true if s is a relative URL or if URL has a protocol in
-// (http, https, mailto).
-func isSafeURL(s string) bool {
-	if i := strings.IndexRune(s, ':'); i >= 0 && !strings.ContainsRune(s[:i], '/') {
-
-		protocol := s[:i]
-		if !strings.EqualFold(protocol, "http") && !strings.EqualFold(protocol, "https") && !strings.EqualFold(protocol, "mailto") {
-			return false
-		}
-	}
-	return true
-}
-
-// urlEscaper produces an output that can be embedded in a URL query.
-// The output can be embedded in an HTML attribute without further escaping.
-func urlEscaper(args ...interface{}) string {
-	return urlProcessor(false, args...)
-}
-
-// urlNormalizer normalizes URL content so it can be embedded in a quote-delimited
-// string or parenthesis delimited url(...).
-// The normalizer does not encode all HTML specials. Specifically, it does not
-// encode '&' so correct embedding in an HTML attribute requires escaping of
-// '&' to '&amp;'.
-func urlNormalizer(args ...interface{}) string {
-	return urlProcessor(true, args...)
-}
-
-// urlProcessor normalizes (when norm is true) or escapes its input to produce
-// a valid hierarchical or opaque URL part.
-func urlProcessor(norm bool, args ...interface{}) string {
-	s, t := stringify(args...)
-	if t == contentTypeURL {
-		norm = true
-	}
-	var b bytes.Buffer
-	if processURLOnto(s, norm, &b) {
-		return b.String()
-	}
-	return s
-}
-
-// processURLOnto appends a normalized URL corresponding to its input to b
-// and reports whether the appended content differs from s.
-func processURLOnto(s string, norm bool, b *bytes.Buffer) bool {
-	b.Grow(len(s) + 16)
-	written := 0
-	// The byte loop below assumes that all URLs use UTF-8 as the
-	// content-encoding. This is similar to the URI to IRI encoding scheme
-	// defined in section 3.1 of  RFC 3987, and behaves the same as the
-	// EcmaScript builtin encodeURIComponent.
-	// It should not cause any misencoding of URLs in pages with
-	// Content-type: text/html;charset=UTF-8.
-	for i, n := 0, len(s); i < n; i++ {
-		c := s[i]
-		switch c {
-		// Single quote and parens are sub-delims in RFC 3986, but we
-		// escape them so the output can be embedded in single
-		// quoted attributes and unquoted CSS url(...) constructs.
-		// Single quotes are reserved in URLs, but are only used in
-		// the obsolete "mark" rule in an appendix in RFC 3986
-		// so can be safely encoded.
-		case '!', '#', '$', '&', '*', '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
-			if norm {
-				continue
-			}
-		// Unreserved according to RFC 3986 sec 2.3
-		// "For consistency, percent-encoded octets in the ranges of
-		// ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D),
-		// period (%2E), underscore (%5F), or tilde (%7E) should not be
-		// created by URI producers
-		case '-', '.', '_', '~':
-			continue
-		case '%':
-			// When normalizing do not re-encode valid escapes.
-			if norm && i+2 < len(s) && isHex(s[i+1]) && isHex(s[i+2]) {
-				continue
-			}
-		default:
-			// Unreserved according to RFC 3986 sec 2.3
-			if 'a' <= c && c <= 'z' {
-				continue
-			}
-			if 'A' <= c && c <= 'Z' {
-				continue
-			}
-			if '0' <= c && c <= '9' {
-				continue
-			}
-		}
-		b.WriteString(s[written:i])
-		fmt.Fprintf(b, "%%%02x", c)
-		written = i + 1
-	}
-	b.WriteString(s[written:])
-	return written != 0
-}
-
-// Filters and normalizes srcset values which are comma separated
-// URLs followed by metadata.
-func srcsetFilterAndEscaper(args ...interface{}) string {
-	s, t := stringify(args...)
-	switch t {
-	case contentTypeSrcset:
-		return s
-	case contentTypeURL:
-		// Normalizing gets rid of all HTML whitespace
-		// which separate the image URL from its metadata.
-		var b bytes.Buffer
-		if processURLOnto(s, true, &b) {
-			s = b.String()
-		}
-		// Additionally, commas separate one source from another.
-		return strings.ReplaceAll(s, ",", "%2c")
-	}
-
-	var b bytes.Buffer
-	written := 0
-	for i := 0; i < len(s); i++ {
-		if s[i] == ',' {
-			filterSrcsetElement(s, written, i, &b)
-			b.WriteString(",")
-			written = i + 1
-		}
-	}
-	filterSrcsetElement(s, written, len(s), &b)
-	return b.String()
-}
-
-// Derived from https://play.golang.org/p/Dhmj7FORT5
-const htmlSpaceAndASCIIAlnumBytes = "\x00\x36\x00\x00\x01\x00\xff\x03\xfe\xff\xff\x07\xfe\xff\xff\x07"
-
-// isHTMLSpace is true iff c is a whitespace character per
-// https://infra.spec.whatwg.org/#ascii-whitespace
-func isHTMLSpace(c byte) bool {
-	return (c <= 0x20) && 0 != (htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)))
-}
-
-func isHTMLSpaceOrASCIIAlnum(c byte) bool {
-	return (c < 0x80) && 0 != (htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)))
-}
-
-func filterSrcsetElement(s string, left int, right int, b *bytes.Buffer) {
-	start := left
-	for start < right && isHTMLSpace(s[start]) {
-		start++
-	}
-	end := right
-	for i := start; i < right; i++ {
-		if isHTMLSpace(s[i]) {
-			end = i
-			break
-		}
-	}
-	if url := s[start:end]; isSafeURL(url) {
-		// If image metadata is only spaces or alnums then
-		// we don't need to URL normalize it.
-		metadataOk := true
-		for i := end; i < right; i++ {
-			if !isHTMLSpaceOrASCIIAlnum(s[i]) {
-				metadataOk = false
-				break
-			}
-		}
-		if metadataOk {
-			b.WriteString(s[left:start])
-			processURLOnto(url, true, b)
-			b.WriteString(s[end:right])
-			return
-		}
-	}
-	b.WriteString("#")
-	b.WriteString(filterFailsafe)
-}
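
urlFilter is where the familiar "#ZgotmplZ" failsafe comes from, and the standard library's copy behaves identically. A sketch of the observable behaviour through public API only:

package main

import (
	"html/template"
	"os"
)

func main() {
	t := template.Must(template.New("u").Parse("<a href=\"{{.}}\">x</a>\n"))

	// A scheme outside http, https, and mailto is defanged to "#ZgotmplZ".
	_ = t.Execute(os.Stdout, "javascript:alert(1)")

	// Wrapping the value in template.URL bypasses the filter; the URL is
	// still normalized before being written out.
	_ = t.Execute(os.Stdout, template.URL("javascript:alert(1)"))
}
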
diff --git a/internal/backport/html/template/url_test.go b/internal/backport/html/template/url_test.go
deleted file mode 100644
index 75c354e..0000000
--- a/internal/backport/html/template/url_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"testing"
-)
-
-func TestURLNormalizer(t *testing.T) {
-	tests := []struct {
-		url, want string
-	}{
-		{"", ""},
-		{
-			"http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
-			"http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
-		},
-		{" ", "%20"},
-		{"%7c", "%7c"},
-		{"%7C", "%7C"},
-		{"%2", "%252"},
-		{"%", "%25"},
-		{"%z", "%25z"},
-		{"/foo|bar/%5c\u1234", "/foo%7cbar/%5c%e1%88%b4"},
-	}
-	for _, test := range tests {
-		if got := urlNormalizer(test.url); test.want != got {
-			t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.url, test.want, got)
-		}
-		if test.want != urlNormalizer(test.want) {
-			t.Errorf("not idempotent: %q", test.want)
-		}
-	}
-}
-
-func TestURLFilters(t *testing.T) {
-	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
-		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
-		` !"#$%&'()*+,-./` +
-		`0123456789:;<=>?` +
-		`@ABCDEFGHIJKLMNO` +
-		`PQRSTUVWXYZ[\]^_` +
-		"`abcdefghijklmno" +
-		"pqrstuvwxyz{|}~\x7f" +
-		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
-
-	tests := []struct {
-		name    string
-		escaper func(...interface{}) string
-		escaped string
-	}{
-		{
-			"urlEscaper",
-			urlEscaper,
-			"%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
-				"%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
-				"%20%21%22%23%24%25%26%27%28%29%2a%2b%2c-.%2f" +
-				"0123456789%3a%3b%3c%3d%3e%3f" +
-				"%40ABCDEFGHIJKLMNO" +
-				"PQRSTUVWXYZ%5b%5c%5d%5e_" +
-				"%60abcdefghijklmno" +
-				"pqrstuvwxyz%7b%7c%7d~%7f" +
-				"%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
-		},
-		{
-			"urlNormalizer",
-			urlNormalizer,
-			"%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
-				"%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
-				"%20!%22#$%25&%27%28%29*+,-./" +
-				"0123456789:;%3c=%3e?" +
-				"@ABCDEFGHIJKLMNO" +
-				"PQRSTUVWXYZ[%5c]%5e_" +
-				"%60abcdefghijklmno" +
-				"pqrstuvwxyz%7b%7c%7d~%7f" +
-				"%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
-		},
-	}
-
-	for _, test := range tests {
-		if s := test.escaper(input); s != test.escaped {
-			t.Errorf("%s: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
-			continue
-		}
-	}
-}
-
-func TestSrcsetFilter(t *testing.T) {
-	tests := []struct {
-		name  string
-		input string
-		want  string
-	}{
-		{
-			"one ok",
-			"http://example.com/img.png",
-			"http://example.com/img.png",
-		},
-		{
-			"one ok with metadata",
-			" /img.png 200w",
-			" /img.png 200w",
-		},
-		{
-			"one bad",
-			"javascript:alert(1) 200w",
-			"#ZgotmplZ",
-		},
-		{
-			"two ok",
-			"foo.png, bar.png",
-			"foo.png, bar.png",
-		},
-		{
-			"left bad",
-			"javascript:alert(1), /foo.png",
-			"#ZgotmplZ, /foo.png",
-		},
-		{
-			"right bad",
-			"/bogus#, javascript:alert(1)",
-			"/bogus#,#ZgotmplZ",
-		},
-	}
-
-	for _, test := range tests {
-		if got := srcsetFilterAndEscaper(test.input); got != test.want {
-			t.Errorf("%s: srcsetFilterAndEscaper(%q) want %q != %q", test.name, test.input, test.want, got)
-		}
-	}
-}
-
-func BenchmarkURLEscaper(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		urlEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
-	}
-}
-
-func BenchmarkURLEscaperNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		urlEscaper("TheQuickBrownFoxJumpsOverTheLazyDog.")
-	}
-}
-
-func BenchmarkURLNormalizer(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		urlNormalizer("The quick brown fox jumps over the lazy dog.\n")
-	}
-}
-
-func BenchmarkURLNormalizerNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		urlNormalizer("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
-	}
-}
-
-func BenchmarkSrcsetFilter(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		srcsetFilterAndEscaper(" /foo/bar.png 200w, /baz/boo(1).png")
-	}
-}
-
-func BenchmarkSrcsetFilterNoSpecials(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		srcsetFilterAndEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
-	}
-}
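
The srcset cases in the deleted test exercise srcsetFilterAndEscaper, which filters each comma-separated candidate on its own; the standard library does the same. A brief sketch:

package main

import (
	"html/template"
	"os"
)

func main() {
	// The unsafe first candidate is replaced with #ZgotmplZ, while the safe
	// second candidate and its descriptor pass through unchanged.
	t := template.Must(template.New("s").Parse("<img srcset=\"{{.}}\">\n"))
	_ = t.Execute(os.Stdout, "javascript:alert(1) 2x, /ok.png 1x")
}
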
diff --git a/internal/backport/html/template/urlpart_string.go b/internal/backport/html/template/urlpart_string.go
deleted file mode 100644
index 813eea9..0000000
--- a/internal/backport/html/template/urlpart_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type urlPart"; DO NOT EDIT.
-
-package template
-
-import "strconv"
-
-const _urlPart_name = "urlPartNoneurlPartPreQueryurlPartQueryOrFragurlPartUnknown"
-
-var _urlPart_index = [...]uint8{0, 11, 26, 44, 58}
-
-func (i urlPart) String() string {
-	if i >= urlPart(len(_urlPart_index)-1) {
-		return "urlPart(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _urlPart_name[_urlPart_index[i]:_urlPart_index[i+1]]
-}
diff --git a/internal/backport/text/template/doc.go b/internal/backport/text/template/doc.go
deleted file mode 100644
index 5332002..0000000
--- a/internal/backport/text/template/doc.go
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Except for raw strings, actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel, although if parallel
-executions share a Writer the output may be interleaved.
-
-Here is a trivial example that prints "17 items are made of wool".
-
-	type Inventory struct {
-		Material string
-		Count    uint
-	}
-	sweaters := Inventory{"wool", 17}
-	tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
-	if err != nil { panic(err) }
-	err = tmpl.Execute(os.Stdout, sweaters)
-	if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Text and spaces
-
-By default, all text between actions is copied verbatim when the template is
-executed. For example, the string " items are made of " in the example above
-appears on standard output when the program is run.
-
-However, to aid in formatting template source code, if an action's left
-delimiter (by default "{{") is followed immediately by a minus sign and white
-space, all trailing white space is trimmed from the immediately preceding text.
-Similarly, if the right delimiter ("}}") is preceded by white space and a minus
-sign, all leading white space is trimmed from the immediately following text.
-In these trim markers, the white space must be present:
-"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
-"{{-3}}" parses as an action containing the number -3.
-
-For instance, when executing the template whose source is
-
-	"{{23 -}} < {{- 45}}"
-
-the generated output would be
-
-	"23<45"
-
-For this trimming, the definition of white space characters is the same as in Go:
-space, horizontal tab, carriage return, and newline.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail in the corresponding sections that follow.
-
-*/
-//	{{/* a comment */}}
-//	{{- /* a comment with white space trimmed from preceding and following text */ -}}
-//		A comment; discarded. May contain newlines.
-//		Comments do not nest and must start and end at the
-//		delimiters, as shown here.
-/*
-
-	{{pipeline}}
-		The default textual representation (the same as would be
-		printed by fmt.Print) of the value of the pipeline is copied
-		to the output.
-
-	{{if pipeline}} T1 {{end}}
-		If the value of the pipeline is empty, no output is generated;
-		otherwise, T1 is executed. The empty values are false, 0, any
-		nil pointer or interface value, and any array, slice, map, or
-		string of length zero.
-		Dot is unaffected.
-
-	{{if pipeline}} T1 {{else}} T0 {{end}}
-		If the value of the pipeline is empty, T0 is executed;
-		otherwise, T1 is executed. Dot is unaffected.
-
-	{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
-		To simplify the appearance of if-else chains, the else action
-		of an if may include another if directly; the effect is exactly
-		the same as writing
-			{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
-	{{range pipeline}} T1 {{end}}
-		The value of the pipeline must be an array, slice, map, or channel.
-		If the value of the pipeline has length zero, nothing is output;
-		otherwise, dot is set to the successive elements of the array,
-		slice, or map and T1 is executed. If the value is a map and the
-		keys are of basic type with a defined order, the elements will be
-		visited in sorted key order.
-
-	{{range pipeline}} T1 {{else}} T0 {{end}}
-		The value of the pipeline must be an array, slice, map, or channel.
-		If the value of the pipeline has length zero, dot is unaffected and
-		T0 is executed; otherwise, dot is set to the successive elements
-		of the array, slice, or map and T1 is executed.
-
-	{{break}}
-		The innermost {{range pipeline}} loop is ended early, stopping the
-		current iteration and bypassing all remaining iterations.
-
-	{{continue}}
-		The current iteration of the innermost {{range pipeline}} loop is
-		stopped, and the loop starts the next iteration.
-
-	{{template "name"}}
-		The template with the specified name is executed with nil data.
-
-	{{template "name" pipeline}}
-		The template with the specified name is executed with dot set
-		to the value of the pipeline.
-
-	{{block "name" pipeline}} T1 {{end}}
-		A block is shorthand for defining a template
-			{{define "name"}} T1 {{end}}
-		and then executing it in place
-			{{template "name" pipeline}}
-		The typical use is to define a set of root templates that are
-		then customized by redefining the block templates within.
-
-	{{with pipeline}} T1 {{end}}
-		If the value of the pipeline is empty, no output is generated;
-		otherwise, dot is set to the value of the pipeline and T1 is
-		executed.
-
-	{{with pipeline}} T1 {{else}} T0 {{end}}
-		If the value of the pipeline is empty, dot is unaffected and T0
-		is executed; otherwise, dot is set to the value of the pipeline
-		and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
-	- A boolean, string, character, integer, floating-point, imaginary
-	  or complex constant in Go syntax. These behave like Go's untyped
-	  constants. Note that, as in Go, whether a large integer constant
-	  overflows when assigned or passed to a function can depend on whether
-	  the host machine's ints are 32 or 64 bits.
-	- The keyword nil, representing an untyped Go nil.
-	- The character '.' (period):
-		.
-	  The result is the value of dot.
-	- A variable name, which is a (possibly empty) alphanumeric string
-	  preceded by a dollar sign, such as
-		$piOver2
-	  or
-		$
-	  The result is the value of the variable.
-	  Variables are described below.
-	- The name of a field of the data, which must be a struct, preceded
-	  by a period, such as
-		.Field
-	  The result is the value of the field. Field invocations may be
-	  chained:
-	    .Field1.Field2
-	  Fields can also be evaluated on variables, including chaining:
-	    $x.Field1.Field2
-	- The name of a key of the data, which must be a map, preceded
-	  by a period, such as
-		.Key
-	  The result is the map element value indexed by the key.
-	  Key invocations may be chained and combined with fields to any
-	  depth:
-	    .Field1.Key1.Field2.Key2
-	  Although the key must be an alphanumeric identifier, unlike with
-	  field names it does not need to start with an upper case letter.
-	  Keys can also be evaluated on variables, including chaining:
-	    $x.key1.key2
-	- The name of a niladic method of the data, preceded by a period,
-	  such as
-		.Method
-	  The result is the value of invoking the method with dot as the
-	  receiver, dot.Method(). Such a method must have one return value (of
-	  any type) or two return values, the second of which is an error.
-	  If it has two and the returned error is non-nil, execution terminates
-	  and an error is returned to the caller as the value of Execute.
-	  Method invocations may be chained and combined with fields and keys
-	  to any depth:
-	    .Field1.Key1.Method1.Field2.Key2.Method2
-	  Methods can also be evaluated on variables, including chaining:
-	    $x.Method1.Field
-	- The name of a niladic function, such as
-		fun
-	  The result is the value of invoking the function, fun(). The return
-	  types and values behave as in methods. Functions and function
-	  names are described below.
-	- A parenthesized instance of one of the above, for grouping. The result
-	  may be accessed by a field or map key invocation.
-		print (.F1 arg1) (.F2 arg2)
-		(.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-Pipelines
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
-	Argument
-		The result is the value of evaluating the argument.
-	.Method [Argument...]
-		The method can be alone or the last element of a chain but,
-		unlike methods in the middle of a chain, it can take arguments.
-		The result is the value of calling the method with the
-		arguments:
-			dot.Method(Argument1, etc.)
-	functionName [Argument...]
-		The result is the value of calling the function associated
-		with the name:
-			function(Argument1, etc.)
-		Functions and function names are described below.
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
-	$variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-Variables previously declared can also be assigned, using the syntax
-
-	$variable = pipeline
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
-	range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
-	{{"\"output\""}}
-		A string constant.
-	{{`"output"`}}
-		A raw string constant.
-	{{printf "%q" "output"}}
-		A function call.
-	{{"output" | printf "%q"}}
-		A function call whose final argument comes from the previous
-		command.
-	{{printf "%q" (print "out" "put")}}
-		A parenthesized argument.
-	{{"put" | printf "%s%s" "out" | printf "%q"}}
-		A more elaborate call.
-	{{"output" | printf "%s" | printf "%q"}}
-		A longer chain.
-	{{with "output"}}{{printf "%q" .}}{{end}}
-		A with action using dot.
-	{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
-		A with action that creates and uses a variable.
-	{{with $x := "output"}}{{printf "%q" $x}}{{end}}
-		A with action that uses the variable in another action.
-	{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
-		The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
-
-Predefined global functions are named as follows.
-
-	and
-		Returns the boolean AND of its arguments by returning the
-		first empty argument or the last argument, that is,
-		"and x y" behaves as "if x then y else x." Only those
-		arguments necessary to determine the answer are evaluated.
-	call
-		Returns the result of calling the first argument, which
-		must be a function, with the remaining arguments as parameters.
-		Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
-		Y is a func-valued field, map entry, or the like.
-		The first argument must be the result of an evaluation
-		that yields a value of function type (as distinct from
-		a predefined function such as print). The function must
-		return either one or two result values, the second of which
-		is of type error. If the arguments don't match the function
-		or the returned error value is non-nil, execution stops.
-	html
-		Returns the escaped HTML equivalent of the textual
-		representation of its arguments. This function is unavailable
-		in html/template, with a few exceptions.
-	index
-		Returns the result of indexing its first argument by the
-		following arguments. Thus "index x 1 2 3" is, in Go syntax,
-		x[1][2][3]. Each indexed item must be a map, slice, or array.
-	slice
-		slice returns the result of slicing its first argument by the
-		remaining arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2],
-		while "slice x" is x[:], "slice x 1" is x[1:], and "slice x 1 2 3"
-		is x[1:2:3]. The first argument must be a string, slice, or array.
-	js
-		Returns the escaped JavaScript equivalent of the textual
-		representation of its arguments.
-	len
-		Returns the integer length of its argument.
-	not
-		Returns the boolean negation of its single argument.
-	or
-		Returns the boolean OR of its arguments by returning the
-		first non-empty argument or the last argument, that is,
-		"or x y" behaves as "if x then x else y". Only those
-		arguments necessary to determine the answer are evaluated.
-	print
-		An alias for fmt.Sprint
-	printf
-		An alias for fmt.Sprintf
-	println
-		An alias for fmt.Sprintln
-	urlquery
-		Returns the escaped value of the textual representation of
-		its arguments in a form suitable for embedding in a URL query.
-		This function is unavailable in html/template, with a few
-		exceptions.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
-	eq
-		Returns the boolean truth of arg1 == arg2
-	ne
-		Returns the boolean truth of arg1 != arg2
-	lt
-		Returns the boolean truth of arg1 < arg2
-	le
-		Returns the boolean truth of arg1 <= arg2
-	gt
-		Returns the boolean truth of arg1 > arg2
-	ge
-		Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
-	arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on any values whose type Go defines as
-comparable. For basic types such as integers, the rules are relaxed:
-size and exact type are ignored, so any integer value, signed or unsigned,
-may be compared with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all unsigned integers.)
-However, as usual, one may not compare an int with a float32 and so on.
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
-	`{{define "T1"}}ONE{{end}}
-	{{define "T2"}}TWO{{end}}
-	{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
-	{{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
-	ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
-	err := tmpl.Execute(os.Stdout, "no data needed")
-	if err != nil {
-		log.Fatalf("execution failed: %s", err)
-	}
-
-or to invoke a particular template explicitly by name,
-
-	err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
-	if err != nil {
-		log.Fatalf("execution failed: %s", err)
-	}
-
-*/
-package template
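
The doc.go deleted above mirrors the standard library's text/template documentation of pipelines, variables, and the predefined comparison functions, so the same behavior can be exercised directly against the stdlib package. A minimal sketch (the "upper" function name and the sample data are illustrative choices, not part of the package):

	package main

	import (
		"log"
		"os"
		"strings"
		"text/template"
	)

	func main() {
		// Register a custom function under an arbitrary name, "upper".
		funcs := template.FuncMap{"upper": strings.ToUpper}

		// Exercises a two-variable range, the eq comparison, and a chained pipeline.
		const text = "{{range $i, $name := .}}{{if eq $i 0}}first: {{end}}{{$name | upper}}\n{{end}}"

		t := template.Must(template.New("demo").Funcs(funcs).Parse(text))
		if err := t.Execute(os.Stdout, []string{"ada", "grace"}); err != nil {
			log.Fatal(err)
		}
		// Prints:
		// first: ADA
		// GRACE
	}
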
diff --git a/internal/backport/text/template/example_test.go b/internal/backport/text/template/example_test.go
deleted file mode 100644
index b07611e..0000000
--- a/internal/backport/text/template/example_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"log"
-	"os"
-	"strings"
-
-	"golang.org/x/website/internal/backport/text/template"
-)
-
-func ExampleTemplate() {
-	// Define a template.
-	const letter = `
-Dear {{.Name}},
-{{if .Attended}}
-It was a pleasure to see you at the wedding.
-{{- else}}
-It is a shame you couldn't make it to the wedding.
-{{- end}}
-{{with .Gift -}}
-Thank you for the lovely {{.}}.
-{{end}}
-Best wishes,
-Josie
-`
-
-	// Prepare some data to insert into the template.
-	type Recipient struct {
-		Name, Gift string
-		Attended   bool
-	}
-	var recipients = []Recipient{
-		{"Aunt Mildred", "bone china tea set", true},
-		{"Uncle John", "moleskin pants", false},
-		{"Cousin Rodney", "", false},
-	}
-
-	// Create a new template and parse the letter into it.
-	t := template.Must(template.New("letter").Parse(letter))
-
-	// Execute the template for each recipient.
-	for _, r := range recipients {
-		err := t.Execute(os.Stdout, r)
-		if err != nil {
-			log.Println("executing template:", err)
-		}
-	}
-
-	// Output:
-	// Dear Aunt Mildred,
-	//
-	// It was a pleasure to see you at the wedding.
-	// Thank you for the lovely bone china tea set.
-	//
-	// Best wishes,
-	// Josie
-	//
-	// Dear Uncle John,
-	//
-	// It is a shame you couldn't make it to the wedding.
-	// Thank you for the lovely moleskin pants.
-	//
-	// Best wishes,
-	// Josie
-	//
-	// Dear Cousin Rodney,
-	//
-	// It is a shame you couldn't make it to the wedding.
-	//
-	// Best wishes,
-	// Josie
-}
-
-// The following example is duplicated in html/template; keep them in sync.
-
-func ExampleTemplate_block() {
-	const (
-		master  = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
-		overlay = `{{define "list"}} {{join . ", "}}{{end}} `
-	)
-	var (
-		funcs     = template.FuncMap{"join": strings.Join}
-		guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
-	)
-	masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
-	if err != nil {
-		log.Fatal(err)
-	}
-	overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
-	if err != nil {
-		log.Fatal(err)
-	}
-	if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
-		log.Fatal(err)
-	}
-	if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
-		log.Fatal(err)
-	}
-	// Output:
-	// Names:
-	// - Gamora
-	// - Groot
-	// - Nebula
-	// - Rocket
-	// - Star-Lord
-	// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
-}
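
The block/overlay example above builds up an association of named templates. A small sketch of inspecting such an association with DefinedTemplates, which the stdlib package also provides; the listing order is not guaranteed because the underlying map is unordered:

	package main

	import (
		"fmt"
		"os"
		"text/template"
	)

	func main() {
		// Nested {{define}} blocks associate T1 and T2 with the root template.
		t := template.Must(template.New("root").Parse(
			`{{define "T1"}}ONE{{end}}{{define "T2"}}TWO{{end}}{{template "T1"}} {{template "T2"}}`))

		// Reports something like: ; defined templates are: "T1", "T2", "root"
		fmt.Println(t.DefinedTemplates())

		// Executing the root template prints: ONE TWO
		if err := t.Execute(os.Stdout, nil); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
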
diff --git a/internal/backport/text/template/examplefiles_test.go b/internal/backport/text/template/examplefiles_test.go
deleted file mode 100644
index 9b03198..0000000
--- a/internal/backport/text/template/examplefiles_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-
-	"golang.org/x/website/internal/backport/text/template"
-)
-
-// templateFile defines the contents of a template to be stored in a file, for testing.
-type templateFile struct {
-	name     string
-	contents string
-}
-
-func createTestDir(files []templateFile) string {
-	dir, err := ioutil.TempDir("", "template")
-	if err != nil {
-		log.Fatal(err)
-	}
-	for _, file := range files {
-		f, err := os.Create(filepath.Join(dir, file.name))
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer f.Close()
-		_, err = io.WriteString(f, file.contents)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	return dir
-}
-
-// Here we demonstrate loading a set of templates from a directory.
-func ExampleTemplate_glob() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T0.tmpl is a plain template file that just invokes T1.
-		{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
-		// T1.tmpl defines a template, T1 that invokes T2.
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-		// T2.tmpl defines a template T2.
-		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// T0.tmpl is the first name matched, so it becomes the starting template,
-	// the value returned by ParseGlob.
-	tmpl := template.Must(template.ParseGlob(pattern))
-
-	err := tmpl.Execute(os.Stdout, nil)
-	if err != nil {
-		log.Fatalf("template execution: %s", err)
-	}
-	// Output:
-	// T0 invokes T1: (T1 invokes T2: (This is T2))
-}
-
-// This example demonstrates one way to share some templates
-// and use them in different contexts. In this variant we add multiple driver
-// templates by hand to an existing bundle of templates.
-func ExampleTemplate_helpers() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T1.tmpl defines a template, T1 that invokes T2.
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-		// T2.tmpl defines a template T2.
-		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// Load the helpers.
-	templates := template.Must(template.ParseGlob(pattern))
-	// Add one driver template to the bunch; we do this with an explicit template definition.
-	_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
-	if err != nil {
-		log.Fatal("parsing driver1: ", err)
-	}
-	// Add another driver template.
-	_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
-	if err != nil {
-		log.Fatal("parsing driver2: ", err)
-	}
-	// We load all the templates before execution. This package does not require
-	// that behavior but html/template's escaping does, so it's a good habit.
-	err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
-	if err != nil {
-		log.Fatalf("driver1 execution: %s", err)
-	}
-	err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
-	if err != nil {
-		log.Fatalf("driver2 execution: %s", err)
-	}
-	// Output:
-	// Driver 1 calls T1: (T1 invokes T2: (This is T2))
-	// Driver 2 calls T2: (This is T2)
-}
-
-// This example demonstrates how to use one group of driver
-// templates with distinct sets of helper templates.
-func ExampleTemplate_share() {
-	// Here we create a temporary directory and populate it with our sample
-	// template definition files; usually the template files would already
-	// exist in some location known to the program.
-	dir := createTestDir([]templateFile{
-		// T0.tmpl is a plain template file that just invokes T1.
-		{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
-		// T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined
-		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
-	})
-	// Clean up after the test; another quirk of running as an example.
-	defer os.RemoveAll(dir)
-
-	// pattern is the glob pattern used to find all the template files.
-	pattern := filepath.Join(dir, "*.tmpl")
-
-	// Here starts the example proper.
-	// Load the drivers.
-	drivers := template.Must(template.ParseGlob(pattern))
-
-	// We must define an implementation of the T2 template. First we clone
-	// the drivers, then add a definition of T2 to the template name space.
-
-	// 1. Clone the helper set to create a new name space from which to run them.
-	first, err := drivers.Clone()
-	if err != nil {
-		log.Fatal("cloning helpers: ", err)
-	}
-	// 2. Define T2, version A, and parse it.
-	_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
-	if err != nil {
-		log.Fatal("parsing T2: ", err)
-	}
-
-	// Now repeat the whole thing, using a different version of T2.
-	// 1. Clone the drivers.
-	second, err := drivers.Clone()
-	if err != nil {
-		log.Fatal("cloning drivers: ", err)
-	}
-	// 2. Define T2, version B, and parse it.
-	_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
-	if err != nil {
-		log.Fatal("parsing T2: ", err)
-	}
-
-	// Execute the templates in the reverse order to verify the
-	// first is unaffected by the second.
-	err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
-	if err != nil {
-		log.Fatalf("second execution: %s", err)
-	}
-	err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
-	if err != nil {
-		log.Fatalf("first: execution: %s", err)
-	}
-
-	// Output:
-	// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
-	// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
-}
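
ExampleTemplate_glob above writes the *.tmpl files to a temporary directory and loads them with ParseGlob. An alternative, sketched below under the assumption of a hypothetical templates/ directory checked in next to the source file, is to embed the files and load them with ParseFS (available in the standard library since Go 1.16):

	package main

	import (
		"embed"
		"log"
		"os"
		"text/template"
	)

	// The templates/ directory and its *.tmpl files are assumed to exist;
	// their names mirror the example above.
	//
	//go:embed templates/*.tmpl
	var tmplFS embed.FS

	func main() {
		// ParseFS behaves like ParseGlob, but reads from the embedded FS.
		tmpl := template.Must(template.ParseFS(tmplFS, "templates/*.tmpl"))

		// Templates parsed from files are named after their base file name.
		if err := tmpl.ExecuteTemplate(os.Stdout, "T0.tmpl", nil); err != nil {
			log.Fatal(err)
		}
	}
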
diff --git a/internal/backport/text/template/examplefunc_test.go b/internal/backport/text/template/examplefunc_test.go
deleted file mode 100644
index ed558ad..0000000
--- a/internal/backport/text/template/examplefunc_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template_test
-
-import (
-	"log"
-	"os"
-	"strings"
-
-	"golang.org/x/website/internal/backport/text/template"
-)
-
-// This example demonstrates a custom function to process template text.
-// It installs the strings.Title function and uses it to
-// Make Title Text Look Good In Our Template's Output.
-func ExampleTemplate_func() {
-	// First we create a FuncMap with which to register the function.
-	funcMap := template.FuncMap{
-		// The name "title" is what the function will be called in the template text.
-		"title": strings.Title,
-	}
-
-	// A simple template definition to test our function.
-	// We print the input text several ways:
-	// - the original
-	// - title-cased
-	// - title-cased and then printed with %q
-	// - printed with %q and then title-cased.
-	const templateText = `
-Input: {{printf "%q" .}}
-Output 0: {{title .}}
-Output 1: {{title . | printf "%q"}}
-Output 2: {{printf "%q" . | title}}
-`
-
-	// Create a template, add the function map, and parse the text.
-	tmpl, err := template.New("titleTest").Funcs(funcMap).Parse(templateText)
-	if err != nil {
-		log.Fatalf("parsing: %s", err)
-	}
-
-	// Run the template to verify the output.
-	err = tmpl.Execute(os.Stdout, "the go programming language")
-	if err != nil {
-		log.Fatalf("execution: %s", err)
-	}
-
-	// Output:
-	// Input: "the go programming language"
-	// Output 0: The Go Programming Language
-	// Output 1: "The Go Programming Language"
-	// Output 2: "The Go Programming Language"
-}
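
The FuncMap example above registers strings.Title, which is deprecated in current Go releases. A minimal sketch of the same pattern with a small local replacement; titleWords is an ad-hoc helper written here, not a standard-library function:

	package main

	import (
		"log"
		"os"
		"strings"
		"text/template"
		"unicode"
	)

	// titleWords upper-cases the first letter of each whitespace-separated
	// word. It stands in for the deprecated strings.Title used above.
	func titleWords(s string) string {
		prev := ' '
		return strings.Map(func(r rune) rune {
			up := r
			if unicode.IsSpace(prev) {
				up = unicode.ToUpper(r)
			}
			prev = r
			return up
		}, s)
	}

	func main() {
		tmpl := template.Must(template.New("titleTest").
			Funcs(template.FuncMap{"title": titleWords}).
			Parse(`Output: {{title .}}`))
		if err := tmpl.Execute(os.Stdout, "the go programming language"); err != nil {
			log.Fatalf("execution: %s", err)
		}
		// Prints: Output: The Go Programming Language
	}
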
diff --git a/internal/backport/text/template/exec.go b/internal/backport/text/template/exec.go
deleted file mode 100644
index 4ff29bb..0000000
--- a/internal/backport/text/template/exec.go
+++ /dev/null
@@ -1,1028 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"runtime"
-	"strings"
-
-	"golang.org/x/website/internal/backport/fmtsort"
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// maxExecDepth specifies the maximum stack depth of templates within
-// templates. This limit is only practically reached by accidentally
-// recursive template invocations. This limit allows us to return
-// an error instead of triggering a stack overflow.
-var maxExecDepth = initMaxExecDepth()
-
-func initMaxExecDepth() int {
-	if runtime.GOARCH == "wasm" {
-		return 1000
-	}
-	return 100000
-}
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
-	tmpl  *Template
-	wr    io.Writer
-	node  parse.Node // current node, for errors
-	vars  []variable // push-down stack of variable values.
-	depth int        // the height of the stack of executing templates.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
-	name  string
-	value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
-	s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
-	return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
-	s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the last declared variable with the given name.
-// Used by variable assignments.
-func (s *state) setVar(name string, value reflect.Value) {
-	for i := s.mark() - 1; i >= 0; i-- {
-		if s.vars[i].name == name {
-			s.vars[i].value = value
-			return
-		}
-	}
-	s.errorf("undefined variable: %s", name)
-}
-
-// setTopVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setTopVar(n int, value reflect.Value) {
-	s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
-	for i := s.mark() - 1; i >= 0; i-- {
-		if s.vars[i].name == name {
-			return s.vars[i].value
-		}
-	}
-	s.errorf("undefined variable: %s", name)
-	return zero
-}
-
-var zero reflect.Value
-
-type missingValType struct{}
-
-var missingVal = reflect.ValueOf(missingValType{})
-
-// at marks the state to be on node n, for error reporting.
-func (s *state) at(node parse.Node) {
-	s.node = node
-}
-
-// doublePercent returns the string with %'s replaced by %%, if necessary,
-// so it can be used safely inside a Printf format string.
-func doublePercent(str string) string {
-	return strings.ReplaceAll(str, "%", "%%")
-}
-
-// TODO: It would be nice if ExecError was more broken down, but
-// the way ErrorContext embeds the template name makes the
-// processing too clumsy.
-
-// ExecError is the custom error type returned when Execute has an
-// error evaluating its template. (If a write error occurs, the actual
-// error is returned; it will not be of type ExecError.)
-type ExecError struct {
-	Name string // Name of template.
-	Err  error  // Pre-formatted error.
-}
-
-func (e ExecError) Error() string {
-	return e.Err.Error()
-}
-
-func (e ExecError) Unwrap() error {
-	return e.Err
-}
-
-// errorf records an ExecError and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
-	name := doublePercent(s.tmpl.Name())
-	if s.node == nil {
-		format = fmt.Sprintf("template: %s: %s", name, format)
-	} else {
-		location, context := s.tmpl.ErrorContext(s.node)
-		format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
-	}
-	panic(ExecError{
-		Name: s.tmpl.Name(),
-		Err:  fmt.Errorf(format, args...),
-	})
-}
-
-// writeError is the wrapper type used internally when Execute has an
-// error writing to its output. We strip the wrapper in errRecover.
-// Note that this is not an implementation of error, so it cannot escape
-// from the package as an error value.
-type writeError struct {
-	Err error // Original error.
-}
-
-func (s *state) writeError(err error) {
-	panic(writeError{
-		Err: err,
-	})
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Parse.
-func errRecover(errp *error) {
-	e := recover()
-	if e != nil {
-		switch err := e.(type) {
-		case runtime.Error:
-			panic(e)
-		case writeError:
-			*errp = err.Err // Strip the wrapper.
-		case ExecError:
-			*errp = err // Keep the wrapper.
-		default:
-			panic(e)
-		}
-	}
-}
-
-// ExecuteTemplate applies the template associated with t that has the given name
-// to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel, although if parallel
-// executions share a Writer the output may be interleaved.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
-	tmpl := t.Lookup(name)
-	if tmpl == nil {
-		return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
-	}
-	return tmpl.Execute(wr, data)
-}
-
-// Execute applies a parsed template to the specified data object,
-// and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel, although if parallel
-// executions share a Writer the output may be interleaved.
-//
-// If data is a reflect.Value, the template applies to the concrete
-// value that the reflect.Value holds, as in fmt.Print.
-func (t *Template) Execute(wr io.Writer, data interface{}) error {
-	return t.execute(wr, data)
-}
-
-func (t *Template) execute(wr io.Writer, data interface{}) (err error) {
-	defer errRecover(&err)
-	value, ok := data.(reflect.Value)
-	if !ok {
-		value = reflect.ValueOf(data)
-	}
-	state := &state{
-		tmpl: t,
-		wr:   wr,
-		vars: []variable{{"$", value}},
-	}
-	if t.Tree == nil || t.Root == nil {
-		state.errorf("%q is an incomplete or empty template", t.Name())
-	}
-	state.walk(value, t.Root)
-	return
-}
-
-// DefinedTemplates returns a string listing the defined templates,
-// prefixed by the string "; defined templates are: ". If there are none,
-// it returns the empty string. For generating an error message here
-// and in html/template.
-func (t *Template) DefinedTemplates() string {
-	if t.common == nil {
-		return ""
-	}
-	var b strings.Builder
-	t.muTmpl.RLock()
-	defer t.muTmpl.RUnlock()
-	for name, tmpl := range t.tmpl {
-		if tmpl.Tree == nil || tmpl.Root == nil {
-			continue
-		}
-		if b.Len() == 0 {
-			b.WriteString("; defined templates are: ")
-		} else {
-			b.WriteString(", ")
-		}
-		fmt.Fprintf(&b, "%q", name)
-	}
-	return b.String()
-}
-
-// Sentinel errors for use with panic to signal early exits from range loops.
-var (
-	walkBreak    = errors.New("break")
-	walkContinue = errors.New("continue")
-)
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, node parse.Node) {
-	s.at(node)
-	switch node := node.(type) {
-	case *parse.ActionNode:
-		// Do not pop variables so they persist until next end.
-		// Also, if the action declares variables, don't print the result.
-		val := s.evalPipeline(dot, node.Pipe)
-		if len(node.Pipe.Decl) == 0 {
-			s.printValue(node, val)
-		}
-	case *parse.BreakNode:
-		panic(walkBreak)
-	case *parse.CommentNode:
-	case *parse.ContinueNode:
-		panic(walkContinue)
-	case *parse.IfNode:
-		s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
-	case *parse.ListNode:
-		for _, node := range node.Nodes {
-			s.walk(dot, node)
-		}
-	case *parse.RangeNode:
-		s.walkRange(dot, node)
-	case *parse.TemplateNode:
-		s.walkTemplate(dot, node)
-	case *parse.TextNode:
-		if _, err := s.wr.Write(node.Text); err != nil {
-			s.writeError(err)
-		}
-	case *parse.WithNode:
-		s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
-	default:
-		s.errorf("unknown node: %s", node)
-	}
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
-	defer s.pop(s.mark())
-	val := s.evalPipeline(dot, pipe)
-	truth, ok := isTrue(indirectInterface(val))
-	if !ok {
-		s.errorf("if/with can't use %v", val)
-	}
-	if truth {
-		if typ == parse.NodeWith {
-			s.walk(val, list)
-		} else {
-			s.walk(dot, list)
-		}
-	} else if elseList != nil {
-		s.walk(dot, elseList)
-	}
-}
-
-// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value. This is the definition of
-// truth used by if and other such actions.
-func IsTrue(val interface{}) (truth, ok bool) {
-	return isTrue(reflect.ValueOf(val))
-}
-
-func isTrue(val reflect.Value) (truth, ok bool) {
-	if !val.IsValid() {
-		// Something like var x interface{}, never set. It's a form of nil.
-		return false, true
-	}
-	switch val.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		truth = val.Len() > 0
-	case reflect.Bool:
-		truth = val.Bool()
-	case reflect.Complex64, reflect.Complex128:
-		truth = val.Complex() != 0
-	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
-		truth = !val.IsNil()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		truth = val.Int() != 0
-	case reflect.Float32, reflect.Float64:
-		truth = val.Float() != 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		truth = val.Uint() != 0
-	case reflect.Struct:
-		truth = true // Struct values are always true.
-	default:
-		return
-	}
-	return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
-	s.at(r)
-	defer func() {
-		if r := recover(); r != nil && r != walkBreak {
-			panic(r)
-		}
-	}()
-	defer s.pop(s.mark())
-	val, _ := indirect(s.evalPipeline(dot, r.Pipe))
-	// mark top of stack before any variables in the body are pushed.
-	mark := s.mark()
-	oneIteration := func(index, elem reflect.Value) {
-		// Set top var (lexically the second if there are two) to the element.
-		if len(r.Pipe.Decl) > 0 {
-			s.setTopVar(1, elem)
-		}
-		// Set next var (lexically the first if there are two) to the index.
-		if len(r.Pipe.Decl) > 1 {
-			s.setTopVar(2, index)
-		}
-		defer s.pop(mark)
-		defer func() {
-			// Consume panic(walkContinue)
-			if r := recover(); r != nil && r != walkContinue {
-				panic(r)
-			}
-		}()
-		s.walk(elem, r.List)
-	}
-	switch val.Kind() {
-	case reflect.Array, reflect.Slice:
-		if val.Len() == 0 {
-			break
-		}
-		for i := 0; i < val.Len(); i++ {
-			oneIteration(reflect.ValueOf(i), val.Index(i))
-		}
-		return
-	case reflect.Map:
-		if val.Len() == 0 {
-			break
-		}
-		om := fmtsort.Sort(val)
-		for i, key := range om.Key {
-			oneIteration(key, om.Value[i])
-		}
-		return
-	case reflect.Chan:
-		if val.IsNil() {
-			break
-		}
-		if val.Type().ChanDir() == reflect.SendDir {
-			s.errorf("range over send-only channel %v", val)
-			break
-		}
-		i := 0
-		for ; ; i++ {
-			elem, ok := val.Recv()
-			if !ok {
-				break
-			}
-			oneIteration(reflect.ValueOf(i), elem)
-		}
-		if i == 0 {
-			break
-		}
-		return
-	case reflect.Invalid:
-		break // An invalid value is likely a nil map, etc. and acts like an empty map.
-	default:
-		s.errorf("range can't iterate over %v", val)
-	}
-	if r.ElseList != nil {
-		s.walk(dot, r.ElseList)
-	}
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
-	s.at(t)
-	tmpl := s.tmpl.Lookup(t.Name)
-	if tmpl == nil {
-		s.errorf("template %q not defined", t.Name)
-	}
-	if s.depth == maxExecDepth {
-		s.errorf("exceeded maximum template depth (%v)", maxExecDepth)
-	}
-	// Variables declared by the pipeline persist.
-	dot = s.evalPipeline(dot, t.Pipe)
-	newState := *s
-	newState.depth++
-	newState.tmpl = tmpl
-	// No dynamic scoping: template invocations inherit no variables.
-	newState.vars = []variable{{"$", dot}}
-	newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
-	if pipe == nil {
-		return
-	}
-	s.at(pipe)
-	value = missingVal
-	for _, cmd := range pipe.Cmds {
-		value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
-		// If the object has type interface{}, dig down one level to the thing inside.
-		if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
-			value = reflect.ValueOf(value.Interface()) // lovely!
-		}
-	}
-	for _, variable := range pipe.Decl {
-		if pipe.IsAssign {
-			s.setVar(variable.Ident[0], value)
-		} else {
-			s.push(variable.Ident[0], value)
-		}
-	}
-	return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
-	if len(args) > 1 || final != missingVal {
-		s.errorf("can't give argument to non-function %s", args[0])
-	}
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
-	firstWord := cmd.Args[0]
-	switch n := firstWord.(type) {
-	case *parse.FieldNode:
-		return s.evalFieldNode(dot, n, cmd.Args, final)
-	case *parse.ChainNode:
-		return s.evalChainNode(dot, n, cmd.Args, final)
-	case *parse.IdentifierNode:
-		// Must be a function.
-		return s.evalFunction(dot, n, cmd, cmd.Args, final)
-	case *parse.PipeNode:
-		// Parenthesized pipeline. The arguments are all inside the pipeline; final must be absent.
-		s.notAFunction(cmd.Args, final)
-		return s.evalPipeline(dot, n)
-	case *parse.VariableNode:
-		return s.evalVariableNode(dot, n, cmd.Args, final)
-	}
-	s.at(firstWord)
-	s.notAFunction(cmd.Args, final)
-	switch word := firstWord.(type) {
-	case *parse.BoolNode:
-		return reflect.ValueOf(word.True)
-	case *parse.DotNode:
-		return dot
-	case *parse.NilNode:
-		s.errorf("nil is not a command")
-	case *parse.NumberNode:
-		return s.idealConstant(word)
-	case *parse.StringNode:
-		return reflect.ValueOf(word.Text)
-	}
-	s.errorf("can't evaluate command %q", firstWord)
-	panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
-	// These are ideal constants but we don't know the type
-	// and we have no context.  (If it was a method argument,
-	// we'd know what we need.) The syntax guides us to some extent.
-	s.at(constant)
-	switch {
-	case constant.IsComplex:
-		return reflect.ValueOf(constant.Complex128) // incontrovertible.
-
-	case constant.IsFloat &&
-		!isHexInt(constant.Text) && !isRuneInt(constant.Text) &&
-		strings.ContainsAny(constant.Text, ".eEpP"):
-		return reflect.ValueOf(constant.Float64)
-
-	case constant.IsInt:
-		n := int(constant.Int64)
-		if int64(n) != constant.Int64 {
-			s.errorf("%s overflows int", constant.Text)
-		}
-		return reflect.ValueOf(n)
-
-	case constant.IsUint:
-		s.errorf("%s overflows int", constant.Text)
-	}
-	return zero
-}
-
-func isRuneInt(s string) bool {
-	return len(s) > 0 && s[0] == '\''
-}
-
-func isHexInt(s string) bool {
-	return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') && !strings.ContainsAny(s, "pP")
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
-	s.at(field)
-	return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
-}
-
-func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
-	s.at(chain)
-	if len(chain.Field) == 0 {
-		s.errorf("internal error: no fields in evalChainNode")
-	}
-	if chain.Node.Type() == parse.NodeNil {
-		s.errorf("indirection through explicit nil in %s", chain)
-	}
-	// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
-	pipe := s.evalArg(dot, nil, chain.Node)
-	return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
-	// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
-	s.at(variable)
-	value := s.varValue(variable.Ident[0])
-	if len(variable.Ident) == 1 {
-		s.notAFunction(args, final)
-		return value
-	}
-	return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
-	n := len(ident)
-	for i := 0; i < n-1; i++ {
-		receiver = s.evalField(dot, ident[i], node, nil, missingVal, receiver)
-	}
-	// Now if it's a method, it gets the arguments.
-	return s.evalField(dot, ident[n-1], node, args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
-	s.at(node)
-	name := node.Ident
-	function, isBuiltin, ok := findFunction(name, s.tmpl)
-	if !ok {
-		s.errorf("%q is not a defined function", name)
-	}
-	return s.evalCall(dot, function, isBuiltin, cmd, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
-	if !receiver.IsValid() {
-		if s.tmpl.option.missingKey == mapError { // Treat invalid value as missing map key.
-			s.errorf("nil data; no entry for key %q", fieldName)
-		}
-		return zero
-	}
-	typ := receiver.Type()
-	receiver, isNil := indirect(receiver)
-	if receiver.Kind() == reflect.Interface && isNil {
-		// Calling a method on a nil interface can't work. The
-		// MethodByName method call below would panic.
-		s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
-		return zero
-	}
-
-	// Unless it's an interface, need to get to a value of type *T to guarantee
-	// we see all methods of T and *T.
-	ptr := receiver
-	if ptr.Kind() != reflect.Interface && ptr.Kind() != reflect.Ptr && ptr.CanAddr() {
-		ptr = ptr.Addr()
-	}
-	if method := ptr.MethodByName(fieldName); method.IsValid() {
-		return s.evalCall(dot, method, false, node, fieldName, args, final)
-	}
-	hasArgs := len(args) > 1 || final != missingVal
-	// It's not a method; must be a field of a struct or an element of a map.
-	switch receiver.Kind() {
-	case reflect.Struct:
-		tField, ok := receiver.Type().FieldByName(fieldName)
-		if ok {
-			field := receiver.FieldByIndex(tField.Index)
-			if tField.PkgPath != "" {
-				s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
-			}
-			// If it's a function, we must call it.
-			if hasArgs {
-				s.errorf("%s has arguments but cannot be invoked as function", fieldName)
-			}
-			return field
-		}
-	case reflect.Map:
-		// If it's a map, attempt to use the field name as a key.
-		nameVal := reflect.ValueOf(fieldName)
-		if nameVal.Type().AssignableTo(receiver.Type().Key()) {
-			if hasArgs {
-				s.errorf("%s is not a method but has arguments", fieldName)
-			}
-			result := receiver.MapIndex(nameVal)
-			if !result.IsValid() {
-				switch s.tmpl.option.missingKey {
-				case mapInvalid:
-					// Just use the invalid value.
-				case mapZeroValue:
-					result = reflect.Zero(receiver.Type().Elem())
-				case mapError:
-					s.errorf("map has no entry for key %q", fieldName)
-				}
-			}
-			return result
-		}
-	case reflect.Ptr:
-		etyp := receiver.Type().Elem()
-		if etyp.Kind() == reflect.Struct {
-			if _, ok := etyp.FieldByName(fieldName); !ok {
-				// If there's no such field, say "can't evaluate"
-				// instead of "nil pointer evaluating".
-				break
-			}
-		}
-		if isNil {
-			s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
-		}
-	}
-	s.errorf("can't evaluate field %s in type %s", fieldName, typ)
-	panic("not reached")
-}
-
-var (
-	errorType        = reflect.TypeOf((*error)(nil)).Elem()
-	fmtStringerType  = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-	reflectValueType = reflect.TypeOf((*reflect.Value)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, isBuiltin bool, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
-	if args != nil {
-		args = args[1:] // Zeroth arg is function name/node; not passed to function.
-	}
-	typ := fun.Type()
-	numIn := len(args)
-	if final != missingVal {
-		numIn++
-	}
-	numFixed := len(args)
-	if typ.IsVariadic() {
-		numFixed = typ.NumIn() - 1 // last arg is the variadic one.
-		if numIn < numFixed {
-			s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
-		}
-	} else if numIn != typ.NumIn() {
-		s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), numIn)
-	}
-	if !goodFunc(typ) {
-		// TODO: This could still be a confusing error; maybe goodFunc should provide info.
-		s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
-	}
-
-	// Special case for builtin and/or, which short-circuit.
-	if isBuiltin && (name == "and" || name == "or") {
-		argType := typ.In(0)
-		var v reflect.Value
-		for _, arg := range args {
-			v = s.evalArg(dot, argType, arg).Interface().(reflect.Value)
-			if truth(v) == (name == "or") {
-				break
-			}
-		}
-		return v
-	}
-
-	// Build the arg list.
-	argv := make([]reflect.Value, numIn)
-	// Args must be evaluated. Fixed args first.
-	i := 0
-	for ; i < numFixed && i < len(args); i++ {
-		argv[i] = s.evalArg(dot, typ.In(i), args[i])
-	}
-	// Now the ... args.
-	if typ.IsVariadic() {
-		argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
-		for ; i < len(args); i++ {
-			argv[i] = s.evalArg(dot, argType, args[i])
-		}
-	}
-	// Add final value if necessary.
-	if final != missingVal {
-		t := typ.In(typ.NumIn() - 1)
-		if typ.IsVariadic() {
-			if numIn-1 < numFixed {
-				// The added final argument corresponds to a fixed parameter of the function.
-				// Validate against the type of the actual parameter.
-				t = typ.In(numIn - 1)
-			} else {
-				// The added final argument corresponds to the variadic part.
-				// Validate against the type of the elements of the variadic slice.
-				t = t.Elem()
-			}
-		}
-		argv[i] = s.validateType(final, t)
-	}
-	v, err := safeCall(fun, argv)
-	// If we have an error that is not nil, stop execution and return that
-	// error to the caller.
-	if err != nil {
-		s.at(node)
-		s.errorf("error calling %s: %w", name, err)
-	}
-	if v.Type() == reflectValueType {
-		v = v.Interface().(reflect.Value)
-	}
-	return v
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-func canBeNil(typ reflect.Type) bool {
-	switch typ.Kind() {
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return true
-	case reflect.Struct:
-		return typ == reflectValueType
-	}
-	return false
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
-	if !value.IsValid() {
-		if typ == nil {
-			// An untyped nil interface{}. Accept as a proper nil value.
-			return reflect.ValueOf(nil)
-		}
-		if canBeNil(typ) {
-			// Like above, but use the zero value of the non-nil type.
-			return reflect.Zero(typ)
-		}
-		s.errorf("invalid value; expected %s", typ)
-	}
-	if typ == reflectValueType && value.Type() != typ {
-		return reflect.ValueOf(value)
-	}
-	if typ != nil && !value.Type().AssignableTo(typ) {
-		if value.Kind() == reflect.Interface && !value.IsNil() {
-			value = value.Elem()
-			if value.Type().AssignableTo(typ) {
-				return value
-			}
-			// fallthrough
-		}
-		// Does one dereference or indirection work? We could do more, as we
-		// do with method receivers, but that gets messy and method receivers
-		// are much more constrained, so it makes more sense there than here.
-		// Besides, one is almost always all you need.
-		switch {
-		case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
-			value = value.Elem()
-			if !value.IsValid() {
-				s.errorf("dereference of nil pointer of type %s", typ)
-			}
-		case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
-			value = value.Addr()
-		default:
-			s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
-		}
-	}
-	return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	switch arg := n.(type) {
-	case *parse.DotNode:
-		return s.validateType(dot, typ)
-	case *parse.NilNode:
-		if canBeNil(typ) {
-			return reflect.Zero(typ)
-		}
-		s.errorf("cannot assign nil to %s", typ)
-	case *parse.FieldNode:
-		return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, missingVal), typ)
-	case *parse.VariableNode:
-		return s.validateType(s.evalVariableNode(dot, arg, nil, missingVal), typ)
-	case *parse.PipeNode:
-		return s.validateType(s.evalPipeline(dot, arg), typ)
-	case *parse.IdentifierNode:
-		return s.validateType(s.evalFunction(dot, arg, arg, nil, missingVal), typ)
-	case *parse.ChainNode:
-		return s.validateType(s.evalChainNode(dot, arg, nil, missingVal), typ)
-	}
-	switch typ.Kind() {
-	case reflect.Bool:
-		return s.evalBool(typ, n)
-	case reflect.Complex64, reflect.Complex128:
-		return s.evalComplex(typ, n)
-	case reflect.Float32, reflect.Float64:
-		return s.evalFloat(typ, n)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return s.evalInteger(typ, n)
-	case reflect.Interface:
-		if typ.NumMethod() == 0 {
-			return s.evalEmptyInterface(dot, n)
-		}
-	case reflect.Struct:
-		if typ == reflectValueType {
-			return reflect.ValueOf(s.evalEmptyInterface(dot, n))
-		}
-	case reflect.String:
-		return s.evalString(typ, n)
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return s.evalUnsignedInteger(typ, n)
-	}
-	s.errorf("can't handle %s for arg of type %s", n, typ)
-	panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	if n, ok := n.(*parse.BoolNode); ok {
-		value := reflect.New(typ).Elem()
-		value.SetBool(n.True)
-		return value
-	}
-	s.errorf("expected bool; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	if n, ok := n.(*parse.StringNode); ok {
-		value := reflect.New(typ).Elem()
-		value.SetString(n.Text)
-		return value
-	}
-	s.errorf("expected string; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
-		value := reflect.New(typ).Elem()
-		value.SetInt(n.Int64)
-		return value
-	}
-	s.errorf("expected integer; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
-		value := reflect.New(typ).Elem()
-		value.SetUint(n.Uint64)
-		return value
-	}
-	s.errorf("expected unsigned integer; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
-	s.at(n)
-	if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
-		value := reflect.New(typ).Elem()
-		value.SetFloat(n.Float64)
-		return value
-	}
-	s.errorf("expected float; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
-	if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
-		value := reflect.New(typ).Elem()
-		value.SetComplex(n.Complex128)
-		return value
-	}
-	s.errorf("expected complex; found %s", n)
-	panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
-	s.at(n)
-	switch n := n.(type) {
-	case *parse.BoolNode:
-		return reflect.ValueOf(n.True)
-	case *parse.DotNode:
-		return dot
-	case *parse.FieldNode:
-		return s.evalFieldNode(dot, n, nil, missingVal)
-	case *parse.IdentifierNode:
-		return s.evalFunction(dot, n, n, nil, missingVal)
-	case *parse.NilNode:
-		// NilNode is handled in evalArg, the only place that calls here.
-		s.errorf("evalEmptyInterface: nil (can't happen)")
-	case *parse.NumberNode:
-		return s.idealConstant(n)
-	case *parse.StringNode:
-		return reflect.ValueOf(n.Text)
-	case *parse.VariableNode:
-		return s.evalVariableNode(dot, n, nil, missingVal)
-	case *parse.PipeNode:
-		return s.evalPipeline(dot, n)
-	}
-	s.errorf("can't handle assignment of %s to empty interface argument", n)
-	panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate
-// if it's nil. If the returned bool is true, the returned value's kind will be
-// either a pointer or interface.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
-	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
-		if v.IsNil() {
-			return v, true
-		}
-	}
-	return v, false
-}
-
-// indirectInterface returns the concrete value in an interface value,
-// or else the zero reflect.Value.
-// That is, if v represents the interface value x, the result is the same as reflect.ValueOf(x):
-// the fact that x was an interface value is forgotten.
-func indirectInterface(v reflect.Value) reflect.Value {
-	if v.Kind() != reflect.Interface {
-		return v
-	}
-	if v.IsNil() {
-		return reflect.Value{}
-	}
-	return v.Elem()
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
-	s.at(n)
-	iface, ok := printableValue(v)
-	if !ok {
-		s.errorf("can't print %s of type %s", n, v.Type())
-	}
-	_, err := fmt.Fprint(s.wr, iface)
-	if err != nil {
-		s.writeError(err)
-	}
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-func printableValue(v reflect.Value) (interface{}, bool) {
-	if v.Kind() == reflect.Ptr {
-		v, _ = indirect(v) // fmt.Fprint handles nil.
-	}
-	if !v.IsValid() {
-		return "<no value>", true
-	}
-
-	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
-		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
-			v = v.Addr()
-		} else {
-			switch v.Kind() {
-			case reflect.Chan, reflect.Func:
-				return nil, false
-			}
-		}
-	}
-	return v.Interface(), true
-}
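
The exec.go deleted above wraps evaluation failures in ExecError while keeping write errors separate; the standard library's text/template exposes the same type, so callers can distinguish template errors from writer errors. A small sketch, with Failing and Boom as illustrative names:

	package main

	import (
		"errors"
		"fmt"
		"io"
		"text/template"
	)

	// Failing has a method whose second, error-typed return value is
	// non-nil, which terminates execution as described above.
	type Failing struct{}

	func (Failing) Boom() (string, error) { return "", errors.New("boom") }

	func main() {
		t := template.Must(template.New("t").Parse(`{{.Boom}}`))
		err := t.Execute(io.Discard, Failing{})

		var execErr template.ExecError
		fmt.Println(errors.As(err, &execErr)) // true: a template evaluation error
		fmt.Println(execErr.Name)             // t
	}
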
diff --git a/internal/backport/text/template/exec_test.go b/internal/backport/text/template/exec_test.go
deleted file mode 100644
index dbf2631..0000000
--- a/internal/backport/text/template/exec_test.go
+++ /dev/null
@@ -1,1782 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"errors"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"reflect"
-	"strings"
-	"sync"
-	"testing"
-)
-
-var debug = flag.Bool("debug", false, "show the errors produced by the tests")
-
-// T has lots of interesting pieces to use to test execution.
-type T struct {
-	// Basics
-	True        bool
-	I           int
-	U16         uint16
-	X, S        string
-	FloatZero   float64
-	ComplexZero complex128
-	// Nested structs.
-	U *U
-	// Struct with String method.
-	V0     V
-	V1, V2 *V
-	// Struct with Error method.
-	W0     W
-	W1, W2 *W
-	// Slices
-	SI      []int
-	SICap   []int
-	SIEmpty []int
-	SB      []bool
-	// Arrays
-	AI [3]int
-	// Maps
-	MSI      map[string]int
-	MSIone   map[string]int // one element, for deterministic output
-	MSIEmpty map[string]int
-	MXI      map[interface{}]int
-	MII      map[int]int
-	MI32S    map[int32]string
-	MI64S    map[int64]string
-	MUI32S   map[uint32]string
-	MUI64S   map[uint64]string
-	MI8S     map[int8]string
-	MUI8S    map[uint8]string
-	SMSI     []map[string]int
-	// Empty interfaces; used to see if we can dig inside one.
-	Empty0 interface{} // nil
-	Empty1 interface{}
-	Empty2 interface{}
-	Empty3 interface{}
-	Empty4 interface{}
-	// Non-empty interfaces.
-	NonEmptyInterface         I
-	NonEmptyInterfacePtS      *I
-	NonEmptyInterfaceNil      I
-	NonEmptyInterfaceTypedNil I
-	// Stringer.
-	Str fmt.Stringer
-	Err error
-	// Pointers
-	PI  *int
-	PS  *string
-	PSI *[]int
-	NIL *int
-	// Function (not method)
-	BinaryFunc      func(string, string) string
-	VariadicFunc    func(...string) string
-	VariadicFuncInt func(int, ...string) string
-	NilOKFunc       func(*int) bool
-	ErrFunc         func() (string, error)
-	PanicFunc       func() string
-	// Template to test evaluation of templates.
-	Tmpl *Template
-	// Unexported field; cannot be accessed by template.
-	unexported int
-}
-
-type S []string
-
-func (S) Method0() string {
-	return "M0"
-}
-
-type U struct {
-	V string
-}
-
-type V struct {
-	j int
-}
-
-func (v *V) String() string {
-	if v == nil {
-		return "nilV"
-	}
-	return fmt.Sprintf("<%d>", v.j)
-}
-
-type W struct {
-	k int
-}
-
-func (w *W) Error() string {
-	if w == nil {
-		return "nilW"
-	}
-	return fmt.Sprintf("[%d]", w.k)
-}
-
-var siVal = I(S{"a", "b"})
-
-var tVal = &T{
-	True:   true,
-	I:      17,
-	U16:    16,
-	X:      "x",
-	S:      "xyz",
-	U:      &U{"v"},
-	V0:     V{6666},
-	V1:     &V{7777}, // leave V2 as nil
-	W0:     W{888},
-	W1:     &W{999}, // leave W2 as nil
-	SI:     []int{3, 4, 5},
-	SICap:  make([]int, 5, 10),
-	AI:     [3]int{3, 4, 5},
-	SB:     []bool{true, false},
-	MSI:    map[string]int{"one": 1, "two": 2, "three": 3},
-	MSIone: map[string]int{"one": 1},
-	MXI:    map[interface{}]int{"one": 1},
-	MII:    map[int]int{1: 1},
-	MI32S:  map[int32]string{1: "one", 2: "two"},
-	MI64S:  map[int64]string{2: "i642", 3: "i643"},
-	MUI32S: map[uint32]string{2: "u322", 3: "u323"},
-	MUI64S: map[uint64]string{2: "ui642", 3: "ui643"},
-	MI8S:   map[int8]string{2: "i82", 3: "i83"},
-	MUI8S:  map[uint8]string{2: "u82", 3: "u83"},
-	SMSI: []map[string]int{
-		{"one": 1, "two": 2},
-		{"eleven": 11, "twelve": 12},
-	},
-	Empty1:                    3,
-	Empty2:                    "empty2",
-	Empty3:                    []int{7, 8},
-	Empty4:                    &U{"UinEmpty"},
-	NonEmptyInterface:         &T{X: "x"},
-	NonEmptyInterfacePtS:      &siVal,
-	NonEmptyInterfaceTypedNil: (*T)(nil),
-	Str:                       bytes.NewBuffer([]byte("foozle")),
-	Err:                       errors.New("erroozle"),
-	PI:                        newInt(23),
-	PS:                        newString("a string"),
-	PSI:                       newIntSlice(21, 22, 23),
-	BinaryFunc:                func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
-	VariadicFunc:              func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
-	VariadicFuncInt:           func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
-	NilOKFunc:                 func(s *int) bool { return s == nil },
-	ErrFunc:                   func() (string, error) { return "bla", nil },
-	PanicFunc:                 func() string { panic("test panic") },
-	Tmpl:                      Must(New("x").Parse("test template")), // "x" is the value of .X
-}
-
-var tSliceOfNil = []*T{nil}
-
-// A non-empty interface.
-type I interface {
-	Method0() string
-}
-
-var iVal I = tVal
-
-// Helpers for creation.
-func newInt(n int) *int {
-	return &n
-}
-
-func newString(s string) *string {
-	return &s
-}
-
-func newIntSlice(n ...int) *[]int {
-	p := new([]int)
-	*p = make([]int, len(n))
-	copy(*p, n)
-	return p
-}
-
-// Simple methods with and without arguments.
-func (t *T) Method0() string {
-	return "M0"
-}
-
-func (t *T) Method1(a int) int {
-	return a
-}
-
-func (t *T) Method2(a uint16, b string) string {
-	return fmt.Sprintf("Method2: %d %s", a, b)
-}
-
-func (t *T) Method3(v interface{}) string {
-	return fmt.Sprintf("Method3: %v", v)
-}
-
-func (t *T) Copy() *T {
-	n := new(T)
-	*n = *t
-	return n
-}
-
-func (t *T) MAdd(a int, b []int) []int {
-	v := make([]int, len(b))
-	for i, x := range b {
-		v[i] = x + a
-	}
-	return v
-}
-
-var myError = errors.New("my error")
-
-// MyError returns a value and an error according to its argument.
-func (t *T) MyError(error bool) (bool, error) {
-	if error {
-		return true, myError
-	}
-	return false, nil
-}
-
-// A few methods to test chaining.
-func (t *T) GetU() *U {
-	return t.U
-}
-
-func (u *U) TrueFalse(b bool) string {
-	if b {
-		return "true"
-	}
-	return ""
-}
-
-func typeOf(arg interface{}) string {
-	return fmt.Sprintf("%T", arg)
-}
-
-type execTest struct {
-	name   string
-	input  string
-	output string
-	data   interface{}
-	ok     bool
-}
-
-// bigInt and bigUint are hex strings representing numbers on either side
-// of the max int boundary.
-// We do it this way so the test doesn't depend on ints being 32 bits.
-var (
-	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
-	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
-)
-
-var execTests = []execTest{
-	// Trivial cases.
-	{"empty", "", "", nil, true},
-	{"text", "some text", "some text", nil, true},
-	{"nil action", "{{nil}}", "", nil, false},
-
-	// Ideal constants.
-	{"ideal int", "{{typeOf 3}}", "int", 0, true},
-	{"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
-	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
-	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
-	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
-	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
-	{"ideal nil without type", "{{nil}}", "", 0, false},
-
-	// Fields of structs.
-	{".X", "-{{.X}}-", "-x-", tVal, true},
-	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
-	{".unexported", "{{.unexported}}", "", tVal, false},
-
-	// Fields on maps.
-	{"map .one", "{{.MSI.one}}", "1", tVal, true},
-	{"map .two", "{{.MSI.two}}", "2", tVal, true},
-	{"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
-	{"map .one interface", "{{.MXI.one}}", "1", tVal, true},
-	{"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
-	{"map .WRONG type", "{{.MII.one}}", "", tVal, false},
-
-	// Dots of all kinds to test basic evaluation.
-	{"dot int", "<{{.}}>", "<13>", 13, true},
-	{"dot uint", "<{{.}}>", "<14>", uint(14), true},
-	{"dot float", "<{{.}}>", "<15.1>", 15.1, true},
-	{"dot bool", "<{{.}}>", "<true>", true, true},
-	{"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
-	{"dot string", "<{{.}}>", "<hello>", "hello", true},
-	{"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
-	{"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
-	{"dot struct", "<{{.}}>", "<{7 seven}>", struct {
-		a int
-		b string
-	}{7, "seven"}, true},
-
-	// Variables.
-	{"$ int", "{{$}}", "123", 123, true},
-	{"$.I", "{{$.I}}", "17", tVal, true},
-	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
-	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
-	{"simple assignment", "{{$x := 2}}{{$x = 3}}{{$x}}", "3", tVal, true},
-	{"nested assignment",
-		"{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{$x}}",
-		"3", tVal, true},
-	{"nested assignment changes the last declaration",
-		"{{$x := 1}}{{if true}}{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{end}}{{$x}}",
-		"1", tVal, true},
-
-	// Type with String method.
-	{"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
-	{"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
-	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
-
-	// Type with Error method.
-	{"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
-	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
-	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
-
-	// Pointers.
-	{"*int", "{{.PI}}", "23", tVal, true},
-	{"*string", "{{.PS}}", "a string", tVal, true},
-	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
-	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
-	{"NIL", "{{.NIL}}", "<nil>", tVal, true},
-
-	// Empty interfaces holding values.
-	{"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
-	{"empty with int", "{{.Empty1}}", "3", tVal, true},
-	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
-	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
-	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
-	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
-
-	// Edge cases with <no value> when dot is an interface value
-	{"field on interface", "{{.foo}}", "<no value>", nil, true},
-	{"field on parenthesized interface", "{{(.).foo}}", "<no value>", nil, true},
-
-	// Issue 31810: Parenthesized first element of pipeline with arguments.
-	// See also TestIssue31810.
-	{"unparenthesized non-function", "{{1 2}}", "", nil, false},
-	{"parenthesized non-function", "{{(1) 2}}", "", nil, false},
-	{"parenthesized non-function with no args", "{{(1)}}", "1", nil, true}, // This is fine.
-
-	// Method calls.
-	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
-	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
-	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
-	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
-	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
-	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: <nil>-", tVal, true},
-	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: <nil>-", tVal, true},
-	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{"method on chained var",
-		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method",
-		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method on variable",
-		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
-		"true", tVal, true},
-	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
-	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
-	{"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
-	{"method on typed nil interface value", "{{.NonEmptyInterfaceTypedNil.Method0}}", "M0", tVal, true},
-
-	// Function call builtin.
-	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
-	{".VariadicFunc0", "{{call .VariadicFunc}}", "<>", tVal, true},
-	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "<he+llo>", tVal, true},
-	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=<he+llo>", tVal, true},
-	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
-	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
-	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
-	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
-	{"call nil", "{{call nil}}", "", tVal, false},
-
-	// Erroneous function calls (check args).
-	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
-	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
-	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
-	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
-	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
-	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
-	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
-	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
-
-	// Pipelines.
-	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
-	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-<he+<llo>>-", tVal, true},
-
-	// Nil values aren't missing arguments.
-	{"nil pipeline", "{{ .Empty0 | call .NilOKFunc }}", "true", tVal, true},
-	{"nil call arg", "{{ call .NilOKFunc .Empty0 }}", "true", tVal, true},
-	{"bad nil pipeline", "{{ .Empty0 | .VariadicFunc }}", "", tVal, false},
-
-	// Parenthesized expressions
-	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
-
-	// Parenthesized expressions with field accesses
-	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
-	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
-	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
-	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
-
-	// If.
-	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
-	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
-	{"if on typed nil interface value", "{{if .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
-	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
-	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
-	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
-	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
-
-	// Print etc.
-	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
-	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
-	{"print nil", `{{print nil}}`, "<nil>", tVal, true},
-	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
-	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
-	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
-	{"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
-	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
-	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
-	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
-	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
-	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
-	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
-	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
-
-	// HTML.
-	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html", `{{html .PS}}`, "a string", tVal, true},
-	{"html typed nil", `{{html .NIL}}`, "&lt;nil&gt;", tVal, true},
-	{"html untyped nil", `{{html .Empty0}}`, "&lt;no value&gt;", tVal, true},
-
-	// JavaScript.
-	{"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
-
-	// URL query.
-	{"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
-
-	// Booleans
-	{"not", "{{not true}} {{not false}}", "false true", nil, true},
-	{"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
-	{"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
-	{"or short-circuit", "{{or 0 1 (die)}}", "1", nil, true},
-	{"and short-circuit", "{{and 1 0 (die)}}", "0", nil, true},
-	{"or short-circuit2", "{{or 0 0 (die)}}", "", nil, false},
-	{"and short-circuit2", "{{and 1 1 (die)}}", "", nil, false},
-	{"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
-	{"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
-
-	// Indexing.
-	{"slice[0]", "{{index .SI 0}}", "3", tVal, true},
-	{"slice[1]", "{{index .SI 1}}", "4", tVal, true},
-	{"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
-	{"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
-	{"slice[nil]", "{{index .SI nil}}", "", tVal, false},
-	{"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
-	{"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
-	{"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
-	{"map[nil]", "{{index .MSI nil}}", "", tVal, false},
-	{"map[``]", "{{index .MSI ``}}", "0", tVal, true},
-	{"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
-	{"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
-	{"nil[1]", "{{index nil 1}}", "", tVal, false},
-	{"map MI64S", "{{index .MI64S 2}}", "i642", tVal, true},
-	{"map MI32S", "{{index .MI32S 2}}", "two", tVal, true},
-	{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
-	{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
-	{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
-	{"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
-
-	// Slicing.
-	{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
-	{"slice[1:]", "{{slice .SI 1}}", "[4 5]", tVal, true},
-	{"slice[1:2]", "{{slice .SI 1 2}}", "[4]", tVal, true},
-	{"slice[-1:]", "{{slice .SI -1}}", "", tVal, false},
-	{"slice[1:-2]", "{{slice .SI 1 -2}}", "", tVal, false},
-	{"slice[1:2:-1]", "{{slice .SI 1 2 -1}}", "", tVal, false},
-	{"slice[2:1]", "{{slice .SI 2 1}}", "", tVal, false},
-	{"slice[2:2:1]", "{{slice .SI 2 2 1}}", "", tVal, false},
-	{"out of range", "{{slice .SI 4 5}}", "", tVal, false},
-	{"out of range", "{{slice .SI 2 2 5}}", "", tVal, false},
-	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10}}", "[0 0 0 0]", tVal, true},
-	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10 10}}", "[0 0 0 0]", tVal, true},
-	{"indexes > cap(s)", "{{slice .SICap 10 11}}", "", tVal, false},
-	{"indexes > cap(s)", "{{slice .SICap 6 10 11}}", "", tVal, false},
-	{"array[:]", "{{slice .AI}}", "[3 4 5]", tVal, true},
-	{"array[1:]", "{{slice .AI 1}}", "[4 5]", tVal, true},
-	{"array[1:2]", "{{slice .AI 1 2}}", "[4]", tVal, true},
-	{"string[:]", "{{slice .S}}", "xyz", tVal, true},
-	{"string[0:1]", "{{slice .S 0 1}}", "x", tVal, true},
-	{"string[1:]", "{{slice .S 1}}", "yz", tVal, true},
-	{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
-	{"out of range", "{{slice .S 1 5}}", "", tVal, false},
-	{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
-	{"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
-
-	// Len.
-	{"slice", "{{len .SI}}", "3", tVal, true},
-	{"map", "{{len .MSI }}", "3", tVal, true},
-	{"len of int", "{{len 3}}", "", tVal, false},
-	{"len of nothing", "{{len .Empty0}}", "", tVal, false},
-	{"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
-
-	// With.
-	{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
-	{"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
-	{"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
-	{"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
-	{"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
-	{"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
-	{"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
-	{"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
-	{"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
-	{"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
-	{"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
-	{"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
-	{"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
-
-	// Range.
-	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
-	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
-	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
-	{"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
-	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
-	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
-	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
-	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
-	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
-	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
-	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
-	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
-	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
-	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
-	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
-	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
-
-	// Cute examples.
-	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
-	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
-
-	// Error handling.
-	{"error method, error", "{{.MyError true}}", "", tVal, false},
-	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
-
-	// Numbers
-	{"decimal", "{{print 1234}}", "1234", tVal, true},
-	{"decimal _", "{{print 12_34}}", "1234", tVal, true},
-	{"binary", "{{print 0b101}}", "5", tVal, true},
-	{"binary _", "{{print 0b_1_0_1}}", "5", tVal, true},
-	{"BINARY", "{{print 0B101}}", "5", tVal, true},
-	{"octal0", "{{print 0377}}", "255", tVal, true},
-	{"octal", "{{print 0o377}}", "255", tVal, true},
-	{"octal _", "{{print 0o_3_7_7}}", "255", tVal, true},
-	{"OCTAL", "{{print 0O377}}", "255", tVal, true},
-	{"hex", "{{print 0x123}}", "291", tVal, true},
-	{"hex _", "{{print 0x1_23}}", "291", tVal, true},
-	{"HEX", "{{print 0X123ABC}}", "1194684", tVal, true},
-	{"float", "{{print 123.4}}", "123.4", tVal, true},
-	{"float _", "{{print 0_0_1_2_3.4}}", "123.4", tVal, true},
-	{"hex float", "{{print +0x1.ep+2}}", "7.5", tVal, true},
-	{"hex float _", "{{print +0x_1.e_0p+0_2}}", "7.5", tVal, true},
-	{"HEX float", "{{print +0X1.EP+2}}", "7.5", tVal, true},
-	{"print multi", "{{print 1_2_3_4 7.5_00_00_00}}", "1234 7.5", tVal, true},
-	{"print multi2", "{{print 1234 0x0_1.e_0p+02}}", "1234 7.5", tVal, true},
-
-	// Fixed bugs.
-	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
-	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
-	// Do not loop endlessly in indirect for non-empty interfaces.
-	// The bug appears with *interface only; looped forever.
-	{"bug1", "{{.Method0}}", "M0", &iVal, true},
-	// Was taking address of interface field, so method set was empty.
-	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
-	// Struct values were not legal in with - mere oversight.
-	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
-	// Nil interface values in if.
-	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
-	// Stringer.
-	{"bug5", "{{.Str}}", "foozle", tVal, true},
-	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
-	// Args need to be indirected and dereferenced sometimes.
-	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
-	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
-	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
-	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
-	// Legal parse but illegal execution: non-function should have no arguments.
-	{"bug7a", "{{3 2}}", "", tVal, false},
-	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
-	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
-	// Pipelined arg was not being type-checked.
-	{"bug8a", "{{3|oneArg}}", "", tVal, false},
-	{"bug8b", "{{4|dddArg 3}}", "", tVal, false},
-	// A bug was introduced that broke map lookups for lower-case names.
-	{"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
-	// Field chain starting with function did not work.
-	{"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
-	// Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
-	{"bug11", "{{valueString .PS}}", "", T{}, false},
-	// 0xef gave constant type float64. Issue 8622.
-	{"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
-	{"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
-	{"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
-	{"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
-	// Chained nodes did not work as arguments. Issue 8473.
-	{"bug13", "{{print (.Copy).I}}", "17", tVal, true},
-	// Didn't protect against nil or literal values in field chains.
-	{"bug14a", "{{(nil).True}}", "", tVal, false},
-	{"bug14b", "{{$x := nil}}{{$x.anything}}", "", tVal, false},
-	{"bug14c", `{{$x := (1.0)}}{{$y := ("hello")}}{{$x.anything}}{{$y.true}}`, "", tVal, false},
-	// Didn't call validateType on function results. Issue 10800.
-	{"bug15", "{{valueString returnInt}}", "", tVal, false},
-	// Variadic function corner cases. Issue 10946.
-	{"bug16a", "{{true|printf}}", "", tVal, false},
-	{"bug16b", "{{1|printf}}", "", tVal, false},
-	{"bug16c", "{{1.1|printf}}", "", tVal, false},
-	{"bug16d", "{{'x'|printf}}", "", tVal, false},
-	{"bug16e", "{{0i|printf}}", "", tVal, false},
-	{"bug16f", "{{true|twoArgs \"xxx\"}}", "", tVal, false},
-	{"bug16g", "{{\"aaa\" |twoArgs \"bbb\"}}", "twoArgs=bbbaaa", tVal, true},
-	{"bug16h", "{{1|oneArg}}", "", tVal, false},
-	{"bug16i", "{{\"aaa\"|oneArg}}", "oneArg=aaa", tVal, true},
-	{"bug16j", "{{1+2i|printf \"%v\"}}", "(1+2i)", tVal, true},
-	{"bug16k", "{{\"aaa\"|printf }}", "aaa", tVal, true},
-	{"bug17a", "{{.NonEmptyInterface.X}}", "x", tVal, true},
-	{"bug17b", "-{{.NonEmptyInterface.Method1 1234}}-", "-1234-", tVal, true},
-	{"bug17c", "{{len .NonEmptyInterfacePtS}}", "2", tVal, true},
-	{"bug17d", "{{index .NonEmptyInterfacePtS 0}}", "a", tVal, true},
-	{"bug17e", "{{range .NonEmptyInterfacePtS}}-{{.}}-{{end}}", "-a--b-", tVal, true},
-
-	// More variadic function corner cases. Some runes would get evaluated
-	// as constant floats instead of ints. Issue 34483.
-	{"bug18a", "{{eq . '.'}}", "true", '.', true},
-	{"bug18b", "{{eq . 'e'}}", "true", 'e', true},
-	{"bug18c", "{{eq . 'P'}}", "true", 'P', true},
-}
-
-func zeroArgs() string {
-	return "zeroArgs"
-}
-
-func oneArg(a string) string {
-	return "oneArg=" + a
-}
-
-func twoArgs(a, b string) string {
-	return "twoArgs=" + a + b
-}
-
-func dddArg(a int, b ...string) string {
-	return fmt.Sprintln(a, b)
-}
-
-// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
-func count(n int) chan string {
-	if n == 0 {
-		return nil
-	}
-	c := make(chan string)
-	go func() {
-		for i := 0; i < n; i++ {
-			c <- "abcdefghijklmnop"[i : i+1]
-		}
-		close(c)
-	}()
-	return c
-}
-
-// vfunc takes a V and a *V
-func vfunc(V, *V) string {
-	return "vfunc"
-}
-
-// valueString takes a string, not a pointer.
-func valueString(v string) string {
-	return "value is ignored"
-}
-
-// returnInt returns an int
-func returnInt() int {
-	return 7
-}
-
-func add(args ...int) int {
-	sum := 0
-	for _, x := range args {
-		sum += x
-	}
-	return sum
-}
-
-func echo(arg interface{}) interface{} {
-	return arg
-}
-
-func makemap(arg ...string) map[string]string {
-	if len(arg)%2 != 0 {
-		panic("bad makemap")
-	}
-	m := make(map[string]string)
-	for i := 0; i < len(arg); i += 2 {
-		m[arg[i]] = arg[i+1]
-	}
-	return m
-}
-
-func stringer(s fmt.Stringer) string {
-	return s.String()
-}
-
-func mapOfThree() interface{} {
-	return map[string]int{"three": 3}
-}
-
-func testExecute(execTests []execTest, template *Template, t *testing.T) {
-	b := new(bytes.Buffer)
-	funcs := FuncMap{
-		"add":         add,
-		"count":       count,
-		"dddArg":      dddArg,
-		"die":         func() bool { panic("die") },
-		"echo":        echo,
-		"makemap":     makemap,
-		"mapOfThree":  mapOfThree,
-		"oneArg":      oneArg,
-		"returnInt":   returnInt,
-		"stringer":    stringer,
-		"twoArgs":     twoArgs,
-		"typeOf":      typeOf,
-		"valueString": valueString,
-		"vfunc":       vfunc,
-		"zeroArgs":    zeroArgs,
-	}
-	for _, test := range execTests {
-		var tmpl *Template
-		var err error
-		if template == nil {
-			tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
-		} else {
-			tmpl, err = template.New(test.name).Funcs(funcs).Parse(test.input)
-		}
-		if err != nil {
-			t.Errorf("%s: parse error: %s", test.name, err)
-			continue
-		}
-		b.Reset()
-		err = tmpl.Execute(b, test.data)
-		switch {
-		case !test.ok && err == nil:
-			t.Errorf("%s: expected error; got none", test.name)
-			continue
-		case test.ok && err != nil:
-			t.Errorf("%s: unexpected execute error: %s", test.name, err)
-			continue
-		case !test.ok && err != nil:
-			// expected error, got one
-			if *debug {
-				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
-			}
-		}
-		result := b.String()
-		if result != test.output {
-			t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
-		}
-	}
-}
-
-func TestExecute(t *testing.T) {
-	testExecute(execTests, nil, t)
-}
-
-var delimPairs = []string{
-	"", "", // default
-	"{{", "}}", // same as default
-	"<<", ">>", // distinct
-	"|", "|", // same
-	"(日)", "(本)", // peculiar
-}
-
-func TestDelims(t *testing.T) {
-	const hello = "Hello, world"
-	var value = struct{ Str string }{hello}
-	for i := 0; i < len(delimPairs); i += 2 {
-		text := ".Str"
-		left := delimPairs[i+0]
-		trueLeft := left
-		right := delimPairs[i+1]
-		trueRight := right
-		if left == "" { // default case
-			trueLeft = "{{"
-		}
-		if right == "" { // default case
-			trueRight = "}}"
-		}
-		text = trueLeft + text + trueRight
-		// Now add a comment
-		text += trueLeft + "/*comment*/" + trueRight
-		// Now add an action containing a string.
-		text += trueLeft + `"` + trueLeft + `"` + trueRight
-		// At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
-		tmpl, err := New("delims").Delims(left, right).Parse(text)
-		if err != nil {
-			t.Fatalf("delim %q text %q parse err %s", left, text, err)
-		}
-		var b = new(bytes.Buffer)
-		err = tmpl.Execute(b, value)
-		if err != nil {
-			t.Fatalf("delim %q exec err %s", left, err)
-		}
-		if b.String() != hello+trueLeft {
-			t.Errorf("expected %q got %q", hello+trueLeft, b.String())
-		}
-	}
-}
-
-// Check that an error from a method flows back to the top.
-func TestExecuteError(t *testing.T) {
-	b := new(bytes.Buffer)
-	tmpl := New("error")
-	_, err := tmpl.Parse("{{.MyError true}}")
-	if err != nil {
-		t.Fatalf("parse error: %s", err)
-	}
-	err = tmpl.Execute(b, tVal)
-	if err == nil {
-		t.Errorf("expected error; got none")
-	} else if !strings.Contains(err.Error(), myError.Error()) {
-		if *debug {
-			fmt.Printf("test execute error: %s\n", err)
-		}
-		t.Errorf("expected myError; got %s", err)
-	}
-}
-
-const execErrorText = `line 1
-line 2
-line 3
-{{template "one" .}}
-{{define "one"}}{{template "two" .}}{{end}}
-{{define "two"}}{{template "three" .}}{{end}}
-{{define "three"}}{{index "hi" $}}{{end}}`
-
-// Check that an error from a nested template contains all the relevant information.
-func TestExecError(t *testing.T) {
-	tmpl, err := New("top").Parse(execErrorText)
-	if err != nil {
-		t.Fatal("parse error:", err)
-	}
-	var b bytes.Buffer
-	err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
-	if err == nil {
-		t.Fatal("expected error")
-	}
-	const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
-	got := err.Error()
-	if got != want {
-		t.Errorf("expected\n%q\ngot\n%q", want, got)
-	}
-}
-
-type CustomError struct{}
-
-func (*CustomError) Error() string { return "heyo !" }
-
-// Check that a custom error can be returned.
-func TestExecError_CustomError(t *testing.T) {
-	failingFunc := func() (string, error) {
-		return "", &CustomError{}
-	}
-	tmpl := Must(New("top").Funcs(FuncMap{
-		"err": failingFunc,
-	}).Parse("{{ err }}"))
-
-	var b bytes.Buffer
-	err := tmpl.Execute(&b, nil)
-
-	var e *CustomError
-	if !errors.As(err, &e) {
-		t.Fatalf("expected custom error; got %s", err)
-	}
-}
-
-func TestJSEscaping(t *testing.T) {
-	testCases := []struct {
-		in, exp string
-	}{
-		{`a`, `a`},
-		{`'foo`, `\'foo`},
-		{`Go "jump" \`, `Go \"jump\" \\`},
-		{`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
-		{"unprintable \uFDFF", `unprintable \uFDFF`},
-		{`<html>`, `\u003Chtml\u003E`},
-		{`no = in attributes`, `no \u003D in attributes`},
-		{`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
-	}
-	for _, tc := range testCases {
-		s := JSEscapeString(tc.in)
-		if s != tc.exp {
-			t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
-		}
-	}
-}
-
-// A nice example: walk a binary tree.
-
-type Tree struct {
-	Val         int
-	Left, Right *Tree
-}
-
-// Use different delimiters to test Template.Delims.
-// Also test the trimming of leading and trailing spaces.
-const treeTemplate = `
-	(- define "tree" -)
-	[
-		(- .Val -)
-		(- with .Left -)
-			(template "tree" . -)
-		(- end -)
-		(- with .Right -)
-			(- template "tree" . -)
-		(- end -)
-	]
-	(- end -)
-`
-
-func TestTree(t *testing.T) {
-	var tree = &Tree{
-		1,
-		&Tree{
-			2, &Tree{
-				3,
-				&Tree{
-					4, nil, nil,
-				},
-				nil,
-			},
-			&Tree{
-				5,
-				&Tree{
-					6, nil, nil,
-				},
-				nil,
-			},
-		},
-		&Tree{
-			7,
-			&Tree{
-				8,
-				&Tree{
-					9, nil, nil,
-				},
-				nil,
-			},
-			&Tree{
-				10,
-				&Tree{
-					11, nil, nil,
-				},
-				nil,
-			},
-		},
-	}
-	tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
-	if err != nil {
-		t.Fatal("parse error:", err)
-	}
-	var b bytes.Buffer
-	const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
-	// First by looking up the template.
-	err = tmpl.Lookup("tree").Execute(&b, tree)
-	if err != nil {
-		t.Fatal("exec error:", err)
-	}
-	result := b.String()
-	if result != expect {
-		t.Errorf("expected %q got %q", expect, result)
-	}
-	// Then direct to execution.
-	b.Reset()
-	err = tmpl.ExecuteTemplate(&b, "tree", tree)
-	if err != nil {
-		t.Fatal("exec error:", err)
-	}
-	result = b.String()
-	if result != expect {
-		t.Errorf("expected %q got %q", expect, result)
-	}
-}
-
-func TestExecuteOnNewTemplate(t *testing.T) {
-	// This is issue 3872.
-	New("Name").Templates()
-	// This is issue 11379.
-	new(Template).Templates()
-	new(Template).Parse("")
-	new(Template).New("abc").Parse("")
-	new(Template).Execute(nil, nil)                // returns an error (but does not crash)
-	new(Template).ExecuteTemplate(nil, "XXX", nil) // returns an error (but does not crash)
-}
-
-const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
-
-func TestMessageForExecuteEmpty(t *testing.T) {
-	// Test a truly empty template.
-	tmpl := New("empty")
-	var b bytes.Buffer
-	err := tmpl.Execute(&b, 0)
-	if err == nil {
-		t.Fatal("expected initial error")
-	}
-	got := err.Error()
-	want := `template: empty: "empty" is an incomplete or empty template`
-	if got != want {
-		t.Errorf("expected error %s got %s", want, got)
-	}
-	// Add a non-empty template to check that the error is helpful.
-	tests, err := New("").Parse(testTemplates)
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmpl.AddParseTree("secondary", tests.Tree)
-	err = tmpl.Execute(&b, 0)
-	if err == nil {
-		t.Fatal("expected second error")
-	}
-	got = err.Error()
-	want = `template: empty: "empty" is an incomplete or empty template`
-	if got != want {
-		t.Errorf("expected error %s got %s", want, got)
-	}
-	// Make sure we can execute the secondary.
-	err = tmpl.ExecuteTemplate(&b, "secondary", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestFinalForPrintf(t *testing.T) {
-	tmpl, err := New("").Parse(`{{"x" | printf}}`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	var b bytes.Buffer
-	err = tmpl.Execute(&b, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-type cmpTest struct {
-	expr  string
-	truth string
-	ok    bool
-}
-
-var cmpTests = []cmpTest{
-	{"eq true true", "true", true},
-	{"eq true false", "false", true},
-	{"eq 1+2i 1+2i", "true", true},
-	{"eq 1+2i 1+3i", "false", true},
-	{"eq 1.5 1.5", "true", true},
-	{"eq 1.5 2.5", "false", true},
-	{"eq 1 1", "true", true},
-	{"eq 1 2", "false", true},
-	{"eq `xy` `xy`", "true", true},
-	{"eq `xy` `xyz`", "false", true},
-	{"eq .Uthree .Uthree", "true", true},
-	{"eq .Uthree .Ufour", "false", true},
-	{"eq 3 4 5 6 3", "true", true},
-	{"eq 3 4 5 6 7", "false", true},
-	{"ne true true", "false", true},
-	{"ne true false", "true", true},
-	{"ne 1+2i 1+2i", "false", true},
-	{"ne 1+2i 1+3i", "true", true},
-	{"ne 1.5 1.5", "false", true},
-	{"ne 1.5 2.5", "true", true},
-	{"ne 1 1", "false", true},
-	{"ne 1 2", "true", true},
-	{"ne `xy` `xy`", "false", true},
-	{"ne `xy` `xyz`", "true", true},
-	{"ne .Uthree .Uthree", "false", true},
-	{"ne .Uthree .Ufour", "true", true},
-	{"lt 1.5 1.5", "false", true},
-	{"lt 1.5 2.5", "true", true},
-	{"lt 1 1", "false", true},
-	{"lt 1 2", "true", true},
-	{"lt `xy` `xy`", "false", true},
-	{"lt `xy` `xyz`", "true", true},
-	{"lt .Uthree .Uthree", "false", true},
-	{"lt .Uthree .Ufour", "true", true},
-	{"le 1.5 1.5", "true", true},
-	{"le 1.5 2.5", "true", true},
-	{"le 2.5 1.5", "false", true},
-	{"le 1 1", "true", true},
-	{"le 1 2", "true", true},
-	{"le 2 1", "false", true},
-	{"le `xy` `xy`", "true", true},
-	{"le `xy` `xyz`", "true", true},
-	{"le `xyz` `xy`", "false", true},
-	{"le .Uthree .Uthree", "true", true},
-	{"le .Uthree .Ufour", "true", true},
-	{"le .Ufour .Uthree", "false", true},
-	{"gt 1.5 1.5", "false", true},
-	{"gt 1.5 2.5", "false", true},
-	{"gt 1 1", "false", true},
-	{"gt 2 1", "true", true},
-	{"gt 1 2", "false", true},
-	{"gt `xy` `xy`", "false", true},
-	{"gt `xy` `xyz`", "false", true},
-	{"gt .Uthree .Uthree", "false", true},
-	{"gt .Uthree .Ufour", "false", true},
-	{"gt .Ufour .Uthree", "true", true},
-	{"ge 1.5 1.5", "true", true},
-	{"ge 1.5 2.5", "false", true},
-	{"ge 2.5 1.5", "true", true},
-	{"ge 1 1", "true", true},
-	{"ge 1 2", "false", true},
-	{"ge 2 1", "true", true},
-	{"ge `xy` `xy`", "true", true},
-	{"ge `xy` `xyz`", "false", true},
-	{"ge `xyz` `xy`", "true", true},
-	{"ge .Uthree .Uthree", "true", true},
-	{"ge .Uthree .Ufour", "false", true},
-	{"ge .Ufour .Uthree", "true", true},
-	// Mixing signed and unsigned integers.
-	{"eq .Uthree .Three", "true", true},
-	{"eq .Three .Uthree", "true", true},
-	{"le .Uthree .Three", "true", true},
-	{"le .Three .Uthree", "true", true},
-	{"ge .Uthree .Three", "true", true},
-	{"ge .Three .Uthree", "true", true},
-	{"lt .Uthree .Three", "false", true},
-	{"lt .Three .Uthree", "false", true},
-	{"gt .Uthree .Three", "false", true},
-	{"gt .Three .Uthree", "false", true},
-	{"eq .Ufour .Three", "false", true},
-	{"lt .Ufour .Three", "false", true},
-	{"gt .Ufour .Three", "true", true},
-	{"eq .NegOne .Uthree", "false", true},
-	{"eq .Uthree .NegOne", "false", true},
-	{"ne .NegOne .Uthree", "true", true},
-	{"ne .Uthree .NegOne", "true", true},
-	{"lt .NegOne .Uthree", "true", true},
-	{"lt .Uthree .NegOne", "false", true},
-	{"le .NegOne .Uthree", "true", true},
-	{"le .Uthree .NegOne", "false", true},
-	{"gt .NegOne .Uthree", "false", true},
-	{"gt .Uthree .NegOne", "true", true},
-	{"ge .NegOne .Uthree", "false", true},
-	{"ge .Uthree .NegOne", "true", true},
-	{"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule.
-	{"eq (index `x` 0) 'y'", "false", true},
-	{"eq .V1 .V2", "true", true},
-	{"eq .Ptr .Ptr", "true", true},
-	{"eq .Ptr .NilPtr", "false", true},
-	{"eq .NilPtr .NilPtr", "true", true},
-	{"eq .Iface1 .Iface1", "true", true},
-	{"eq .Iface1 .NilIface", "false", true},
-	{"eq .NilIface .NilIface", "true", true},
-	{"eq .NilIface .Iface1", "false", true},
-	{"eq .NilIface 0", "false", true},
-	{"eq 0 .NilIface", "false", true},
-	// Errors
-	{"eq `xy` 1", "", false},       // Different types.
-	{"eq 2 2.0", "", false},        // Different types.
-	{"lt true true", "", false},    // Unordered types.
-	{"lt 1+0i 1+0i", "", false},    // Unordered types.
-	{"eq .Ptr 1", "", false},       // Incompatible types.
-	{"eq .Ptr .NegOne", "", false}, // Incompatible types.
-	{"eq .Map .Map", "", false},    // Uncomparable types.
-	{"eq .Map .V1", "", false},     // Uncomparable types.
-}
-
-func TestComparison(t *testing.T) {
-	b := new(bytes.Buffer)
-	var cmpStruct = struct {
-		Uthree, Ufour    uint
-		NegOne, Three    int
-		Ptr, NilPtr      *int
-		Map              map[int]int
-		V1, V2           V
-		Iface1, NilIface fmt.Stringer
-	}{
-		Uthree: 3,
-		Ufour:  4,
-		NegOne: -1,
-		Three:  3,
-		Ptr:    new(int),
-		Iface1: b,
-	}
-	for _, test := range cmpTests {
-		text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
-		tmpl, err := New("empty").Parse(text)
-		if err != nil {
-			t.Fatalf("%q: %s", test.expr, err)
-		}
-		b.Reset()
-		err = tmpl.Execute(b, &cmpStruct)
-		if test.ok && err != nil {
-			t.Errorf("%s errored incorrectly: %s", test.expr, err)
-			continue
-		}
-		if !test.ok && err == nil {
-			t.Errorf("%s did not error", test.expr)
-			continue
-		}
-		if b.String() != test.truth {
-			t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
-		}
-	}
-}
-
-func TestMissingMapKey(t *testing.T) {
-	data := map[string]int{
-		"x": 99,
-	}
-	tmpl, err := New("t1").Parse("{{.x}} {{.y}}")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var b bytes.Buffer
-	// By default, just get "<no value>"
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal(err)
-	}
-	want := "99 <no value>"
-	got := b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Same if we set the option explicitly to the default.
-	tmpl.Option("missingkey=default")
-	b.Reset()
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal("default:", err)
-	}
-	want = "99 <no value>"
-	got = b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Next we ask for a zero value
-	tmpl.Option("missingkey=zero")
-	b.Reset()
-	err = tmpl.Execute(&b, data)
-	if err != nil {
-		t.Fatal("zero:", err)
-	}
-	want = "99 0"
-	got = b.String()
-	if got != want {
-		t.Errorf("got %q; expected %q", got, want)
-	}
-	// Now we ask for an error.
-	tmpl.Option("missingkey=error")
-	err = tmpl.Execute(&b, data)
-	if err == nil {
-		t.Errorf("expected error; got none")
-	}
-	// same Option, but now a nil interface: ask for an error
-	err = tmpl.Execute(&b, nil)
-	t.Log(err)
-	if err == nil {
-		t.Errorf("expected error for nil-interface; got none")
-	}
-}
-
-// Test that the error message for a multiline unterminated string
-// refers to the line number of the opening quote.
-func TestUnterminatedStringError(t *testing.T) {
-	_, err := New("X").Parse("hello\n\n{{`unterminated\n\n\n\n}}\n some more\n\n")
-	if err == nil {
-		t.Fatal("expected error")
-	}
-	str := err.Error()
-	if !strings.Contains(str, "X:3: unterminated raw quoted string") {
-		t.Fatalf("unexpected error: %s", str)
-	}
-}
-
-const alwaysErrorText = "always be failing"
-
-var alwaysError = errors.New(alwaysErrorText)
-
-type ErrorWriter int
-
-func (e ErrorWriter) Write(p []byte) (int, error) {
-	return 0, alwaysError
-}
-
-func TestExecuteGivesExecError(t *testing.T) {
-	// First, a non-execution error shouldn't be an ExecError.
-	tmpl, err := New("X").Parse("hello")
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = tmpl.Execute(ErrorWriter(0), 0)
-	if err == nil {
-		t.Fatal("expected error; got none")
-	}
-	if err.Error() != alwaysErrorText {
-		t.Errorf("expected %q error; got %q", alwaysErrorText, err)
-	}
-	// This one should be an ExecError.
-	tmpl, err = New("X").Parse("hello, {{.X.Y}}")
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = tmpl.Execute(ioutil.Discard, 0)
-	if err == nil {
-		t.Fatal("expected error; got none")
-	}
-	_, ok := err.(ExecError)
-	if !ok {
-		t.Fatalf("expected an ExecError; got %s", err)
-	}
-	expect := "field X in type int"
-	if !strings.Contains(err.Error(), expect) {
-		t.Errorf("expected %q; got %q", expect, err)
-	}
-}
-
-func funcNameTestFunc() int {
-	return 0
-}
-
-func TestGoodFuncNames(t *testing.T) {
-	names := []string{
-		"_",
-		"a",
-		"a1",
-		"a1",
-		"Ӵ",
-	}
-	for _, name := range names {
-		tmpl := New("X").Funcs(
-			FuncMap{
-				name: funcNameTestFunc,
-			},
-		)
-		if tmpl == nil {
-			t.Fatalf("nil result for %q", name)
-		}
-	}
-}
-
-func TestBadFuncNames(t *testing.T) {
-	names := []string{
-		"",
-		"2",
-		"a-b",
-	}
-	for _, name := range names {
-		testBadFuncName(name, t)
-	}
-}
-
-func testBadFuncName(name string, t *testing.T) {
-	t.Helper()
-	defer func() {
-		recover()
-	}()
-	New("X").Funcs(
-		FuncMap{
-			name: funcNameTestFunc,
-		},
-	)
-	// If we get here, the name did not cause a panic, which is how Funcs
-	// reports an error.
-	t.Errorf("%q succeeded incorrectly as function name", name)
-}
-
-func TestBlock(t *testing.T) {
-	const (
-		input   = `a({{block "inner" .}}bar({{.}})baz{{end}})b`
-		want    = `a(bar(hello)baz)b`
-		overlay = `{{define "inner"}}foo({{.}})bar{{end}}`
-		want2   = `a(foo(goodbye)bar)b`
-	)
-	tmpl, err := New("outer").Parse(input)
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmpl2, err := Must(tmpl.Clone()).Parse(overlay)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var buf bytes.Buffer
-	if err := tmpl.Execute(&buf, "hello"); err != nil {
-		t.Fatal(err)
-	}
-	if got := buf.String(); got != want {
-		t.Errorf("got %q, want %q", got, want)
-	}
-
-	buf.Reset()
-	if err := tmpl2.Execute(&buf, "goodbye"); err != nil {
-		t.Fatal(err)
-	}
-	if got := buf.String(); got != want2 {
-		t.Errorf("got %q, want %q", got, want2)
-	}
-}
-
-func TestEvalFieldErrors(t *testing.T) {
-	tests := []struct {
-		name, src string
-		value     interface{}
-		want      string
-	}{
-		{
-			// Check that calling an invalid field on nil pointer
-			// prints a field error instead of a distracting nil
-			// pointer error. https://golang.org/issue/15125
-			"MissingFieldOnNil",
-			"{{.MissingField}}",
-			(*T)(nil),
-			"can't evaluate field MissingField in type *template.T",
-		},
-		{
-			"MissingFieldOnNonNil",
-			"{{.MissingField}}",
-			&T{},
-			"can't evaluate field MissingField in type *template.T",
-		},
-		{
-			"ExistingFieldOnNil",
-			"{{.X}}",
-			(*T)(nil),
-			"nil pointer evaluating *template.T.X",
-		},
-		{
-			"MissingKeyOnNilMap",
-			"{{.MissingKey}}",
-			(*map[string]string)(nil),
-			"nil pointer evaluating *map[string]string.MissingKey",
-		},
-		{
-			"MissingKeyOnNilMapPtr",
-			"{{.MissingKey}}",
-			(*map[string]string)(nil),
-			"nil pointer evaluating *map[string]string.MissingKey",
-		},
-		{
-			"MissingKeyOnMapPtrToNil",
-			"{{.MissingKey}}",
-			&map[string]string{},
-			"<nil>",
-		},
-	}
-	for _, tc := range tests {
-		t.Run(tc.name, func(t *testing.T) {
-			tmpl := Must(New("tmpl").Parse(tc.src))
-			err := tmpl.Execute(ioutil.Discard, tc.value)
-			got := "<nil>"
-			if err != nil {
-				got = err.Error()
-			}
-			if !strings.HasSuffix(got, tc.want) {
-				t.Fatalf("got error %q, want %q", got, tc.want)
-			}
-		})
-	}
-}
-
-func TestMaxExecDepth(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping in -short mode")
-	}
-	tmpl := Must(New("tmpl").Parse(`{{template "tmpl" .}}`))
-	err := tmpl.Execute(ioutil.Discard, nil)
-	got := "<nil>"
-	if err != nil {
-		got = err.Error()
-	}
-	const want = "exceeded maximum template depth"
-	if !strings.Contains(got, want) {
-		t.Errorf("got error %q; want %q", got, want)
-	}
-}
-
-func TestAddrOfIndex(t *testing.T) {
-	// golang.org/issue/14916.
-	// Before index worked on reflect.Values, the .String could not be
-	// found on the (incorrectly unaddressable) V value,
-	// in contrast to range, which worked fine.
-	// Also testing that passing a reflect.Value to tmpl.Execute works.
-	texts := []string{
-		`{{range .}}{{.String}}{{end}}`,
-		`{{with index . 0}}{{.String}}{{end}}`,
-	}
-	for _, text := range texts {
-		tmpl := Must(New("tmpl").Parse(text))
-		var buf bytes.Buffer
-		err := tmpl.Execute(&buf, reflect.ValueOf([]V{{1}}))
-		if err != nil {
-			t.Fatalf("%s: Execute: %v", text, err)
-		}
-		if buf.String() != "<1>" {
-			t.Fatalf("%s: template output = %q, want %q", text, &buf, "<1>")
-		}
-	}
-}
-
-func TestInterfaceValues(t *testing.T) {
-	// golang.org/issue/17714.
-	// Before index worked on reflect.Values, interface values
-	// were always implicitly promoted to the underlying value,
-	// except that nil interfaces were promoted to the zero reflect.Value.
-	// Eliminating a round trip to interface{} and back to reflect.Value
-	// eliminated this promotion, breaking these cases.
-	tests := []struct {
-		text string
-		out  string
-	}{
-		{`{{index .Nil 1}}`, "ERROR: index of untyped nil"},
-		{`{{index .Slice 2}}`, "2"},
-		{`{{index .Slice .Two}}`, "2"},
-		{`{{call .Nil 1}}`, "ERROR: call of nil"},
-		{`{{call .PlusOne 1}}`, "2"},
-		{`{{call .PlusOne .One}}`, "2"},
-		{`{{and (index .Slice 0) true}}`, "0"},
-		{`{{and .Zero true}}`, "0"},
-		{`{{and (index .Slice 1) false}}`, "false"},
-		{`{{and .One false}}`, "false"},
-		{`{{or (index .Slice 0) false}}`, "false"},
-		{`{{or .Zero false}}`, "false"},
-		{`{{or (index .Slice 1) true}}`, "1"},
-		{`{{or .One true}}`, "1"},
-		{`{{not (index .Slice 0)}}`, "true"},
-		{`{{not .Zero}}`, "true"},
-		{`{{not (index .Slice 1)}}`, "false"},
-		{`{{not .One}}`, "false"},
-		{`{{eq (index .Slice 0) .Zero}}`, "true"},
-		{`{{eq (index .Slice 1) .One}}`, "true"},
-		{`{{ne (index .Slice 0) .Zero}}`, "false"},
-		{`{{ne (index .Slice 1) .One}}`, "false"},
-		{`{{ge (index .Slice 0) .One}}`, "false"},
-		{`{{ge (index .Slice 1) .Zero}}`, "true"},
-		{`{{gt (index .Slice 0) .One}}`, "false"},
-		{`{{gt (index .Slice 1) .Zero}}`, "true"},
-		{`{{le (index .Slice 0) .One}}`, "true"},
-		{`{{le (index .Slice 1) .Zero}}`, "false"},
-		{`{{lt (index .Slice 0) .One}}`, "true"},
-		{`{{lt (index .Slice 1) .Zero}}`, "false"},
-	}
-
-	for _, tt := range tests {
-		tmpl := Must(New("tmpl").Parse(tt.text))
-		var buf bytes.Buffer
-		err := tmpl.Execute(&buf, map[string]interface{}{
-			"PlusOne": func(n int) int {
-				return n + 1
-			},
-			"Slice": []int{0, 1, 2, 3},
-			"One":   1,
-			"Two":   2,
-			"Nil":   nil,
-			"Zero":  0,
-		})
-		if strings.HasPrefix(tt.out, "ERROR:") {
-			e := strings.TrimSpace(strings.TrimPrefix(tt.out, "ERROR:"))
-			if err == nil || !strings.Contains(err.Error(), e) {
-				t.Errorf("%s: Execute: %v, want error %q", tt.text, err, e)
-			}
-			continue
-		}
-		if err != nil {
-			t.Errorf("%s: Execute: %v", tt.text, err)
-			continue
-		}
-		if buf.String() != tt.out {
-			t.Errorf("%s: template output = %q, want %q", tt.text, &buf, tt.out)
-		}
-	}
-}
-
-// Check that panics during calls are recovered and returned as errors.
-func TestExecutePanicDuringCall(t *testing.T) {
-	funcs := map[string]interface{}{
-		"doPanic": func() string {
-			panic("custom panic string")
-		},
-	}
-	tests := []struct {
-		name    string
-		input   string
-		data    interface{}
-		wantErr string
-	}{
-		{
-			"direct func call panics",
-			"{{doPanic}}", (*T)(nil),
-			`template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
-		},
-		{
-			"indirect func call panics",
-			"{{call doPanic}}", (*T)(nil),
-			`template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
-		},
-		{
-			"direct method call panics",
-			"{{.GetU}}", (*T)(nil),
-			`template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
-		},
-		{
-			"indirect method call panics",
-			"{{call .GetU}}", (*T)(nil),
-			`template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
-		},
-		{
-			"func field call panics",
-			"{{call .PanicFunc}}", tVal,
-			`template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
-		},
-		{
-			"method call on nil interface",
-			"{{.NonEmptyInterfaceNil.Method0}}", tVal,
-			`template: t:1:23: executing "t" at <.NonEmptyInterfaceNil.Method0>: nil pointer evaluating template.I.Method0`,
-		},
-	}
-	for _, tc := range tests {
-		b := new(bytes.Buffer)
-		tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
-		if err != nil {
-			t.Fatalf("parse error: %s", err)
-		}
-		err = tmpl.Execute(b, tc.data)
-		if err == nil {
-			t.Errorf("%s: expected error; got none", tc.name)
-		} else if !strings.Contains(err.Error(), tc.wantErr) {
-			if *debug {
-				fmt.Printf("%s: test execute error: %s\n", tc.name, err)
-			}
-			t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
-		}
-	}
-}
-
-// Issue 31810. Check that a parenthesized first argument behaves properly.
-func TestIssue31810(t *testing.T) {
-	// A simple value with no arguments is fine.
-	var b bytes.Buffer
-	const text = "{{ (.)  }}"
-	tmpl, err := New("").Parse(text)
-	if err != nil {
-		t.Error(err)
-	}
-	err = tmpl.Execute(&b, "result")
-	if err != nil {
-		t.Error(err)
-	}
-	if b.String() != "result" {
-		t.Errorf("%s got %q, expected %q", text, b.String(), "result")
-	}
-
-	// Even a plain function fails - need to use call.
-	f := func() string { return "result" }
-	b.Reset()
-	err = tmpl.Execute(&b, f)
-	if err == nil {
-		t.Error("expected error with no call, got none")
-	}
-
-	// Works if the function is explicitly called.
-	const textCall = "{{ (call .)  }}"
-	tmpl, err = New("").Parse(textCall)
-	b.Reset()
-	err = tmpl.Execute(&b, f)
-	if err != nil {
-		t.Error(err)
-	}
-	if b.String() != "result" {
-		t.Errorf("%s got %q, expected %q", textCall, b.String(), "result")
-	}
-}
-
-// Issue 43065: range over a send-only channel.
-func TestIssue43065(t *testing.T) {
-	var b bytes.Buffer
-	tmp := Must(New("").Parse(`{{range .}}{{end}}`))
-	ch := make(chan<- int)
-	err := tmp.Execute(&b, ch)
-	if err == nil {
-		t.Error("expected err got nil")
-	} else if !strings.Contains(err.Error(), "range over send-only channel") {
-		t.Errorf("%s", err)
-	}
-}
-
-// Issue 39807: data race in html/template & text/template
-func TestIssue39807(t *testing.T) {
-	var wg sync.WaitGroup
-
-	tplFoo, err := New("foo").Parse(`{{ template "bar" . }}`)
-	if err != nil {
-		t.Error(err)
-	}
-
-	tplBar, err := New("bar").Parse("bar")
-	if err != nil {
-		t.Error(err)
-	}
-
-	gofuncs := 10
-	numTemplates := 10
-
-	for i := 1; i <= gofuncs; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for j := 0; j < numTemplates; j++ {
-				_, err := tplFoo.AddParseTree(tplBar.Name(), tplBar.Tree)
-				if err != nil {
-					t.Error(err)
-				}
-				err = tplFoo.Execute(ioutil.Discard, nil)
-				if err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-	}
-
-	wg.Wait()
-}
diff --git a/internal/backport/text/template/funcs.go b/internal/backport/text/template/funcs.go
deleted file mode 100644
index 23425b4..0000000
--- a/internal/backport/text/template/funcs.go
+++ /dev/null
@@ -1,755 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"net/url"
-	"reflect"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-//
-// Errors returned by Execute wrap the underlying error; call errors.As to
-// uncover them.
-//
-// When template execution invokes a function with an argument list, that list
-// must be assignable to the function's parameter types. Functions meant to
-// apply to arguments of arbitrary type can use parameters of type interface{} or
-// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
-// type can return interface{} or reflect.Value.
-type FuncMap map[string]interface{}
-
-// builtins returns the FuncMap.
-// It is not a global variable so the linker can dead code eliminate
-// more when this isn't called. See golang.org/issue/36021.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtins() FuncMap {
-	return FuncMap{
-		"and":      and,
-		"call":     call,
-		"html":     HTMLEscaper,
-		"index":    index,
-		"slice":    slice,
-		"js":       JSEscaper,
-		"len":      length,
-		"not":      not,
-		"or":       or,
-		"print":    fmt.Sprint,
-		"printf":   fmt.Sprintf,
-		"println":  fmt.Sprintln,
-		"urlquery": URLQueryEscaper,
-
-		// Comparisons
-		"eq": eq, // ==
-		"ge": ge, // >=
-		"gt": gt, // >
-		"le": le, // <=
-		"lt": lt, // <
-		"ne": ne, // !=
-	}
-}
-
-var builtinFuncsOnce struct {
-	sync.Once
-	v map[string]reflect.Value
-}
-
-// builtinFuncs lazily computes & caches the builtinFuncs map.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtinFuncs() map[string]reflect.Value {
-	builtinFuncsOnce.Do(func() {
-		builtinFuncsOnce.v = createValueFuncs(builtins())
-	})
-	return builtinFuncsOnce.v
-}
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
-	m := make(map[string]reflect.Value)
-	addValueFuncs(m, funcMap)
-	return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
-	for name, fn := range in {
-		if !goodName(name) {
-			panic(fmt.Errorf("function name %q is not a valid identifier", name))
-		}
-		v := reflect.ValueOf(fn)
-		if v.Kind() != reflect.Func {
-			panic("value for " + name + " not a function")
-		}
-		if !goodFunc(v.Type()) {
-			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
-		}
-		out[name] = v
-	}
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
-	for name, fn := range in {
-		out[name] = fn
-	}
-}
-
-// goodFunc reports whether the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
-	// We allow functions with 1 result or 2 results where the second is an error.
-	switch {
-	case typ.NumOut() == 1:
-		return true
-	case typ.NumOut() == 2 && typ.Out(1) == errorType:
-		return true
-	}
-	return false
-}
-
-// goodName reports whether the function name is a valid identifier.
-func goodName(name string) bool {
-	if name == "" {
-		return false
-	}
-	for i, r := range name {
-		switch {
-		case r == '_':
-		case i == 0 && !unicode.IsLetter(r):
-			return false
-		case !unicode.IsLetter(r) && !unicode.IsDigit(r):
-			return false
-		}
-	}
-	return true
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
-	if tmpl != nil && tmpl.common != nil {
-		tmpl.muFuncs.RLock()
-		defer tmpl.muFuncs.RUnlock()
-		if fn := tmpl.execFuncs[name]; fn.IsValid() {
-			return fn, false, true
-		}
-	}
-	if fn := builtinFuncs()[name]; fn.IsValid() {
-		return fn, true, true
-	}
-	return reflect.Value{}, false, false
-}
-
-// prepareArg checks if value can be used as an argument of type argType, and
-// converts an invalid value to appropriate zero if possible.
-func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
-	if !value.IsValid() {
-		if !canBeNil(argType) {
-			return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
-		}
-		value = reflect.Zero(argType)
-	}
-	if value.Type().AssignableTo(argType) {
-		return value, nil
-	}
-	if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
-		value = value.Convert(argType)
-		return value, nil
-	}
-	return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
-}
-
-func intLike(typ reflect.Kind) bool {
-	switch typ {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return true
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return true
-	}
-	return false
-}
-
-// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
-func indexArg(index reflect.Value, cap int) (int, error) {
-	var x int64
-	switch index.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		x = index.Int()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		x = int64(index.Uint())
-	case reflect.Invalid:
-		return 0, fmt.Errorf("cannot index slice/array with nil")
-	default:
-		return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
-	}
-	if x < 0 || int(x) < 0 || int(x) > cap {
-		return 0, fmt.Errorf("index out of range: %d", x)
-	}
-	return int(x), nil
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
-	item = indirectInterface(item)
-	if !item.IsValid() {
-		return reflect.Value{}, fmt.Errorf("index of untyped nil")
-	}
-	for _, index := range indexes {
-		index = indirectInterface(index)
-		var isNil bool
-		if item, isNil = indirect(item); isNil {
-			return reflect.Value{}, fmt.Errorf("index of nil pointer")
-		}
-		switch item.Kind() {
-		case reflect.Array, reflect.Slice, reflect.String:
-			x, err := indexArg(index, item.Len())
-			if err != nil {
-				return reflect.Value{}, err
-			}
-			item = item.Index(x)
-		case reflect.Map:
-			index, err := prepareArg(index, item.Type().Key())
-			if err != nil {
-				return reflect.Value{}, err
-			}
-			if x := item.MapIndex(index); x.IsValid() {
-				item = x
-			} else {
-				item = reflect.Zero(item.Type().Elem())
-			}
-		case reflect.Invalid:
-			// the loop holds invariant: item.IsValid()
-			panic("unreachable")
-		default:
-			return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
-		}
-	}
-	return item, nil
-}
-
-// Slicing.
-
-// slice returns the result of slicing its first argument by the remaining
-// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
-// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
-// argument must be a string, slice, or array.
-func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
-	item = indirectInterface(item)
-	if !item.IsValid() {
-		return reflect.Value{}, fmt.Errorf("slice of untyped nil")
-	}
-	if len(indexes) > 3 {
-		return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
-	}
-	var cap int
-	switch item.Kind() {
-	case reflect.String:
-		if len(indexes) == 3 {
-			return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
-		}
-		cap = item.Len()
-	case reflect.Array, reflect.Slice:
-		cap = item.Cap()
-	default:
-		return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
-	}
-	// set default values for cases item[:], item[i:].
-	idx := [3]int{0, item.Len()}
-	for i, index := range indexes {
-		x, err := indexArg(index, cap)
-		if err != nil {
-			return reflect.Value{}, err
-		}
-		idx[i] = x
-	}
-	// given item[i:j], make sure i <= j.
-	if idx[0] > idx[1] {
-		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
-	}
-	if len(indexes) < 3 {
-		return item.Slice(idx[0], idx[1]), nil
-	}
-	// given item[i:j:k], make sure i <= j <= k.
-	if idx[1] > idx[2] {
-		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
-	}
-	return item.Slice3(idx[0], idx[1], idx[2]), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item reflect.Value) (int, error) {
-	item, isNil := indirect(item)
-	if isNil {
-		return 0, fmt.Errorf("len of nil pointer")
-	}
-	switch item.Kind() {
-	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
-		return item.Len(), nil
-	}
-	return 0, fmt.Errorf("len of type %s", item.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
-	fn = indirectInterface(fn)
-	if !fn.IsValid() {
-		return reflect.Value{}, fmt.Errorf("call of nil")
-	}
-	typ := fn.Type()
-	if typ.Kind() != reflect.Func {
-		return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
-	}
-	if !goodFunc(typ) {
-		return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
-	}
-	numIn := typ.NumIn()
-	var dddType reflect.Type
-	if typ.IsVariadic() {
-		if len(args) < numIn-1 {
-			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
-		}
-		dddType = typ.In(numIn - 1).Elem()
-	} else {
-		if len(args) != numIn {
-			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
-		}
-	}
-	argv := make([]reflect.Value, len(args))
-	for i, arg := range args {
-		arg = indirectInterface(arg)
-		// Compute the expected type. Clumsy because of variadics.
-		argType := dddType
-		if !typ.IsVariadic() || i < numIn-1 {
-			argType = typ.In(i)
-		}
-
-		var err error
-		if argv[i], err = prepareArg(arg, argType); err != nil {
-			return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
-		}
-	}
-	return safeCall(fn, argv)
-}
-
-// safeCall runs fun.Call(args), and returns the resulting value and error, if
-// any. If the call panics, the panic value is returned as an error.
-func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			if e, ok := r.(error); ok {
-				err = e
-			} else {
-				err = fmt.Errorf("%v", r)
-			}
-		}
-	}()
-	ret := fun.Call(args)
-	if len(ret) == 2 && !ret[1].IsNil() {
-		return ret[0], ret[1].Interface().(error)
-	}
-	return ret[0], nil
-}
-
-// Boolean logic.
-
-func truth(arg reflect.Value) bool {
-	t, _ := isTrue(indirectInterface(arg))
-	return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
-	panic("unreachable") // implemented as a special case in evalCall
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
-	panic("unreachable") // implemented as a special case in evalCall
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg reflect.Value) bool {
-	return !truth(arg)
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
-	errBadComparisonType = errors.New("invalid type for comparison")
-	errBadComparison     = errors.New("incompatible types for comparison")
-	errNoComparison      = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
-	invalidKind kind = iota
-	boolKind
-	complexKind
-	intKind
-	floatKind
-	stringKind
-	uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
-	switch v.Kind() {
-	case reflect.Bool:
-		return boolKind, nil
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return intKind, nil
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return uintKind, nil
-	case reflect.Float32, reflect.Float64:
-		return floatKind, nil
-	case reflect.Complex64, reflect.Complex128:
-		return complexKind, nil
-	case reflect.String:
-		return stringKind, nil
-	}
-	return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
-	arg1 = indirectInterface(arg1)
-	if arg1 != zero {
-		if t1 := arg1.Type(); !t1.Comparable() {
-			return false, fmt.Errorf("uncomparable type %s: %v", t1, arg1)
-		}
-	}
-	if len(arg2) == 0 {
-		return false, errNoComparison
-	}
-	k1, _ := basicKind(arg1)
-	for _, arg := range arg2 {
-		arg = indirectInterface(arg)
-		k2, _ := basicKind(arg)
-		truth := false
-		if k1 != k2 {
-			// Special case: Can compare integer values regardless of type's sign.
-			switch {
-			case k1 == intKind && k2 == uintKind:
-				truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
-			case k1 == uintKind && k2 == intKind:
-				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
-			default:
-				if arg1 != zero && arg != zero {
-					return false, errBadComparison
-				}
-			}
-		} else {
-			switch k1 {
-			case boolKind:
-				truth = arg1.Bool() == arg.Bool()
-			case complexKind:
-				truth = arg1.Complex() == arg.Complex()
-			case floatKind:
-				truth = arg1.Float() == arg.Float()
-			case intKind:
-				truth = arg1.Int() == arg.Int()
-			case stringKind:
-				truth = arg1.String() == arg.String()
-			case uintKind:
-				truth = arg1.Uint() == arg.Uint()
-			default:
-				if arg == zero || arg1 == zero {
-					truth = arg1 == arg
-				} else {
-					if t2 := arg.Type(); !t2.Comparable() {
-						return false, fmt.Errorf("uncomparable type %s: %v", t2, arg)
-					}
-					truth = arg1.Interface() == arg.Interface()
-				}
-			}
-		}
-		if truth {
-			return true, nil
-		}
-	}
-	return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 reflect.Value) (bool, error) {
-	// != is the inverse of ==.
-	equal, err := eq(arg1, arg2)
-	return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 reflect.Value) (bool, error) {
-	arg1 = indirectInterface(arg1)
-	k1, err := basicKind(arg1)
-	if err != nil {
-		return false, err
-	}
-	arg2 = indirectInterface(arg2)
-	k2, err := basicKind(arg2)
-	if err != nil {
-		return false, err
-	}
-	truth := false
-	if k1 != k2 {
-		// Special case: Can compare integer values regardless of type's sign.
-		switch {
-		case k1 == intKind && k2 == uintKind:
-			truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
-		case k1 == uintKind && k2 == intKind:
-			truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
-		default:
-			return false, errBadComparison
-		}
-	} else {
-		switch k1 {
-		case boolKind, complexKind:
-			return false, errBadComparisonType
-		case floatKind:
-			truth = arg1.Float() < arg2.Float()
-		case intKind:
-			truth = arg1.Int() < arg2.Int()
-		case stringKind:
-			truth = arg1.String() < arg2.String()
-		case uintKind:
-			truth = arg1.Uint() < arg2.Uint()
-		default:
-			panic("invalid kind")
-		}
-	}
-	return truth, nil
-}
-
-// le evaluates the comparison a <= b.
-func le(arg1, arg2 reflect.Value) (bool, error) {
-	// <= is < or ==.
-	lessThan, err := lt(arg1, arg2)
-	if lessThan || err != nil {
-		return lessThan, err
-	}
-	return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 reflect.Value) (bool, error) {
-	// > is the inverse of <=.
-	lessOrEqual, err := le(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 reflect.Value) (bool, error) {
-	// >= is the inverse of <.
-	lessThan, err := lt(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
-	htmlQuot = []byte("&#34;") // shorter than "&quot;"
-	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
-	htmlAmp  = []byte("&amp;")
-	htmlLt   = []byte("&lt;")
-	htmlGt   = []byte("&gt;")
-	htmlNull = []byte("\uFFFD")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
-	last := 0
-	for i, c := range b {
-		var html []byte
-		switch c {
-		case '\000':
-			html = htmlNull
-		case '"':
-			html = htmlQuot
-		case '\'':
-			html = htmlApos
-		case '&':
-			html = htmlAmp
-		case '<':
-			html = htmlLt
-		case '>':
-			html = htmlGt
-		default:
-			continue
-		}
-		w.Write(b[last:i])
-		w.Write(html)
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if !strings.ContainsAny(s, "'\"&<>\000") {
-		return s
-	}
-	var b bytes.Buffer
-	HTMLEscape(&b, []byte(s))
-	return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
-	return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
-	jsLowUni = []byte(`\u00`)
-	hex      = []byte("0123456789ABCDEF")
-
-	jsBackslash = []byte(`\\`)
-	jsApos      = []byte(`\'`)
-	jsQuot      = []byte(`\"`)
-	jsLt        = []byte(`\u003C`)
-	jsGt        = []byte(`\u003E`)
-	jsAmp       = []byte(`\u0026`)
-	jsEq        = []byte(`\u003D`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
-	last := 0
-	for i := 0; i < len(b); i++ {
-		c := b[i]
-
-		if !jsIsSpecial(rune(c)) {
-			// fast path: nothing to do
-			continue
-		}
-		w.Write(b[last:i])
-
-		if c < utf8.RuneSelf {
-			// Quotes, slashes and angle brackets get quoted.
-			// Control characters get written as \u00XX.
-			switch c {
-			case '\\':
-				w.Write(jsBackslash)
-			case '\'':
-				w.Write(jsApos)
-			case '"':
-				w.Write(jsQuot)
-			case '<':
-				w.Write(jsLt)
-			case '>':
-				w.Write(jsGt)
-			case '&':
-				w.Write(jsAmp)
-			case '=':
-				w.Write(jsEq)
-			default:
-				w.Write(jsLowUni)
-				t, b := c>>4, c&0x0f
-				w.Write(hex[t : t+1])
-				w.Write(hex[b : b+1])
-			}
-		} else {
-			// Unicode rune.
-			r, size := utf8.DecodeRune(b[i:])
-			if unicode.IsPrint(r) {
-				w.Write(b[i : i+size])
-			} else {
-				fmt.Fprintf(w, "\\u%04X", r)
-			}
-			i += size - 1
-		}
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if strings.IndexFunc(s, jsIsSpecial) < 0 {
-		return s
-	}
-	var b bytes.Buffer
-	JSEscape(&b, []byte(s))
-	return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
-	switch r {
-	case '\\', '\'', '"', '<', '>', '&', '=':
-		return true
-	}
-	return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
-	return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
-	return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-//
-//	fmt.Sprint(args...)
-//
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
-	ok := false
-	var s string
-	// Fast path for simple common case.
-	if len(args) == 1 {
-		s, ok = args[0].(string)
-	}
-	if !ok {
-		for i, arg := range args {
-			a, ok := printableValue(reflect.ValueOf(arg))
-			if ok {
-				args[i] = a
-			} // else let fmt do its thing
-		}
-		s = fmt.Sprint(args...)
-	}
-	return s
-}
diff --git a/internal/backport/text/template/funcs.go.save b/internal/backport/text/template/funcs.go.save
deleted file mode 100644
index c3f6d47..0000000
--- a/internal/backport/text/template/funcs.go.save
+++ /dev/null
@@ -1,771 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"net/url"
-	"reflect"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-//
-// Errors returned by Execute wrap the underlying error; call errors.As to
-// uncover them.
-//
-// When template execution invokes a function with an argument list, that list
-// must be assignable to the function's parameter types. Functions meant to
-// apply to arguments of arbitrary type can use parameters of type interface{} or
-// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
-// type can return interface{} or reflect.Value.
-type FuncMap map[string]interface{}
-
-// builtins returns the FuncMap.
-// It is not a global variable so the linker can dead code eliminate
-// more when this isn't called. See golang.org/issue/36021.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtins() FuncMap {
-	return FuncMap{
-		"and":      and,
-		"call":     call,
-		"html":     HTMLEscaper,
-		"index":    index,
-		"slice":    slice,
-		"js":       JSEscaper,
-		"len":      length,
-		"not":      not,
-		"or":       or,
-		"print":    fmt.Sprint,
-		"printf":   fmt.Sprintf,
-		"println":  fmt.Sprintln,
-		"urlquery": URLQueryEscaper,
-
-		// Comparisons
-		"eq": eq, // ==
-		"ge": ge, // >=
-		"gt": gt, // >
-		"le": le, // <=
-		"lt": lt, // <
-		"ne": ne, // !=
-	}
-}
-
-var builtinFuncsOnce struct {
-	sync.Once
-	v map[string]reflect.Value
-}
-
-// builtinFuncsOnce lazily computes & caches the builtinFuncs map.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtinFuncs() map[string]reflect.Value {
-	builtinFuncsOnce.Do(func() {
-		builtinFuncsOnce.v = createValueFuncs(builtins())
-	})
-	return builtinFuncsOnce.v
-}
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
-	m := make(map[string]reflect.Value)
-	addValueFuncs(m, funcMap)
-	return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
-	for name, fn := range in {
-		if !goodName(name) {
-			panic(fmt.Errorf("function name %q is not a valid identifier", name))
-		}
-		v := reflect.ValueOf(fn)
-		if v.Kind() != reflect.Func {
-			panic("value for " + name + " not a function")
-		}
-		if !goodFunc(v.Type()) {
-			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
-		}
-		out[name] = v
-	}
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
-	for name, fn := range in {
-		out[name] = fn
-	}
-}
-
-// goodFunc reports whether the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
-	// We allow functions with 1 result or 2 results where the second is an error.
-	switch {
-	case typ.NumOut() == 1:
-		return true
-	case typ.NumOut() == 2 && typ.Out(1) == errorType:
-		return true
-	}
-	return false
-}
-
-// goodName reports whether the function name is a valid identifier.
-func goodName(name string) bool {
-	if name == "" {
-		return false
-	}
-	for i, r := range name {
-		switch {
-		case r == '_':
-		case i == 0 && !unicode.IsLetter(r):
-			return false
-		case !unicode.IsLetter(r) && !unicode.IsDigit(r):
-			return false
-		}
-	}
-	return true
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
-	if tmpl != nil && tmpl.common != nil {
-		tmpl.muFuncs.RLock()
-		defer tmpl.muFuncs.RUnlock()
-		if fn := tmpl.execFuncs[name]; fn.IsValid() {
-			return fn, true
-		}
-	}
-	if fn := builtinFuncs()[name]; fn.IsValid() {
-		return fn, true
-	}
-	return reflect.Value{}, false
-}
-
-// prepareArg checks if value can be used as an argument of type argType, and
-// converts an invalid value to appropriate zero if possible.
-func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
-	if !value.IsValid() {
-		if !canBeNil(argType) {
-			return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
-		}
-		value = reflect.Zero(argType)
-	}
-	if value.Type().AssignableTo(argType) {
-		return value, nil
-	}
-	if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
-		value = value.Convert(argType)
-		return value, nil
-	}
-	return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
-}
-
-func intLike(typ reflect.Kind) bool {
-	switch typ {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return true
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return true
-	}
-	return false
-}
-
-// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
-func indexArg(index reflect.Value, cap int) (int, error) {
-	var x int64
-	switch index.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		x = index.Int()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		x = int64(index.Uint())
-	case reflect.Invalid:
-		return 0, fmt.Errorf("cannot index slice/array with nil")
-	default:
-		return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
-	}
-	if x < 0 || int(x) < 0 || int(x) > cap {
-		return 0, fmt.Errorf("index out of range: %d", x)
-	}
-	return int(x), nil
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
-	item = indirectInterface(item)
-	if !item.IsValid() {
-		return reflect.Value{}, fmt.Errorf("index of untyped nil")
-	}
-	for _, index := range indexes {
-		index = indirectInterface(index)
-		var isNil bool
-		if item, isNil = indirect(item); isNil {
-			return reflect.Value{}, fmt.Errorf("index of nil pointer")
-		}
-		switch item.Kind() {
-		case reflect.Array, reflect.Slice, reflect.String:
-			x, err := indexArg(index, item.Len())
-			if err != nil {
-				return reflect.Value{}, err
-			}
-			item = item.Index(x)
-		case reflect.Map:
-			index, err := prepareArg(index, item.Type().Key())
-			if err != nil {
-				return reflect.Value{}, err
-			}
-			if x := item.MapIndex(index); x.IsValid() {
-				item = x
-			} else {
-				item = reflect.Zero(item.Type().Elem())
-			}
-		case reflect.Invalid:
-			// the loop holds invariant: item.IsValid()
-			panic("unreachable")
-		default:
-			return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
-		}
-	}
-	return item, nil
-}
-
-// Slicing.
-
-// slice returns the result of slicing its first argument by the remaining
-// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
-// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
-// argument must be a string, slice, or array.
-func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
-	item = indirectInterface(item)
-	if !item.IsValid() {
-		return reflect.Value{}, fmt.Errorf("slice of untyped nil")
-	}
-	if len(indexes) > 3 {
-		return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
-	}
-	var cap int
-	switch item.Kind() {
-	case reflect.String:
-		if len(indexes) == 3 {
-			return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
-		}
-		cap = item.Len()
-	case reflect.Array, reflect.Slice:
-		cap = item.Cap()
-	default:
-		return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
-	}
-	// set default values for cases item[:], item[i:].
-	idx := [3]int{0, item.Len()}
-	for i, index := range indexes {
-		x, err := indexArg(index, cap)
-		if err != nil {
-			return reflect.Value{}, err
-		}
-		idx[i] = x
-	}
-	// given item[i:j], make sure i <= j.
-	if idx[0] > idx[1] {
-		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
-	}
-	if len(indexes) < 3 {
-		return item.Slice(idx[0], idx[1]), nil
-	}
-	// given item[i:j:k], make sure i <= j <= k.
-	if idx[1] > idx[2] {
-		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
-	}
-	return item.Slice3(idx[0], idx[1], idx[2]), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item reflect.Value) (int, error) {
-	item, isNil := indirect(item)
-	if isNil {
-		return 0, fmt.Errorf("len of nil pointer")
-	}
-	switch item.Kind() {
-	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
-		return item.Len(), nil
-	}
-	return 0, fmt.Errorf("len of type %s", item.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
-	fn = indirectInterface(fn)
-	if !fn.IsValid() {
-		return reflect.Value{}, fmt.Errorf("call of nil")
-	}
-	typ := fn.Type()
-	if typ.Kind() != reflect.Func {
-		return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
-	}
-	if !goodFunc(typ) {
-		return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
-	}
-	numIn := typ.NumIn()
-	var dddType reflect.Type
-	if typ.IsVariadic() {
-		if len(args) < numIn-1 {
-			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
-		}
-		dddType = typ.In(numIn - 1).Elem()
-	} else {
-		if len(args) != numIn {
-			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
-		}
-	}
-	argv := make([]reflect.Value, len(args))
-	for i, arg := range args {
-		arg = indirectInterface(arg)
-		// Compute the expected type. Clumsy because of variadics.
-		argType := dddType
-		if !typ.IsVariadic() || i < numIn-1 {
-			argType = typ.In(i)
-		}
-
-		var err error
-		if argv[i], err = prepareArg(arg, argType); err != nil {
-			return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
-		}
-	}
-	return safeCall(fn, argv)
-}
-
-// safeCall runs fun.Call(args), and returns the resulting value and error, if
-// any. If the call panics, the panic value is returned as an error.
-func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			if e, ok := r.(error); ok {
-				err = e
-			} else {
-				err = fmt.Errorf("%v", r)
-			}
-		}
-	}()
-	ret := fun.Call(args)
-	if len(ret) == 2 && !ret[1].IsNil() {
-		return ret[0], ret[1].Interface().(error)
-	}
-	return ret[0], nil
-}
-
-// Boolean logic.
-
-func truth(arg reflect.Value) bool {
-	t, _ := isTrue(indirectInterface(arg))
-	return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
-	if !truth(arg0) {
-		return arg0
-	}
-	for i := range args {
-		arg0 = args[i]
-		if !truth(arg0) {
-			break
-		}
-	}
-	return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
-	if truth(arg0) {
-		return arg0
-	}
-	for i := range args {
-		arg0 = args[i]
-		if truth(arg0) {
-			break
-		}
-	}
-	return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg reflect.Value) bool {
-	return !truth(arg)
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
-	errBadComparisonType = errors.New("invalid type for comparison")
-	errBadComparison     = errors.New("incompatible types for comparison")
-	errNoComparison      = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
-	invalidKind kind = iota
-	boolKind
-	complexKind
-	intKind
-	floatKind
-	stringKind
-	uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
-	switch v.Kind() {
-	case reflect.Bool:
-		return boolKind, nil
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return intKind, nil
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return uintKind, nil
-	case reflect.Float32, reflect.Float64:
-		return floatKind, nil
-	case reflect.Complex64, reflect.Complex128:
-		return complexKind, nil
-	case reflect.String:
-		return stringKind, nil
-	}
-	return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
-	arg1 = indirectInterface(arg1)
-	if arg1 != zero {
-		if t1 := arg1.Type(); !t1.Comparable() {
-			return false, fmt.Errorf("uncomparable type %s: %v", t1, arg1)
-		}
-	}
-	if len(arg2) == 0 {
-		return false, errNoComparison
-	}
-	k1, _ := basicKind(arg1)
-	for _, arg := range arg2 {
-		arg = indirectInterface(arg)
-		k2, _ := basicKind(arg)
-		truth := false
-		if k1 != k2 {
-			// Special case: Can compare integer values regardless of type's sign.
-			switch {
-			case k1 == intKind && k2 == uintKind:
-				truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
-			case k1 == uintKind && k2 == intKind:
-				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
-			default:
-				if arg1 != zero && arg != zero {
-					return false, errBadComparison
-				}
-			}
-		} else {
-			switch k1 {
-			case boolKind:
-				truth = arg1.Bool() == arg.Bool()
-			case complexKind:
-				truth = arg1.Complex() == arg.Complex()
-			case floatKind:
-				truth = arg1.Float() == arg.Float()
-			case intKind:
-				truth = arg1.Int() == arg.Int()
-			case stringKind:
-				truth = arg1.String() == arg.String()
-			case uintKind:
-				truth = arg1.Uint() == arg.Uint()
-			default:
-				if arg == zero {
-					truth = arg1 == arg
-				} else {
-					if t2 := arg.Type(); !t2.Comparable() {
-						return false, fmt.Errorf("uncomparable type %s: %v", t2, arg)
-					}
-					truth = arg1.Interface() == arg.Interface()
-				}
-			}
-		}
-		if truth {
-			return true, nil
-		}
-	}
-	return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 reflect.Value) (bool, error) {
-	// != is the inverse of ==.
-	equal, err := eq(arg1, arg2)
-	return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 reflect.Value) (bool, error) {
-	arg1 = indirectInterface(arg1)
-	k1, err := basicKind(arg1)
-	if err != nil {
-		return false, err
-	}
-	arg2 = indirectInterface(arg2)
-	k2, err := basicKind(arg2)
-	if err != nil {
-		return false, err
-	}
-	truth := false
-	if k1 != k2 {
-		// Special case: Can compare integer values regardless of type's sign.
-		switch {
-		case k1 == intKind && k2 == uintKind:
-			truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
-		case k1 == uintKind && k2 == intKind:
-			truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
-		default:
-			return false, errBadComparison
-		}
-	} else {
-		switch k1 {
-		case boolKind, complexKind:
-			return false, errBadComparisonType
-		case floatKind:
-			truth = arg1.Float() < arg2.Float()
-		case intKind:
-			truth = arg1.Int() < arg2.Int()
-		case stringKind:
-			truth = arg1.String() < arg2.String()
-		case uintKind:
-			truth = arg1.Uint() < arg2.Uint()
-		default:
-			panic("invalid kind")
-		}
-	}
-	return truth, nil
-}
-
-// le evaluates the comparison a <= b.
-func le(arg1, arg2 reflect.Value) (bool, error) {
-	// <= is < or ==.
-	lessThan, err := lt(arg1, arg2)
-	if lessThan || err != nil {
-		return lessThan, err
-	}
-	return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 reflect.Value) (bool, error) {
-	// > is the inverse of <=.
-	lessOrEqual, err := le(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 reflect.Value) (bool, error) {
-	// >= is the inverse of <.
-	lessThan, err := lt(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
-	htmlQuot = []byte("&#34;") // shorter than "&quot;"
-	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
-	htmlAmp  = []byte("&amp;")
-	htmlLt   = []byte("&lt;")
-	htmlGt   = []byte("&gt;")
-	htmlNull = []byte("\uFFFD")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
-	last := 0
-	for i, c := range b {
-		var html []byte
-		switch c {
-		case '\000':
-			html = htmlNull
-		case '"':
-			html = htmlQuot
-		case '\'':
-			html = htmlApos
-		case '&':
-			html = htmlAmp
-		case '<':
-			html = htmlLt
-		case '>':
-			html = htmlGt
-		default:
-			continue
-		}
-		w.Write(b[last:i])
-		w.Write(html)
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if !strings.ContainsAny(s, "'\"&<>\000") {
-		return s
-	}
-	var b bytes.Buffer
-	HTMLEscape(&b, []byte(s))
-	return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
-	return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
-	jsLowUni = []byte(`\u00`)
-	hex      = []byte("0123456789ABCDEF")
-
-	jsBackslash = []byte(`\\`)
-	jsApos      = []byte(`\'`)
-	jsQuot      = []byte(`\"`)
-	jsLt        = []byte(`\u003C`)
-	jsGt        = []byte(`\u003E`)
-	jsAmp       = []byte(`\u0026`)
-	jsEq        = []byte(`\u003D`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
-	last := 0
-	for i := 0; i < len(b); i++ {
-		c := b[i]
-
-		if !jsIsSpecial(rune(c)) {
-			// fast path: nothing to do
-			continue
-		}
-		w.Write(b[last:i])
-
-		if c < utf8.RuneSelf {
-			// Quotes, slashes and angle brackets get quoted.
-			// Control characters get written as \u00XX.
-			switch c {
-			case '\\':
-				w.Write(jsBackslash)
-			case '\'':
-				w.Write(jsApos)
-			case '"':
-				w.Write(jsQuot)
-			case '<':
-				w.Write(jsLt)
-			case '>':
-				w.Write(jsGt)
-			case '&':
-				w.Write(jsAmp)
-			case '=':
-				w.Write(jsEq)
-			default:
-				w.Write(jsLowUni)
-				t, b := c>>4, c&0x0f
-				w.Write(hex[t : t+1])
-				w.Write(hex[b : b+1])
-			}
-		} else {
-			// Unicode rune.
-			r, size := utf8.DecodeRune(b[i:])
-			if unicode.IsPrint(r) {
-				w.Write(b[i : i+size])
-			} else {
-				fmt.Fprintf(w, "\\u%04X", r)
-			}
-			i += size - 1
-		}
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if strings.IndexFunc(s, jsIsSpecial) < 0 {
-		return s
-	}
-	var b bytes.Buffer
-	JSEscape(&b, []byte(s))
-	return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
-	switch r {
-	case '\\', '\'', '"', '<', '>', '&', '=':
-		return true
-	}
-	return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
-	return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
-	return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-//	fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
-	ok := false
-	var s string
-	// Fast path for simple common case.
-	if len(args) == 1 {
-		s, ok = args[0].(string)
-	}
-	if !ok {
-		for i, arg := range args {
-			a, ok := printableValue(reflect.ValueOf(arg))
-			if ok {
-				args[i] = a
-			} // else let fmt do its thing
-		}
-		s = fmt.Sprint(args...)
-	}
-	return s
-}
diff --git a/internal/backport/text/template/helper.go b/internal/backport/text/template/helper.go
deleted file mode 100644
index 481527a..0000000
--- a/internal/backport/text/template/helper.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
-	"fmt"
-	"io/fs"
-	"io/ioutil"
-	"path"
-	"path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-//
-//	var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
-	if err != nil {
-		panic(err)
-	}
-	return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the base name and
-// parsed contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
-// named "foo", while "a/foo" is unavailable.
-func ParseFiles(filenames ...string) (*Template, error) {
-	return parseFiles(nil, readFileOS, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-// Since the templates created by ParseFiles are named by the base
-// names of the argument files, t should usually have the name of one
-// of the (base) names of the files. If it does not, depending on t's
-// contents before calling ParseFiles, t.Execute may fail. In that
-// case use t.ExecuteTemplate to execute a valid template.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
-	t.init()
-	return parseFiles(t, readFileOS, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
-	if len(filenames) == 0 {
-		// Not really a problem, but be consistent.
-		return nil, fmt.Errorf("template: no files named in call to ParseFiles")
-	}
-	for _, filename := range filenames {
-		name, b, err := readFile(filename)
-		if err != nil {
-			return nil, err
-		}
-		s := string(b)
-		// First template becomes return value if not already defined,
-		// and we use that one for subsequent New calls to associate
-		// all the templates together. Also, if this file has the same name
-		// as t, this file becomes the contents of t, so
-		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
-		// works. Otherwise we create a new template associated with t.
-		var tmpl *Template
-		if t == nil {
-			t = New(name)
-		}
-		if name == t.Name() {
-			tmpl = t
-		} else {
-			tmpl = t.New(name)
-		}
-		_, err = tmpl.Parse(s)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from
-// the files identified by the pattern. The files are matched according to the
-// semantics of filepath.Match, and the pattern must match at least one file.
-// The returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func ParseGlob(pattern string) (*Template, error) {
-	return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The files are matched
-// according to the semantics of filepath.Match, and the pattern must match at
-// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
-// list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
-	t.init()
-	return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
-	filenames, err := filepath.Glob(pattern)
-	if err != nil {
-		return nil, err
-	}
-	if len(filenames) == 0 {
-		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
-	}
-	return parseFiles(t, readFileOS, filenames...)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
-	return parseFS(nil, fsys, patterns)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
-	t.init()
-	return parseFS(t, fsys, patterns)
-}
-
-func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
-	var filenames []string
-	for _, pattern := range patterns {
-		list, err := fs.Glob(fsys, pattern)
-		if err != nil {
-			return nil, err
-		}
-		if len(list) == 0 {
-			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
-		}
-		filenames = append(filenames, list...)
-	}
-	return parseFiles(t, readFileFS(fsys), filenames...)
-}
-
-func readFileOS(file string) (name string, b []byte, err error) {
-	name = filepath.Base(file)
-	b, err = ioutil.ReadFile(file)
-	return
-}
-
-func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
-	return func(file string) (name string, b []byte, err error) {
-		name = path.Base(file)
-		b, err = fs.ReadFile(fsys, file)
-		return
-	}
-}
diff --git a/internal/backport/text/template/multi_test.go b/internal/backport/text/template/multi_test.go
deleted file mode 100644
index 6ce4408..0000000
--- a/internal/backport/text/template/multi_test.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-// Tests for multiple-template parsing and execution.
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"testing"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-const (
-	noError  = true
-	hasError = false
-)
-
-type multiParseTest struct {
-	name    string
-	input   string
-	ok      bool
-	names   []string
-	results []string
-}
-
-var multiParseTests = []multiParseTest{
-	{"empty", "", noError,
-		nil,
-		nil},
-	{"one", `{{define "foo"}} FOO {{end}}`, noError,
-		[]string{"foo"},
-		[]string{" FOO "}},
-	{"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
-		[]string{"foo", "bar"},
-		[]string{" FOO ", " BAR "}},
-	// errors
-	{"missing end", `{{define "foo"}} FOO `, hasError,
-		nil,
-		nil},
-	{"malformed name", `{{define "foo}} FOO `, hasError,
-		nil,
-		nil},
-}
-
-func TestMultiParse(t *testing.T) {
-	for _, test := range multiParseTests {
-		template, err := New("root").Parse(test.input)
-		switch {
-		case err == nil && !test.ok:
-			t.Errorf("%q: expected error; got none", test.name)
-			continue
-		case err != nil && test.ok:
-			t.Errorf("%q: unexpected error: %v", test.name, err)
-			continue
-		case err != nil && !test.ok:
-			// expected error, got one
-			if *debug {
-				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
-			}
-			continue
-		}
-		if template == nil {
-			continue
-		}
-		if len(template.tmpl) != len(test.names)+1 { // +1 for root
-			t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl))
-			continue
-		}
-		for i, name := range test.names {
-			tmpl, ok := template.tmpl[name]
-			if !ok {
-				t.Errorf("%s: can't find template %q", test.name, name)
-				continue
-			}
-			result := tmpl.Root.String()
-			if result != test.results[i] {
-				t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
-			}
-		}
-	}
-}
-
-var multiExecTests = []execTest{
-	{"empty", "", "", nil, true},
-	{"text", "some text", "some text", nil, true},
-	{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
-	{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
-	{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
-	{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
-	{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
-	{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
-	{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
-
-	// User-defined function: test argument evaluator.
-	{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
-	{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
-}
-
-// These strings are also in testdata/*.
-const multiText1 = `
-	{{define "x"}}TEXT{{end}}
-	{{define "dotV"}}{{.V}}{{end}}
-`
-
-const multiText2 = `
-	{{define "dot"}}{{.}}{{end}}
-	{{define "nested"}}{{template "dot" .}}{{end}}
-`
-
-func TestMultiExecute(t *testing.T) {
-	// Declare a couple of templates first.
-	template, err := New("root").Parse(multiText1)
-	if err != nil {
-		t.Fatalf("parse error for 1: %s", err)
-	}
-	_, err = template.Parse(multiText2)
-	if err != nil {
-		t.Fatalf("parse error for 2: %s", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseFiles(t *testing.T) {
-	_, err := ParseFiles("DOES NOT EXIST")
-	if err == nil {
-		t.Error("expected error for non-existent file; got none")
-	}
-	template := New("root")
-	_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseGlob(t *testing.T) {
-	_, err := ParseGlob("DOES NOT EXIST")
-	if err == nil {
-		t.Error("expected error for non-existent file; got none")
-	}
-	_, err = New("error").ParseGlob("[x")
-	if err == nil {
-		t.Error("expected error for bad pattern; got none")
-	}
-	template := New("root")
-	_, err = template.ParseGlob("testdata/file*.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(multiExecTests, template, t)
-}
-
-func TestParseFS(t *testing.T) {
-	fs := os.DirFS("testdata")
-
-	{
-		_, err := ParseFS(fs, "DOES NOT EXIST")
-		if err == nil {
-			t.Error("expected error for non-existent file; got none")
-		}
-	}
-
-	{
-		template := New("root")
-		_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
-		if err != nil {
-			t.Fatalf("error parsing files: %v", err)
-		}
-		testExecute(multiExecTests, template, t)
-	}
-
-	{
-		template := New("root")
-		_, err := template.ParseFS(fs, "file*.tmpl")
-		if err != nil {
-			t.Fatalf("error parsing files: %v", err)
-		}
-		testExecute(multiExecTests, template, t)
-	}
-}
-
-// In these tests, actual content (not just template definitions) comes from the parsed files.
-
-var templateFileExecTests = []execTest{
-	{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
-}
-
-func TestParseFilesWithData(t *testing.T) {
-	template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(templateFileExecTests, template, t)
-}
-
-func TestParseGlobWithData(t *testing.T) {
-	template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
-	if err != nil {
-		t.Fatalf("error parsing files: %v", err)
-	}
-	testExecute(templateFileExecTests, template, t)
-}
-
-const (
-	cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
-	cloneText2 = `{{define "b"}}b{{end}}`
-	cloneText3 = `{{define "c"}}root{{end}}`
-	cloneText4 = `{{define "c"}}clone{{end}}`
-)
-
-func TestClone(t *testing.T) {
-	// Create some templates and clone the root.
-	root, err := New("root").Parse(cloneText1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = root.Parse(cloneText2)
-	if err != nil {
-		t.Fatal(err)
-	}
-	clone := Must(root.Clone())
-	// Add variants to both.
-	_, err = root.Parse(cloneText3)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = clone.Parse(cloneText4)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Verify that the clone is self-consistent.
-	for k, v := range clone.tmpl {
-		if k == clone.name && v.tmpl[k] != clone {
-			t.Error("clone does not contain root")
-		}
-		if v != v.tmpl[v.name] {
-			t.Errorf("clone does not contain self for %q", k)
-		}
-	}
-	// Execute root.
-	var b bytes.Buffer
-	err = root.ExecuteTemplate(&b, "a", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if b.String() != "broot" {
-		t.Errorf("expected %q got %q", "broot", b.String())
-	}
-	// Execute copy.
-	b.Reset()
-	err = clone.ExecuteTemplate(&b, "a", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if b.String() != "bclone" {
-		t.Errorf("expected %q got %q", "bclone", b.String())
-	}
-}
-
-func TestAddParseTree(t *testing.T) {
-	// Create some templates.
-	root, err := New("root").Parse(cloneText1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = root.Parse(cloneText2)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Add a new parse tree.
-	tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins())
-	if err != nil {
-		t.Fatal(err)
-	}
-	added, err := root.AddParseTree("c", tree["c"])
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Execute.
-	var b bytes.Buffer
-	err = added.ExecuteTemplate(&b, "a", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if b.String() != "broot" {
-		t.Errorf("expected %q got %q", "broot", b.String())
-	}
-}
-
-// Issue 7032
-func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
-	master := "{{define \"master\"}}{{end}}"
-	tmpl := New("master")
-	tree, err := parse.Parse("master", master, "", "", nil)
-	if err != nil {
-		t.Fatalf("unexpected parse err: %v", err)
-	}
-	masterTree := tree["master"]
-	tmpl.AddParseTree("master", masterTree) // used to panic
-}
-
-func TestRedefinition(t *testing.T) {
-	var tmpl *Template
-	var err error
-	if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
-		t.Fatalf("parse 1: %v", err)
-	}
-	if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
-		t.Fatalf("got error %v, expected nil", err)
-	}
-	if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
-		t.Fatalf("got error %v, expected nil", err)
-	}
-}
-
-// Issue 10879
-func TestEmptyTemplateCloneCrash(t *testing.T) {
-	t1 := New("base")
-	t1.Clone() // used to panic
-}
-
-// Issue 10910, 10926
-func TestTemplateLookUp(t *testing.T) {
-	t1 := New("foo")
-	if t1.Lookup("foo") != nil {
-		t.Error("Lookup returned non-nil value for undefined template foo")
-	}
-	t1.New("bar")
-	if t1.Lookup("bar") != nil {
-		t.Error("Lookup returned non-nil value for undefined template bar")
-	}
-	t1.Parse(`{{define "foo"}}test{{end}}`)
-	if t1.Lookup("foo") == nil {
-		t.Error("Lookup returned nil value for defined template")
-	}
-}
-
-func TestNew(t *testing.T) {
-	// template with same name already exists
-	t1, _ := New("test").Parse(`{{define "test"}}foo{{end}}`)
-	t2 := t1.New("test")
-
-	if t1.common != t2.common {
-		t.Errorf("t1 & t2 didn't share common struct; got %v != %v", t1.common, t2.common)
-	}
-	if t1.Tree == nil {
-		t.Error("defined template got nil Tree")
-	}
-	if t2.Tree != nil {
-		t.Error("undefined template got non-nil Tree")
-	}
-
-	containsT1 := false
-	for _, tmpl := range t1.Templates() {
-		if tmpl == t2 {
-			t.Error("Templates included undefined template")
-		}
-		if tmpl == t1 {
-			containsT1 = true
-		}
-	}
-	if !containsT1 {
-		t.Error("Templates didn't include defined template")
-	}
-}
-
-func TestParse(t *testing.T) {
-	// In multiple calls to Parse with the same receiver template, only one call
-	// can contain text other than space, comments, and template definitions
-	t1 := New("test")
-	if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-	if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-	if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
-		t.Fatalf("parsing test: %s", err)
-	}
-}
-
-func TestEmptyTemplate(t *testing.T) {
-	cases := []struct {
-		defn []string
-		in   string
-		want string
-	}{
-		{[]string{"x", "y"}, "", "y"},
-		{[]string{""}, "once", ""},
-		{[]string{"", ""}, "twice", ""},
-		{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
-		{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
-		{[]string{"{{.}}", ""}, "twice", ""},
-	}
-
-	for i, c := range cases {
-		root := New("root")
-
-		var (
-			m   *Template
-			err error
-		)
-		for _, d := range c.defn {
-			m, err = root.New(c.in).Parse(d)
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-		buf := &bytes.Buffer{}
-		if err := m.Execute(buf, c.in); err != nil {
-			t.Error(i, err)
-			continue
-		}
-		if buf.String() != c.want {
-			t.Errorf("expected string %q: got %q", c.want, buf.String())
-		}
-	}
-}
-
-// Issue 19249 was a regression in 1.8 caused by the handling of empty
-// templates added in that release, which got different answers depending
-// on the order templates appeared in the internal map.
-func TestIssue19294(t *testing.T) {
-	// The empty block in "xhtml" should be replaced during execution
-	// by the contents of "stylesheet", but if the internal map associating
-	// names with templates is built in the wrong order, the empty block
-	// looks non-empty and this doesn't happen.
-	var inlined = map[string]string{
-		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
-		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
-	}
-	all := []string{"stylesheet", "xhtml"}
-	for i := 0; i < 100; i++ {
-		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
-		if err != nil {
-			t.Fatal(err)
-		}
-		for _, name := range all {
-			_, err := res.New(name).Parse(inlined[name])
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-		var buf bytes.Buffer
-		res.Execute(&buf, 0)
-		if buf.String() != "stylesheet" {
-			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
-		}
-	}
-}
diff --git a/internal/backport/text/template/option.go b/internal/backport/text/template/option.go
deleted file mode 100644
index f2c6681..0000000
--- a/internal/backport/text/template/option.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains the code to handle template options.
-
-package template
-
-import "strings"
-
-// missingKeyAction defines how to respond to indexing a map with a key that is not present.
-type missingKeyAction int
-
-const (
-	mapInvalid   missingKeyAction = iota // Return an invalid reflect.Value.
-	mapZeroValue                         // Return the zero value for the map element.
-	mapError                             // Error out
-)
-
-type option struct {
-	missingKey missingKeyAction
-}
-
-// Option sets options for the template. Options are described by
-// strings, either a simple string or "key=value". There can be at
-// most one equals sign in an option string. If the option string
-// is unrecognized or otherwise invalid, Option panics.
-//
-// Known options:
-//
-// missingkey: Control the behavior during execution if a map is
-// indexed with a key that is not present in the map.
-//
-//	"missingkey=default" or "missingkey=invalid"
-//		The default behavior: Do nothing and continue execution.
-//		If printed, the result of the index operation is the string
-//		"<no value>".
-//	"missingkey=zero"
-//		The operation returns the zero value for the map type's element.
-//	"missingkey=error"
-//		Execution stops immediately with an error.
-func (t *Template) Option(opt ...string) *Template {
-	t.init()
-	for _, s := range opt {
-		t.setOption(s)
-	}
-	return t
-}
-
-func (t *Template) setOption(opt string) {
-	if opt == "" {
-		panic("empty option string")
-	}
-	elems := strings.Split(opt, "=")
-	switch len(elems) {
-	case 2:
-		// key=value
-		switch elems[0] {
-		case "missingkey":
-			switch elems[1] {
-			case "invalid", "default":
-				t.option.missingKey = mapInvalid
-				return
-			case "zero":
-				t.option.missingKey = mapZeroValue
-				return
-			case "error":
-				t.option.missingKey = mapError
-				return
-			}
-		}
-	}
-	panic("unrecognized option: " + opt)
-}
diff --git a/internal/backport/text/template/parse/lex.go b/internal/backport/text/template/parse/lex.go
deleted file mode 100644
index 95e3377..0000000
--- a/internal/backport/text/template/parse/lex.go
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
-	typ  itemType // The type of this item.
-	pos  Pos      // The starting position, in bytes, of this item in the input string.
-	val  string   // The value of this item.
-	line int      // The line number at the start of this item.
-}
-
-func (i item) String() string {
-	switch {
-	case i.typ == itemEOF:
-		return "EOF"
-	case i.typ == itemError:
-		return i.val
-	case i.typ > itemKeyword:
-		return fmt.Sprintf("<%s>", i.val)
-	case len(i.val) > 10:
-		return fmt.Sprintf("%.10q...", i.val)
-	}
-	return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
-	itemError        itemType = iota // error occurred; value is text of error
-	itemBool                         // boolean constant
-	itemChar                         // printable ASCII character; grab bag for comma etc.
-	itemCharConstant                 // character constant
-	itemComment                      // comment text
-	itemComplex                      // complex constant (1+2i); imaginary is just a number
-	itemAssign                       // equals ('=') introducing an assignment
-	itemDeclare                      // colon-equals (':=') introducing a declaration
-	itemEOF
-	itemField      // alphanumeric identifier starting with '.'
-	itemIdentifier // alphanumeric identifier not starting with '.'
-	itemLeftDelim  // left action delimiter
-	itemLeftParen  // '(' inside action
-	itemNumber     // simple number, including imaginary
-	itemPipe       // pipe symbol
-	itemRawString  // raw quoted string (includes quotes)
-	itemRightDelim // right action delimiter
-	itemRightParen // ')' inside action
-	itemSpace      // run of spaces separating arguments
-	itemString     // quoted string (includes quotes)
-	itemText       // plain text
-	itemVariable   // variable starting with '$', such as '$' or  '$1' or '$hello'
-	// Keywords appear after all the rest.
-	itemKeyword  // used only to delimit the keywords
-	itemBlock    // block keyword
-	itemBreak    // break keyword
-	itemContinue // continue keyword
-	itemDot      // the cursor, spelled '.'
-	itemDefine   // define keyword
-	itemElse     // else keyword
-	itemEnd      // end keyword
-	itemIf       // if keyword
-	itemNil      // the untyped nil constant, easiest to treat as a keyword
-	itemRange    // range keyword
-	itemTemplate // template keyword
-	itemWith     // with keyword
-)
-
-var key = map[string]itemType{
-	".":        itemDot,
-	"block":    itemBlock,
-	"break":    itemBreak,
-	"continue": itemContinue,
-	"define":   itemDefine,
-	"else":     itemElse,
-	"end":      itemEnd,
-	"if":       itemIf,
-	"range":    itemRange,
-	"nil":      itemNil,
-	"template": itemTemplate,
-	"with":     itemWith,
-}
-
-const eof = -1
-
-// Trimming spaces.
-// If the action begins "{{- " rather than "{{", then all space/tab/newlines
-// preceding the action are trimmed; conversely if it ends " -}}" the
-// leading spaces are trimmed. This is done entirely in the lexer; the
-// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
-// to be present to avoid ambiguity with things like "{{-3}}". It reads
-// better with the space present anyway. For simplicity, only ASCII
-// does the job.
-const (
-	spaceChars    = " \t\r\n"  // These are the space characters defined by Go itself.
-	trimMarker    = '-'        // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
-	trimMarkerLen = Pos(1 + 1) // marker plus space before or after
-)
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
-	name        string    // the name of the input; used only for error reports
-	input       string    // the string being scanned
-	leftDelim   string    // start of action
-	rightDelim  string    // end of action
-	emitComment bool      // emit itemComment tokens.
-	pos         Pos       // current position in the input
-	start       Pos       // start position of this item
-	width       Pos       // width of last rune read from input
-	items       chan item // channel of scanned items
-	parenDepth  int       // nesting depth of ( ) exprs
-	line        int       // 1+number of newlines seen
-	startLine   int       // start line of this item
-	breakOK     bool      // break keyword allowed
-	continueOK  bool      // continue keyword allowed
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
-	if int(l.pos) >= len(l.input) {
-		l.width = 0
-		return eof
-	}
-	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
-	l.width = Pos(w)
-	l.pos += l.width
-	if r == '\n' {
-		l.line++
-	}
-	return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
-	r := l.next()
-	l.backup()
-	return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
-	l.pos -= l.width
-	// Correct newline count.
-	if l.width == 1 && l.input[l.pos] == '\n' {
-		l.line--
-	}
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
-	l.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}
-	l.start = l.pos
-	l.startLine = l.line
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
-	l.line += strings.Count(l.input[l.start:l.pos], "\n")
-	l.start = l.pos
-	l.startLine = l.line
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
-	if strings.ContainsRune(valid, l.next()) {
-		return true
-	}
-	l.backup()
-	return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
-	for strings.ContainsRune(valid, l.next()) {
-	}
-	l.backup()
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
-	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
-	return nil
-}
-
-// nextItem returns the next item from the input.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) nextItem() item {
-	return <-l.items
-}
-
-// drain drains the output so the lexing goroutine will exit.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) drain() {
-	for range l.items {
-	}
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string, emitComment bool) *lexer {
-	if left == "" {
-		left = leftDelim
-	}
-	if right == "" {
-		right = rightDelim
-	}
-	l := &lexer{
-		name:        name,
-		input:       input,
-		leftDelim:   left,
-		rightDelim:  right,
-		emitComment: emitComment,
-		items:       make(chan item),
-		line:        1,
-		startLine:   1,
-	}
-	go l.run()
-	return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
-	for state := lexText; state != nil; {
-		state = state(l)
-	}
-	close(l.items)
-}
-
-// state functions
-
-const (
-	leftDelim    = "{{"
-	rightDelim   = "}}"
-	leftComment  = "/*"
-	rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
-	l.width = 0
-	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
-		ldn := Pos(len(l.leftDelim))
-		l.pos += Pos(x)
-		trimLength := Pos(0)
-		if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
-			trimLength = rightTrimLength(l.input[l.start:l.pos])
-		}
-		l.pos -= trimLength
-		if l.pos > l.start {
-			l.line += strings.Count(l.input[l.start:l.pos], "\n")
-			l.emit(itemText)
-		}
-		l.pos += trimLength
-		l.ignore()
-		return lexLeftDelim
-	}
-	l.pos = Pos(len(l.input))
-	// Correctly reached EOF.
-	if l.pos > l.start {
-		l.line += strings.Count(l.input[l.start:l.pos], "\n")
-		l.emit(itemText)
-	}
-	l.emit(itemEOF)
-	return nil
-}
-
-// rightTrimLength returns the length of the spaces at the end of the string.
-func rightTrimLength(s string) Pos {
-	return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
-}
-
-// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
-func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
-	if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
-		return true, true
-	}
-	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
-		return true, false
-	}
-	return false, false
-}
-
-// leftTrimLength returns the length of the spaces at the beginning of the string.
-func leftTrimLength(s string) Pos {
-	return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
-func lexLeftDelim(l *lexer) stateFn {
-	l.pos += Pos(len(l.leftDelim))
-	trimSpace := hasLeftTrimMarker(l.input[l.pos:])
-	afterMarker := Pos(0)
-	if trimSpace {
-		afterMarker = trimMarkerLen
-	}
-	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
-		l.pos += afterMarker
-		l.ignore()
-		return lexComment
-	}
-	l.emit(itemLeftDelim)
-	l.pos += afterMarker
-	l.ignore()
-	l.parenDepth = 0
-	return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
-	l.pos += Pos(len(leftComment))
-	i := strings.Index(l.input[l.pos:], rightComment)
-	if i < 0 {
-		return l.errorf("unclosed comment")
-	}
-	l.pos += Pos(i + len(rightComment))
-	delim, trimSpace := l.atRightDelim()
-	if !delim {
-		return l.errorf("comment ends before closing delimiter")
-	}
-	if l.emitComment {
-		l.emit(itemComment)
-	}
-	if trimSpace {
-		l.pos += trimMarkerLen
-	}
-	l.pos += Pos(len(l.rightDelim))
-	if trimSpace {
-		l.pos += leftTrimLength(l.input[l.pos:])
-	}
-	l.ignore()
-	return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
-func lexRightDelim(l *lexer) stateFn {
-	trimSpace := hasRightTrimMarker(l.input[l.pos:])
-	if trimSpace {
-		l.pos += trimMarkerLen
-		l.ignore()
-	}
-	l.pos += Pos(len(l.rightDelim))
-	l.emit(itemRightDelim)
-	if trimSpace {
-		l.pos += leftTrimLength(l.input[l.pos:])
-		l.ignore()
-	}
-	return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
-	// Either number, quoted string, or identifier.
-	// Spaces separate arguments; runs of spaces turn into itemSpace.
-	// Pipe symbols separate and are emitted.
-	delim, _ := l.atRightDelim()
-	if delim {
-		if l.parenDepth == 0 {
-			return lexRightDelim
-		}
-		return l.errorf("unclosed left paren")
-	}
-	switch r := l.next(); {
-	case r == eof:
-		return l.errorf("unclosed action")
-	case isSpace(r):
-		l.backup() // Put space back in case we have " -}}".
-		return lexSpace
-	case r == '=':
-		l.emit(itemAssign)
-	case r == ':':
-		if l.next() != '=' {
-			return l.errorf("expected :=")
-		}
-		l.emit(itemDeclare)
-	case r == '|':
-		l.emit(itemPipe)
-	case r == '"':
-		return lexQuote
-	case r == '`':
-		return lexRawQuote
-	case r == '$':
-		return lexVariable
-	case r == '\'':
-		return lexChar
-	case r == '.':
-		// special look-ahead for ".field" so we don't break l.backup().
-		if l.pos < Pos(len(l.input)) {
-			r := l.input[l.pos]
-			if r < '0' || '9' < r {
-				return lexField
-			}
-		}
-		fallthrough // '.' can start a number.
-	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
-		l.backup()
-		return lexNumber
-	case isAlphaNumeric(r):
-		l.backup()
-		return lexIdentifier
-	case r == '(':
-		l.emit(itemLeftParen)
-		l.parenDepth++
-	case r == ')':
-		l.emit(itemRightParen)
-		l.parenDepth--
-		if l.parenDepth < 0 {
-			return l.errorf("unexpected right paren %#U", r)
-		}
-	case r <= unicode.MaxASCII && unicode.IsPrint(r):
-		l.emit(itemChar)
-	default:
-		return l.errorf("unrecognized character in action: %#U", r)
-	}
-	return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// We have not consumed the first space, which is known to be present.
-// Take care if there is a trim-marked right delimiter, which starts with a space.
-func lexSpace(l *lexer) stateFn {
-	var r rune
-	var numSpaces int
-	for {
-		r = l.peek()
-		if !isSpace(r) {
-			break
-		}
-		l.next()
-		numSpaces++
-	}
-	// Be careful about a trim-marked closing delimiter, which has a minus
-	// after a space. We know there is a space, so check for the '-' that might follow.
-	if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
-		l.backup() // Before the space.
-		if numSpaces == 1 {
-			return lexRightDelim // On the delim, so go right to that.
-		}
-	}
-	l.emit(itemSpace)
-	return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
-	for {
-		switch r := l.next(); {
-		case isAlphaNumeric(r):
-			// absorb.
-		default:
-			l.backup()
-			word := l.input[l.start:l.pos]
-			if !l.atTerminator() {
-				return l.errorf("bad character %#U", r)
-			}
-			switch {
-			case key[word] > itemKeyword:
-				item := key[word]
-				if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
-					l.emit(itemIdentifier)
-				} else {
-					l.emit(item)
-				}
-			case word[0] == '.':
-				l.emit(itemField)
-			case word == "true", word == "false":
-				l.emit(itemBool)
-			default:
-				l.emit(itemIdentifier)
-			}
-			break Loop
-		}
-	}
-	return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
-	return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
-	if l.atTerminator() { // Nothing interesting follows -> "$".
-		l.emit(itemVariable)
-		return lexInsideAction
-	}
-	return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
-	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
-		if typ == itemVariable {
-			l.emit(itemVariable)
-		} else {
-			l.emit(itemDot)
-		}
-		return lexInsideAction
-	}
-	var r rune
-	for {
-		r = l.next()
-		if !isAlphaNumeric(r) {
-			l.backup()
-			break
-		}
-	}
-	if !l.atTerminator() {
-		return l.errorf("bad character %#U", r)
-	}
-	l.emit(typ)
-	return lexInsideAction
-}
-
-// atTerminator reports whether the input is at a valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
-	r := l.peek()
-	if isSpace(r) {
-		return true
-	}
-	switch r {
-	case eof, '.', ',', '|', ':', ')', '(':
-		return true
-	}
-	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
-	// succeed but should fail) but only in extremely rare cases caused by willfully
-	// bad choice of delimiter.
-	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
-		return true
-	}
-	return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case '\\':
-			if r := l.next(); r != eof && r != '\n' {
-				break
-			}
-			fallthrough
-		case eof, '\n':
-			return l.errorf("unterminated character constant")
-		case '\'':
-			break Loop
-		}
-	}
-	l.emit(itemCharConstant)
-	return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
-	if !l.scanNumber() {
-		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
-	}
-	if sign := l.peek(); sign == '+' || sign == '-' {
-		// Complex: 1+2i. No spaces, must end in 'i'.
-		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
-			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
-		}
-		l.emit(itemComplex)
-	} else {
-		l.emit(itemNumber)
-	}
-	return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
-	// Optional leading sign.
-	l.accept("+-")
-	// Is it hex?
-	digits := "0123456789_"
-	if l.accept("0") {
-		// Note: Leading 0 does not mean octal in floats.
-		if l.accept("xX") {
-			digits = "0123456789abcdefABCDEF_"
-		} else if l.accept("oO") {
-			digits = "01234567_"
-		} else if l.accept("bB") {
-			digits = "01_"
-		}
-	}
-	l.acceptRun(digits)
-	if l.accept(".") {
-		l.acceptRun(digits)
-	}
-	if len(digits) == 10+1 && l.accept("eE") {
-		l.accept("+-")
-		l.acceptRun("0123456789_")
-	}
-	if len(digits) == 16+6+1 && l.accept("pP") {
-		l.accept("+-")
-		l.acceptRun("0123456789_")
-	}
-	// Is it imaginary?
-	l.accept("i")
-	// Next thing mustn't be alphanumeric.
-	if isAlphaNumeric(l.peek()) {
-		l.next()
-		return false
-	}
-	return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case '\\':
-			if r := l.next(); r != eof && r != '\n' {
-				break
-			}
-			fallthrough
-		case eof, '\n':
-			return l.errorf("unterminated quoted string")
-		case '"':
-			break Loop
-		}
-	}
-	l.emit(itemString)
-	return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
-	for {
-		switch l.next() {
-		case eof:
-			return l.errorf("unterminated raw quoted string")
-		case '`':
-			break Loop
-		}
-	}
-	l.emit(itemRawString)
-	return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
-	return r == ' ' || r == '\t' || r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
-	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
-
-func hasLeftTrimMarker(s string) bool {
-	return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
-}
-
-func hasRightTrimMarker(s string) bool {
-	return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
-}
diff --git a/internal/backport/text/template/parse/lex_test.go b/internal/backport/text/template/parse/lex_test.go
deleted file mode 100644
index df6aabf..0000000
--- a/internal/backport/text/template/parse/lex_test.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
-	"fmt"
-	"testing"
-)
-
-// Make the types prettyprint.
-var itemName = map[itemType]string{
-	itemError:        "error",
-	itemBool:         "bool",
-	itemChar:         "char",
-	itemCharConstant: "charconst",
-	itemComment:      "comment",
-	itemComplex:      "complex",
-	itemDeclare:      ":=",
-	itemEOF:          "EOF",
-	itemField:        "field",
-	itemIdentifier:   "identifier",
-	itemLeftDelim:    "left delim",
-	itemLeftParen:    "(",
-	itemNumber:       "number",
-	itemPipe:         "pipe",
-	itemRawString:    "raw string",
-	itemRightDelim:   "right delim",
-	itemRightParen:   ")",
-	itemSpace:        "space",
-	itemString:       "string",
-	itemVariable:     "variable",
-
-	// keywords
-	itemDot:      ".",
-	itemBlock:    "block",
-	itemBreak:    "break",
-	itemContinue: "continue",
-	itemDefine:   "define",
-	itemElse:     "else",
-	itemIf:       "if",
-	itemEnd:      "end",
-	itemNil:      "nil",
-	itemRange:    "range",
-	itemTemplate: "template",
-	itemWith:     "with",
-}
-
-func (i itemType) String() string {
-	s := itemName[i]
-	if s == "" {
-		return fmt.Sprintf("item%d", int(i))
-	}
-	return s
-}
-
-type lexTest struct {
-	name  string
-	input string
-	items []item
-}
-
-func mkItem(typ itemType, text string) item {
-	return item{
-		typ: typ,
-		val: text,
-	}
-}
-
-var (
-	tDot        = mkItem(itemDot, ".")
-	tBlock      = mkItem(itemBlock, "block")
-	tEOF        = mkItem(itemEOF, "")
-	tFor        = mkItem(itemIdentifier, "for")
-	tLeft       = mkItem(itemLeftDelim, "{{")
-	tLpar       = mkItem(itemLeftParen, "(")
-	tPipe       = mkItem(itemPipe, "|")
-	tQuote      = mkItem(itemString, `"abc \n\t\" "`)
-	tRange      = mkItem(itemRange, "range")
-	tRight      = mkItem(itemRightDelim, "}}")
-	tRpar       = mkItem(itemRightParen, ")")
-	tSpace      = mkItem(itemSpace, " ")
-	raw         = "`" + `abc\n\t\" ` + "`"
-	rawNL       = "`now is{{\n}}the time`" // Contains newline inside raw quote.
-	tRawQuote   = mkItem(itemRawString, raw)
-	tRawQuoteNL = mkItem(itemRawString, rawNL)
-)
-
-var lexTests = []lexTest{
-	{"empty", "", []item{tEOF}},
-	{"spaces", " \t\n", []item{mkItem(itemText, " \t\n"), tEOF}},
-	{"text", `now is the time`, []item{mkItem(itemText, "now is the time"), tEOF}},
-	{"text with comment", "hello-{{/* this is a comment */}}-world", []item{
-		mkItem(itemText, "hello-"),
-		mkItem(itemComment, "/* this is a comment */"),
-		mkItem(itemText, "-world"),
-		tEOF,
-	}},
-	{"punctuation", "{{,@% }}", []item{
-		tLeft,
-		mkItem(itemChar, ","),
-		mkItem(itemChar, "@"),
-		mkItem(itemChar, "%"),
-		tSpace,
-		tRight,
-		tEOF,
-	}},
-	{"parens", "{{((3))}}", []item{
-		tLeft,
-		tLpar,
-		tLpar,
-		mkItem(itemNumber, "3"),
-		tRpar,
-		tRpar,
-		tRight,
-		tEOF,
-	}},
-	{"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
-	{"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}},
-	{"block", `{{block "foo" .}}`, []item{
-		tLeft, tBlock, tSpace, mkItem(itemString, `"foo"`), tSpace, tDot, tRight, tEOF,
-	}},
-	{"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
-	{"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
-	{"raw quote with newline", "{{" + rawNL + "}}", []item{tLeft, tRawQuoteNL, tRight, tEOF}},
-	{"numbers", "{{1 02 0x14 0X14 -7.2i 1e3 1E3 +1.2e-4 4.2i 1+2i 1_2 0x1.e_fp4 0X1.E_FP4}}", []item{
-		tLeft,
-		mkItem(itemNumber, "1"),
-		tSpace,
-		mkItem(itemNumber, "02"),
-		tSpace,
-		mkItem(itemNumber, "0x14"),
-		tSpace,
-		mkItem(itemNumber, "0X14"),
-		tSpace,
-		mkItem(itemNumber, "-7.2i"),
-		tSpace,
-		mkItem(itemNumber, "1e3"),
-		tSpace,
-		mkItem(itemNumber, "1E3"),
-		tSpace,
-		mkItem(itemNumber, "+1.2e-4"),
-		tSpace,
-		mkItem(itemNumber, "4.2i"),
-		tSpace,
-		mkItem(itemComplex, "1+2i"),
-		tSpace,
-		mkItem(itemNumber, "1_2"),
-		tSpace,
-		mkItem(itemNumber, "0x1.e_fp4"),
-		tSpace,
-		mkItem(itemNumber, "0X1.E_FP4"),
-		tRight,
-		tEOF,
-	}},
-	{"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
-		tLeft,
-		mkItem(itemCharConstant, `'a'`),
-		tSpace,
-		mkItem(itemCharConstant, `'\n'`),
-		tSpace,
-		mkItem(itemCharConstant, `'\''`),
-		tSpace,
-		mkItem(itemCharConstant, `'\\'`),
-		tSpace,
-		mkItem(itemCharConstant, `'\u00FF'`),
-		tSpace,
-		mkItem(itemCharConstant, `'\xFF'`),
-		tSpace,
-		mkItem(itemCharConstant, `'本'`),
-		tRight,
-		tEOF,
-	}},
-	{"bools", "{{true false}}", []item{
-		tLeft,
-		mkItem(itemBool, "true"),
-		tSpace,
-		mkItem(itemBool, "false"),
-		tRight,
-		tEOF,
-	}},
-	{"dot", "{{.}}", []item{
-		tLeft,
-		tDot,
-		tRight,
-		tEOF,
-	}},
-	{"nil", "{{nil}}", []item{
-		tLeft,
-		mkItem(itemNil, "nil"),
-		tRight,
-		tEOF,
-	}},
-	{"dots", "{{.x . .2 .x.y.z}}", []item{
-		tLeft,
-		mkItem(itemField, ".x"),
-		tSpace,
-		tDot,
-		tSpace,
-		mkItem(itemNumber, ".2"),
-		tSpace,
-		mkItem(itemField, ".x"),
-		mkItem(itemField, ".y"),
-		mkItem(itemField, ".z"),
-		tRight,
-		tEOF,
-	}},
-	{"keywords", "{{range if else end with}}", []item{
-		tLeft,
-		mkItem(itemRange, "range"),
-		tSpace,
-		mkItem(itemIf, "if"),
-		tSpace,
-		mkItem(itemElse, "else"),
-		tSpace,
-		mkItem(itemEnd, "end"),
-		tSpace,
-		mkItem(itemWith, "with"),
-		tRight,
-		tEOF,
-	}},
-	{"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
-		tLeft,
-		mkItem(itemVariable, "$c"),
-		tSpace,
-		mkItem(itemDeclare, ":="),
-		tSpace,
-		mkItem(itemIdentifier, "printf"),
-		tSpace,
-		mkItem(itemVariable, "$"),
-		tSpace,
-		mkItem(itemVariable, "$hello"),
-		tSpace,
-		mkItem(itemVariable, "$23"),
-		tSpace,
-		mkItem(itemVariable, "$"),
-		tSpace,
-		mkItem(itemVariable, "$var"),
-		mkItem(itemField, ".Field"),
-		tSpace,
-		mkItem(itemField, ".Method"),
-		tRight,
-		tEOF,
-	}},
-	{"variable invocation", "{{$x 23}}", []item{
-		tLeft,
-		mkItem(itemVariable, "$x"),
-		tSpace,
-		mkItem(itemNumber, "23"),
-		tRight,
-		tEOF,
-	}},
-	{"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
-		mkItem(itemText, "intro "),
-		tLeft,
-		mkItem(itemIdentifier, "echo"),
-		tSpace,
-		mkItem(itemIdentifier, "hi"),
-		tSpace,
-		mkItem(itemNumber, "1.2"),
-		tSpace,
-		tPipe,
-		mkItem(itemIdentifier, "noargs"),
-		tPipe,
-		mkItem(itemIdentifier, "args"),
-		tSpace,
-		mkItem(itemNumber, "1"),
-		tSpace,
-		mkItem(itemString, `"hi"`),
-		tRight,
-		mkItem(itemText, " outro"),
-		tEOF,
-	}},
-	{"declaration", "{{$v := 3}}", []item{
-		tLeft,
-		mkItem(itemVariable, "$v"),
-		tSpace,
-		mkItem(itemDeclare, ":="),
-		tSpace,
-		mkItem(itemNumber, "3"),
-		tRight,
-		tEOF,
-	}},
-	{"2 declarations", "{{$v , $w := 3}}", []item{
-		tLeft,
-		mkItem(itemVariable, "$v"),
-		tSpace,
-		mkItem(itemChar, ","),
-		tSpace,
-		mkItem(itemVariable, "$w"),
-		tSpace,
-		mkItem(itemDeclare, ":="),
-		tSpace,
-		mkItem(itemNumber, "3"),
-		tRight,
-		tEOF,
-	}},
-	{"field of parenthesized expression", "{{(.X).Y}}", []item{
-		tLeft,
-		tLpar,
-		mkItem(itemField, ".X"),
-		tRpar,
-		mkItem(itemField, ".Y"),
-		tRight,
-		tEOF,
-	}},
-	{"trimming spaces before and after", "hello- {{- 3 -}} -world", []item{
-		mkItem(itemText, "hello-"),
-		tLeft,
-		mkItem(itemNumber, "3"),
-		tRight,
-		mkItem(itemText, "-world"),
-		tEOF,
-	}},
-	{"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{
-		mkItem(itemText, "hello-"),
-		mkItem(itemComment, "/* hello */"),
-		mkItem(itemText, "-world"),
-		tEOF,
-	}},
-	// errors
-	{"badchar", "#{{\x01}}", []item{
-		mkItem(itemText, "#"),
-		tLeft,
-		mkItem(itemError, "unrecognized character in action: U+0001"),
-	}},
-	{"unclosed action", "{{", []item{
-		tLeft,
-		mkItem(itemError, "unclosed action"),
-	}},
-	{"EOF in action", "{{range", []item{
-		tLeft,
-		tRange,
-		mkItem(itemError, "unclosed action"),
-	}},
-	{"unclosed quote", "{{\"\n\"}}", []item{
-		tLeft,
-		mkItem(itemError, "unterminated quoted string"),
-	}},
-	{"unclosed raw quote", "{{`xx}}", []item{
-		tLeft,
-		mkItem(itemError, "unterminated raw quoted string"),
-	}},
-	{"unclosed char constant", "{{'\n}}", []item{
-		tLeft,
-		mkItem(itemError, "unterminated character constant"),
-	}},
-	{"bad number", "{{3k}}", []item{
-		tLeft,
-		mkItem(itemError, `bad number syntax: "3k"`),
-	}},
-	{"unclosed paren", "{{(3}}", []item{
-		tLeft,
-		tLpar,
-		mkItem(itemNumber, "3"),
-		mkItem(itemError, `unclosed left paren`),
-	}},
-	{"extra right paren", "{{3)}}", []item{
-		tLeft,
-		mkItem(itemNumber, "3"),
-		tRpar,
-		mkItem(itemError, `unexpected right paren U+0029 ')'`),
-	}},
-
-	// Fixed bugs
-	// Many elements in an action blew the lookahead until
-	// we made lexInsideAction not loop.
-	{"long pipeline deadlock", "{{|||||}}", []item{
-		tLeft,
-		tPipe,
-		tPipe,
-		tPipe,
-		tPipe,
-		tPipe,
-		tRight,
-		tEOF,
-	}},
-	{"text with bad comment", "hello-{{/*/}}-world", []item{
-		mkItem(itemText, "hello-"),
-		mkItem(itemError, `unclosed comment`),
-	}},
-	{"text with comment close separated from delim", "hello-{{/* */ }}-world", []item{
-		mkItem(itemText, "hello-"),
-		mkItem(itemError, `comment ends before closing delimiter`),
-	}},
-	// This one is an error that we can't catch because it breaks templates with
-	// minimized JavaScript. Should have fixed it before Go 1.1.
-	{"unmatched right delimiter", "hello-{.}}-world", []item{
-		mkItem(itemText, "hello-{.}}-world"),
-		tEOF,
-	}},
-}
-
-// collect gathers the emitted items into a slice.
-func collect(t *lexTest, left, right string) (items []item) {
-	l := lex(t.name, t.input, left, right, true)
-	for {
-		item := l.nextItem()
-		items = append(items, item)
-		if item.typ == itemEOF || item.typ == itemError {
-			break
-		}
-	}
-	return
-}
-
-func equal(i1, i2 []item, checkPos bool) bool {
-	if len(i1) != len(i2) {
-		return false
-	}
-	for k := range i1 {
-		if i1[k].typ != i2[k].typ {
-			return false
-		}
-		if i1[k].val != i2[k].val {
-			return false
-		}
-		if checkPos && i1[k].pos != i2[k].pos {
-			return false
-		}
-		if checkPos && i1[k].line != i2[k].line {
-			return false
-		}
-	}
-	return true
-}
-
-func TestLex(t *testing.T) {
-	for _, test := range lexTests {
-		items := collect(&test, "", "")
-		if !equal(items, test.items, false) {
-			t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items)
-		}
-	}
-}
-
-// Some easy cases from above, but with delimiters $$ and @@
-var lexDelimTests = []lexTest{
-	{"punctuation", "$$,@%{{}}@@", []item{
-		tLeftDelim,
-		mkItem(itemChar, ","),
-		mkItem(itemChar, "@"),
-		mkItem(itemChar, "%"),
-		mkItem(itemChar, "{"),
-		mkItem(itemChar, "{"),
-		mkItem(itemChar, "}"),
-		mkItem(itemChar, "}"),
-		tRightDelim,
-		tEOF,
-	}},
-	{"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
-	{"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
-	{"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
-	{"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
-}
-
-var (
-	tLeftDelim  = mkItem(itemLeftDelim, "$$")
-	tRightDelim = mkItem(itemRightDelim, "@@")
-)
-
-func TestDelims(t *testing.T) {
-	for _, test := range lexDelimTests {
-		items := collect(&test, "$$", "@@")
-		if !equal(items, test.items, false) {
-			t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
-		}
-	}
-}
-
-var lexPosTests = []lexTest{
-	{"empty", "", []item{{itemEOF, 0, "", 1}}},
-	{"punctuation", "{{,@%#}}", []item{
-		{itemLeftDelim, 0, "{{", 1},
-		{itemChar, 2, ",", 1},
-		{itemChar, 3, "@", 1},
-		{itemChar, 4, "%", 1},
-		{itemChar, 5, "#", 1},
-		{itemRightDelim, 6, "}}", 1},
-		{itemEOF, 8, "", 1},
-	}},
-	{"sample", "0123{{hello}}xyz", []item{
-		{itemText, 0, "0123", 1},
-		{itemLeftDelim, 4, "{{", 1},
-		{itemIdentifier, 6, "hello", 1},
-		{itemRightDelim, 11, "}}", 1},
-		{itemText, 13, "xyz", 1},
-		{itemEOF, 16, "", 1},
-	}},
-	{"trimafter", "{{x -}}\n{{y}}", []item{
-		{itemLeftDelim, 0, "{{", 1},
-		{itemIdentifier, 2, "x", 1},
-		{itemRightDelim, 5, "}}", 1},
-		{itemLeftDelim, 8, "{{", 2},
-		{itemIdentifier, 10, "y", 2},
-		{itemRightDelim, 11, "}}", 2},
-		{itemEOF, 13, "", 2},
-	}},
-	{"trimbefore", "{{x}}\n{{- y}}", []item{
-		{itemLeftDelim, 0, "{{", 1},
-		{itemIdentifier, 2, "x", 1},
-		{itemRightDelim, 3, "}}", 1},
-		{itemLeftDelim, 6, "{{", 2},
-		{itemIdentifier, 10, "y", 2},
-		{itemRightDelim, 11, "}}", 2},
-		{itemEOF, 13, "", 2},
-	}},
-}
-
-// The other tests don't check position, to make the test cases easier to construct.
-// This one does.
-func TestPos(t *testing.T) {
-	for _, test := range lexPosTests {
-		items := collect(&test, "", "")
-		if !equal(items, test.items, true) {
-			t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
-			if len(items) == len(test.items) {
-				// Detailed print; avoid item.String() to expose the position value.
-				for i := range items {
-					if !equal(items[i:i+1], test.items[i:i+1], true) {
-						i1 := items[i]
-						i2 := test.items[i]
-						t.Errorf("\t#%d: got {%v %d %q %d} expected {%v %d %q %d}",
-							i, i1.typ, i1.pos, i1.val, i1.line, i2.typ, i2.pos, i2.val, i2.line)
-					}
-				}
-			}
-		}
-	}
-}
-
-// Test that an error shuts down the lexing goroutine.
-func TestShutdown(t *testing.T) {
-	// We need to duplicate template.Parse here to hold on to the lexer.
-	const text = "erroneous{{define}}{{else}}1234"
-	lexer := lex("foo", text, "{{", "}}", false)
-	_, err := New("root").parseLexer(lexer)
-	if err == nil {
-		t.Fatalf("expected error")
-	}
-	// The error should have drained the input. Therefore, the lexer should be shut down.
-	token, ok := <-lexer.items
-	if ok {
-		t.Fatalf("input was not drained; got %v", token)
-	}
-}
-
-// parseLexer is a local version of parse that lets us pass in the lexer instead of building it.
-// We expect an error, so the tree set and funcs list are explicitly nil.
-func (t *Tree) parseLexer(lex *lexer) (tree *Tree, err error) {
-	defer t.recover(&err)
-	t.ParseName = t.Name
-	t.startParse(nil, lex, map[string]*Tree{})
-	t.parse()
-	t.add()
-	t.stopParse()
-	return t, nil
-}
diff --git a/internal/backport/text/template/parse/node.go b/internal/backport/text/template/parse/node.go
deleted file mode 100644
index a2da634..0000000
--- a/internal/backport/text/template/parse/node.go
+++ /dev/null
@@ -1,1008 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-var textFormat = "%s" // Changed to "%q" in tests for better error messages.
-
-// A Node is an element in the parse tree. The interface is trivial.
-// The interface contains an unexported method so that only
-// types local to this package can satisfy it.
-type Node interface {
-	Type() NodeType
-	String() string
-	// Copy does a deep copy of the Node and all its components.
-	// To avoid type assertions, some XxxNodes also have specialized
-	// CopyXxx methods that return *XxxNode.
-	Copy() Node
-	Position() Pos // byte position of start of node in full original input string
-	// tree returns the containing *Tree.
-	// It is unexported so all implementations of Node are in this package.
-	tree() *Tree
-	// writeTo writes the String output to the builder.
-	writeTo(*strings.Builder)
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
-	return p
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
-	return t
-}
-
-const (
-	NodeText       NodeType = iota // Plain text.
-	NodeAction                     // A non-control action such as a field evaluation.
-	NodeBool                       // A boolean constant.
-	NodeBreak                      // A break action.
-	NodeChain                      // A sequence of field accesses.
-	NodeCommand                    // An element of a pipeline.
-	NodeContinue                   // A continue action.
-	NodeDot                        // The cursor, dot.
-	nodeElse                       // An else action. Not added to tree.
-	nodeEnd                        // An end action. Not added to tree.
-	NodeField                      // A field or method name.
-	NodeIdentifier                 // An identifier; always a function name.
-	NodeIf                         // An if action.
-	NodeList                       // A list of Nodes.
-	NodeNil                        // An untyped nil constant.
-	NodeNumber                     // A numerical constant.
-	NodePipe                       // A pipeline of commands.
-	NodeRange                      // A range action.
-	NodeString                     // A string constant.
-	NodeTemplate                   // A template invocation action.
-	NodeVariable                   // A $ variable.
-	NodeWith                       // A with action.
-	NodeComment                    // A comment.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
-	NodeType
-	Pos
-	tr    *Tree
-	Nodes []Node // The element nodes in lexical order.
-}
-
-func (t *Tree) newList(pos Pos) *ListNode {
-	return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
-}
-
-func (l *ListNode) append(n Node) {
-	l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) tree() *Tree {
-	return l.tr
-}
-
-func (l *ListNode) String() string {
-	var sb strings.Builder
-	l.writeTo(&sb)
-	return sb.String()
-}
-
-func (l *ListNode) writeTo(sb *strings.Builder) {
-	for _, n := range l.Nodes {
-		n.writeTo(sb)
-	}
-}
-
-func (l *ListNode) CopyList() *ListNode {
-	if l == nil {
-		return l
-	}
-	n := l.tr.newList(l.Pos)
-	for _, elem := range l.Nodes {
-		n.append(elem.Copy())
-	}
-	return n
-}
-
-func (l *ListNode) Copy() Node {
-	return l.CopyList()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Text []byte // The text; may span newlines.
-}
-
-func (t *Tree) newText(pos Pos, text string) *TextNode {
-	return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
-	return fmt.Sprintf(textFormat, t.Text)
-}
-
-func (t *TextNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(t.String())
-}
-
-func (t *TextNode) tree() *Tree {
-	return t.tr
-}
-
-func (t *TextNode) Copy() Node {
-	return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
-}
-
-// CommentNode holds a comment.
-type CommentNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Text string // Comment text.
-}
-
-func (t *Tree) newComment(pos Pos, text string) *CommentNode {
-	return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text}
-}
-
-func (c *CommentNode) String() string {
-	var sb strings.Builder
-	c.writeTo(&sb)
-	return sb.String()
-}
-
-func (c *CommentNode) writeTo(sb *strings.Builder) {
-	sb.WriteString("{{")
-	sb.WriteString(c.Text)
-	sb.WriteString("}}")
-}
-
-func (c *CommentNode) tree() *Tree {
-	return c.tr
-}
-
-func (c *CommentNode) Copy() Node {
-	return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text}
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
-	NodeType
-	Pos
-	tr       *Tree
-	Line     int             // The line number in the input. Deprecated: Kept for compatibility.
-	IsAssign bool            // The variables are being assigned, not declared.
-	Decl     []*VariableNode // Variables in lexical order.
-	Cmds     []*CommandNode  // The commands in lexical order.
-}
-
-func (t *Tree) newPipeline(pos Pos, line int, vars []*VariableNode) *PipeNode {
-	return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: vars}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
-	p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
-	var sb strings.Builder
-	p.writeTo(&sb)
-	return sb.String()
-}
-
-func (p *PipeNode) writeTo(sb *strings.Builder) {
-	if len(p.Decl) > 0 {
-		for i, v := range p.Decl {
-			if i > 0 {
-				sb.WriteString(", ")
-			}
-			v.writeTo(sb)
-		}
-		sb.WriteString(" := ")
-	}
-	for i, c := range p.Cmds {
-		if i > 0 {
-			sb.WriteString(" | ")
-		}
-		c.writeTo(sb)
-	}
-}
-
-func (p *PipeNode) tree() *Tree {
-	return p.tr
-}
-
-func (p *PipeNode) CopyPipe() *PipeNode {
-	if p == nil {
-		return p
-	}
-	vars := make([]*VariableNode, len(p.Decl))
-	for i, d := range p.Decl {
-		vars[i] = d.Copy().(*VariableNode)
-	}
-	n := p.tr.newPipeline(p.Pos, p.Line, vars)
-	n.IsAssign = p.IsAssign
-	for _, c := range p.Cmds {
-		n.append(c.Copy().(*CommandNode))
-	}
-	return n
-}
-
-func (p *PipeNode) Copy() Node {
-	return p.CopyPipe()
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Line int       // The line number in the input. Deprecated: Kept for compatibility.
-	Pipe *PipeNode // The pipeline in the action.
-}
-
-func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
-	return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
-	var sb strings.Builder
-	a.writeTo(&sb)
-	return sb.String()
-}
-
-func (a *ActionNode) writeTo(sb *strings.Builder) {
-	sb.WriteString("{{")
-	a.Pipe.writeTo(sb)
-	sb.WriteString("}}")
-}
-
-func (a *ActionNode) tree() *Tree {
-	return a.tr
-}
-
-func (a *ActionNode) Copy() Node {
-	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func (t *Tree) newCommand(pos Pos) *CommandNode {
-	return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
-}
-
-func (c *CommandNode) append(arg Node) {
-	c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
-	var sb strings.Builder
-	c.writeTo(&sb)
-	return sb.String()
-}
-
-func (c *CommandNode) writeTo(sb *strings.Builder) {
-	for i, arg := range c.Args {
-		if i > 0 {
-			sb.WriteByte(' ')
-		}
-		if arg, ok := arg.(*PipeNode); ok {
-			sb.WriteByte('(')
-			arg.writeTo(sb)
-			sb.WriteByte(')')
-			continue
-		}
-		arg.writeTo(sb)
-	}
-}
-
-func (c *CommandNode) tree() *Tree {
-	return c.tr
-}
-
-func (c *CommandNode) Copy() Node {
-	if c == nil {
-		return c
-	}
-	n := c.tr.newCommand(c.Pos)
-	for _, c := range c.Args {
-		n.append(c.Copy())
-	}
-	return n
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
-	NodeType
-	Pos
-	tr    *Tree
-	Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
-	return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
-	i.Pos = pos
-	return i
-}
-
-// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
-	i.tr = t
-	return i
-}
-
-func (i *IdentifierNode) String() string {
-	return i.Ident
-}
-
-func (i *IdentifierNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(i.String())
-}
-
-func (i *IdentifierNode) tree() *Tree {
-	return i.tr
-}
-
-func (i *IdentifierNode) Copy() Node {
-	return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
-}
-
-// VariableNode holds a list of variable names, possibly with chained field
-// accesses. The dollar sign is part of the (first) name.
-type VariableNode struct {
-	NodeType
-	Pos
-	tr    *Tree
-	Ident []string // Variable name and fields in lexical order.
-}
-
-func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
-	return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
-	var sb strings.Builder
-	v.writeTo(&sb)
-	return sb.String()
-}
-
-func (v *VariableNode) writeTo(sb *strings.Builder) {
-	for i, id := range v.Ident {
-		if i > 0 {
-			sb.WriteByte('.')
-		}
-		sb.WriteString(id)
-	}
-}
-
-func (v *VariableNode) tree() *Tree {
-	return v.tr
-}
-
-func (v *VariableNode) Copy() Node {
-	return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
-}
-
-// DotNode holds the special identifier '.'.
-type DotNode struct {
-	NodeType
-	Pos
-	tr *Tree
-}
-
-func (t *Tree) newDot(pos Pos) *DotNode {
-	return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
-}
-
-func (d *DotNode) Type() NodeType {
-	// Override method on embedded NodeType for API compatibility.
-	// TODO: Not really a problem; could change API without effect but
-	// api tool complains.
-	return NodeDot
-}
-
-func (d *DotNode) String() string {
-	return "."
-}
-
-func (d *DotNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(d.String())
-}
-
-func (d *DotNode) tree() *Tree {
-	return d.tr
-}
-
-func (d *DotNode) Copy() Node {
-	return d.tr.newDot(d.Pos)
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
-	NodeType
-	Pos
-	tr *Tree
-}
-
-func (t *Tree) newNil(pos Pos) *NilNode {
-	return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
-}
-
-func (n *NilNode) Type() NodeType {
-	// Override method on embedded NodeType for API compatibility.
-	// TODO: Not really a problem; could change API without effect but
-	// api tool complains.
-	return NodeNil
-}
-
-func (n *NilNode) String() string {
-	return "nil"
-}
-
-func (n *NilNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(n.String())
-}
-
-func (n *NilNode) tree() *Tree {
-	return n.tr
-}
-
-func (n *NilNode) Copy() Node {
-	return n.tr.newNil(n.Pos)
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
-	NodeType
-	Pos
-	tr    *Tree
-	Ident []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newField(pos Pos, ident string) *FieldNode {
-	return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
-	var sb strings.Builder
-	f.writeTo(&sb)
-	return sb.String()
-}
-
-func (f *FieldNode) writeTo(sb *strings.Builder) {
-	for _, id := range f.Ident {
-		sb.WriteByte('.')
-		sb.WriteString(id)
-	}
-}
-
-func (f *FieldNode) tree() *Tree {
-	return f.tr
-}
-
-func (f *FieldNode) Copy() Node {
-	return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
-	NodeType
-	Pos
-	tr    *Tree
-	Node  Node
-	Field []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
-	return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
-	if len(field) == 0 || field[0] != '.' {
-		panic("no dot in field")
-	}
-	field = field[1:] // Remove leading dot.
-	if field == "" {
-		panic("empty field")
-	}
-	c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
-	var sb strings.Builder
-	c.writeTo(&sb)
-	return sb.String()
-}
-
-func (c *ChainNode) writeTo(sb *strings.Builder) {
-	if _, ok := c.Node.(*PipeNode); ok {
-		sb.WriteByte('(')
-		c.Node.writeTo(sb)
-		sb.WriteByte(')')
-	} else {
-		c.Node.writeTo(sb)
-	}
-	for _, field := range c.Field {
-		sb.WriteByte('.')
-		sb.WriteString(field)
-	}
-}
-
-func (c *ChainNode) tree() *Tree {
-	return c.tr
-}
-
-func (c *ChainNode) Copy() Node {
-	return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	True bool // The value of the boolean constant.
-}
-
-func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
-	return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
-}
-
-func (b *BoolNode) String() string {
-	if b.True {
-		return "true"
-	}
-	return "false"
-}
-
-func (b *BoolNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(b.String())
-}
-
-func (b *BoolNode) tree() *Tree {
-	return b.tr
-}
-
-func (b *BoolNode) Copy() Node {
-	return b.tr.newBool(b.Pos, b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
-	NodeType
-	Pos
-	tr         *Tree
-	IsInt      bool       // Number has an integral value.
-	IsUint     bool       // Number has an unsigned integral value.
-	IsFloat    bool       // Number has a floating-point value.
-	IsComplex  bool       // Number is complex.
-	Int64      int64      // The signed integer value.
-	Uint64     uint64     // The unsigned integer value.
-	Float64    float64    // The floating-point value.
-	Complex128 complex128 // The complex value.
-	Text       string     // The original textual representation from the input.
-}
-
-func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
-	n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
-	switch typ {
-	case itemCharConstant:
-		rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
-		if err != nil {
-			return nil, err
-		}
-		if tail != "'" {
-			return nil, fmt.Errorf("malformed character constant: %s", text)
-		}
-		n.Int64 = int64(rune)
-		n.IsInt = true
-		n.Uint64 = uint64(rune)
-		n.IsUint = true
-		n.Float64 = float64(rune) // odd but those are the rules.
-		n.IsFloat = true
-		return n, nil
-	case itemComplex:
-		// fmt.Sscan can parse the pair, so let it do the work.
-		if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
-			return nil, err
-		}
-		n.IsComplex = true
-		n.simplifyComplex()
-		return n, nil
-	}
-	// Imaginary constants can only be complex unless they are zero.
-	if len(text) > 0 && text[len(text)-1] == 'i' {
-		f, err := strconv.ParseFloat(text[:len(text)-1], 64)
-		if err == nil {
-			n.IsComplex = true
-			n.Complex128 = complex(0, f)
-			n.simplifyComplex()
-			return n, nil
-		}
-	}
-	// Do integer test first so we get 0x123 etc.
-	u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
-	if err == nil {
-		n.IsUint = true
-		n.Uint64 = u
-	}
-	i, err := strconv.ParseInt(text, 0, 64)
-	if err == nil {
-		n.IsInt = true
-		n.Int64 = i
-		if i == 0 {
-			n.IsUint = true // in case of -0.
-			n.Uint64 = u
-		}
-	}
-	// If an integer extraction succeeded, promote the float.
-	if n.IsInt {
-		n.IsFloat = true
-		n.Float64 = float64(n.Int64)
-	} else if n.IsUint {
-		n.IsFloat = true
-		n.Float64 = float64(n.Uint64)
-	} else {
-		f, err := strconv.ParseFloat(text, 64)
-		if err == nil {
-			// If we parsed it as a float but it looks like an integer,
-			// it's a huge number too large to fit in an int. Reject it.
-			if !strings.ContainsAny(text, ".eEpP") {
-				return nil, fmt.Errorf("integer overflow: %q", text)
-			}
-			n.IsFloat = true
-			n.Float64 = f
-			// If a floating-point extraction succeeded, extract the int if needed.
-			if !n.IsInt && float64(int64(f)) == f {
-				n.IsInt = true
-				n.Int64 = int64(f)
-			}
-			if !n.IsUint && float64(uint64(f)) == f {
-				n.IsUint = true
-				n.Uint64 = uint64(f)
-			}
-		}
-	}
-	if !n.IsInt && !n.IsUint && !n.IsFloat {
-		return nil, fmt.Errorf("illegal number syntax: %q", text)
-	}
-	return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
-	n.IsFloat = imag(n.Complex128) == 0
-	if n.IsFloat {
-		n.Float64 = real(n.Complex128)
-		n.IsInt = float64(int64(n.Float64)) == n.Float64
-		if n.IsInt {
-			n.Int64 = int64(n.Float64)
-		}
-		n.IsUint = float64(uint64(n.Float64)) == n.Float64
-		if n.IsUint {
-			n.Uint64 = uint64(n.Float64)
-		}
-	}
-}
-
-func (n *NumberNode) String() string {
-	return n.Text
-}
-
-func (n *NumberNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(n.String())
-}
-
-func (n *NumberNode) tree() *Tree {
-	return n.tr
-}
-
-func (n *NumberNode) Copy() Node {
-	nn := new(NumberNode)
-	*nn = *n // Easy, fast, correct.
-	return nn
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
-	NodeType
-	Pos
-	tr     *Tree
-	Quoted string // The original text of the string, with quotes.
-	Text   string // The string, after quote processing.
-}
-
-func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
-	return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
-	return s.Quoted
-}
-
-func (s *StringNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(s.String())
-}
-
-func (s *StringNode) tree() *Tree {
-	return s.tr
-}
-
-func (s *StringNode) Copy() Node {
-	return s.tr.newString(s.Pos, s.Quoted, s.Text)
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
-	NodeType
-	Pos
-	tr *Tree
-}
-
-func (t *Tree) newEnd(pos Pos) *endNode {
-	return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
-}
-
-func (e *endNode) String() string {
-	return "{{end}}"
-}
-
-func (e *endNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(e.String())
-}
-
-func (e *endNode) tree() *Tree {
-	return e.tr
-}
-
-func (e *endNode) Copy() Node {
-	return e.tr.newEnd(e.Pos)
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Line int // The line number in the input. Deprecated: Kept for compatibility.
-}
-
-func (t *Tree) newElse(pos Pos, line int) *elseNode {
-	return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
-	return nodeElse
-}
-
-func (e *elseNode) String() string {
-	return "{{else}}"
-}
-
-func (e *elseNode) writeTo(sb *strings.Builder) {
-	sb.WriteString(e.String())
-}
-
-func (e *elseNode) tree() *Tree {
-	return e.tr
-}
-
-func (e *elseNode) Copy() Node {
-	return e.tr.newElse(e.Pos, e.Line)
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
-	NodeType
-	Pos
-	tr       *Tree
-	Line     int       // The line number in the input. Deprecated: Kept for compatibility.
-	Pipe     *PipeNode // The pipeline to be evaluated.
-	List     *ListNode // What to execute if the value is non-empty.
-	ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
-	var sb strings.Builder
-	b.writeTo(&sb)
-	return sb.String()
-}
-
-func (b *BranchNode) writeTo(sb *strings.Builder) {
-	name := ""
-	switch b.NodeType {
-	case NodeIf:
-		name = "if"
-	case NodeRange:
-		name = "range"
-	case NodeWith:
-		name = "with"
-	default:
-		panic("unknown branch type")
-	}
-	sb.WriteString("{{")
-	sb.WriteString(name)
-	sb.WriteByte(' ')
-	b.Pipe.writeTo(sb)
-	sb.WriteString("}}")
-	b.List.writeTo(sb)
-	if b.ElseList != nil {
-		sb.WriteString("{{else}}")
-		b.ElseList.writeTo(sb)
-	}
-	sb.WriteString("{{end}}")
-}
-
-func (b *BranchNode) tree() *Tree {
-	return b.tr
-}
-
-func (b *BranchNode) Copy() Node {
-	switch b.NodeType {
-	case NodeIf:
-		return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
-	case NodeRange:
-		return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
-	case NodeWith:
-		return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
-	default:
-		panic("unknown branch type")
-	}
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
-	BranchNode
-}
-
-func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
-	return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (i *IfNode) Copy() Node {
-	return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
-}
-
-// BreakNode represents a {{break}} action.
-type BreakNode struct {
-	tr *Tree
-	NodeType
-	Pos
-	Line int
-}
-
-func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
-	return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
-}
-
-func (b *BreakNode) Copy() Node                  { return b.tr.newBreak(b.Pos, b.Line) }
-func (b *BreakNode) String() string              { return "{{break}}" }
-func (b *BreakNode) tree() *Tree                 { return b.tr }
-func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }
-
-// ContinueNode represents a {{continue}} action.
-type ContinueNode struct {
-	tr *Tree
-	NodeType
-	Pos
-	Line int
-}
-
-func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
-	return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
-}
-
-func (c *ContinueNode) Copy() Node                  { return c.tr.newContinue(c.Pos, c.Line) }
-func (c *ContinueNode) String() string              { return "{{continue}}" }
-func (c *ContinueNode) tree() *Tree                 { return c.tr }
-func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
-	BranchNode
-}
-
-func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
-	return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (r *RangeNode) Copy() Node {
-	return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
-	BranchNode
-}
-
-func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
-	return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (w *WithNode) Copy() Node {
-	return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
-	NodeType
-	Pos
-	tr   *Tree
-	Line int       // The line number in the input. Deprecated: Kept for compatibility.
-	Name string    // The name of the template (unquoted).
-	Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
-	return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
-	var sb strings.Builder
-	t.writeTo(&sb)
-	return sb.String()
-}
-
-func (t *TemplateNode) writeTo(sb *strings.Builder) {
-	sb.WriteString("{{template ")
-	sb.WriteString(strconv.Quote(t.Name))
-	if t.Pipe != nil {
-		sb.WriteByte(' ')
-		t.Pipe.writeTo(sb)
-	}
-	sb.WriteString("}}")
-}
-
-func (t *TemplateNode) tree() *Tree {
-	return t.tr
-}
-
-func (t *TemplateNode) Copy() Node {
-	return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
-}
diff --git a/internal/backport/text/template/parse/parse.go b/internal/backport/text/template/parse/parse.go
deleted file mode 100644
index efae999..0000000
--- a/internal/backport/text/template/parse/parse.go
+++ /dev/null
@@ -1,826 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
-	"bytes"
-	"fmt"
-	"runtime"
-	"strconv"
-	"strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
-	Name      string    // name of the template represented by the tree.
-	ParseName string    // name of the top-level template during parsing, for error messages.
-	Root      *ListNode // top-level root of the tree.
-	Mode      Mode      // parsing mode.
-	text      string    // text parsed to create the template (or its parent)
-	// Parsing only; cleared after parse.
-	funcs      []map[string]interface{}
-	lex        *lexer
-	token      [3]item // three-token lookahead for parser.
-	peekCount  int
-	vars       []string // variables defined at the moment.
-	treeSet    map[string]*Tree
-	actionLine int // line of left delim starting action
-	rangeDepth int
-	mode       Mode
-}
-
-// A mode value is a set of flags (or 0). Modes control parser behavior.
-type Mode uint
-
-const (
-	ParseComments Mode = 1 << iota // parse comments and add them to AST
-	SkipFuncCheck                  // do not check that functions are defined
-)
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
-	if t == nil {
-		return nil
-	}
-	return &Tree{
-		Name:      t.Name,
-		ParseName: t.ParseName,
-		Root:      t.Root.CopyList(),
-		text:      t.text,
-	}
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (map[string]*Tree, error) {
-	treeSet := make(map[string]*Tree)
-	t := New(name)
-	t.text = text
-	_, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
-	return treeSet, err
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
-	if t.peekCount > 0 {
-		t.peekCount--
-	} else {
-		t.token[0] = t.lex.nextItem()
-	}
-	return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
-	t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
-	t.token[1] = t1
-	t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens.
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
-	t.token[1] = t1
-	t.token[2] = t2
-	t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
-	if t.peekCount > 0 {
-		return t.token[t.peekCount-1]
-	}
-	t.peekCount = 1
-	t.token[0] = t.lex.nextItem()
-	return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
-	for {
-		token = t.next()
-		if token.typ != itemSpace {
-			break
-		}
-	}
-	return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() item {
-	token := t.nextNonSpace()
-	t.backup()
-	return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
-	return &Tree{
-		Name:  name,
-		funcs: funcs,
-	}
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
-	pos := int(n.Position())
-	tree := n.tree()
-	if tree == nil {
-		tree = t
-	}
-	text := tree.text[:pos]
-	byteNum := strings.LastIndex(text, "\n")
-	if byteNum == -1 {
-		byteNum = pos // On first line.
-	} else {
-		byteNum++ // After the newline.
-		byteNum = pos - byteNum
-	}
-	lineNum := 1 + strings.Count(text, "\n")
-	context = n.String()
-	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
-	t.Root = nil
-	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
-	panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
-	t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
-	token := t.nextNonSpace()
-	if token.typ != expected {
-		t.unexpected(token, context)
-	}
-	return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
-	token := t.nextNonSpace()
-	if token.typ != expected1 && token.typ != expected2 {
-		t.unexpected(token, context)
-	}
-	return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
-	if token.typ == itemError {
-		extra := ""
-		if t.actionLine != 0 && t.actionLine != token.line {
-			extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
-			if strings.HasSuffix(token.val, " action") {
-				extra = extra[len(" in action"):] // avoid "action in action"
-			}
-		}
-		t.errorf("%s%s", token, extra)
-	}
-	t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
-	e := recover()
-	if e != nil {
-		if _, ok := e.(runtime.Error); ok {
-			panic(e)
-		}
-		if t != nil {
-			t.lex.drain()
-			t.stopParse()
-		}
-		*errp = e.(error)
-	}
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer, treeSet map[string]*Tree) {
-	t.Root = nil
-	t.lex = lex
-	t.vars = []string{"$"}
-	t.funcs = funcs
-	t.treeSet = treeSet
-	lex.breakOK = !t.hasFunction("break")
-	lex.continueOK = !t.hasFunction("continue")
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
-	t.lex = nil
-	t.vars = nil
-	t.funcs = nil
-	t.treeSet = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
-	defer t.recover(&err)
-	t.ParseName = t.Name
-	emitComment := t.Mode&ParseComments != 0
-	t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
-	t.text = text
-	t.parse()
-	t.add()
-	t.stopParse()
-	return t, nil
-}
-
-// add adds tree to t.treeSet.
-func (t *Tree) add() {
-	tree := t.treeSet[t.Name]
-	if tree == nil || IsEmptyTree(tree.Root) {
-		t.treeSet[t.Name] = t
-		return
-	}
-	if !IsEmptyTree(t.Root) {
-		t.errorf("template: multiple definition of template %q", t.Name)
-	}
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
-func IsEmptyTree(n Node) bool {
-	switch n := n.(type) {
-	case nil:
-		return true
-	case *ActionNode:
-	case *CommentNode:
-		return true
-	case *IfNode:
-	case *ListNode:
-		for _, node := range n.Nodes {
-			if !IsEmptyTree(node) {
-				return false
-			}
-		}
-		return true
-	case *RangeNode:
-	case *TemplateNode:
-	case *TextNode:
-		return len(bytes.TrimSpace(n.Text)) == 0
-	case *WithNode:
-	default:
-		panic("unknown node: " + n.String())
-	}
-	return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse() {
-	t.Root = t.newList(t.peek().pos)
-	for t.peek().typ != itemEOF {
-		if t.peek().typ == itemLeftDelim {
-			delim := t.next()
-			if t.nextNonSpace().typ == itemDefine {
-				newT := New("definition") // name will be updated once we know it.
-				newT.text = t.text
-				newT.Mode = t.Mode
-				newT.ParseName = t.ParseName
-				newT.startParse(t.funcs, t.lex, t.treeSet)
-				newT.parseDefinition()
-				continue
-			}
-			t.backup2(delim)
-		}
-		switch n := t.textOrAction(); n.Type() {
-		case nodeEnd, nodeElse:
-			t.errorf("unexpected %s", n)
-		default:
-			t.Root.append(n)
-		}
-	}
-}
-
-// parseDefinition parses a {{define}} ...  {{end}} template definition and
-// installs the definition in t.treeSet. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition() {
-	const context = "define clause"
-	name := t.expectOneOf(itemString, itemRawString, context)
-	var err error
-	t.Name, err = strconv.Unquote(name.val)
-	if err != nil {
-		t.error(err)
-	}
-	t.expect(itemRightDelim, context)
-	var end Node
-	t.Root, end = t.itemList()
-	if end.Type() != nodeEnd {
-		t.errorf("unexpected %s in %s", end, context)
-	}
-	t.add()
-	t.stopParse()
-}
-
-// itemList:
-//
-//	textOrAction*
-//
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
-	list = t.newList(t.peekNonSpace().pos)
-	for t.peekNonSpace().typ != itemEOF {
-		n := t.textOrAction()
-		switch n.Type() {
-		case nodeEnd, nodeElse:
-			return list, n
-		}
-		list.append(n)
-	}
-	t.errorf("unexpected EOF")
-	return
-}
-
-// textOrAction:
-//
-//	text | comment | action
-func (t *Tree) textOrAction() Node {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemText:
-		return t.newText(token.pos, token.val)
-	case itemLeftDelim:
-		t.actionLine = token.line
-		defer t.clearActionLine()
-		return t.action()
-	case itemComment:
-		return t.newComment(token.pos, token.val)
-	default:
-		t.unexpected(token, "input")
-	}
-	return nil
-}
-
-func (t *Tree) clearActionLine() {
-	t.actionLine = 0
-}
-
-// Action:
-//
-//	control
-//	command ("|" command)*
-//
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemBlock:
-		return t.blockControl()
-	case itemBreak:
-		return t.breakControl(token.pos, token.line)
-	case itemContinue:
-		return t.continueControl(token.pos, token.line)
-	case itemElse:
-		return t.elseControl()
-	case itemEnd:
-		return t.endControl()
-	case itemIf:
-		return t.ifControl()
-	case itemRange:
-		return t.rangeControl()
-	case itemTemplate:
-		return t.templateControl()
-	case itemWith:
-		return t.withControl()
-	}
-	t.backup()
-	token := t.peek()
-	// Do not pop variables; they persist until "end".
-	return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
-}
-
-// Break:
-//
-//	{{break}}
-//
-// Break keyword is past.
-func (t *Tree) breakControl(pos Pos, line int) Node {
-	if token := t.next(); token.typ != itemRightDelim {
-		t.unexpected(token, "in {{break}}")
-	}
-	if t.rangeDepth == 0 {
-		t.errorf("{{break}} outside {{range}}")
-	}
-	return t.newBreak(pos, line)
-}
-
-// Continue:
-//
-//	{{continue}}
-//
-// Continue keyword is past.
-func (t *Tree) continueControl(pos Pos, line int) Node {
-	if token := t.next(); token.typ != itemRightDelim {
-		t.unexpected(token, "in {{continue}}")
-	}
-	if t.rangeDepth == 0 {
-		t.errorf("{{continue}} outside {{range}}")
-	}
-	return t.newContinue(pos, line)
-}
-
-// Pipeline:
-//
-//	declarations? command ('|' command)*
-func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
-	token := t.peekNonSpace()
-	pipe = t.newPipeline(token.pos, token.line, nil)
-	// Are there declarations or assignments?
-decls:
-	if v := t.peekNonSpace(); v.typ == itemVariable {
-		t.next()
-		// Since space is a token, we need 3-token look-ahead here in the worst case:
-		// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
-		// argument variable rather than a declaration. So remember the token
-		// adjacent to the variable so we can push it back if necessary.
-		tokenAfterVariable := t.peek()
-		next := t.peekNonSpace()
-		switch {
-		case next.typ == itemAssign, next.typ == itemDeclare:
-			pipe.IsAssign = next.typ == itemAssign
-			t.nextNonSpace()
-			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
-			t.vars = append(t.vars, v.val)
-		case next.typ == itemChar && next.val == ",":
-			t.nextNonSpace()
-			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
-			t.vars = append(t.vars, v.val)
-			if context == "range" && len(pipe.Decl) < 2 {
-				switch t.peekNonSpace().typ {
-				case itemVariable, itemRightDelim, itemRightParen:
-					// second initialized variable in a range pipeline
-					goto decls
-				default:
-					t.errorf("range can only initialize variables")
-				}
-			}
-			t.errorf("too many declarations in %s", context)
-		case tokenAfterVariable.typ == itemSpace:
-			t.backup3(v, tokenAfterVariable)
-		default:
-			t.backup2(v)
-		}
-	}
-	for {
-		switch token := t.nextNonSpace(); token.typ {
-		case end:
-			// At this point, the pipeline is complete
-			t.checkPipeline(pipe, context)
-			return
-		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
-			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
-			t.backup()
-			pipe.append(t.command())
-		default:
-			t.unexpected(token, context)
-		}
-	}
-}
-
-func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
-	// Reject empty pipelines
-	if len(pipe.Cmds) == 0 {
-		t.errorf("missing value for %s", context)
-	}
-	// Only the first command of a pipeline can start with a non executable operand
-	for i, c := range pipe.Cmds[1:] {
-		switch c.Args[0].Type() {
-		case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
-			// With A|B|C, pipeline stage 2 is B
-			t.errorf("non executable command in pipeline stage %d", i+2)
-		}
-	}
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
-	defer t.popVars(len(t.vars))
-	pipe = t.pipeline(context, itemRightDelim)
-	if context == "range" {
-		t.rangeDepth++
-	}
-	var next Node
-	list, next = t.itemList()
-	if context == "range" {
-		t.rangeDepth--
-	}
-	switch next.Type() {
-	case nodeEnd: // done
-	case nodeElse:
-		if allowElseIf {
-			// Special case for "else if". If the "else" is followed immediately by an "if",
-			// the elseControl will have left the "if" token pending. Treat
-			//	{{if a}}_{{else if b}}_{{end}}
-			// as
-			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
-			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
-			// is assumed. This technique works even for long if-else-if chains.
-			// TODO: Should we allow else-if in with and range?
-			if t.peek().typ == itemIf {
-				t.next() // Consume the "if" token.
-				elseList = t.newList(next.Position())
-				elseList.append(t.ifControl())
-				// Do not consume the next item - only one {{end}} required.
-				break
-			}
-		}
-		elseList, next = t.itemList()
-		if next.Type() != nodeEnd {
-			t.errorf("expected end; found %s", next)
-		}
-	}
-	return pipe.Position(), pipe.Line, pipe, list, elseList
-}
-
-// If:
-//
-//	{{if pipeline}} itemList {{end}}
-//	{{if pipeline}} itemList {{else}} itemList {{end}}
-//
-// If keyword is past.
-func (t *Tree) ifControl() Node {
-	return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-//
-//	{{range pipeline}} itemList {{end}}
-//	{{range pipeline}} itemList {{else}} itemList {{end}}
-//
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
-	r := t.newRange(t.parseControl(false, "range"))
-	return r
-}
-
-// With:
-//
-//	{{with pipeline}} itemList {{end}}
-//	{{with pipeline}} itemList {{else}} itemList {{end}}
-//
-// With keyword is past.
-func (t *Tree) withControl() Node {
-	return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-//
-//	{{end}}
-//
-// End keyword is past.
-func (t *Tree) endControl() Node {
-	return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-//
-//	{{else}}
-//
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
-	// Special case for "else if".
-	peek := t.peekNonSpace()
-	if peek.typ == itemIf {
-		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
-		return t.newElse(peek.pos, peek.line)
-	}
-	token := t.expect(itemRightDelim, "else")
-	return t.newElse(token.pos, token.line)
-}
-
-// Block:
-//
-//	{{block stringValue pipeline}}
-//
-// Block keyword is past.
-// The name must be something that can evaluate to a string.
-// The pipeline is mandatory.
-func (t *Tree) blockControl() Node {
-	const context = "block clause"
-
-	token := t.nextNonSpace()
-	name := t.parseTemplateName(token, context)
-	pipe := t.pipeline(context, itemRightDelim)
-
-	block := New(name) // The name is already known; no need to update it later.
-	block.text = t.text
-	block.Mode = t.Mode
-	block.ParseName = t.ParseName
-	block.startParse(t.funcs, t.lex, t.treeSet)
-	var end Node
-	block.Root, end = block.itemList()
-	if end.Type() != nodeEnd {
-		t.errorf("unexpected %s in %s", end, context)
-	}
-	block.add()
-	block.stopParse()
-
-	return t.newTemplate(token.pos, token.line, name, pipe)
-}
-
-// Template:
-//
-//	{{template stringValue pipeline}}
-//
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
-	const context = "template clause"
-	token := t.nextNonSpace()
-	name := t.parseTemplateName(token, context)
-	var pipe *PipeNode
-	if t.nextNonSpace().typ != itemRightDelim {
-		t.backup()
-		// Do not pop variables; they persist until "end".
-		pipe = t.pipeline(context, itemRightDelim)
-	}
-	return t.newTemplate(token.pos, token.line, name, pipe)
-}
-
-func (t *Tree) parseTemplateName(token item, context string) (name string) {
-	switch token.typ {
-	case itemString, itemRawString:
-		s, err := strconv.Unquote(token.val)
-		if err != nil {
-			t.error(err)
-		}
-		name = s
-	default:
-		t.unexpected(token, context)
-	}
-	return
-}
-
-// command:
-//
-//	operand (space operand)*
-//
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
-	cmd := t.newCommand(t.peekNonSpace().pos)
-	for {
-		t.peekNonSpace() // skip leading spaces.
-		operand := t.operand()
-		if operand != nil {
-			cmd.append(operand)
-		}
-		switch token := t.next(); token.typ {
-		case itemSpace:
-			continue
-		case itemRightDelim, itemRightParen:
-			t.backup()
-		case itemPipe:
-			// nothing here; break loop below
-		default:
-			t.unexpected(token, "operand")
-		}
-		break
-	}
-	if len(cmd.Args) == 0 {
-		t.errorf("empty command")
-	}
-	return cmd
-}
-
-// operand:
-//
-//	term .Field*
-//
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
-	node := t.term()
-	if node == nil {
-		return nil
-	}
-	if t.peek().typ == itemField {
-		chain := t.newChain(t.peek().pos, node)
-		for t.peek().typ == itemField {
-			chain.Add(t.next().val)
-		}
-		// Compatibility with original API: If the term is of type NodeField
-		// or NodeVariable, just put more fields on the original.
-		// Otherwise, keep the Chain node.
-		// Obvious parsing errors involving literal values are detected here.
-		// More complex error cases will have to be handled at execution time.
-		switch node.Type() {
-		case NodeField:
-			node = t.newField(chain.Position(), chain.String())
-		case NodeVariable:
-			node = t.newVariable(chain.Position(), chain.String())
-		case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
-			t.errorf("unexpected . after term %q", node.String())
-		default:
-			node = chain
-		}
-	}
-	return node
-}
-
-// term:
-//
-//	literal (number, string, nil, boolean)
-//	function (identifier)
-//	.
-//	.Field
-//	$
-//	'(' pipeline ')'
-//
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
-	switch token := t.nextNonSpace(); token.typ {
-	case itemIdentifier:
-		checkFunc := t.Mode&SkipFuncCheck == 0
-		if checkFunc && !t.hasFunction(token.val) {
-			t.errorf("function %q not defined", token.val)
-		}
-		return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
-	case itemDot:
-		return t.newDot(token.pos)
-	case itemNil:
-		return t.newNil(token.pos)
-	case itemVariable:
-		return t.useVar(token.pos, token.val)
-	case itemField:
-		return t.newField(token.pos, token.val)
-	case itemBool:
-		return t.newBool(token.pos, token.val == "true")
-	case itemCharConstant, itemComplex, itemNumber:
-		number, err := t.newNumber(token.pos, token.val, token.typ)
-		if err != nil {
-			t.error(err)
-		}
-		return number
-	case itemLeftParen:
-		return t.pipeline("parenthesized pipeline", itemRightParen)
-	case itemString, itemRawString:
-		s, err := strconv.Unquote(token.val)
-		if err != nil {
-			t.error(err)
-		}
-		return t.newString(token.pos, token.val, s)
-	}
-	t.backup()
-	return nil
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
-	for _, funcMap := range t.funcs {
-		if funcMap == nil {
-			continue
-		}
-		if funcMap[name] != nil {
-			return true
-		}
-	}
-	return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
-	t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
-	v := t.newVariable(pos, name)
-	for _, varName := range t.vars {
-		if varName == v.Ident[0] {
-			return v
-		}
-	}
-	t.errorf("undefined variable %q", v.Ident[0])
-	return nil
-}
diff --git a/internal/backport/text/template/parse/parse_test.go b/internal/backport/text/template/parse/parse_test.go
deleted file mode 100644
index c3679a0..0000000
--- a/internal/backport/text/template/parse/parse_test.go
+++ /dev/null
@@ -1,676 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
-	"flag"
-	"fmt"
-	"strings"
-	"testing"
-)
-
-var debug = flag.Bool("debug", false, "show the errors produced by the main tests")
-
-type numberTest struct {
-	text      string
-	isInt     bool
-	isUint    bool
-	isFloat   bool
-	isComplex bool
-	int64
-	uint64
-	float64
-	complex128
-}
-
-var numberTests = []numberTest{
-	// basics
-	{"0", true, true, true, false, 0, 0, 0, 0},
-	{"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
-	{"73", true, true, true, false, 73, 73, 73, 0},
-	{"7_3", true, true, true, false, 73, 73, 73, 0},
-	{"0b10_010_01", true, true, true, false, 73, 73, 73, 0},
-	{"0B10_010_01", true, true, true, false, 73, 73, 73, 0},
-	{"073", true, true, true, false, 073, 073, 073, 0},
-	{"0o73", true, true, true, false, 073, 073, 073, 0},
-	{"0O73", true, true, true, false, 073, 073, 073, 0},
-	{"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
-	{"0X73", true, true, true, false, 0x73, 0x73, 0x73, 0},
-	{"0x7_3", true, true, true, false, 0x73, 0x73, 0x73, 0},
-	{"-73", true, false, true, false, -73, 0, -73, 0},
-	{"+73", true, false, true, false, 73, 0, 73, 0},
-	{"100", true, true, true, false, 100, 100, 100, 0},
-	{"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
-	{"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
-	{"-1.2", false, false, true, false, 0, 0, -1.2, 0},
-	{"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
-	{"1e1_9", false, true, true, false, 0, 1e19, 1e19, 0},
-	{"1E19", false, true, true, false, 0, 1e19, 1e19, 0},
-	{"-1e19", false, false, true, false, 0, 0, -1e19, 0},
-	{"0x_1p4", true, true, true, false, 16, 16, 16, 0},
-	{"0X_1P4", true, true, true, false, 16, 16, 16, 0},
-	{"0x_1p-4", false, false, true, false, 0, 0, 1 / 16., 0},
-	{"4i", false, false, false, true, 0, 0, 0, 4i},
-	{"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
-	{"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
-	// complex with 0 imaginary are float (and maybe integer)
-	{"0i", true, true, true, true, 0, 0, 0, 0},
-	{"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
-	{"-12+0i", true, false, true, true, -12, 0, -12, -12},
-	{"13+0i", true, true, true, true, 13, 13, 13, 13},
-	// funny bases
-	{"0123", true, true, true, false, 0123, 0123, 0123, 0},
-	{"-0x0", true, true, true, false, 0, 0, 0, 0},
-	{"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
-	// character constants
-	{`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
-	{`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
-	{`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
-	{`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
-	{`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
-	{`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
-	{`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
-	{`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
-	// some broken syntax
-	{text: "+-2"},
-	{text: "0x123."},
-	{text: "1e."},
-	{text: "0xi."},
-	{text: "1+2."},
-	{text: "'x"},
-	{text: "'xx'"},
-	{text: "'433937734937734969526500969526500'"}, // Integer too large - issue 10634.
-	// Issue 8622 - 0xe parsed as floating point. Very embarrassing.
-	{"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0},
-}
-
-func TestNumberParse(t *testing.T) {
-	for _, test := range numberTests {
-		// If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
-		// because imaginary comes out as a number.
-		var c complex128
-		typ := itemNumber
-		var tree *Tree
-		if test.text[0] == '\'' {
-			typ = itemCharConstant
-		} else {
-			_, err := fmt.Sscan(test.text, &c)
-			if err == nil {
-				typ = itemComplex
-			}
-		}
-		n, err := tree.newNumber(0, test.text, typ)
-		ok := test.isInt || test.isUint || test.isFloat || test.isComplex
-		if ok && err != nil {
-			t.Errorf("unexpected error for %q: %s", test.text, err)
-			continue
-		}
-		if !ok && err == nil {
-			t.Errorf("expected error for %q", test.text)
-			continue
-		}
-		if !ok {
-			if *debug {
-				fmt.Printf("%s\n\t%s\n", test.text, err)
-			}
-			continue
-		}
-		if n.IsComplex != test.isComplex {
-			t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
-		}
-		if test.isInt {
-			if !n.IsInt {
-				t.Errorf("expected integer for %q", test.text)
-			}
-			if n.Int64 != test.int64 {
-				t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
-			}
-		} else if n.IsInt {
-			t.Errorf("did not expect integer for %q", test.text)
-		}
-		if test.isUint {
-			if !n.IsUint {
-				t.Errorf("expected unsigned integer for %q", test.text)
-			}
-			if n.Uint64 != test.uint64 {
-				t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
-			}
-		} else if n.IsUint {
-			t.Errorf("did not expect unsigned integer for %q", test.text)
-		}
-		if test.isFloat {
-			if !n.IsFloat {
-				t.Errorf("expected float for %q", test.text)
-			}
-			if n.Float64 != test.float64 {
-				t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
-			}
-		} else if n.IsFloat {
-			t.Errorf("did not expect float for %q", test.text)
-		}
-		if test.isComplex {
-			if !n.IsComplex {
-				t.Errorf("expected complex for %q", test.text)
-			}
-			if n.Complex128 != test.complex128 {
-				t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
-			}
-		} else if n.IsComplex {
-			t.Errorf("did not expect complex for %q", test.text)
-		}
-	}
-}
-
-type parseTest struct {
-	name   string
-	input  string
-	ok     bool
-	result string // what the user would see in an error message.
-}
-
-const (
-	noError  = true
-	hasError = false
-)
-
-var parseTests = []parseTest{
-	{"empty", "", noError,
-		``},
-	{"comment", "{{/*\n\n\n*/}}", noError,
-		``},
-	{"spaces", " \t\n", noError,
-		`" \t\n"`},
-	{"text", "some text", noError,
-		`"some text"`},
-	{"emptyAction", "{{}}", hasError,
-		`{{}}`},
-	{"field", "{{.X}}", noError,
-		`{{.X}}`},
-	{"simple command", "{{printf}}", noError,
-		`{{printf}}`},
-	{"$ invocation", "{{$}}", noError,
-		"{{$}}"},
-	{"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
-		"{{with $x := 3}}{{$x 23}}{{end}}"},
-	{"variable with fields", "{{$.I}}", noError,
-		"{{$.I}}"},
-	{"multi-word command", "{{printf `%d` 23}}", noError,
-		"{{printf `%d` 23}}"},
-	{"pipeline", "{{.X|.Y}}", noError,
-		`{{.X | .Y}}`},
-	{"pipeline with decl", "{{$x := .X|.Y}}", noError,
-		`{{$x := .X | .Y}}`},
-	{"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError,
-		`{{.X (.Y .Z) (.A | .B .C) (.E)}}`},
-	{"field applied to parentheses", "{{(.Y .Z).Field}}", noError,
-		`{{(.Y .Z).Field}}`},
-	{"simple if", "{{if .X}}hello{{end}}", noError,
-		`{{if .X}}"hello"{{end}}`},
-	{"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
-		`{{if .X}}"true"{{else}}"false"{{end}}`},
-	{"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError,
-		`{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`},
-	{"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError,
-		`"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`},
-	{"simple range", "{{range .X}}hello{{end}}", noError,
-		`{{range .X}}"hello"{{end}}`},
-	{"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
-		`{{range .X.Y.Z}}"hello"{{end}}`},
-	{"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
-		`{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
-	{"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
-		`{{range .X}}"true"{{else}}"false"{{end}}`},
-	{"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
-		`{{range .X | .M}}"true"{{else}}"false"{{end}}`},
-	{"range []int", "{{range .SI}}{{.}}{{end}}", noError,
-		`{{range .SI}}{{.}}{{end}}`},
-	{"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError,
-		`{{range $x := .SI}}{{.}}{{end}}`},
-	{"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
-		`{{range $x, $y := .SI}}{{.}}{{end}}`},
-	{"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
-		`{{range .SI}}{{.}}{{break}}{{end}}`},
-	{"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
-		`{{range .SI}}{{.}}{{continue}}{{end}}`},
-	{"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
-		`{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
-	{"template", "{{template `x`}}", noError,
-		`{{template "x"}}`},
-	{"template with arg", "{{template `x` .Y}}", noError,
-		`{{template "x" .Y}}`},
-	{"with", "{{with .X}}hello{{end}}", noError,
-		`{{with .X}}"hello"{{end}}`},
-	{"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
-		`{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
-	// Trimming spaces.
-	{"trim left", "x \r\n\t{{- 3}}", noError, `"x"{{3}}`},
-	{"trim right", "{{3 -}}\n\n\ty", noError, `{{3}}"y"`},
-	{"trim left and right", "x \r\n\t{{- 3 -}}\n\n\ty", noError, `"x"{{3}}"y"`},
-	{"trim with extra spaces", "x\n{{-  3   -}}\ny", noError, `"x"{{3}}"y"`},
-	{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"`},
-	{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `"y"`},
-	{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x""y"`},
-	{"block definition", `{{block "foo" .}}hello{{end}}`, noError,
-		`{{template "foo" .}}`},
-
-	{"newline in assignment", "{{ $x \n := \n 1 \n }}", noError, "{{$x := 1}}"},
-	{"newline in empty action", "{{\n}}", hasError, "{{\n}}"},
-	{"newline in pipeline", "{{\n\"x\"\n|\nprintf\n}}", noError, `{{"x" | printf}}`},
-	{"newline in comment", "{{/*\nhello\n*/}}", noError, ""},
-	{"newline in comment", "{{-\n/*\nhello\n*/\n-}}", noError, ""},
-
-	// Errors.
-	{"unclosed action", "hello{{range", hasError, ""},
-	{"unmatched end", "{{end}}", hasError, ""},
-	{"unmatched else", "{{else}}", hasError, ""},
-	{"unmatched else after if", "{{if .X}}hello{{end}}{{else}}", hasError, ""},
-	{"multiple else", "{{if .X}}1{{else}}2{{else}}3{{end}}", hasError, ""},
-	{"missing end", "hello{{range .x}}", hasError, ""},
-	{"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
-	{"undefined function", "hello{{undefined}}", hasError, ""},
-	{"undefined variable", "{{$x}}", hasError, ""},
-	{"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
-	{"variable undefined in template", "{{template $v}}", hasError, ""},
-	{"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
-	{"template with field ref", "{{template .X}}", hasError, ""},
-	{"template with var", "{{template $v}}", hasError, ""},
-	{"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
-	{"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
-	{"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
-	{"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""},
-	{"adjacent args", "{{printf 3`x`}}", hasError, ""},
-	{"adjacent args with .", "{{printf `x`.}}", hasError, ""},
-	{"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
-	{"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
-	{"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
-	{"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
-	{"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
-	// Other kinds of assignments and operators aren't available yet.
-	{"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
-	{"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
-	{"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""},
-	{"bug0d", "{{$x % 3}}{{$x}}", hasError, ""},
-	// Check the parse fails for := rather than comma.
-	{"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""},
-	// Another bug: variable read must ignore following punctuation.
-	{"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""},                     // ! is just illegal here.
-	{"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""},                     // $x+2 should not parse as ($x) (+2).
-	{"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space.
-	// dot following a literal value
-	{"dot after integer", "{{1.E}}", hasError, ""},
-	{"dot after float", "{{0.1.E}}", hasError, ""},
-	{"dot after boolean", "{{true.E}}", hasError, ""},
-	{"dot after char", "{{'a'.any}}", hasError, ""},
-	{"dot after string", `{{"hello".guys}}`, hasError, ""},
-	{"dot after dot", "{{..E}}", hasError, ""},
-	{"dot after nil", "{{nil.E}}", hasError, ""},
-	// Wrong pipeline
-	{"wrong pipeline dot", "{{12|.}}", hasError, ""},
-	{"wrong pipeline number", "{{.|12|printf}}", hasError, ""},
-	{"wrong pipeline string", "{{.|printf|\"error\"}}", hasError, ""},
-	{"wrong pipeline char", "{{12|printf|'e'}}", hasError, ""},
-	{"wrong pipeline boolean", "{{.|true}}", hasError, ""},
-	{"wrong pipeline nil", "{{'c'|nil}}", hasError, ""},
-	{"empty pipeline", `{{printf "%d" ( ) }}`, hasError, ""},
-	// Missing pipeline in block
-	{"block definition", `{{block "foo"}}hello{{end}}`, hasError, ""},
-}
-
-var builtins = map[string]interface{}{
-	"printf":   fmt.Sprintf,
-	"contains": strings.Contains,
-}
-
-func testParse(doCopy bool, t *testing.T) {
-	textFormat = "%q"
-	defer func() { textFormat = "%s" }()
-	for _, test := range parseTests {
-		tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins)
-		switch {
-		case err == nil && !test.ok:
-			t.Errorf("%q: expected error; got none", test.name)
-			continue
-		case err != nil && test.ok:
-			t.Errorf("%q: unexpected error: %v", test.name, err)
-			continue
-		case err != nil && !test.ok:
-			// expected error, got one
-			if *debug {
-				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
-			}
-			continue
-		}
-		var result string
-		if doCopy {
-			result = tmpl.Root.Copy().String()
-		} else {
-			result = tmpl.Root.String()
-		}
-		if result != test.result {
-			t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
-		}
-	}
-}
-
-func TestParse(t *testing.T) {
-	testParse(false, t)
-}
-
-// Same as TestParse, but we copy the node first
-func TestParseCopy(t *testing.T) {
-	testParse(true, t)
-}
-
-func TestParseWithComments(t *testing.T) {
-	textFormat = "%q"
-	defer func() { textFormat = "%s" }()
-	tests := [...]parseTest{
-		{"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"},
-		{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`},
-		{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`},
-		{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			tr := New(test.name)
-			tr.Mode = ParseComments
-			tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree))
-			if err != nil {
-				t.Errorf("%q: unexpected error: %v", test.name, err)
-			}
-			if result := tmpl.Root.String(); result != test.result {
-				t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
-			}
-		})
-	}
-}
-
-func TestSkipFuncCheck(t *testing.T) {
-	oldTextFormat := textFormat
-	textFormat = "%q"
-	defer func() { textFormat = oldTextFormat }()
-	tr := New("skip func check")
-	tr.Mode = SkipFuncCheck
-	tmpl, err := tr.Parse("{{fn 1 2}}", "", "", make(map[string]*Tree))
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	expected := "{{fn 1 2}}"
-	if result := tmpl.Root.String(); result != expected {
-		t.Errorf("got\n\t%v\nexpected\n\t%v", result, expected)
-	}
-}
-
-type isEmptyTest struct {
-	name  string
-	input string
-	empty bool
-}
-
-var isEmptyTests = []isEmptyTest{
-	{"empty", ``, true},
-	{"nonempty", `hello`, false},
-	{"spaces only", " \t\n \t\n", true},
-	{"comment only", "{{/* comment */}}", true},
-	{"definition", `{{define "x"}}something{{end}}`, true},
-	{"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
-	{"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
-	{"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false},
-}
-
-func TestIsEmpty(t *testing.T) {
-	if !IsEmptyTree(nil) {
-		t.Errorf("nil tree is not empty")
-	}
-	for _, test := range isEmptyTests {
-		tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil)
-		if err != nil {
-			t.Errorf("%q: unexpected error: %v", test.name, err)
-			continue
-		}
-		if empty := IsEmptyTree(tree.Root); empty != test.empty {
-			t.Errorf("%q: expected %t got %t", test.name, test.empty, empty)
-		}
-	}
-}
-
-func TestErrorContextWithTreeCopy(t *testing.T) {
-	tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil)
-	if err != nil {
-		t.Fatalf("unexpected tree parse failure: %v", err)
-	}
-	treeCopy := tree.Copy()
-	wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0])
-	gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0])
-	if wantLocation != gotLocation {
-		t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation)
-	}
-	if wantContext != gotContext {
-		t.Errorf("wrong error location want %q got %q", wantContext, gotContext)
-	}
-}
-
-// All failures, and the result is a string that must appear in the error message.
-var errorTests = []parseTest{
-	// Check line numbers are accurate.
-	{"unclosed1",
-		"line1\n{{",
-		hasError, `unclosed1:2: unclosed action`},
-	{"unclosed2",
-		"line1\n{{define `x`}}line2\n{{",
-		hasError, `unclosed2:3: unclosed action`},
-	{"unclosed3",
-		"line1\n{{\"x\"\n\"y\"\n",
-		hasError, `unclosed3:4: unclosed action started at unclosed3:2`},
-	{"unclosed4",
-		"{{\n\n\n\n\n",
-		hasError, `unclosed4:6: unclosed action started at unclosed4:1`},
-	{"var1",
-		"line1\n{{\nx\n}}",
-		hasError, `var1:3: function "x" not defined`},
-	// Specific errors.
-	{"function",
-		"{{foo}}",
-		hasError, `function "foo" not defined`},
-	{"comment1",
-		"{{/*}}",
-		hasError, `comment1:1: unclosed comment`},
-	{"comment2",
-		"{{/*\nhello\n}}",
-		hasError, `comment2:1: unclosed comment`},
-	{"lparen",
-		"{{.X (1 2 3}}",
-		hasError, `unclosed left paren`},
-	{"rparen",
-		"{{.X 1 2 3 ) }}",
-		hasError, `unexpected ")" in command`},
-	{"rparen2",
-		"{{(.X 1 2 3",
-		hasError, `unclosed action`},
-	{"space",
-		"{{`x`3}}",
-		hasError, `in operand`},
-	{"idchar",
-		"{{a#}}",
-		hasError, `'#'`},
-	{"charconst",
-		"{{'a}}",
-		hasError, `unterminated character constant`},
-	{"stringconst",
-		`{{"a}}`,
-		hasError, `unterminated quoted string`},
-	{"rawstringconst",
-		"{{`a}}",
-		hasError, `unterminated raw quoted string`},
-	{"number",
-		"{{0xi}}",
-		hasError, `number syntax`},
-	{"multidefine",
-		"{{define `a`}}a{{end}}{{define `a`}}b{{end}}",
-		hasError, `multiple definition of template`},
-	{"eof",
-		"{{range .X}}",
-		hasError, `unexpected EOF`},
-	{"variable",
-		// Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration.
-		"{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}",
-		hasError, `unexpected ":="`},
-	{"multidecl",
-		"{{$a,$b,$c := 23}}",
-		hasError, `too many declarations`},
-	{"undefvar",
-		"{{$a}}",
-		hasError, `undefined variable`},
-	{"wrongdot",
-		"{{true.any}}",
-		hasError, `unexpected . after term`},
-	{"wrongpipeline",
-		"{{12|false}}",
-		hasError, `non executable command in pipeline`},
-	{"emptypipeline",
-		`{{ ( ) }}`,
-		hasError, `missing value for parenthesized pipeline`},
-	{"multilinerawstring",
-		"{{ $v := `\n` }} {{",
-		hasError, `multilinerawstring:2: unclosed action`},
-	{"rangeundefvar",
-		"{{range $k}}{{end}}",
-		hasError, `undefined variable`},
-	{"rangeundefvars",
-		"{{range $k, $v}}{{end}}",
-		hasError, `undefined variable`},
-	{"rangemissingvalue1",
-		"{{range $k,}}{{end}}",
-		hasError, `missing value for range`},
-	{"rangemissingvalue2",
-		"{{range $k, $v := }}{{end}}",
-		hasError, `missing value for range`},
-	{"rangenotvariable1",
-		"{{range $k, .}}{{end}}",
-		hasError, `range can only initialize variables`},
-	{"rangenotvariable2",
-		"{{range $k, 123 := .}}{{end}}",
-		hasError, `range can only initialize variables`},
-}
-
-func TestErrors(t *testing.T) {
-	for _, test := range errorTests {
-		t.Run(test.name, func(t *testing.T) {
-			_, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree))
-			if err == nil {
-				t.Fatalf("expected error %q, got nil", test.result)
-			}
-			if !strings.Contains(err.Error(), test.result) {
-				t.Fatalf("error %q does not contain %q", err, test.result)
-			}
-		})
-	}
-}
-
-func TestBlock(t *testing.T) {
-	const (
-		input = `a{{block "inner" .}}bar{{.}}baz{{end}}b`
-		outer = `a{{template "inner" .}}b`
-		inner = `bar{{.}}baz`
-	)
-	treeSet := make(map[string]*Tree)
-	tmpl, err := New("outer").Parse(input, "", "", treeSet, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if g, w := tmpl.Root.String(), outer; g != w {
-		t.Errorf("outer template = %q, want %q", g, w)
-	}
-	inTmpl := treeSet["inner"]
-	if inTmpl == nil {
-		t.Fatal("block did not define template")
-	}
-	if g, w := inTmpl.Root.String(), inner; g != w {
-		t.Errorf("inner template = %q, want %q", g, w)
-	}
-}
-
-func TestLineNum(t *testing.T) {
-	const count = 100
-	text := strings.Repeat("{{printf 1234}}\n", count)
-	tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Check the line numbers. Each line is an action containing a template, followed by text.
-	// That's two nodes per line.
-	nodes := tree.Root.Nodes
-	for i := 0; i < len(nodes); i += 2 {
-		line := 1 + i/2
-		// Action first.
-		action := nodes[i].(*ActionNode)
-		if action.Line != line {
-			t.Fatalf("line %d: action is line %d", line, action.Line)
-		}
-		pipe := action.Pipe
-		if pipe.Line != line {
-			t.Fatalf("line %d: pipe is line %d", line, pipe.Line)
-		}
-	}
-}
-
-func BenchmarkParseLarge(b *testing.B) {
-	text := strings.Repeat("{{1234}}\n", 10000)
-	for i := 0; i < b.N; i++ {
-		_, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
-		if err != nil {
-			b.Fatal(err)
-		}
-	}
-}
-
-var sinkv, sinkl string
-
-func BenchmarkVariableString(b *testing.B) {
-	v := &VariableNode{
-		Ident: []string{"$", "A", "BB", "CCC", "THIS_IS_THE_VARIABLE_BEING_PROCESSED"},
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		sinkv = v.String()
-	}
-	if sinkv == "" {
-		b.Fatal("Benchmark was not run")
-	}
-}
-
-func BenchmarkListString(b *testing.B) {
-	text := `
-{{(printf .Field1.Field2.Field3).Value}}
-{{$x := (printf .Field1.Field2.Field3).Value}}
-{{$y := (printf $x.Field1.Field2.Field3).Value}}
-{{$z := $y.Field1.Field2.Field3}}
-{{if contains $y $z}}
-	{{printf "%q" $y}}
-{{else}}
-	{{printf "%q" $x}}
-{{end}}
-{{with $z.Field1 | contains "boring"}}
-	{{printf "%q" . | printf "%s"}}
-{{else}}
-	{{printf "%d %d %d" 11 11 11}}
-	{{printf "%d %d %s" 22 22 $x.Field1.Field2.Field3 | printf "%s"}}
-	{{printf "%v" (contains $z.Field1.Field2 $y)}}
-{{end}}
-`
-	tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
-	if err != nil {
-		b.Fatal(err)
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		sinkl = tree.Root.String()
-	}
-	if sinkl == "" {
-		b.Fatal("Benchmark was not run")
-	}
-}
diff --git a/internal/backport/text/template/template.go b/internal/backport/text/template/template.go
deleted file mode 100644
index 978b986..0000000
--- a/internal/backport/text/template/template.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
-	"reflect"
-	"sync"
-
-	"golang.org/x/website/internal/backport/text/template/parse"
-)
-
-// common holds the information shared by related templates.
-type common struct {
-	tmpl   map[string]*Template // Map from name to defined templates.
-	muTmpl sync.RWMutex         // protects tmpl
-	option option
-	// We use two maps, one for parsing and one for execution.
-	// This separation makes the API cleaner since it doesn't
-	// expose reflection to the client.
-	muFuncs    sync.RWMutex // protects parseFuncs and execFuncs
-	parseFuncs FuncMap
-	execFuncs  map[string]reflect.Value
-}
-
-// Template is the representation of a parsed template. The *parse.Tree
-// field is exported only for use by html/template and should be treated
-// as unexported by all other clients.
-type Template struct {
-	name string
-	*parse.Tree
-	*common
-	leftDelim  string
-	rightDelim string
-}
-
-// New allocates a new, undefined template with the given name.
-func New(name string) *Template {
-	t := &Template{
-		name: name,
-	}
-	t.init()
-	return t
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
-	return t.name
-}
-
-// New allocates a new, undefined template associated with the given one and with the same
-// delimiters. The association, which is transitive, allows one template to
-// invoke another with a {{template}} action.
-//
-// Because associated templates share underlying data, template construction
-// cannot be done safely in parallel. Once the templates are constructed, they
-// can be executed in parallel.
-func (t *Template) New(name string) *Template {
-	t.init()
-	nt := &Template{
-		name:       name,
-		common:     t.common,
-		leftDelim:  t.leftDelim,
-		rightDelim: t.rightDelim,
-	}
-	return nt
-}
-
-// init guarantees that t has a valid common structure.
-func (t *Template) init() {
-	if t.common == nil {
-		c := new(common)
-		c.tmpl = make(map[string]*Template)
-		c.parseFuncs = make(FuncMap)
-		c.execFuncs = make(map[string]reflect.Value)
-		t.common = c
-	}
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-func (t *Template) Clone() (*Template, error) {
-	nt := t.copy(nil)
-	nt.init()
-	if t.common == nil {
-		return nt, nil
-	}
-	t.muTmpl.RLock()
-	defer t.muTmpl.RUnlock()
-	for k, v := range t.tmpl {
-		if k == t.name {
-			nt.tmpl[t.name] = nt
-			continue
-		}
-		// The associated templates share nt's common structure.
-		tmpl := v.copy(nt.common)
-		nt.tmpl[k] = tmpl
-	}
-	t.muFuncs.RLock()
-	defer t.muFuncs.RUnlock()
-	for k, v := range t.parseFuncs {
-		nt.parseFuncs[k] = v
-	}
-	for k, v := range t.execFuncs {
-		nt.execFuncs[k] = v
-	}
-	return nt, nil
-}
-
-// copy returns a shallow copy of t, with common set to the argument.
-func (t *Template) copy(c *common) *Template {
-	return &Template{
-		name:       t.name,
-		Tree:       t.Tree,
-		common:     c,
-		leftDelim:  t.leftDelim,
-		rightDelim: t.rightDelim,
-	}
-}
-
-// AddParseTree associates the argument parse tree with the template t, giving
-// it the specified name. If the template has not been defined, this tree becomes
-// its definition. If it has been defined and already has that name, the existing
-// definition is replaced; otherwise a new template is created, defined, and returned.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
-	t.muTmpl.Lock()
-	defer t.muTmpl.Unlock()
-	t.init()
-	nt := t
-	if name != t.name {
-		nt = t.New(name)
-	}
-	// Even if nt == t, we need to install it in the common.tmpl map.
-	if t.associate(nt, tree) || nt.Tree == nil {
-		nt.Tree = tree
-	}
-	return nt, nil
-}
-
-// Templates returns a slice of defined templates associated with t.
-func (t *Template) Templates() []*Template {
-	if t.common == nil {
-		return nil
-	}
-	// Return a slice so we don't expose the map.
-	t.muTmpl.RLock()
-	defer t.muTmpl.RUnlock()
-	m := make([]*Template, 0, len(t.tmpl))
-	for _, v := range t.tmpl {
-		m = append(m, v)
-	}
-	return m
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
-	t.init()
-	t.leftDelim = left
-	t.rightDelim = right
-	return t
-}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It must be called before the template is parsed.
-// It panics if a value in the map is not a function with appropriate return
-// type or if the name cannot be used syntactically as a function in a template.
-// It is legal to overwrite elements of the map. The return value is the template,
-// so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
-	t.init()
-	t.muFuncs.Lock()
-	defer t.muFuncs.Unlock()
-	addValueFuncs(t.execFuncs, funcMap)
-	addFuncs(t.parseFuncs, funcMap)
-	return t
-}
-
-// Lookup returns the template with the given name that is associated with t.
-// It returns nil if there is no such template or the template has no definition.
-func (t *Template) Lookup(name string) *Template {
-	if t.common == nil {
-		return nil
-	}
-	t.muTmpl.RLock()
-	defer t.muTmpl.RUnlock()
-	return t.tmpl[name]
-}
-
-// Parse parses text as a template body for t.
-// Named template definitions ({{define ...}} or {{block ...}} statements) in text
-// define additional templates associated with t and are removed from the
-// definition of t itself.
-//
-// Templates can be redefined in successive calls to Parse.
-// A template definition with a body containing only white space and comments
-// is considered empty and will not replace an existing template's body.
-// This allows using Parse to add new named template definitions without
-// overwriting the main template body.
-func (t *Template) Parse(text string) (*Template, error) {
-	t.init()
-	t.muFuncs.RLock()
-	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins())
-	t.muFuncs.RUnlock()
-	if err != nil {
-		return nil, err
-	}
-	// Add the newly parsed trees, including the one for t, into our common structure.
-	for name, tree := range trees {
-		if _, err := t.AddParseTree(name, tree); err != nil {
-			return nil, err
-		}
-	}
-	return t, nil
-}
-
-// associate installs the new template into the group of templates associated
-// with t. The two are already known to share the common structure.
-// The boolean return value reports whether to store this tree as t.Tree.
-func (t *Template) associate(new *Template, tree *parse.Tree) bool {
-	if new.common != t.common {
-		panic("internal error: associate not common")
-	}
-	if old := t.tmpl[new.name]; old != nil && parse.IsEmptyTree(tree.Root) && old.Tree != nil {
-		// If a template by that name exists,
-		// don't replace it with an empty template.
-		return false
-	}
-	t.tmpl[new.name] = new
-	return true
-}
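For context on what the deleted backport provided: the doc comments above (New, Clone, Funcs, Parse) describe the same API that the standard library's text/template now supplies in its place. Below is a minimal sketch, not part of this CL, of the Clone-then-specialize pattern those comments describe; the template names and the "upper" helper are made up for illustration.

    package main

    import (
    	"log"
    	"os"
    	"strings"
    	"text/template"
    )

    func main() {
    	// Funcs must be called before Parse so that "upper" is known to the parser.
    	base := template.Must(template.New("base").Funcs(template.FuncMap{
    		"upper": strings.ToUpper,
    	}).Parse(`{{define "greet"}}Hello, {{upper .}}!{{end}}`))

    	// Clone copies the namespace of associated templates, so definitions
    	// added to the clone do not leak back into base.
    	variant := template.Must(base.Clone())
    	template.Must(variant.Parse(`{{template "greet" .}} (variant)`))

    	// Prints: Hello, GOPHER! (variant)
    	if err := variant.Execute(os.Stdout, "gopher"); err != nil {
    		log.Fatal(err)
    	}
    }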
diff --git a/internal/backport/text/template/testdata/file1.tmpl b/internal/backport/text/template/testdata/file1.tmpl
deleted file mode 100644
index febf9d9..0000000
--- a/internal/backport/text/template/testdata/file1.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{{define "x"}}TEXT{{end}}
-{{define "dotV"}}{{.V}}{{end}}
diff --git a/internal/backport/text/template/testdata/file2.tmpl b/internal/backport/text/template/testdata/file2.tmpl
deleted file mode 100644
index 39bf6fb..0000000
--- a/internal/backport/text/template/testdata/file2.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{{define "dot"}}{{.}}{{end}}
-{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/internal/backport/text/template/testdata/tmpl1.tmpl b/internal/backport/text/template/testdata/tmpl1.tmpl
deleted file mode 100644
index b72b3a3..0000000
--- a/internal/backport/text/template/testdata/tmpl1.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-template1
-{{define "x"}}x{{end}}
-{{template "y"}}
diff --git a/internal/backport/text/template/testdata/tmpl2.tmpl b/internal/backport/text/template/testdata/tmpl2.tmpl
deleted file mode 100644
index 16beba6..0000000
--- a/internal/backport/text/template/testdata/tmpl2.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-template2
-{{define "y"}}y{{end}}
-{{template "x"}}
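The deleted testdata files exercised cross-references between associated templates: one file {{define}}s a name that another invokes with {{template}}. A minimal sketch of the same behavior with the standard library, using inline strings instead of the deleted .tmpl files (the names "root", "t1", and "t2" are made up for illustration; error handling elided for brevity):

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	root := template.New("root")
    	// Each Parse call adds its {{define}}d templates to the shared namespace,
    	// so t1 can invoke "y" even though t2 defines it, and vice versa.
    	template.Must(root.New("t1").Parse(`template1 {{define "x"}}x{{end}}{{template "y"}}`))
    	template.Must(root.New("t2").Parse(`template2 {{define "y"}}y{{end}}{{template "x"}}`))

    	root.ExecuteTemplate(os.Stdout, "t1", nil) // prints "template1 y"
    	os.Stdout.WriteString("\n")
    	root.ExecuteTemplate(os.Stdout, "t2", nil) // prints "template2 x"
    }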
diff --git a/internal/codewalk/codewalk.go b/internal/codewalk/codewalk.go
index a705b59..cdcc632 100644
--- a/internal/codewalk/codewalk.go
+++ b/internal/codewalk/codewalk.go
@@ -16,6 +16,7 @@
 	"encoding/xml"
 	"errors"
 	"fmt"
+	"html/template"
 	"io"
 	"io/fs"
 	"log"
@@ -28,7 +29,6 @@
 	"strings"
 	"unicode/utf8"
 
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/web"
 )
 
diff --git a/internal/history/history.go b/internal/history/history.go
index e25485c..f8bab54 100644
--- a/internal/history/history.go
+++ b/internal/history/history.go
@@ -8,11 +8,10 @@
 import (
 	"fmt"
 	"html"
+	"html/template"
 	"sort"
 	"strings"
 	"time"
-
-	"golang.org/x/website/internal/backport/html/template"
 )
 
 // A Release describes a single Go release.
diff --git a/internal/history/release.go b/internal/history/release.go
index beb32a5..fa8b0b9 100644
--- a/internal/history/release.go
+++ b/internal/history/release.go
@@ -5,7 +5,7 @@
 // Package history stores historical data for the Go project.
 package history
 
-import "golang.org/x/website/internal/backport/html/template"
+import "html/template"
 
 // Releases summarizes the changes between official stable releases of Go.
 // It contains entries for all releases of Go, but releases older than Go 1.9
diff --git a/internal/pkgdoc/dir.go b/internal/pkgdoc/dir.go
index 5e75e16..2d71230 100644
--- a/internal/pkgdoc/dir.go
+++ b/internal/pkgdoc/dir.go
@@ -8,10 +8,10 @@
 
 import (
 	"bytes"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
+	"go/ast"
+	"go/doc"
+	"go/parser"
+	"go/token"
 	"io/fs"
 	"log"
 	"path"
diff --git a/internal/pkgdoc/dir_test.go b/internal/pkgdoc/dir_test.go
index 09e0cfd..1b110ff 100644
--- a/internal/pkgdoc/dir_test.go
+++ b/internal/pkgdoc/dir_test.go
@@ -5,7 +5,7 @@
 package pkgdoc
 
 import (
-	"golang.org/x/website/internal/backport/go/token"
+	"go/token"
 	"os"
 	"runtime"
 	"sort"
diff --git a/internal/pkgdoc/doc.go b/internal/pkgdoc/doc.go
index 4481fd6..e915b70 100644
--- a/internal/pkgdoc/doc.go
+++ b/internal/pkgdoc/doc.go
@@ -11,6 +11,10 @@
 
 import (
 	"bytes"
+	"go/ast"
+	"go/build"
+	"go/doc"
+	"go/token"
 	"io"
 	"io/fs"
 	"io/ioutil"
@@ -26,10 +30,6 @@
 	"unicode/utf8"
 
 	"golang.org/x/website/internal/api"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/build"
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/token"
 	"golang.org/x/website/internal/web"
 )
 
diff --git a/internal/pkgdoc/funcs.go b/internal/pkgdoc/funcs.go
index a18b824..4ba74f7 100644
--- a/internal/pkgdoc/funcs.go
+++ b/internal/pkgdoc/funcs.go
@@ -8,6 +8,12 @@
 	"bufio"
 	"bytes"
 	"fmt"
+	"go/ast"
+	"go/doc"
+	"go/format"
+	"go/printer"
+	"go/token"
+	"html/template"
 	"io"
 	"log"
 	"path"
@@ -17,12 +23,6 @@
 	"unicode/utf8"
 
 	"golang.org/x/website/internal/api"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/format"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/texthtml"
 )
 
@@ -155,7 +155,10 @@
 
 // Comment formats the given documentation comment as HTML.
 func (p *Page) Comment(comment string) template.HTML {
-	return template.HTML(p.PDoc.HTML(comment))
+	// TODO: After Go 1.20 is out, this can be simplified to:
+	//	return template.HTML(p.PDoc.HTML(comment))
+	// and the go118.go and go119.go files can be deleted.
+	return template.HTML(docPackageHTML(p.PDoc, comment))
 }
 
 // sanitize sanitizes the argument src by replacing newlines with
diff --git a/internal/pkgdoc/go118.go b/internal/pkgdoc/go118.go
new file mode 100644
index 0000000..94640ef
--- /dev/null
+++ b/internal/pkgdoc/go118.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package pkgdoc
+
+import (
+	"bytes"
+	"go/doc"
+)
+
+func docPackageHTML(_ *doc.Package, text string) []byte {
+	var buf bytes.Buffer
+	doc.ToHTML(&buf, text, nil)
+	return buf.Bytes()
+}
diff --git a/internal/pkgdoc/go119.go b/internal/pkgdoc/go119.go
new file mode 100644
index 0000000..dd604f4
--- /dev/null
+++ b/internal/pkgdoc/go119.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package pkgdoc
+
+import "go/doc"
+
+var docPackageHTML = (*doc.Package).HTML
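The go118.go/go119.go pair selects an implementation of docPackageHTML with build constraints, and go119.go relies on a method expression to turn (*doc.Package).HTML into a plain function value with the same signature as the Go 1.18 fallback. Below is a minimal, self-contained sketch of that method-expression trick; the Doc type and the renderModern/renderLegacy names are hypothetical and not part of this CL.

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Doc is a hypothetical stand-in for go/doc's Package type.
    type Doc struct{}

    // HTML plays the role of (*doc.Package).HTML in go119.go.
    func (d *Doc) HTML(text string) []byte {
    	return []byte("<p>" + strings.TrimSpace(text) + "</p>")
    }

    // A method expression like (*Doc).HTML is an ordinary function value whose
    // first parameter is the receiver, so it can share a variable type with a
    // hand-written fallback of the same signature.
    var renderModern func(*Doc, string) []byte = (*Doc).HTML
    var renderLegacy func(*Doc, string) []byte = func(d *Doc, text string) []byte {
    	return []byte(text)
    }

    func main() {
    	d := new(Doc)
    	fmt.Println(string(renderModern(d, "  hello  "))) // <p>hello</p>
    	fmt.Println(string(renderLegacy(d, "  hello  "))) // "  hello  "
    }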
diff --git a/internal/pkgdoc/html_test.go b/internal/pkgdoc/html_test.go
index 1a98420..0dd09b0 100644
--- a/internal/pkgdoc/html_test.go
+++ b/internal/pkgdoc/html_test.go
@@ -7,12 +7,11 @@
 import (
 	"bytes"
 	"fmt"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/token"
+	"go/parser"
+	"go/token"
+	"html/template"
 	"strings"
 	"testing"
-
-	"golang.org/x/website/internal/backport/html/template"
 )
 
 func TestSrcPosLink(t *testing.T) {
diff --git a/internal/redirect/redirect.go b/internal/redirect/redirect.go
index d701dea..f471547 100644
--- a/internal/redirect/redirect.go
+++ b/internal/redirect/redirect.go
@@ -11,6 +11,7 @@
 	"context"
 	_ "embed"
 	"fmt"
+	"html/template"
 	"net/http"
 	"regexp"
 	"strconv"
@@ -19,7 +20,6 @@
 	"time"
 
 	"golang.org/x/net/context/ctxhttp"
-	"golang.org/x/website/internal/backport/html/template"
 )
 
 // Register registers HTTP handlers that redirect old godoc paths to their new
diff --git a/internal/short/short.go b/internal/short/short.go
index 371986e..1b6588e 100644
--- a/internal/short/short.go
+++ b/internal/short/short.go
@@ -13,6 +13,7 @@
 	_ "embed"
 	"errors"
 	"fmt"
+	"html/template"
 	"log"
 	"net/http"
 	"net/url"
@@ -20,7 +21,6 @@
 	"strings"
 
 	"cloud.google.com/go/datastore"
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/memcache"
 )
 
diff --git a/internal/texthtml/ast.go b/internal/texthtml/ast.go
index 7a9bb32..c7e0560 100644
--- a/internal/texthtml/ast.go
+++ b/internal/texthtml/ast.go
@@ -7,9 +7,9 @@
 import (
 	"bytes"
 	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/doc"
-	"golang.org/x/website/internal/backport/go/token"
+	"go/ast"
+	"go/doc"
+	"go/token"
 	"strconv"
 	"unicode"
 	"unicode/utf8"
diff --git a/internal/texthtml/texthtml.go b/internal/texthtml/texthtml.go
index 90cc448..7f772bc 100644
--- a/internal/texthtml/texthtml.go
+++ b/internal/texthtml/texthtml.go
@@ -8,9 +8,9 @@
 import (
 	"bytes"
 	"fmt"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/scanner"
-	"golang.org/x/website/internal/backport/go/token"
+	"go/ast"
+	"go/scanner"
+	"go/token"
 	"io"
 	"regexp"
 	"text/template"
diff --git a/internal/tmplfunc/func.go b/internal/tmplfunc/func.go
index 7e48e25..dbec717 100644
--- a/internal/tmplfunc/func.go
+++ b/internal/tmplfunc/func.go
@@ -10,11 +10,10 @@
 	"reflect"
 	"regexp"
 	"strings"
+	"text/template/parse"
 
-	"golang.org/x/website/internal/backport/text/template/parse"
-
-	htmltemplate "golang.org/x/website/internal/backport/html/template"
-	texttemplate "golang.org/x/website/internal/backport/text/template"
+	htmltemplate "html/template"
+	texttemplate "text/template"
 )
 
 var validNameRE = regexp.MustCompile(`\A[_\pL][_\pL\p{Nd}]*\z`)
diff --git a/internal/tmplfunc/tmpl.go b/internal/tmplfunc/tmpl.go
index 63ab5a2..3bca424 100644
--- a/internal/tmplfunc/tmpl.go
+++ b/internal/tmplfunc/tmpl.go
@@ -86,8 +86,8 @@
 	"path"
 	"path/filepath"
 
-	htmltemplate "golang.org/x/website/internal/backport/html/template"
-	texttemplate "golang.org/x/website/internal/backport/text/template"
+	htmltemplate "html/template"
+	texttemplate "text/template"
 )
 
 // A Template is a *template.Template, where template refers to either
diff --git a/internal/tmplfunc/tmplfunc_test.go b/internal/tmplfunc/tmplfunc_test.go
index baa6fd9..4b27819 100644
--- a/internal/tmplfunc/tmplfunc_test.go
+++ b/internal/tmplfunc/tmplfunc_test.go
@@ -10,8 +10,8 @@
 	"strings"
 	"testing"
 
-	htmltemplate "golang.org/x/website/internal/backport/html/template"
-	texttemplate "golang.org/x/website/internal/backport/text/template"
+	htmltemplate "html/template"
+	texttemplate "text/template"
 )
 
 var tests = []struct {
diff --git a/internal/tour/fmt.go b/internal/tour/fmt.go
index e126fb4..3010895 100644
--- a/internal/tour/fmt.go
+++ b/internal/tour/fmt.go
@@ -7,10 +7,10 @@
 import (
 	"bytes"
 	"encoding/json"
-	"golang.org/x/website/internal/backport/go/ast"
-	"golang.org/x/website/internal/backport/go/parser"
-	"golang.org/x/website/internal/backport/go/printer"
-	"golang.org/x/website/internal/backport/go/token"
+	"go/ast"
+	"go/parser"
+	"go/printer"
+	"go/token"
 	"net/http"
 
 	"golang.org/x/tools/imports"
diff --git a/internal/web/code.go b/internal/web/code.go
index 6dc435f..03c0de1 100644
--- a/internal/web/code.go
+++ b/internal/web/code.go
@@ -8,11 +8,11 @@
 	"bytes"
 	"fmt"
 	"html"
+	"html/template"
 	"log"
 	"regexp"
 	"strings"
 
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/texthtml"
 )
 
diff --git a/internal/web/render.go b/internal/web/render.go
index 9ef934f..16c3822 100644
--- a/internal/web/render.go
+++ b/internal/web/render.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"fmt"
+	"html/template"
 	"io/fs"
 	"net/http"
 	"net/url"
@@ -22,7 +23,6 @@
 	"github.com/yuin/goldmark/renderer/html"
 	"github.com/yuin/goldmark/text"
 	"github.com/yuin/goldmark/util"
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/tmplfunc"
 )
 
diff --git a/internal/web/site.go b/internal/web/site.go
index f1b4abc..1b1a72f 100644
--- a/internal/web/site.go
+++ b/internal/web/site.go
@@ -302,6 +302,7 @@
 	"errors"
 	"fmt"
 	"html"
+	"html/template"
 	"io"
 	"io/fs"
 	"log"
@@ -313,7 +314,6 @@
 	"sync"
 
 	"github.com/evanw/esbuild/pkg/api"
-	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/spec"
 	"golang.org/x/website/internal/texthtml"
 )
diff --git a/internal/web/tmpl.go b/internal/web/tmpl.go
index 19f6a7a..bfd1940 100644
--- a/internal/web/tmpl.go
+++ b/internal/web/tmpl.go
@@ -6,6 +6,7 @@
 
 import (
 	"fmt"
+	"html/template"
 	"io/fs"
 	"path"
 	"reflect"
@@ -13,7 +14,6 @@
 	"strings"
 
 	"golang.org/x/tools/present"
-	"golang.org/x/website/internal/backport/html/template"
 	"gopkg.in/yaml.v3"
 )