internal/backport: add backports of current Go dev template, fs packages

This will let us use these APIs on App Engine before Go 1.16 is available.
The only thing we can't backport from Go 1.16 is embed, so that has to
be build-tagged off still, but we don't really need embed on App Engine.

This will let us move golang.org off App Engine Flex onto regular App Engine,
which will greatly simplify deployment and make it possible to do automatic
deployments.

When App Engine adds Go 1.16 support, most of this can go away.
(The template packages will need to stay, as go.dev will need some
Go 1.17 bug fixes from them.)

Change-Id: I16ee64862a43f591a31fae04caf4caa0fdb5af62
Reviewed-on: https://go-review.googlesource.com/c/website/+/323890
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
diff --git a/internal/backport/README.md b/internal/backport/README.md
new file mode 100644
index 0000000..92c80ad
--- /dev/null
+++ b/internal/backport/README.md
@@ -0,0 +1,10 @@
+This directory contains backports of upcoming packages from the
+standard library that are not yet available in the Go version
+supplied by the latest Google App Engine runtime.
+
+The procedure for adding a backport is fairly manual: copy the files
+into a new directory and then do a global search-and-replace to point
+the import paths at the backport.
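+
+For example, an import of the upstream package
+
+    import "io/fs"
+
+in a copied file becomes
+
+    import "golang.org/x/website/internal/backport/io/fs"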
+
+As new versions of Go land on Google App Engine, this directory should
+be pruned as much as possible.
diff --git a/internal/backport/archive/zip/example_test.go b/internal/backport/archive/zip/example_test.go
new file mode 100644
index 0000000..1eed304
--- /dev/null
+++ b/internal/backport/archive/zip/example_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip_test
+
+import (
+	"archive/zip"
+	"bytes"
+	"compress/flate"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+func ExampleWriter() {
+	// Create a buffer to write our archive to.
+	buf := new(bytes.Buffer)
+
+	// Create a new zip archive.
+	w := zip.NewWriter(buf)
+
+	// Add some files to the archive.
+	var files = []struct {
+		Name, Body string
+	}{
+		{"readme.txt", "This archive contains some text files."},
+		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+		{"todo.txt", "Get animal handling licence.\nWrite more examples."},
+	}
+	for _, file := range files {
+		f, err := w.Create(file.Name)
+		if err != nil {
+			log.Fatal(err)
+		}
+		_, err = f.Write([]byte(file.Body))
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	// Make sure to check the error on Close.
+	err := w.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func ExampleReader() {
+	// Open a zip archive for reading.
+	r, err := zip.OpenReader("testdata/readme.zip")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer r.Close()
+
+	// Iterate through the files in the archive,
+	// printing some of their contents.
+	for _, f := range r.File {
+		fmt.Printf("Contents of %s:\n", f.Name)
+		rc, err := f.Open()
+		if err != nil {
+			log.Fatal(err)
+		}
+		_, err = io.CopyN(os.Stdout, rc, 68)
+		if err != nil {
+			log.Fatal(err)
+		}
+		rc.Close()
+		fmt.Println()
+	}
+	// Output:
+	// Contents of README:
+	// This is the source code repository for the Go programming language.
+}
+
+func ExampleWriter_RegisterCompressor() {
+	// Override the default Deflate compressor with a higher compression level.
+
+	// Create a buffer to write our archive to.
+	buf := new(bytes.Buffer)
+
+	// Create a new zip archive.
+	w := zip.NewWriter(buf)
+
+	// Register a custom Deflate compressor.
+	w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
+		return flate.NewWriter(out, flate.BestCompression)
+	})
+
+	// Proceed to add files to w.
+}
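+
+// ExampleReader_Open is an added sketch (not part of the upstream example
+// file); it assumes a zip package whose Reader implements fs.FS, i.e.
+// Go 1.16+ archive/zip or this backport.
+func ExampleReader_Open() {
+	// Open a zip archive for reading.
+	r, err := zip.OpenReader("testdata/readme.zip")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer r.Close()
+
+	// Open a single file by its slash-separated path, following
+	// fs.FS.Open semantics, and print the start of its contents.
+	f, err := r.Open("README")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+	if _, err := io.CopyN(os.Stdout, f, 68); err != nil {
+		log.Fatal(err)
+	}
+}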
diff --git a/internal/backport/archive/zip/reader.go b/internal/backport/archive/zip/reader.go
new file mode 100644
index 0000000..e99ed1c
--- /dev/null
+++ b/internal/backport/archive/zip/reader.go
@@ -0,0 +1,872 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"hash"
+	"hash/crc32"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+var (
+	ErrFormat    = errors.New("zip: not a valid zip file")
+	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
+	ErrChecksum  = errors.New("zip: checksum error")
+)
+
+// A Reader serves content from a ZIP archive.
+type Reader struct {
+	r             io.ReaderAt
+	File          []*File
+	Comment       string
+	decompressors map[uint16]Decompressor
+
+	// fileList is a list of files sorted by ename,
+	// for use by the Open method.
+	fileListOnce sync.Once
+	fileList     []fileListEntry
+}
+
+// A ReadCloser is a Reader that must be closed when no longer needed.
+type ReadCloser struct {
+	f *os.File
+	Reader
+}
+
+// A File is a single file in a ZIP archive.
+// The file information is in the embedded FileHeader.
+// The file content can be accessed by calling Open.
+type File struct {
+	FileHeader
+	zip          *Reader
+	zipr         io.ReaderAt
+	headerOffset int64
+	zip64        bool  // zip64 extended information extra field presence
+	descErr      error // error reading the data descriptor during init
+}
+
+// OpenReader will open the Zip file specified by name and return a ReadCloser.
+func OpenReader(name string) (*ReadCloser, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	fi, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	r := new(ReadCloser)
+	if err := r.init(f, fi.Size()); err != nil {
+		f.Close()
+		return nil, err
+	}
+	r.f = f
+	return r, nil
+}
+
+// NewReader returns a new Reader reading from r, which is assumed to
+// have the given size in bytes.
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
+	if size < 0 {
+		return nil, errors.New("zip: size cannot be negative")
+	}
+	zr := new(Reader)
+	if err := zr.init(r, size); err != nil {
+		return nil, err
+	}
+	return zr, nil
+}
+
+func (z *Reader) init(r io.ReaderAt, size int64) error {
+	end, err := readDirectoryEnd(r, size)
+	if err != nil {
+		return err
+	}
+	z.r = r
+	z.File = make([]*File, 0, end.directoryRecords)
+	z.Comment = end.comment
+	rs := io.NewSectionReader(r, 0, size)
+	if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
+		return err
+	}
+	buf := bufio.NewReader(rs)
+
+	// The count of files inside a zip is truncated to fit in a uint16.
+	// Gloss over this by reading headers until we encounter
+	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
+	// the file count modulo 65536 is incorrect.
+	for {
+		f := &File{zip: z, zipr: r}
+		err = readDirectoryHeader(f, buf)
+		if err == ErrFormat || err == io.ErrUnexpectedEOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		f.readDataDescriptor()
+		z.File = append(z.File, f)
+	}
+	if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
+		// Return the readDirectoryHeader error if we read
+		// the wrong number of directory entries.
+		return err
+	}
+	return nil
+}
+
+// RegisterDecompressor registers or overrides a custom decompressor for a
+// specific method ID. If a decompressor for a given method is not found,
+// Reader will default to looking up the decompressor at the package level.
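+//
+// For example (an added sketch, not upstream documentation), forcing the
+// standard flate decompressor for the Deflate method looks like:
+//
+//	zr.RegisterDecompressor(zip.Deflate, func(r io.Reader) io.ReadCloser {
+//		return flate.NewReader(r)
+//	})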
+func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
+	if z.decompressors == nil {
+		z.decompressors = make(map[uint16]Decompressor)
+	}
+	z.decompressors[method] = dcomp
+}
+
+func (z *Reader) decompressor(method uint16) Decompressor {
+	dcomp := z.decompressors[method]
+	if dcomp == nil {
+		dcomp = decompressor(method)
+	}
+	return dcomp
+}
+
+// Close closes the Zip file, rendering it unusable for I/O.
+func (rc *ReadCloser) Close() error {
+	return rc.f.Close()
+}
+
+// DataOffset returns the offset of the file's possibly-compressed
+// data, relative to the beginning of the zip file.
+//
+// Most callers should instead use Open, which transparently
+// decompresses data and verifies checksums.
+func (f *File) DataOffset() (offset int64, err error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return
+	}
+	return f.headerOffset + bodyOffset, nil
+}
+
+// Open returns a ReadCloser that provides access to the File's contents.
+// Multiple files may be read concurrently.
+func (f *File) Open() (io.ReadCloser, error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return nil, err
+	}
+	size := int64(f.CompressedSize64)
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
+	dcomp := f.zip.decompressor(f.Method)
+	if dcomp == nil {
+		return nil, ErrAlgorithm
+	}
+	var rc io.ReadCloser = dcomp(r)
+	rc = &checksumReader{
+		rc:   rc,
+		hash: crc32.NewIEEE(),
+		f:    f,
+	}
+	return rc, nil
+}
+
+// OpenRaw returns a Reader that provides access to the File's contents without
+// decompression.
+func (f *File) OpenRaw() (io.Reader, error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return nil, err
+	}
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64))
+	return r, nil
+}
+
+func (f *File) readDataDescriptor() {
+	if !f.hasDataDescriptor() {
+		return
+	}
+
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		f.descErr = err
+		return
+	}
+
+	// In section 4.3.9.2 of the spec: "However ZIP64 format MAY be used
+	// regardless of the size of a file.  When extracting, if the zip64
+	// extended information extra field is present for the file the
+	// compressed and uncompressed sizes will be 8 byte values."
+	//
+	// Historically, this package has used the compressed and uncompressed
+	// sizes from the central directory to determine if the package is
+	// zip64.
+	//
+	// For this case we allow either the extra field or sizes to determine
+	// the data descriptor length.
+	zip64 := f.zip64 || f.isZip64()
+	n := int64(dataDescriptorLen)
+	if zip64 {
+		n = dataDescriptor64Len
+	}
+	size := int64(f.CompressedSize64)
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, n)
+	dd, err := readDataDescriptor(r, zip64)
+	if err != nil {
+		f.descErr = err
+		return
+	}
+	f.CRC32 = dd.crc32
+}
+
+type checksumReader struct {
+	rc    io.ReadCloser
+	hash  hash.Hash32
+	nread uint64 // number of bytes read so far
+	f     *File
+	err   error // sticky error
+}
+
+func (r *checksumReader) Stat() (fs.FileInfo, error) {
+	return headerFileInfo{&r.f.FileHeader}, nil
+}
+
+func (r *checksumReader) Read(b []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	n, err = r.rc.Read(b)
+	r.hash.Write(b[:n])
+	r.nread += uint64(n)
+	if err == nil {
+		return
+	}
+	if err == io.EOF {
+		if r.nread != r.f.UncompressedSize64 {
+			return 0, io.ErrUnexpectedEOF
+		}
+		if r.f.hasDataDescriptor() {
+			if r.f.descErr != nil {
+				if r.f.descErr == io.EOF {
+					err = io.ErrUnexpectedEOF
+				} else {
+					err = r.f.descErr
+				}
+			} else if r.hash.Sum32() != r.f.CRC32 {
+				err = ErrChecksum
+			}
+		} else {
+			// If there's not a data descriptor, we still compare
+			// the CRC32 of what we've read against the file header
+			// or TOC's CRC32, if it seems like it was set.
+			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
+				err = ErrChecksum
+			}
+		}
+	}
+	r.err = err
+	return
+}
+
+func (r *checksumReader) Close() error { return r.rc.Close() }
+
+// findBodyOffset does the minimum work to verify the file has a header
+// and returns the file body offset.
+func (f *File) findBodyOffset() (int64, error) {
+	var buf [fileHeaderLen]byte
+	if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
+		return 0, err
+	}
+	b := readBuf(buf[:])
+	if sig := b.uint32(); sig != fileHeaderSignature {
+		return 0, ErrFormat
+	}
+	b = b[22:] // skip over most of the header
+	filenameLen := int(b.uint16())
+	extraLen := int(b.uint16())
+	return int64(fileHeaderLen + filenameLen + extraLen), nil
+}
+
+// readDirectoryHeader attempts to read a directory header from r.
+// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
+// and ErrFormat if it doesn't find a valid header signature.
+func readDirectoryHeader(f *File, r io.Reader) error {
+	var buf [directoryHeaderLen]byte
+	if _, err := io.ReadFull(r, buf[:]); err != nil {
+		return err
+	}
+	b := readBuf(buf[:])
+	if sig := b.uint32(); sig != directoryHeaderSignature {
+		return ErrFormat
+	}
+	f.CreatorVersion = b.uint16()
+	f.ReaderVersion = b.uint16()
+	f.Flags = b.uint16()
+	f.Method = b.uint16()
+	f.ModifiedTime = b.uint16()
+	f.ModifiedDate = b.uint16()
+	f.CRC32 = b.uint32()
+	f.CompressedSize = b.uint32()
+	f.UncompressedSize = b.uint32()
+	f.CompressedSize64 = uint64(f.CompressedSize)
+	f.UncompressedSize64 = uint64(f.UncompressedSize)
+	filenameLen := int(b.uint16())
+	extraLen := int(b.uint16())
+	commentLen := int(b.uint16())
+	b = b[4:] // skipped start disk number and internal attributes (2x uint16)
+	f.ExternalAttrs = b.uint32()
+	f.headerOffset = int64(b.uint32())
+	d := make([]byte, filenameLen+extraLen+commentLen)
+	if _, err := io.ReadFull(r, d); err != nil {
+		return err
+	}
+	f.Name = string(d[:filenameLen])
+	f.Extra = d[filenameLen : filenameLen+extraLen]
+	f.Comment = string(d[filenameLen+extraLen:])
+
+	// Determine the character encoding.
+	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
+	switch {
+	case !utf8Valid1 || !utf8Valid2:
+		// Name and Comment definitely not UTF-8.
+		f.NonUTF8 = true
+	case !utf8Require1 && !utf8Require2:
+		// Name and Comment use only single-byte runes that overlap with UTF-8.
+		f.NonUTF8 = false
+	default:
+		// Might be UTF-8, might be some other encoding; preserve existing flag.
+		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
+		// Since it is impossible to always distinguish valid UTF-8 from some
+		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
+		f.NonUTF8 = f.Flags&0x800 == 0
+	}
+
+	needUSize := f.UncompressedSize == ^uint32(0)
+	needCSize := f.CompressedSize == ^uint32(0)
+	needHeaderOffset := f.headerOffset == int64(^uint32(0))
+
+	// Best effort to find what we need.
+	// Other zip authors might not even follow the basic format,
+	// and we'll just ignore the Extra content in that case.
+	var modified time.Time
+parseExtras:
+	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
+		fieldTag := extra.uint16()
+		fieldSize := int(extra.uint16())
+		if len(extra) < fieldSize {
+			break
+		}
+		fieldBuf := extra.sub(fieldSize)
+
+		switch fieldTag {
+		case zip64ExtraID:
+			f.zip64 = true
+
+			// update directory values from the zip64 extra block.
+			// They should only be consulted if the sizes read earlier
+			// are maxed out.
+			// See golang.org/issue/13367.
+			if needUSize {
+				needUSize = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.UncompressedSize64 = fieldBuf.uint64()
+			}
+			if needCSize {
+				needCSize = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.CompressedSize64 = fieldBuf.uint64()
+			}
+			if needHeaderOffset {
+				needHeaderOffset = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.headerOffset = int64(fieldBuf.uint64())
+			}
+		case ntfsExtraID:
+			if len(fieldBuf) < 4 {
+				continue parseExtras
+			}
+			fieldBuf.uint32()        // reserved (ignored)
+			for len(fieldBuf) >= 4 { // need at least tag and size
+				attrTag := fieldBuf.uint16()
+				attrSize := int(fieldBuf.uint16())
+				if len(fieldBuf) < attrSize {
+					continue parseExtras
+				}
+				attrBuf := fieldBuf.sub(attrSize)
+				if attrTag != 1 || attrSize != 24 {
+					continue // Ignore irrelevant attributes
+				}
+
+				const ticksPerSecond = 1e7    // Windows timestamp resolution
+				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
+				secs := int64(ts / ticksPerSecond)
+				nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
+				modified = time.Unix(epoch.Unix()+secs, nsecs)
+			}
+		case unixExtraID, infoZipUnixExtraID:
+			if len(fieldBuf) < 8 {
+				continue parseExtras
+			}
+			fieldBuf.uint32()              // AcTime (ignored)
+			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+			modified = time.Unix(ts, 0)
+		case extTimeExtraID:
+			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
+				continue parseExtras
+			}
+			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+			modified = time.Unix(ts, 0)
+		}
+	}
+
+	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
+	f.Modified = msdosModified
+	if !modified.IsZero() {
+		f.Modified = modified.UTC()
+
+		// If legacy MS-DOS timestamps are set, we can use the delta between
+		// the legacy and extended versions to estimate timezone offset.
+		//
+		// A non-UTC timezone is always used (even if offset is zero).
+		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
+		// determining whether extended timestamps are present.
+		// This is necessary for users that need to do additional time
+		// calculations when dealing with legacy ZIP formats.
+		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
+			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
+		}
+	}
+
+	// Assume that uncompressed size 2³²-1 could plausibly happen in
+	// an old zip32 file that was sharding inputs into the largest chunks
+	// possible (or is just malicious; search the web for 42.zip).
+	// If needUSize is true still, it means we didn't see a zip64 extension.
+	// As long as the compressed size is not also 2³²-1 (implausible)
+	// and the header is not also 2³²-1 (equally implausible),
+	// accept the uncompressed size 2³²-1 as valid.
+	// If nothing else, this keeps archive/zip working with 42.zip.
+	_ = needUSize
+
+	if needCSize || needHeaderOffset {
+		return ErrFormat
+	}
+
+	return nil
+}
+
+func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) {
+	// Create enough space for the largest possible size
+	var buf [dataDescriptor64Len]byte
+
+	// The spec says: "Although not originally assigned a
+	// signature, the value 0x08074b50 has commonly been adopted
+	// as a signature value for the data descriptor record.
+	// Implementers should be aware that ZIP files may be
+	// encountered with or without this signature marking data
+	// descriptors and should account for either case when reading
+	// ZIP files to ensure compatibility."
+	//
+	// First read just those 4 bytes to see if the signature exists.
+	if _, err := io.ReadFull(r, buf[:4]); err != nil {
+		return nil, err
+	}
+	off := 0
+	maybeSig := readBuf(buf[:4])
+	if maybeSig.uint32() != dataDescriptorSignature {
+		// No data descriptor signature. Keep these four
+		// bytes.
+		off += 4
+	}
+
+	end := dataDescriptorLen - 4
+	if zip64 {
+		end = dataDescriptor64Len - 4
+	}
+	if _, err := io.ReadFull(r, buf[off:end]); err != nil {
+		return nil, err
+	}
+	b := readBuf(buf[:end])
+
+	out := &dataDescriptor{
+		crc32: b.uint32(),
+	}
+
+	if zip64 {
+		out.compressedSize = b.uint64()
+		out.uncompressedSize = b.uint64()
+	} else {
+		out.compressedSize = uint64(b.uint32())
+		out.uncompressedSize = uint64(b.uint32())
+	}
+	return out, nil
+}
+
+func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
+	// look for directoryEndSignature in the last 1k, then in the last 65k
+	var buf []byte
+	var directoryEndOffset int64
+	for i, bLen := range []int64{1024, 65 * 1024} {
+		if bLen > size {
+			bLen = size
+		}
+		buf = make([]byte, int(bLen))
+		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
+			return nil, err
+		}
+		if p := findSignatureInBlock(buf); p >= 0 {
+			buf = buf[p:]
+			directoryEndOffset = size - bLen + int64(p)
+			break
+		}
+		if i == 1 || bLen == size {
+			return nil, ErrFormat
+		}
+	}
+
+	// read header into struct
+	b := readBuf(buf[4:]) // skip signature
+	d := &directoryEnd{
+		diskNbr:            uint32(b.uint16()),
+		dirDiskNbr:         uint32(b.uint16()),
+		dirRecordsThisDisk: uint64(b.uint16()),
+		directoryRecords:   uint64(b.uint16()),
+		directorySize:      uint64(b.uint32()),
+		directoryOffset:    uint64(b.uint32()),
+		commentLen:         b.uint16(),
+	}
+	l := int(d.commentLen)
+	if l > len(b) {
+		return nil, errors.New("zip: invalid comment length")
+	}
+	d.comment = string(b[:l])
+
+	// These values mean that the file can be a zip64 file
+	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
+		p, err := findDirectory64End(r, directoryEndOffset)
+		if err == nil && p >= 0 {
+			err = readDirectory64End(r, p, d)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Make sure directoryOffset points to somewhere in our file.
+	if o := int64(d.directoryOffset); o < 0 || o >= size {
+		return nil, ErrFormat
+	}
+	return d, nil
+}
+
+// findDirectory64End tries to read the zip64 locator just before the
+// directory end and returns the offset of the zip64 directory end if
+// found.
+func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
+	locOffset := directoryEndOffset - directory64LocLen
+	if locOffset < 0 {
+		return -1, nil // no need to look for a header outside the file
+	}
+	buf := make([]byte, directory64LocLen)
+	if _, err := r.ReadAt(buf, locOffset); err != nil {
+		return -1, err
+	}
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64LocSignature {
+		return -1, nil
+	}
+	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
+		return -1, nil // the file is not a valid zip64-file
+	}
+	p := b.uint64()      // relative offset of the zip64 end of central directory record
+	if b.uint32() != 1 { // total number of disks
+		return -1, nil // the file is not a valid zip64-file
+	}
+	return int64(p), nil
+}
+
+// readDirectory64End reads the zip64 directory end and updates the
+// directory end with the zip64 directory end values.
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
+	buf := make([]byte, directory64EndLen)
+	if _, err := r.ReadAt(buf, offset); err != nil {
+		return err
+	}
+
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		return ErrFormat
+	}
+
+	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
+	d.diskNbr = b.uint32()            // number of this disk
+	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
+	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
+	d.directoryRecords = b.uint64()   // total number of entries in the central directory
+	d.directorySize = b.uint64()      // size of the central directory
+	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number
+
+	return nil
+}
+
+func findSignatureInBlock(b []byte) int {
+	for i := len(b) - directoryEndLen; i >= 0; i-- {
+		// defined from directoryEndSignature in struct.go
+		if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
+			// n is length of comment
+			n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
+			if n+directoryEndLen+i <= len(b) {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+type readBuf []byte
+
+func (b *readBuf) uint8() uint8 {
+	v := (*b)[0]
+	*b = (*b)[1:]
+	return v
+}
+
+func (b *readBuf) uint16() uint16 {
+	v := binary.LittleEndian.Uint16(*b)
+	*b = (*b)[2:]
+	return v
+}
+
+func (b *readBuf) uint32() uint32 {
+	v := binary.LittleEndian.Uint32(*b)
+	*b = (*b)[4:]
+	return v
+}
+
+func (b *readBuf) uint64() uint64 {
+	v := binary.LittleEndian.Uint64(*b)
+	*b = (*b)[8:]
+	return v
+}
+
+func (b *readBuf) sub(n int) readBuf {
+	b2 := (*b)[:n]
+	*b = (*b)[n:]
+	return b2
+}
+
+// A fileListEntry is a File and its ename.
+// If file == nil, the fileListEntry describes a directory without metadata.
+type fileListEntry struct {
+	name  string
+	file  *File
+	isDir bool
+}
+
+type fileInfoDirEntry interface {
+	fs.FileInfo
+	fs.DirEntry
+}
+
+func (e *fileListEntry) stat() fileInfoDirEntry {
+	if !e.isDir {
+		return headerFileInfo{&e.file.FileHeader}
+	}
+	return e
+}
+
+// Only used for directories.
+func (f *fileListEntry) Name() string      { _, elem, _ := split(f.name); return elem }
+func (f *fileListEntry) Size() int64       { return 0 }
+func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
+func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
+func (f *fileListEntry) IsDir() bool       { return true }
+func (f *fileListEntry) Sys() interface{}  { return nil }
+
+func (f *fileListEntry) ModTime() time.Time {
+	if f.file == nil {
+		return time.Time{}
+	}
+	return f.file.FileHeader.Modified.UTC()
+}
+
+func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
+
+// toValidName coerces name to be a valid name for fs.FS.Open.
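+// For example (illustrative): toValidName(`..\x\y.txt`) == "x/y.txt".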
+func toValidName(name string) string {
+	name = strings.ReplaceAll(name, `\`, `/`)
+	p := path.Clean(name)
+	if strings.HasPrefix(p, "/") {
+		p = p[len("/"):]
+	}
+	for strings.HasPrefix(p, "../") {
+		p = p[len("../"):]
+	}
+	return p
+}
+
+func (r *Reader) initFileList() {
+	r.fileListOnce.Do(func() {
+		dirs := make(map[string]bool)
+		knownDirs := make(map[string]bool)
+		for _, file := range r.File {
+			isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
+			name := toValidName(file.Name)
+			for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
+				dirs[dir] = true
+			}
+			entry := fileListEntry{
+				name:  name,
+				file:  file,
+				isDir: isDir,
+			}
+			r.fileList = append(r.fileList, entry)
+			if isDir {
+				knownDirs[name] = true
+			}
+		}
+		for dir := range dirs {
+			if !knownDirs[dir] {
+				entry := fileListEntry{
+					name:  dir,
+					file:  nil,
+					isDir: true,
+				}
+				r.fileList = append(r.fileList, entry)
+			}
+		}
+
+		sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
+	})
+}
+
+func fileEntryLess(x, y string) bool {
+	xdir, xelem, _ := split(x)
+	ydir, yelem, _ := split(y)
+	return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+// Open opens the named file in the ZIP archive,
+// using the semantics of fs.FS.Open:
+// paths are always slash separated, with no
+// leading / or ../ elements.
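+//
+// For example (an added sketch, not upstream documentation; ra is any
+// io.ReaderAt of length size, and fs.WalkDir comes from the backported
+// io/fs package):
+//
+//	zr, err := zip.NewReader(ra, size)
+//	if err != nil {
+//		...
+//	}
+//	fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
+//		fmt.Println(p)
+//		return err
+//	})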
+func (r *Reader) Open(name string) (fs.File, error) {
+	r.initFileList()
+
+	e := r.openLookup(name)
+	if e == nil || !fs.ValidPath(name) {
+		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+	}
+	if e.isDir {
+		return &openDir{e, r.openReadDir(name), 0}, nil
+	}
+	rc, err := e.file.Open()
+	if err != nil {
+		return nil, err
+	}
+	return rc.(fs.File), nil
+}
+
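+// split splits a slash-separated name into its parent directory and final
+// element, reporting whether name denotes a directory (trailing slash).
+// For example (illustrative): split("a/b/c/") == ("a/b", "c", true).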
+func split(name string) (dir, elem string, isDir bool) {
+	if name[len(name)-1] == '/' {
+		isDir = true
+		name = name[:len(name)-1]
+	}
+	i := len(name) - 1
+	for i >= 0 && name[i] != '/' {
+		i--
+	}
+	if i < 0 {
+		return ".", name, isDir
+	}
+	return name[:i], name[i+1:], isDir
+}
+
+var dotFile = &fileListEntry{name: "./", isDir: true}
+
+func (r *Reader) openLookup(name string) *fileListEntry {
+	if name == "." {
+		return dotFile
+	}
+
+	dir, elem, _ := split(name)
+	files := r.fileList
+	i := sort.Search(len(files), func(i int) bool {
+		idir, ielem, _ := split(files[i].name)
+		return idir > dir || idir == dir && ielem >= elem
+	})
+	if i < len(files) {
+		fname := files[i].name
+		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
+			return &files[i]
+		}
+	}
+	return nil
+}
+
+func (r *Reader) openReadDir(dir string) []fileListEntry {
+	files := r.fileList
+	i := sort.Search(len(files), func(i int) bool {
+		idir, _, _ := split(files[i].name)
+		return idir >= dir
+	})
+	j := sort.Search(len(files), func(j int) bool {
+		jdir, _, _ := split(files[j].name)
+		return jdir > dir
+	})
+	return files[i:j]
+}
+
+type openDir struct {
+	e      *fileListEntry
+	files  []fileListEntry
+	offset int
+}
+
+func (d *openDir) Close() error               { return nil }
+func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat(), nil }
+
+func (d *openDir) Read([]byte) (int, error) {
+	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
+}
+
+func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
+	n := len(d.files) - d.offset
+	if count > 0 && n > count {
+		n = count
+	}
+	if n == 0 {
+		if count <= 0 {
+			return nil, nil
+		}
+		return nil, io.EOF
+	}
+	list := make([]fs.DirEntry, n)
+	for i := range list {
+		list[i] = d.files[d.offset+i].stat()
+	}
+	d.offset += n
+	return list, nil
+}
diff --git a/internal/backport/archive/zip/reader_test.go b/internal/backport/archive/zip/reader_test.go
new file mode 100644
index 0000000..a3b43fa
--- /dev/null
+++ b/internal/backport/archive/zip/reader_test.go
@@ -0,0 +1,1329 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/obscuretestdata"
+	"golang.org/x/website/internal/backport/testing/fstest"
+)
+
+type ZipTest struct {
+	Name     string
+	Source   func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/<Name> file
+	Comment  string
+	File     []ZipTestFile
+	Obscured bool  // needed for Apple notarization (golang.org/issue/34986)
+	Error    error // the error that Opening this file should return
+}
+
+type ZipTestFile struct {
+	Name     string
+	Mode     fs.FileMode
+	NonUTF8  bool
+	ModTime  time.Time
+	Modified time.Time
+
+	// Information describing expected zip file content.
+	// First, reading the entire content should produce the error ContentErr.
+	// Second, if ContentErr==nil, the content should match Content.
+	// If content is large, an alternative to setting Content is to set File,
+	// which names a file in the testdata/ directory containing the
+	// uncompressed expected content.
+	// If content is very large, an alternative to setting Content or File
+	// is to set Size, which will then be checked against the header-reported size
+	// but will bypass the decompressing of the actual data.
+	// This last option is used for testing very large (multi-GB) compressed files.
+	ContentErr error
+	Content    []byte
+	File       string
+	Size       uint64
+}
+
+var tests = []ZipTest{
+	{
+		Name:    "test.zip",
+		Comment: "This is a zipfile comment.",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte("This is a test text file.\n"),
+				Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "gophercolor16x16.png",
+				File:     "gophercolor16x16.png",
+				Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name:    "test-trailing-junk.zip",
+		Comment: "This is a zipfile comment.",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte("This is a test text file.\n"),
+				Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "gophercolor16x16.png",
+				File:     "gophercolor16x16.png",
+				Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name:   "r.zip",
+		Source: returnRecursiveZip,
+		File: []ZipTestFile{
+			{
+				Name:     "r/r.zip",
+				Content:  rZipBytes(),
+				Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "symlink.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "symlink",
+				Content:  []byte("../target"),
+				Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
+				Mode:     0777 | fs.ModeSymlink,
+			},
+		},
+	},
+	{
+		Name: "readme.zip",
+	},
+	{
+		Name:  "readme.notzip",
+		Error: ErrFormat,
+	},
+	{
+		Name: "dd.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "filename",
+				Content:  []byte("This is a test textfile.\n"),
+				Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		// created in windows XP file manager.
+		Name: "winxp.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "hello",
+				Content:  []byte("world \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/bar",
+				Content:  []byte("foo \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/empty/",
+				Content:  []byte{},
+				Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
+				Mode:     fs.ModeDir | 0777,
+			},
+			{
+				Name:     "readonly",
+				Content:  []byte("important \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC),
+				Mode:     0444,
+			},
+		},
+	},
+	{
+		// created by Zip 3.0 under Linux
+		Name: "unix.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "hello",
+				Content:  []byte("world \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/bar",
+				Content:  []byte("foo \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/empty/",
+				Content:  []byte{},
+				Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
+				Mode:     fs.ModeDir | 0777,
+			},
+			{
+				Name:     "readonly",
+				Content:  []byte("important \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)),
+				Mode:     0444,
+			},
+		},
+	},
+	{
+		// created by Go, before we wrote the "optional" data
+		// descriptor signatures (which are required by macOS).
+		// Use obscured file to avoid Apple’s notarization service
+		// rejecting the toolchain due to an inability to unzip this archive.
+		// See golang.org/issue/34986
+		Name:     "go-no-datadesc-sig.zip.base64",
+		Obscured: true,
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		// created by Go, after we wrote the "optional" data
+		// descriptor signatures (which are required by macOS)
+		Name: "go-with-datadesc-sig.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name:   "Bad-CRC32-in-data-descriptor",
+		Source: returnCorruptCRC32Zip,
+		File: []ZipTestFile{
+			{
+				Name:       "foo.txt",
+				Content:    []byte("foo\n"),
+				Modified:   time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:       0666,
+				ContentErr: ErrChecksum,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	// Tests that we verify (and accept valid) crc32s on files
+	// with crc32s in their file header (not in data descriptors)
+	{
+		Name: "crc32-not-streamed.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	// Tests that we verify (and reject invalid) crc32s on files
+	// with crc32s in their file header (not in data descriptors)
+	{
+		Name:   "crc32-not-streamed.zip",
+		Source: returnCorruptNotStreamedZip,
+		File: []ZipTestFile{
+			{
+				Name:       "foo.txt",
+				Content:    []byte("foo\n"),
+				Modified:   time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:       0644,
+				ContentErr: ErrChecksum,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "zip64.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "README",
+				Content:  []byte("This small file is in ZIP64 format.\n"),
+				Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC),
+				Mode:     0644,
+			},
+		},
+	},
+	// Another zip64 file with different Extras fields. (golang.org/issue/7069)
+	{
+		Name: "zip64-2.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "README",
+				Content:  []byte("This small file is in ZIP64 format.\n"),
+				Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	// Largest possible non-zip64 file, with no zip64 header.
+	{
+		Name:   "big.zip",
+		Source: returnBigZipBytes,
+		File: []ZipTestFile{
+			{
+				Name:     "big.file",
+				Content:  nil,
+				Size:     1<<32 - 1,
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "utf8-7zip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-infozip.zip",
+		File: []ZipTestFile{
+			{
+				Name:    "世界",
+				Content: []byte{},
+				Mode:    0644,
+				// Name is valid UTF-8, but format does not have UTF-8 flag set.
+				// We don't do UTF-8 detection for multi-byte runes due to
+				// false-positives with other encodings (e.g., Shift-JIS).
+				// Format says encoding is not UTF-8, so we trust it.
+				NonUTF8:  true,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-osx.zip",
+		File: []ZipTestFile{
+			{
+				Name:    "世界",
+				Content: []byte{},
+				Mode:    0644,
+				// Name is valid UTF-8, but format does not have UTF-8 set.
+				NonUTF8:  true,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-winrar.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-winzip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "time-7zip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-infozip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "time-osx.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "time-win7.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-winrar.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-winzip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-go.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-22738.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "file",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)),
+				ModTime:  time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC),
+			},
+		},
+	},
+}
+
+func TestReader(t *testing.T) {
+	for _, zt := range tests {
+		t.Run(zt.Name, func(t *testing.T) {
+			readTestZip(t, zt)
+		})
+	}
+}
+
+func readTestZip(t *testing.T, zt ZipTest) {
+	var z *Reader
+	var err error
+	var raw []byte
+	if zt.Source != nil {
+		rat, size := zt.Source()
+		z, err = NewReader(rat, size)
+		raw = make([]byte, size)
+		if _, err := rat.ReadAt(raw, 0); err != nil {
+			t.Errorf("ReadAt error=%v", err)
+			return
+		}
+	} else {
+		path := filepath.Join("testdata", zt.Name)
+		if zt.Obscured {
+			tf, err := obscuretestdata.DecodeToTempFile(path)
+			if err != nil {
+				t.Errorf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
+				return
+			}
+			defer os.Remove(tf)
+			path = tf
+		}
+		var rc *ReadCloser
+		rc, err = OpenReader(path)
+		if err == nil {
+			defer rc.Close()
+			z = &rc.Reader
+		}
+		var err2 error
+		raw, err2 = ioutil.ReadFile(path)
+		if err2 != nil {
+			t.Errorf("ReadFile(%s) error=%v", path, err2)
+			return
+		}
+	}
+	if err != zt.Error {
+		t.Errorf("error=%v, want %v", err, zt.Error)
+		return
+	}
+
+	// bail if file is not zip
+	if err == ErrFormat {
+		return
+	}
+
+	// bail here if no Files expected to be tested
+	// (there may actually be files in the zip, but we don't care)
+	if zt.File == nil {
+		return
+	}
+
+	if z.Comment != zt.Comment {
+		t.Errorf("comment=%q, want %q", z.Comment, zt.Comment)
+	}
+	if len(z.File) != len(zt.File) {
+		t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File))
+	}
+
+	// test read of each file
+	for i, ft := range zt.File {
+		readTestFile(t, zt, ft, z.File[i], raw)
+	}
+	if t.Failed() {
+		return
+	}
+
+	// test simultaneous reads
+	n := 0
+	done := make(chan bool)
+	for i := 0; i < 5; i++ {
+		for j, ft := range zt.File {
+			go func(j int, ft ZipTestFile) {
+				readTestFile(t, zt, ft, z.File[j], raw)
+				done <- true
+			}(j, ft)
+			n++
+		}
+	}
+	for ; n > 0; n-- {
+		<-done
+	}
+}
+
+func equalTimeAndZone(t1, t2 time.Time) bool {
+	name1, offset1 := t1.Zone()
+	name2, offset2 := t2.Zone()
+	return t1.Equal(t2) && name1 == name2 && offset1 == offset2
+}
+
+func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File, raw []byte) {
+	if f.Name != ft.Name {
+		t.Errorf("name=%q, want %q", f.Name, ft.Name)
+	}
+	if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) {
+		t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified)
+	}
+	if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) {
+		t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime)
+	}
+
+	testFileMode(t, f, ft.Mode)
+
+	size := uint64(f.UncompressedSize)
+	if size == uint32max {
+		size = f.UncompressedSize64
+	} else if size != f.UncompressedSize64 {
+		t.Errorf("%v: UncompressedSize=%#x does not match UncompressedSize64=%#x", f.Name, size, f.UncompressedSize64)
+	}
+
+	// Check that OpenRaw returns the correct byte segment
+	rw, err := f.OpenRaw()
+	if err != nil {
+		t.Errorf("%v: OpenRaw error=%v", f.Name, err)
+		return
+	}
+	start, err := f.DataOffset()
+	if err != nil {
+		t.Errorf("%v: DataOffset error=%v", f.Name, err)
+		return
+	}
+	got, err := ioutil.ReadAll(rw)
+	if err != nil {
+		t.Errorf("%v: OpenRaw ReadAll error=%v", f.Name, err)
+		return
+	}
+	end := uint64(start) + f.CompressedSize64
+	want := raw[start:end]
+	if !bytes.Equal(got, want) {
+		t.Logf("got %q", got)
+		t.Logf("want %q", want)
+		t.Errorf("%v: OpenRaw returned unexpected bytes", f.Name)
+		return
+	}
+
+	r, err := f.Open()
+	if err != nil {
+		t.Errorf("%v", err)
+		return
+	}
+
+	// For very large files, just check that the size is correct.
+	// The content is expected to be all zeros.
+	// Don't bother uncompressing: too big.
+	if ft.Content == nil && ft.File == "" && ft.Size > 0 {
+		if size != ft.Size {
+			t.Errorf("%v: uncompressed size %#x, want %#x", ft.Name, size, ft.Size)
+		}
+		r.Close()
+		return
+	}
+
+	var b bytes.Buffer
+	_, err = io.Copy(&b, r)
+	if err != ft.ContentErr {
+		t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr)
+	}
+	if err != nil {
+		return
+	}
+	r.Close()
+
+	if g := uint64(b.Len()); g != size {
+		t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size)
+	}
+
+	var c []byte
+	if ft.Content != nil {
+		c = ft.Content
+	} else if c, err = ioutil.ReadFile("testdata/" + ft.File); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if b.Len() != len(c) {
+		t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c))
+		return
+	}
+
+	for i, b := range b.Bytes() {
+		if b != c[i] {
+			t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i])
+			return
+		}
+	}
+}
+
+func testFileMode(t *testing.T, f *File, want fs.FileMode) {
+	mode := f.Mode()
+	if want == 0 {
+		t.Errorf("%s mode: got %v, want none", f.Name, mode)
+	} else if mode != want {
+		t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
+	}
+}
+
+func TestInvalidFiles(t *testing.T) {
+	const size = 1024 * 70 // 70kb
+	b := make([]byte, size)
+
+	// zeroes
+	_, err := NewReader(bytes.NewReader(b), size)
+	if err != ErrFormat {
+		t.Errorf("zeroes: error=%v, want %v", err, ErrFormat)
+	}
+
+	// repeated directoryEndSignatures
+	sig := make([]byte, 4)
+	binary.LittleEndian.PutUint32(sig, directoryEndSignature)
+	for i := 0; i < size-4; i += 4 {
+		copy(b[i:i+4], sig)
+	}
+	_, err = NewReader(bytes.NewReader(b), size)
+	if err != ErrFormat {
+		t.Errorf("sigs: error=%v, want %v", err, ErrFormat)
+	}
+
+	// negative size
+	_, err = NewReader(bytes.NewReader([]byte("foobar")), -1)
+	if err == nil {
+		t.Errorf("archive/zip.NewReader: expected error when negative size is passed")
+	}
+}
+
+func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) {
+	data, err := ioutil.ReadFile(filepath.Join("testdata", fileName))
+	if err != nil {
+		panic("Error reading " + fileName + ": " + err.Error())
+	}
+	corrupter(data)
+	return bytes.NewReader(data), int64(len(data))
+}
+
+func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) {
+	return messWith("go-with-datadesc-sig.zip", func(b []byte) {
+		// Corrupt one of the CRC32s in the data descriptor:
+		b[0x2d]++
+	})
+}
+
+func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) {
+	return messWith("crc32-not-streamed.zip", func(b []byte) {
+		// Corrupt foo.txt's final crc32 byte, in both
+		// the file header and TOC. (0x7e -> 0x7f)
+		b[0x11]++
+		b[0x9d]++
+
+		// TODO(bradfitz): add a new test that only corrupts
+		// one of these values, and verify that that's also an
+		// error. Currently, the reader code doesn't verify the
+		// fileheader and TOC's crc32 match if they're both
+		// non-zero and only the second line above, the TOC,
+		// is what matters.
+	})
+}
+
+// rZipBytes returns the bytes of a recursive zip file, without
+// putting it on disk and triggering certain virus scanners.
+func rZipBytes() []byte {
+	s := `
+0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4
+0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f
+0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00
+0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00
+0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00
+0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00
+0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8
+0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f
+0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e
+0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb
+00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff
+00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42
+00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14
+00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21
+00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb
+00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff
+0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a
+0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3
+0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06
+0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00
+0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf
+0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06
+0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01
+0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89
+0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00
+0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a
+00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00
+00001b0 00 00 6d 01 00 00 00 00`
+	s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+	s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func returnRecursiveZip() (r io.ReaderAt, size int64) {
+	b := rZipBytes()
+	return bytes.NewReader(b), int64(len(b))
+}
+
+// biggestZipBytes returns the bytes of a zip file biggest.zip
+// that contains a zip file bigger.zip that contains a zip file
+// big.zip that contains big.file, which contains 2³²-1 zeros.
+// The big.zip file is interesting because it has no zip64 header,
+// much like the innermost zip files in the well-known 42.zip.
+//
+// biggest.zip was generated by changing isZip64 to use > uint32max
+// instead of >= uint32max and then running this program:
+//
+//	package main
+//
+//	import (
+//		"archive/zip"
+//		"bytes"
+//		"io"
+//		"log"
+//		"os"
+//	)
+//
+//	type zeros struct{}
+//
+//	func (zeros) Read(b []byte) (int, error) {
+//		for i := range b {
+//			b[i] = 0
+//		}
+//		return len(b), nil
+//	}
+//
+//	func main() {
+//		bigZip := makeZip("big.file", io.LimitReader(zeros{}, 1<<32-1))
+//		if err := os.WriteFile("/tmp/big.zip", bigZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//
+//		biggerZip := makeZip("big.zip", bytes.NewReader(bigZip))
+//		if err := os.WriteFile("/tmp/bigger.zip", biggerZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//
+//		biggestZip := makeZip("bigger.zip", bytes.NewReader(biggerZip))
+//		if err := os.WriteFile("/tmp/biggest.zip", biggestZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//	}
+//
+//	func makeZip(name string, r io.Reader) []byte {
+//		var buf bytes.Buffer
+//		w := zip.NewWriter(&buf)
+//		wf, err := w.Create(name)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		if _, err = io.Copy(wf, r); err != nil {
+//			log.Fatal(err)
+//		}
+//		if err := w.Close(); err != nil {
+//			log.Fatal(err)
+//		}
+//		return buf.Bytes()
+//	}
+//
+// The 4 GB of zeros compresses to 4 MB, which compresses to 20 kB,
+// which compresses to 1252 bytes (in the hex dump below).
+//
+// It's here in hex for the same reason as rZipBytes above: to avoid
+// problems with on-disk virus scanners or other zip processors.
+//
+func biggestZipBytes() []byte {
+	s := `
+0000000 50 4b 03 04 14 00 08 00 08 00 00 00 00 00 00 00
+0000010 00 00 00 00 00 00 00 00 00 00 0a 00 00 00 62 69
+0000020 67 67 65 72 2e 7a 69 70 ec dc 6b 4c 53 67 18 07
+0000030 f0 16 c5 ca 65 2e cb b8 94 20 61 1f 44 33 c7 cd
+0000040 c0 86 4a b5 c0 62 8a 61 05 c6 cd 91 b2 54 8c 1b
+0000050 63 8b 03 9c 1b 95 52 5a e3 a0 19 6c b2 05 59 44
+0000060 64 9d 73 83 71 11 46 61 14 b9 1d 14 09 4a c3 60
+0000070 2e 4c 6e a5 60 45 02 62 81 95 b6 94 9e 9e 77 e7
+0000080 d0 43 b6 f8 71 df 96 3c e7 a4 69 ce bf cf e9 79
+0000090 ce ef 79 3f bf f1 31 db b6 bb 31 76 92 e7 f3 07
+00000a0 8b fc 9c ca cc 08 cc cb cc 5e d2 1c 88 d9 7e bb
+00000b0 4f bb 3a 3f 75 f1 5d 7f 8f c2 68 67 77 8f 25 ff
+00000c0 84 e2 93 2d ef a4 95 3d 71 4e 2c b9 b0 87 c3 be
+00000d0 3d f8 a7 60 24 61 c5 ef ae 9e c8 6c 6d 4e 69 c8
+00000e0 67 65 34 f8 37 76 2d 76 5c 54 f3 95 65 49 c7 0f
+00000f0 18 71 4b 7e 5b 6a d1 79 47 61 41 b0 4e 2a 74 45
+0000100 43 58 12 b2 5a a5 c6 7d 68 55 88 d4 98 75 18 6d
+0000110 08 d1 1f 8f 5a 9e 96 ee 45 cf a4 84 4e 4b e8 50
+0000120 a7 13 d9 06 de 52 81 97 36 b2 d7 b8 fc 2b 5f 55
+0000130 23 1f 32 59 cf 30 27 fb e2 8a b9 de 45 dd 63 9c
+0000140 4b b5 8b 96 4c 7a 62 62 cc a1 a7 cf fa f1 fe dd
+0000150 54 62 11 bf 36 78 b3 c7 b1 b5 f2 61 4d 4e dd 66
+0000160 32 2e e6 70 34 5f f4 c9 e6 6c 43 6f da 6b c6 c3
+0000170 09 2c ce 09 57 7f d2 7e b4 23 ba 7c 1b 99 bc 22
+0000180 3e f1 de 91 2f e3 9c 1b 82 cc c2 84 39 aa e6 de
+0000190 b4 69 fc cc cb 72 a6 61 45 f0 d3 1d 26 19 7c 8d
+00001a0 29 c8 66 02 be 77 6a f9 3d 34 79 17 19 c8 96 24
+00001b0 a3 ac e4 dd 3b 1a 8e c6 fe 96 38 6b bf 67 5a 23
+00001c0 f4 16 f4 e6 8a b4 fc c2 cd bf 95 66 1d bb 35 aa
+00001d0 92 7d 66 d8 08 8d a5 1f 54 2a af 09 cf 61 ff d2
+00001e0 85 9d 8f b6 d7 88 07 4a 86 03 db 64 f3 d9 92 73
+00001f0 df ec a7 fc 23 4c 8d 83 79 63 2a d9 fd 8d b3 c8
+0000200 8f 7e d4 19 85 e6 8d 1c 76 f0 8b 58 32 fd 9a d6
+0000210 85 e2 48 ad c3 d5 60 6f 7e 22 dd ef 09 49 7c 7f
+0000220 3a 45 c3 71 b7 df f3 4c 63 fb b5 d9 31 5f 6e d6
+0000230 24 1d a4 4a fe 32 a7 5c 16 48 5c 3e 08 6b 8a d3
+0000240 25 1d a2 12 a5 59 24 ea 20 5f 52 6d ad 94 db 6b
+0000250 94 b9 5d eb 4b a7 5c 44 bb 1e f2 3c 6b cf 52 c9
+0000260 e9 e5 ba 06 b9 c4 e5 0a d0 00 0d d0 00 0d d0 00
+0000270 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d
+0000280 d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0
+0000290 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00
+00002a0 0d d0 00 cd ff 9e 46 86 fa a7 7d 3a 43 d7 8e 10
+00002b0 52 e9 be e6 6e cf eb 9e 85 4d 65 ce cc 30 c1 44
+00002c0 c0 4e af bc 9c 6c 4b a0 d7 54 ff 1d d5 5c 89 fb
+00002d0 b5 34 7e c4 c2 9e f5 a0 f6 5b 7e 6e ca 73 c7 ef
+00002e0 5d be de f9 e8 81 eb a5 0a a5 63 54 2c d7 1c d1
+00002f0 89 17 85 f8 16 94 f2 8a b2 a3 f5 b6 6d df 75 cd
+0000300 90 dd 64 bd 5d 55 4e f2 55 19 1b b7 cc ef 1b ea
+0000310 2e 05 9c f4 aa 1e a8 cd a6 82 c7 59 0f 5e 9d e0
+0000320 bb fc 6c d6 99 23 eb 36 ad c6 c5 e1 d8 e1 e2 3e
+0000330 d9 90 5a f7 91 5d 6f bc 33 6d 98 47 d2 7c 2e 2f
+0000340 99 a4 25 72 85 49 2c be 0b 5b af 8f e5 6e 81 a6
+0000350 a3 5a 6f 39 53 3a ab 7a 8b 1e 26 f7 46 6c 7d 26
+0000360 53 b3 22 31 94 d3 83 f2 18 4d f5 92 33 27 53 97
+0000370 0f d3 e6 55 9c a6 c5 31 87 6f d3 f3 ae 39 6f 56
+0000380 10 7b ab 7e d0 b4 ca f2 b8 05 be 3f 0e 6e 5a 75
+0000390 ab 0c f5 37 0e ba 8e 75 71 7a aa ed 7a dd 6a 63
+00003a0 be 9b a0 97 27 6a 6f e7 d3 8b c4 7c ec d3 91 56
+00003b0 d9 ac 5e bf 16 42 2f 00 1f 93 a2 23 87 bd e2 59
+00003c0 a0 de 1a 66 c8 62 eb 55 8f 91 17 b4 61 42 7a 50
+00003d0 40 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40
+00003e0 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40 03
+00003f0 34 40 03 34 40 03 34 ff 85 86 90 8b ea 67 90 0d
+0000400 e1 42 1b d2 61 d6 79 ec fd 3e 44 28 a4 51 6c 5c
+0000410 fc d2 72 ca ba 82 18 46 16 61 cd 93 a9 0f d1 24
+0000420 17 99 e2 2c 71 16 84 0c c8 7a 13 0f 9a 5e c5 f0
+0000430 79 64 e2 12 4d c8 82 a1 81 19 2d aa 44 6d 87 54
+0000440 84 71 c1 f6 d4 ca 25 8c 77 b9 08 c7 c8 5e 10 8a
+0000450 8f 61 ed 8c ba 30 1f 79 9a c7 60 34 2b b9 8c f8
+0000460 18 a6 83 1b e3 9f ad 79 fe fd 1b 8b f1 fc 41 6f
+0000470 d4 13 1f e3 b8 83 ba 64 92 e7 eb e4 77 05 8f ba
+0000480 fa 3b 00 00 ff ff 50 4b 07 08 a6 18 b1 91 5e 04
+0000490 00 00 e4 47 00 00 50 4b 01 02 14 00 14 00 08 00
+00004a0 08 00 00 00 00 00 a6 18 b1 91 5e 04 00 00 e4 47
+00004b0 00 00 0a 00 00 00 00 00 00 00 00 00 00 00 00 00
+00004c0 00 00 00 00 62 69 67 67 65 72 2e 7a 69 70 50 4b
+00004d0 05 06 00 00 00 00 01 00 01 00 38 00 00 00 96 04
+00004e0 00 00 00 00`
+	s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+	s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func returnBigZipBytes() (r io.ReaderAt, size int64) {
+	b := biggestZipBytes()
+	for i := 0; i < 2; i++ {
+		r, err := NewReader(bytes.NewReader(b), int64(len(b)))
+		if err != nil {
+			panic(err)
+		}
+		f, err := r.File[0].Open()
+		if err != nil {
+			panic(err)
+		}
+		b, err = ioutil.ReadAll(f)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return bytes.NewReader(b), int64(len(b))
+}
+
+func TestIssue8186(t *testing.T) {
+	// Directory headers & data found in the TOC of a JAR file.
+	dirEnts := []string{
+		"PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00",
+		"PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA",
+	}
+	for i, s := range dirEnts {
+		var f File
+		err := readDirectoryHeader(&f, strings.NewReader(s))
+		if err != nil {
+			t.Errorf("error reading #%d: %v", i, err)
+		}
+	}
+}
+
+// Verify we return ErrUnexpectedEOF when length is short.
+func TestIssue10957(t *testing.T) {
+	data := []byte("PK\x03\x040000000PK\x01\x0200000" +
+		"0000000000000000000\x00" +
+		"\x00\x00\x00\x00\x00000000000000PK\x01" +
+		"\x020000000000000000000" +
+		"00000\v\x00\x00\x00\x00\x00000000000" +
+		"00000000000000PK\x01\x0200" +
+		"00000000000000000000" +
+		"00\v\x00\x00\x00\x00\x00000000000000" +
+		"00000000000PK\x01\x020000<" +
+		"0\x00\x0000000000000000\v\x00\v" +
+		"\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" +
+		"00000000PK\x01\x0200000000" +
+		"0000000000000000\v\x00\x00\x00" +
+		"\x00\x0000PK\x05\x06000000\x05\x000000" +
+		"\v\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, f := range z.File {
+		r, err := f.Open()
+		if err != nil {
+			continue
+		}
+		if f.UncompressedSize64 < 1e6 {
+			n, err := io.Copy(ioutil.Discard, r)
+			if i == 3 && err != io.ErrUnexpectedEOF {
+				t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
+			}
+			if err == nil && uint64(n) != f.UncompressedSize64 {
+				t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64)
+			}
+		}
+		r.Close()
+	}
+}
+
+// Verify that this particular malformed zip file is rejected.
+func TestIssue10956(t *testing.T) {
+	data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" +
+		"0000PK\x05\x06000000000000" +
+		"0000\v\x00000\x00\x00\x00\x00\x00\x00\x000")
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err == nil {
+		t.Errorf("got nil error, want ErrFormat")
+	}
+	if r != nil {
+		t.Errorf("got non-nil Reader, want nil")
+	}
+}
+
+// Verify we return ErrUnexpectedEOF when reading truncated data descriptor.
+func TestIssue11146(t *testing.T) {
+	data := []byte("PK\x03\x040000000000000000" +
+		"000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" +
+		"0000000000000000PK\x01\x02" +
+		"0000\b0\b\x00000000000000" +
+		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" +
+		"\x00\x0000\x01\x0000008\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := z.File[0].Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = ioutil.ReadAll(r)
+	if err != io.ErrUnexpectedEOF {
+		t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
+	}
+	r.Close()
+}
+
+// Verify we do not treat non-zip64 archives as zip64.
+func TestIssue12449(t *testing.T) {
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+		0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
+		0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+		0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+		0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+		0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
+		0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
+		0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+		0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
+		0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
+		0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
+		0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
+		0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
+		0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+		0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+		0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+		0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
+		0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
+		0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
+		0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
+		0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
+		0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
+		0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
+	}
+	// Read in the archive.
+	_, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Errorf("Error reading the archive: %v", err)
+	}
+}
+
+func TestFS(t *testing.T) {
+	for _, test := range []struct {
+		file string
+		want []string
+	}{
+		{
+			"testdata/unix.zip",
+			[]string{"hello", "dir/bar", "readonly"},
+		},
+		{
+			"testdata/subdir.zip",
+			[]string{"a/b/c"},
+		},
+	} {
+		t.Run(test.file, func(t *testing.T) {
+			t.Parallel()
+			z, err := OpenReader(test.file)
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer z.Close()
+			if err := fstest.TestFS(z, test.want...); err != nil {
+				t.Error(err)
+			}
+		})
+	}
+}
+
+func TestFSModTime(t *testing.T) {
+	t.Parallel()
+	z, err := OpenReader("testdata/subdir.zip")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer z.Close()
+
+	for _, test := range []struct {
+		name string
+		want time.Time
+	}{
+		{
+			"a",
+			time.Date(2021, 4, 19, 12, 29, 56, 0, timeZone(-7*time.Hour)).UTC(),
+		},
+		{
+			"a/b/c",
+			time.Date(2021, 4, 19, 12, 29, 59, 0, timeZone(-7*time.Hour)).UTC(),
+		},
+	} {
+		fi, err := fs.Stat(z, test.name)
+		if err != nil {
+			t.Errorf("%s: %v", test.name, err)
+			continue
+		}
+		if got := fi.ModTime(); !got.Equal(test.want) {
+			t.Errorf("%s: got modtime %v, want %v", test.name, got, test.want)
+		}
+	}
+}
+
+func TestCVE202127919(t *testing.T) {
+	// Archive containing only the file "../test.txt"
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+		0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
+		0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
+		0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
+		0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
+		0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
+		0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
+		0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
+		0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
+		0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
+		0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
+		0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
+		0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
+		0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
+		0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
+		0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
+	}
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatalf("Error reading the archive: %v", err)
+	}
+	_, err = r.Open("test.txt")
+	if err != nil {
+		t.Errorf("Error reading file: %v", err)
+	}
+}
+
+func TestReadDataDescriptor(t *testing.T) {
+	tests := []struct {
+		desc    string
+		in      []byte
+		zip64   bool
+		want    *dataDescriptor
+		wantErr error
+	}{{
+		desc: "valid 32 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, // compressed size
+			0x08, 0x09, 0x0a, 0x0b, // uncompressed size
+		},
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x07060504,
+			uncompressedSize: 0x0b0a0908,
+		},
+	}, {
+		desc: "valid 32 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, // compressed size
+			0x08, 0x09, 0x0a, 0x0b, // uncompressed size
+		},
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x07060504,
+			uncompressedSize: 0x0b0a0908,
+		},
+	}, {
+		desc: "valid 64 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
+		},
+		zip64: true,
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x0b0a090807060504,
+			uncompressedSize: 0x131211100f0e0d0c,
+		},
+	}, {
+		desc: "valid 64 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
+		},
+		zip64: true,
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x0b0a090807060504,
+			uncompressedSize: 0x131211100f0e0d0c,
+		},
+	}, {
+		desc: "invalid 32 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, // unexpected end
+		},
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 32 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, // unexpected end
+		},
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 64 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
+		},
+		zip64:   true,
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 64 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
+		},
+		zip64:   true,
+		wantErr: io.ErrUnexpectedEOF,
+	}}
+
+	for _, test := range tests {
+		t.Run(test.desc, func(t *testing.T) {
+			r := bytes.NewReader(test.in)
+
+			desc, err := readDataDescriptor(r, test.zip64)
+			if err != test.wantErr {
+				t.Fatalf("got err %v; want %v", err, test.wantErr)
+			}
+			if test.want == nil {
+				return
+			}
+			if desc == nil {
+				t.Fatalf("got nil DataDescriptor; want non-nil")
+			}
+			if desc.crc32 != test.want.crc32 {
+				t.Errorf("got CRC32 %#x; want %#x", desc.crc32, test.want.crc32)
+			}
+			if desc.compressedSize != test.want.compressedSize {
+				t.Errorf("got CompressedSize %#x; want %#x", desc.compressedSize, test.want.compressedSize)
+			}
+			if desc.uncompressedSize != test.want.uncompressedSize {
+				t.Errorf("got UncompressedSize %#x; want %#x", desc.uncompressedSize, test.want.uncompressedSize)
+			}
+		})
+	}
+}
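
The tests above (TestFS, TestFSModTime, TestCVE202127919) exercise the Reader through the backported io/fs interfaces. As a rough usage sketch — assuming the backported fs package exposes WalkDir and DirEntry like its upstream counterpart, that the caller lives inside the x/website module (these are internal packages), and with "example.zip" standing in for a real archive — walking an opened archive as a file system looks like this:

package main

import (
	"fmt"
	"log"

	"golang.org/x/website/internal/backport/archive/zip"
	"golang.org/x/website/internal/backport/io/fs"
)

func main() {
	// "example.zip" is a placeholder; any readable archive works.
	z, err := zip.OpenReader("example.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer z.Close()

	// ReadCloser embeds Reader, which implements fs.FS, so the backported
	// fs helpers can traverse the archive like any other file tree.
	err = fs.WalkDir(z, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, d.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}

fstest.TestFS in TestFS above verifies exactly this contract, so every name listed in test.want must be reachable through the fs.FS view of the archive.
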
diff --git a/internal/backport/archive/zip/register.go b/internal/backport/archive/zip/register.go
new file mode 100644
index 0000000..51e9c3e
--- /dev/null
+++ b/internal/backport/archive/zip/register.go
@@ -0,0 +1,148 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"compress/flate"
+	"errors"
+	"io"
+	"io/ioutil"
+	"sync"
+)
+
+// A Compressor returns a new compressing writer, writing to w.
+// The WriteCloser's Close method must be used to flush pending data to w.
+// The Compressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned writer will be used only by
+// one goroutine at a time.
+type Compressor func(w io.Writer) (io.WriteCloser, error)
+
+// A Decompressor returns a new decompressing reader, reading from r.
+// The ReadCloser's Close method must be used to release associated resources.
+// The Decompressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned reader will be used only by
+// one goroutine at a time.
+type Decompressor func(r io.Reader) io.ReadCloser
+
+var flateWriterPool sync.Pool
+
+func newFlateWriter(w io.Writer) io.WriteCloser {
+	fw, ok := flateWriterPool.Get().(*flate.Writer)
+	if ok {
+		fw.Reset(w)
+	} else {
+		fw, _ = flate.NewWriter(w, 5)
+	}
+	return &pooledFlateWriter{fw: fw}
+}
+
+type pooledFlateWriter struct {
+	mu sync.Mutex // guards Close and Write
+	fw *flate.Writer
+}
+
+func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.fw == nil {
+		return 0, errors.New("Write after Close")
+	}
+	return w.fw.Write(p)
+}
+
+func (w *pooledFlateWriter) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var err error
+	if w.fw != nil {
+		err = w.fw.Close()
+		flateWriterPool.Put(w.fw)
+		w.fw = nil
+	}
+	return err
+}
+
+var flateReaderPool sync.Pool
+
+func newFlateReader(r io.Reader) io.ReadCloser {
+	fr, ok := flateReaderPool.Get().(io.ReadCloser)
+	if ok {
+		fr.(flate.Resetter).Reset(r, nil)
+	} else {
+		fr = flate.NewReader(r)
+	}
+	return &pooledFlateReader{fr: fr}
+}
+
+type pooledFlateReader struct {
+	mu sync.Mutex // guards Close and Read
+	fr io.ReadCloser
+}
+
+func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.fr == nil {
+		return 0, errors.New("Read after Close")
+	}
+	return r.fr.Read(p)
+}
+
+func (r *pooledFlateReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.fr != nil {
+		err = r.fr.Close()
+		flateReaderPool.Put(r.fr)
+		r.fr = nil
+	}
+	return err
+}
+
+var (
+	compressors   sync.Map // map[uint16]Compressor
+	decompressors sync.Map // map[uint16]Decompressor
+)
+
+func init() {
+	compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
+	compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))
+
+	decompressors.Store(Store, Decompressor(ioutil.NopCloser))
+	decompressors.Store(Deflate, Decompressor(newFlateReader))
+}
+
+// RegisterDecompressor allows custom decompressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterDecompressor(method uint16, dcomp Decompressor) {
+	if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
+		panic("decompressor already registered")
+	}
+}
+
+// RegisterCompressor registers custom compressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterCompressor(method uint16, comp Compressor) {
+	if _, dup := compressors.LoadOrStore(method, comp); dup {
+		panic("compressor already registered")
+	}
+}
+
+func compressor(method uint16) Compressor {
+	ci, ok := compressors.Load(method)
+	if !ok {
+		return nil
+	}
+	return ci.(Compressor)
+}
+
+func decompressor(method uint16) Decompressor {
+	di, ok := decompressors.Load(method)
+	if !ok {
+		return nil
+	}
+	return di.(Decompressor)
+}
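
register.go above holds the package-level compressor and decompressor registries that Reader and Writer fall back to when no per-instance override is set. A minimal sketch of registering a decompressor for an extra method — assuming method ID 12 (the ZIP method number conventionally used for bzip2) and a hypothetical helper package name — might look like:

package zipbzip2

import (
	"compress/bzip2"
	"io"
	"io/ioutil"

	"golang.org/x/website/internal/backport/archive/zip"
)

func init() {
	// RegisterDecompressor panics if the method ID is already registered,
	// so this should run exactly once; an init function is a natural place.
	zip.RegisterDecompressor(12, func(r io.Reader) io.ReadCloser {
		// compress/bzip2 only provides a Reader, so wrap it to satisfy
		// the Decompressor contract of returning an io.ReadCloser.
		return ioutil.NopCloser(bzip2.NewReader(r))
	})
}

Per-writer overrides go through Writer.RegisterCompressor instead, without touching the global registry.
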
diff --git a/internal/backport/archive/zip/struct.go b/internal/backport/archive/zip/struct.go
new file mode 100644
index 0000000..d407427
--- /dev/null
+++ b/internal/backport/archive/zip/struct.go
@@ -0,0 +1,401 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package zip provides support for reading and writing ZIP archives.
+
+See: https://www.pkware.com/appnote
+
+This package does not support disk spanning.
+
+A note about ZIP64:
+
+To be backwards compatible the FileHeader has both 32 and 64 bit Size
+fields. The 64 bit fields will always contain the correct value and
+for normal archives both fields will be the same. For files requiring
+the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
+fields must be used instead.
+*/
+package zip
+
+import (
+	"path"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// Compression methods.
+const (
+	Store   uint16 = 0 // no compression
+	Deflate uint16 = 8 // DEFLATE compressed
+)
+
+const (
+	fileHeaderSignature      = 0x04034b50
+	directoryHeaderSignature = 0x02014b50
+	directoryEndSignature    = 0x06054b50
+	directory64LocSignature  = 0x07064b50
+	directory64EndSignature  = 0x06064b50
+	dataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder
+	fileHeaderLen            = 30         // + filename + extra
+	directoryHeaderLen       = 46         // + filename + extra + comment
+	directoryEndLen          = 22         // + comment
+	dataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size
+	dataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size
+	directory64LocLen        = 20         //
+	directory64EndLen        = 56         // + extra
+
+	// Constants for the first byte in CreatorVersion.
+	creatorFAT    = 0
+	creatorUnix   = 3
+	creatorNTFS   = 11
+	creatorVFAT   = 14
+	creatorMacOSX = 19
+
+	// Version numbers.
+	zipVersion20 = 20 // 2.0
+	zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
+
+	// Limits for non zip64 files.
+	uint16max = (1 << 16) - 1
+	uint32max = (1 << 32) - 1
+
+	// Extra header IDs.
+	//
+	// IDs 0..31 are reserved for official use by PKWARE.
+	// IDs above that range are defined by third-party vendors.
+	// Since ZIP lacked high precision timestamps (and an official specification
+	// of the timezone used for the date fields), many competing extra fields
+	// have been invented. Pervasive use effectively makes them "official".
+	//
+	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+	zip64ExtraID       = 0x0001 // Zip64 extended information
+	ntfsExtraID        = 0x000a // NTFS
+	unixExtraID        = 0x000d // UNIX
+	extTimeExtraID     = 0x5455 // Extended timestamp
+	infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
+)
+
+// FileHeader describes a file within a zip file.
+// See the zip spec for details.
+type FileHeader struct {
+	// Name is the name of the file.
+	//
+	// It must be a relative path, not start with a drive letter (such as "C:"),
+	// and must use forward slashes instead of back slashes. A trailing slash
+	// indicates that this file is a directory and should have no data.
+	//
+	// When reading zip files, the Name field is populated from
+	// the zip file directly and is not validated for correctness.
+	// It is the caller's responsibility to sanitize it as
+	// appropriate, including canonicalizing slash directions,
+	// validating that paths are relative, and preventing path
+	// traversal through filenames ("../../../").
+	Name string
+
+	// Comment is any arbitrary user-defined string shorter than 64KiB.
+	Comment string
+
+	// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
+	//
+	// By specification, the only other encoding permitted should be CP-437,
+	// but historically many ZIP readers interpret Name and Comment as whatever
+	// the system's local character encoding happens to be.
+	//
+	// This flag should only be set if the user intends to encode a non-portable
+	// ZIP file for a specific localized region. Otherwise, the Writer
+	// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
+	NonUTF8 bool
+
+	CreatorVersion uint16
+	ReaderVersion  uint16
+	Flags          uint16
+
+	// Method is the compression method. If zero, Store is used.
+	Method uint16
+
+	// Modified is the modified time of the file.
+	//
+	// When reading, an extended timestamp is preferred over the legacy MS-DOS
+	// date field, and the offset between the times is used as the timezone.
+	// If only the MS-DOS date is present, the timezone is assumed to be UTC.
+	//
+	// When writing, an extended timestamp (which is timezone-agnostic) is
+	// always emitted. The legacy MS-DOS date field is encoded according to the
+	// location of the Modified time.
+	Modified     time.Time
+	ModifiedTime uint16 // Deprecated: Legacy MS-DOS time; use Modified instead.
+	ModifiedDate uint16 // Deprecated: Legacy MS-DOS date; use Modified instead.
+
+	CRC32              uint32
+	CompressedSize     uint32 // Deprecated: Use CompressedSize64 instead.
+	UncompressedSize   uint32 // Deprecated: Use UncompressedSize64 instead.
+	CompressedSize64   uint64
+	UncompressedSize64 uint64
+	Extra              []byte
+	ExternalAttrs      uint32 // Meaning depends on CreatorVersion
+}
+
+// FileInfo returns an fs.FileInfo for the FileHeader.
+func (h *FileHeader) FileInfo() fs.FileInfo {
+	return headerFileInfo{h}
+}
+
+// headerFileInfo implements fs.FileInfo.
+type headerFileInfo struct {
+	fh *FileHeader
+}
+
+func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
+func (fi headerFileInfo) Size() int64 {
+	if fi.fh.UncompressedSize64 > 0 {
+		return int64(fi.fh.UncompressedSize64)
+	}
+	return int64(fi.fh.UncompressedSize)
+}
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time {
+	if fi.fh.Modified.IsZero() {
+		return fi.fh.ModTime()
+	}
+	return fi.fh.Modified.UTC()
+}
+func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode() & fs.ModeType }
+func (fi headerFileInfo) Sys() interface{}  { return fi.fh }
+
+func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
+
+// FileInfoHeader creates a partially-populated FileHeader from an
+// fs.FileInfo.
+// Because fs.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+// If compression is desired, callers should set the FileHeader.Method
+// field; it is unset by default.
+func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
+	size := fi.Size()
+	fh := &FileHeader{
+		Name:               fi.Name(),
+		UncompressedSize64: uint64(size),
+	}
+	fh.SetModTime(fi.ModTime())
+	fh.SetMode(fi.Mode())
+	if fh.UncompressedSize64 > uint32max {
+		fh.UncompressedSize = uint32max
+	} else {
+		fh.UncompressedSize = uint32(fh.UncompressedSize64)
+	}
+	return fh, nil
+}
+
+type directoryEnd struct {
+	diskNbr            uint32 // unused
+	dirDiskNbr         uint32 // unused
+	dirRecordsThisDisk uint64 // unused
+	directoryRecords   uint64
+	directorySize      uint64
+	directoryOffset    uint64 // relative to file
+	commentLen         uint16
+	comment            string
+}
+
+// timeZone returns a *time.Location based on the provided offset.
+// If the offset is non-sensible, then this uses an offset of zero.
+func timeZone(offset time.Duration) *time.Location {
+	const (
+		minOffset   = -12 * time.Hour  // E.g., Baker island at -12:00
+		maxOffset   = +14 * time.Hour  // E.g., Line island at +14:00
+		offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
+	)
+	offset = offset.Round(offsetAlias)
+	if offset < minOffset || maxOffset < offset {
+		offset = 0
+	}
+	return time.FixedZone("", int(offset/time.Second))
+}
+
+// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
+func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
+	return time.Date(
+		// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
+		int(dosDate>>9+1980),
+		time.Month(dosDate>>5&0xf),
+		int(dosDate&0x1f),
+
+		// time bits 0-4: second/2; 5-10: minute; 11-15: hour
+		int(dosTime>>11),
+		int(dosTime>>5&0x3f),
+		int(dosTime&0x1f*2),
+		0, // nanoseconds
+
+		time.UTC,
+	)
+}
+
+// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
+func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
+	fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
+	fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
+	return
+}
+
+// ModTime returns the modification time in UTC using the legacy
+// ModifiedDate and ModifiedTime fields.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) ModTime() time.Time {
+	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
+}
+
+// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
+// to the given time in UTC.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) SetModTime(t time.Time) {
+	t = t.UTC() // Convert to UTC for compatibility
+	h.Modified = t
+	h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
+}
+
+const (
+	// Unix constants. The specification doesn't mention them,
+	// but these seem to be the values agreed on by tools.
+	s_IFMT   = 0xf000
+	s_IFSOCK = 0xc000
+	s_IFLNK  = 0xa000
+	s_IFREG  = 0x8000
+	s_IFBLK  = 0x6000
+	s_IFDIR  = 0x4000
+	s_IFCHR  = 0x2000
+	s_IFIFO  = 0x1000
+	s_ISUID  = 0x800
+	s_ISGID  = 0x400
+	s_ISVTX  = 0x200
+
+	msdosDir      = 0x10
+	msdosReadOnly = 0x01
+)
+
+// Mode returns the permission and mode bits for the FileHeader.
+func (h *FileHeader) Mode() (mode fs.FileMode) {
+	switch h.CreatorVersion >> 8 {
+	case creatorUnix, creatorMacOSX:
+		mode = unixModeToFileMode(h.ExternalAttrs >> 16)
+	case creatorNTFS, creatorVFAT, creatorFAT:
+		mode = msdosModeToFileMode(h.ExternalAttrs)
+	}
+	if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
+		mode |= fs.ModeDir
+	}
+	return mode
+}
+
+// SetMode changes the permission and mode bits for the FileHeader.
+func (h *FileHeader) SetMode(mode fs.FileMode) {
+	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
+	h.ExternalAttrs = fileModeToUnixMode(mode) << 16
+
+	// set MSDOS attributes too, as the original zip does.
+	if mode&fs.ModeDir != 0 {
+		h.ExternalAttrs |= msdosDir
+	}
+	if mode&0200 == 0 {
+		h.ExternalAttrs |= msdosReadOnly
+	}
+}
+
+// isZip64 reports whether the file size exceeds the 32 bit limit.
+func (h *FileHeader) isZip64() bool {
+	return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
+}
+
+func (f *FileHeader) hasDataDescriptor() bool {
+	return f.Flags&0x8 != 0
+}
+
+func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
+	if m&msdosDir != 0 {
+		mode = fs.ModeDir | 0777
+	} else {
+		mode = 0666
+	}
+	if m&msdosReadOnly != 0 {
+		mode &^= 0222
+	}
+	return mode
+}
+
+func fileModeToUnixMode(mode fs.FileMode) uint32 {
+	var m uint32
+	switch mode & fs.ModeType {
+	default:
+		m = s_IFREG
+	case fs.ModeDir:
+		m = s_IFDIR
+	case fs.ModeSymlink:
+		m = s_IFLNK
+	case fs.ModeNamedPipe:
+		m = s_IFIFO
+	case fs.ModeSocket:
+		m = s_IFSOCK
+	case fs.ModeDevice:
+		m = s_IFBLK
+	case fs.ModeDevice | fs.ModeCharDevice:
+		m = s_IFCHR
+	}
+	if mode&fs.ModeSetuid != 0 {
+		m |= s_ISUID
+	}
+	if mode&fs.ModeSetgid != 0 {
+		m |= s_ISGID
+	}
+	if mode&fs.ModeSticky != 0 {
+		m |= s_ISVTX
+	}
+	return m | uint32(mode&0777)
+}
+
+func unixModeToFileMode(m uint32) fs.FileMode {
+	mode := fs.FileMode(m & 0777)
+	switch m & s_IFMT {
+	case s_IFBLK:
+		mode |= fs.ModeDevice
+	case s_IFCHR:
+		mode |= fs.ModeDevice | fs.ModeCharDevice
+	case s_IFDIR:
+		mode |= fs.ModeDir
+	case s_IFIFO:
+		mode |= fs.ModeNamedPipe
+	case s_IFLNK:
+		mode |= fs.ModeSymlink
+	case s_IFREG:
+		// nothing to do
+	case s_IFSOCK:
+		mode |= fs.ModeSocket
+	}
+	if m&s_ISGID != 0 {
+		mode |= fs.ModeSetgid
+	}
+	if m&s_ISUID != 0 {
+		mode |= fs.ModeSetuid
+	}
+	if m&s_ISVTX != 0 {
+		mode |= fs.ModeSticky
+	}
+	return mode
+}
+
+// dataDescriptor holds the data descriptor that optionally follows the file
+// contents in the zip file.
+type dataDescriptor struct {
+	crc32            uint32
+	compressedSize   uint64
+	uncompressedSize uint64
+}
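
struct.go above spells out the ZIP64 contract: the 64-bit size fields are always correct, while the 32-bit fields saturate at 0xffffffff for entries that need ZIP64. A small sketch that reads header metadata through those fields and the fs.FileInfo adapter (again with "example.zip" as a placeholder) might be:

package main

import (
	"fmt"
	"log"

	"golang.org/x/website/internal/backport/archive/zip"
)

func main() {
	r, err := zip.OpenReader("example.zip") // placeholder archive
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	for _, f := range r.File {
		// Per the package comment, prefer the 64-bit sizes when reading;
		// the 32-bit CompressedSize/UncompressedSize may be 0xffffffff.
		fi := f.FileInfo()
		fmt.Printf("%-30s %10d bytes  %v  %v\n",
			f.Name, f.UncompressedSize64, fi.Mode(), f.Modified.UTC())
	}
}
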
diff --git a/internal/backport/archive/zip/testdata/crc32-not-streamed.zip b/internal/backport/archive/zip/testdata/crc32-not-streamed.zip
new file mode 100644
index 0000000..f268d88
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/crc32-not-streamed.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/dd.zip b/internal/backport/archive/zip/testdata/dd.zip
new file mode 100644
index 0000000..e53378b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/dd.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64 b/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64
new file mode 100644
index 0000000..1c2c071
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64
@@ -0,0 +1 @@
+UEsDBBQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAZm9vLnR4dFVUBQAD3lVZT3V4CwABBPUBAAAEFAAAAGZvbwqoZTJ+BAAAAAQAAABQSwMEFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGABiYXIudHh0VVQFAAPgVVlPdXgLAAEE9QEAAAQUAAAAYmFyCumzogQEAAAABAAAAFBLAQIUAxQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAAAAAAAAAAACkgQAAAABmb28udHh0VVQFAAPeVVlPdXgLAAEE9QEAAAQUAAAAUEsBAhQDFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGAAAAAAAAAAAAKSBTQAAAGJhci50eHRVVAUAA+BVWU91eAsAAQT1AQAABBQAAABQSwUGAAAAAAIAAgCaAAAAmgAAAAAA
diff --git a/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip b/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip
new file mode 100644
index 0000000..bcfe121
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/gophercolor16x16.png b/internal/backport/archive/zip/testdata/gophercolor16x16.png
new file mode 100644
index 0000000..48854ff
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/gophercolor16x16.png
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/readme.notzip b/internal/backport/archive/zip/testdata/readme.notzip
new file mode 100644
index 0000000..8173727
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/readme.notzip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/readme.zip b/internal/backport/archive/zip/testdata/readme.zip
new file mode 100644
index 0000000..5642a67
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/readme.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/subdir.zip b/internal/backport/archive/zip/testdata/subdir.zip
new file mode 100644
index 0000000..324d06b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/subdir.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/symlink.zip b/internal/backport/archive/zip/testdata/symlink.zip
new file mode 100644
index 0000000..af84693
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/symlink.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/test-trailing-junk.zip b/internal/backport/archive/zip/testdata/test-trailing-junk.zip
new file mode 100644
index 0000000..42281b4
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/test-trailing-junk.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/test.zip b/internal/backport/archive/zip/testdata/test.zip
new file mode 100644
index 0000000..03890c0
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/test.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-22738.zip b/internal/backport/archive/zip/testdata/time-22738.zip
new file mode 100644
index 0000000..eb85b57
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-22738.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-7zip.zip b/internal/backport/archive/zip/testdata/time-7zip.zip
new file mode 100644
index 0000000..4f74819
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-7zip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-go.zip b/internal/backport/archive/zip/testdata/time-go.zip
new file mode 100644
index 0000000..f008805
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-go.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-infozip.zip b/internal/backport/archive/zip/testdata/time-infozip.zip
new file mode 100644
index 0000000..8e63948
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-infozip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-osx.zip b/internal/backport/archive/zip/testdata/time-osx.zip
new file mode 100644
index 0000000..e82c5c2
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-osx.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-win7.zip b/internal/backport/archive/zip/testdata/time-win7.zip
new file mode 100644
index 0000000..8ba222b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-win7.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-winrar.zip b/internal/backport/archive/zip/testdata/time-winrar.zip
new file mode 100644
index 0000000..a8a19b0
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-winrar.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-winzip.zip b/internal/backport/archive/zip/testdata/time-winzip.zip
new file mode 100644
index 0000000..f6e8f8b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-winzip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/unix.zip b/internal/backport/archive/zip/testdata/unix.zip
new file mode 100644
index 0000000..ce1a981
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/unix.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-7zip.zip b/internal/backport/archive/zip/testdata/utf8-7zip.zip
new file mode 100644
index 0000000..0e97884
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-7zip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-infozip.zip b/internal/backport/archive/zip/testdata/utf8-infozip.zip
new file mode 100644
index 0000000..25a8926
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-infozip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-osx.zip b/internal/backport/archive/zip/testdata/utf8-osx.zip
new file mode 100644
index 0000000..9b0c058
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-osx.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-winrar.zip b/internal/backport/archive/zip/testdata/utf8-winrar.zip
new file mode 100644
index 0000000..4bad6c3
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-winrar.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-winzip.zip b/internal/backport/archive/zip/testdata/utf8-winzip.zip
new file mode 100644
index 0000000..909d52e
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-winzip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/winxp.zip b/internal/backport/archive/zip/testdata/winxp.zip
new file mode 100644
index 0000000..3919322
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/winxp.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/zip64-2.zip b/internal/backport/archive/zip/testdata/zip64-2.zip
new file mode 100644
index 0000000..f844e35
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/zip64-2.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/zip64.zip b/internal/backport/archive/zip/testdata/zip64.zip
new file mode 100644
index 0000000..a2ee1fa
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/zip64.zip
Binary files differ
diff --git a/internal/backport/archive/zip/writer.go b/internal/backport/archive/zip/writer.go
new file mode 100644
index 0000000..3b23cc3
--- /dev/null
+++ b/internal/backport/archive/zip/writer.go
@@ -0,0 +1,634 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"hash"
+	"hash/crc32"
+	"io"
+	"strings"
+	"unicode/utf8"
+)
+
+var (
+	errLongName  = errors.New("zip: FileHeader.Name too long")
+	errLongExtra = errors.New("zip: FileHeader.Extra too long")
+)
+
+// Writer implements a zip file writer.
+type Writer struct {
+	cw          *countWriter
+	dir         []*header
+	last        *fileWriter
+	closed      bool
+	compressors map[uint16]Compressor
+	comment     string
+
+	// testHookCloseSizeOffset if non-nil is called with the size
+	// and offset of the central directory at Close.
+	testHookCloseSizeOffset func(size, offset uint64)
+}
+
+type header struct {
+	*FileHeader
+	offset uint64
+	raw    bool
+}
+
+// NewWriter returns a new Writer writing a zip file to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
+}
+
+// SetOffset sets the offset of the beginning of the zip data within the
+// underlying writer. It should be used when the zip data is appended to an
+// existing file, such as a binary executable.
+// It must be called before any data is written.
+func (w *Writer) SetOffset(n int64) {
+	if w.cw.count != 0 {
+		panic("zip: SetOffset called after data was written")
+	}
+	w.cw.count = n
+}
+
+// Flush flushes any buffered data to the underlying writer.
+// Calling Flush is not normally necessary; calling Close is sufficient.
+func (w *Writer) Flush() error {
+	return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// SetComment sets the end-of-central-directory comment field.
+// It can only be called before Close.
+func (w *Writer) SetComment(comment string) error {
+	if len(comment) > uint16max {
+		return errors.New("zip: Writer.Comment too long")
+	}
+	w.comment = comment
+	return nil
+}
+
+// Close finishes writing the zip file by writing the central directory.
+// It does not close the underlying writer.
+func (w *Writer) Close() error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+		w.last = nil
+	}
+	if w.closed {
+		return errors.New("zip: writer closed twice")
+	}
+	w.closed = true
+
+	// write central directory
+	start := w.cw.count
+	for _, h := range w.dir {
+		var buf [directoryHeaderLen]byte
+		b := writeBuf(buf[:])
+		b.uint32(uint32(directoryHeaderSignature))
+		b.uint16(h.CreatorVersion)
+		b.uint16(h.ReaderVersion)
+		b.uint16(h.Flags)
+		b.uint16(h.Method)
+		b.uint16(h.ModifiedTime)
+		b.uint16(h.ModifiedDate)
+		b.uint32(h.CRC32)
+		if h.isZip64() || h.offset >= uint32max {
+			// the file needs a zip64 header. store maxint in both
+			// 32 bit size fields (and offset later) to signal that the
+			// zip64 extra header should be used.
+			b.uint32(uint32max) // compressed size
+			b.uint32(uint32max) // uncompressed size
+
+			// append a zip64 extra block to Extra
+			var buf [28]byte // 2x uint16 + 3x uint64
+			eb := writeBuf(buf[:])
+			eb.uint16(zip64ExtraID)
+			eb.uint16(24) // size = 3x uint64
+			eb.uint64(h.UncompressedSize64)
+			eb.uint64(h.CompressedSize64)
+			eb.uint64(h.offset)
+			h.Extra = append(h.Extra, buf[:]...)
+		} else {
+			b.uint32(h.CompressedSize)
+			b.uint32(h.UncompressedSize)
+		}
+
+		b.uint16(uint16(len(h.Name)))
+		b.uint16(uint16(len(h.Extra)))
+		b.uint16(uint16(len(h.Comment)))
+		b = b[4:] // skip disk number start and internal file attr (2x uint16)
+		b.uint32(h.ExternalAttrs)
+		if h.offset > uint32max {
+			b.uint32(uint32max)
+		} else {
+			b.uint32(uint32(h.offset))
+		}
+		if _, err := w.cw.Write(buf[:]); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(w.cw, h.Name); err != nil {
+			return err
+		}
+		if _, err := w.cw.Write(h.Extra); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(w.cw, h.Comment); err != nil {
+			return err
+		}
+	}
+	end := w.cw.count
+
+	records := uint64(len(w.dir))
+	size := uint64(end - start)
+	offset := uint64(start)
+
+	if f := w.testHookCloseSizeOffset; f != nil {
+		f(size, offset)
+	}
+
+	if records >= uint16max || size >= uint32max || offset >= uint32max {
+		var buf [directory64EndLen + directory64LocLen]byte
+		b := writeBuf(buf[:])
+
+		// zip64 end of central directory record
+		b.uint32(directory64EndSignature)
+		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
+		b.uint16(zipVersion45)           // version made by
+		b.uint16(zipVersion45)           // version needed to extract
+		b.uint32(0)                      // number of this disk
+		b.uint32(0)                      // number of the disk with the start of the central directory
+		b.uint64(records)                // total number of entries in the central directory on this disk
+		b.uint64(records)                // total number of entries in the central directory
+		b.uint64(size)                   // size of the central directory
+		b.uint64(offset)                 // offset of start of central directory with respect to the starting disk number
+
+		// zip64 end of central directory locator
+		b.uint32(directory64LocSignature)
+		b.uint32(0)           // number of the disk with the start of the zip64 end of central directory
+		b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
+		b.uint32(1)           // total number of disks
+
+		if _, err := w.cw.Write(buf[:]); err != nil {
+			return err
+		}
+
+		// store max values in the regular end record to signal
+		// that the zip64 values should be used instead
+		records = uint16max
+		size = uint32max
+		offset = uint32max
+	}
+
+	// write end record
+	var buf [directoryEndLen]byte
+	b := writeBuf(buf[:])
+	b.uint32(uint32(directoryEndSignature))
+	b = b[4:]                        // skip over disk number and first disk number (2x uint16)
+	b.uint16(uint16(records))        // number of entries this disk
+	b.uint16(uint16(records))        // number of entries total
+	b.uint32(uint32(size))           // size of directory
+	b.uint32(uint32(offset))         // start of directory
+	b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
+	if _, err := w.cw.Write(buf[:]); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w.cw, w.comment); err != nil {
+		return err
+	}
+
+	return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// Create adds a file to the zip file using the provided name.
+// It returns a Writer to which the file contents should be written.
+// The file contents will be compressed using the Deflate method.
+// The name must be a relative path: it must not start with a drive
+// letter (e.g. C:) or leading slash, and only forward slashes are
+// allowed. To create a directory instead of a file, add a trailing
+// slash to the name.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, CreateRaw, or Close.
+func (w *Writer) Create(name string) (io.Writer, error) {
+	header := &FileHeader{
+		Name:   name,
+		Method: Deflate,
+	}
+	return w.CreateHeader(header)
+}
+
+// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
+// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
+// or any other common encoding).
+func detectUTF8(s string) (valid, require bool) {
+	for i := 0; i < len(s); {
+		r, size := utf8.DecodeRuneInString(s[i:])
+		i += size
+		// Officially, ZIP uses CP-437, but many readers use the system's
+		// local character encoding. Most encodings are compatible with a large
+		// subset of CP-437, which itself is ASCII-like.
+		//
+		// Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
+		// characters with localized currency and overline characters.
+		if r < 0x20 || r > 0x7d || r == 0x5c {
+			if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
+				return false, false
+			}
+			require = true
+		}
+	}
+	return true, require
+}
+
+// prepare performs the bookkeeping operations required at the start of
+// CreateHeader and CreateRaw.
+func (w *Writer) prepare(fh *FileHeader) error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+	}
+	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 for the confusion this causes.
+		return errors.New("archive/zip: invalid duplicate FileHeader")
+	}
+	return nil
+}
+
+// CreateHeader adds a file to the zip archive using the provided FileHeader
+// for the file metadata. Writer takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling CreateHeader.
+//
+// This returns a Writer to which the file contents should be written.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, CreateRaw, or Close.
+func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
+	if err := w.prepare(fh); err != nil {
+		return nil, err
+	}
+
+	// The ZIP format has a sad state of affairs regarding character encoding.
+	// Officially, the name and comment fields are supposed to be encoded
+	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+	// flag bit is set. However, there are several problems:
+	//
+	//	* Many ZIP readers still do not support UTF-8.
+	//	* If the UTF-8 flag is cleared, several readers simply interpret the
+	//	name and comment fields as whatever the local system encoding is.
+	//
+	// In order to avoid breaking readers without UTF-8 support,
+	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+	// However, if the strings require multibyte UTF-8 encoding and are
+	// valid UTF-8 strings, then we set the UTF-8 bit.
+	//
+	// For the case where the user explicitly wants to specify the encoding
+	// as UTF-8, they will need to set the flag bit themselves.
+	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+	switch {
+	case fh.NonUTF8:
+		fh.Flags &^= 0x800
+	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+		fh.Flags |= 0x800
+	}
+
+	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
+	fh.ReaderVersion = zipVersion20
+
+	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
+	if !fh.Modified.IsZero() {
+		// Contrary to the FileHeader.SetModTime method, we intentionally
+		// do not convert to UTC, because we assume the user intends to encode
+		// the date using the specified timezone. A user may want this control
+		// because many legacy ZIP readers interpret the timestamp according
+		// to the local timezone.
+		//
+		// The timezone is only non-UTC if the user sets the Modified
+		// field directly. All other approaches set UTC.
+		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+		// Use "extended timestamp" format since this is what Info-ZIP uses.
+		// Nearly every major ZIP implementation uses a different format,
+		// but at least most seem to be able to understand the other formats.
+		//
+		// This format happens to be identical for both local and central header
+		// if modification time is the only timestamp being encoded.
+		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+		mt := uint32(fh.Modified.Unix())
+		eb := writeBuf(mbuf[:])
+		eb.uint16(extTimeExtraID)
+		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
+		eb.uint8(1)   // Flags: ModTime
+		eb.uint32(mt) // ModTime
+		fh.Extra = append(fh.Extra, mbuf[:]...)
+	}
+
+	var (
+		ow io.Writer
+		fw *fileWriter
+	)
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(w.cw.count),
+	}
+
+	if strings.HasSuffix(fh.Name, "/") {
+		// Set the compression method to Store to ensure data length is truly zero,
+		// which the writeHeader method always encodes for the size fields.
+		// This is necessary as most compression formats have non-zero lengths
+		// even when compressing an empty string.
+		fh.Method = Store
+		fh.Flags &^= 0x8 // we will not write a data descriptor
+
+		// Explicitly clear sizes as they have no meaning for directories.
+		fh.CompressedSize = 0
+		fh.CompressedSize64 = 0
+		fh.UncompressedSize = 0
+		fh.UncompressedSize64 = 0
+
+		ow = dirWriter{}
+	} else {
+		fh.Flags |= 0x8 // we will write a data descriptor
+
+		fw = &fileWriter{
+			zipw:      w.cw,
+			compCount: &countWriter{w: w.cw},
+			crc32:     crc32.NewIEEE(),
+		}
+		comp := w.compressor(fh.Method)
+		if comp == nil {
+			return nil, ErrAlgorithm
+		}
+		var err error
+		fw.comp, err = comp(fw.compCount)
+		if err != nil {
+			return nil, err
+		}
+		fw.rawCount = &countWriter{w: fw.comp}
+		fw.header = h
+		ow = fw
+	}
+	w.dir = append(w.dir, h)
+	if err := writeHeader(w.cw, h); err != nil {
+		return nil, err
+	}
+	// If we're creating a directory, fw is nil.
+	w.last = fw
+	return ow, nil
+}
+
+func writeHeader(w io.Writer, h *header) error {
+	const maxUint16 = 1<<16 - 1
+	if len(h.Name) > maxUint16 {
+		return errLongName
+	}
+	if len(h.Extra) > maxUint16 {
+		return errLongExtra
+	}
+
+	var buf [fileHeaderLen]byte
+	b := writeBuf(buf[:])
+	b.uint32(uint32(fileHeaderSignature))
+	b.uint16(h.ReaderVersion)
+	b.uint16(h.Flags)
+	b.uint16(h.Method)
+	b.uint16(h.ModifiedTime)
+	b.uint16(h.ModifiedDate)
+	// In raw mode (caller does the compression), the values are either
+	// written here or in the trailing data descriptor based on the header
+	// flags.
+	if h.raw && !h.hasDataDescriptor() {
+		b.uint32(h.CRC32)
+		b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
+		b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
+	} else {
+		// When this package handles the compression, these values are
+		// always written to the trailing data descriptor.
+		b.uint32(0) // crc32
+		b.uint32(0) // compressed size
+		b.uint32(0) // uncompressed size
+	}
+	b.uint16(uint16(len(h.Name)))
+	b.uint16(uint16(len(h.Extra)))
+	if _, err := w.Write(buf[:]); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w, h.Name); err != nil {
+		return err
+	}
+	_, err := w.Write(h.Extra)
+	return err
+}
+
+func min64(x, y uint64) uint64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// CreateRaw adds a file to the zip archive using the provided FileHeader and
+// returns a Writer to which the file contents should be written. The file's
+// contents must be written to the io.Writer before the next call to Create,
+// CreateHeader, CreateRaw, or Close.
+//
+// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
+func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
+	if err := w.prepare(fh); err != nil {
+		return nil, err
+	}
+
+	fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
+	fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
+
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(w.cw.count),
+		raw:        true,
+	}
+	w.dir = append(w.dir, h)
+	if err := writeHeader(w.cw, h); err != nil {
+		return nil, err
+	}
+
+	if strings.HasSuffix(fh.Name, "/") {
+		w.last = nil
+		return dirWriter{}, nil
+	}
+
+	fw := &fileWriter{
+		header: h,
+		zipw:   w.cw,
+	}
+	w.last = fw
+	return fw, nil
+}
+
+// Copy copies the file f (obtained from a Reader) into w. It copies the raw
+// form directly bypassing decompression, compression, and validation.
+func (w *Writer) Copy(f *File) error {
+	r, err := f.OpenRaw()
+	if err != nil {
+		return err
+	}
+	fw, err := w.CreateRaw(&f.FileHeader)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(fw, r)
+	return err
+}
+
+// RegisterCompressor registers or overrides a custom compressor for a specific
+// method ID. If a compressor for a given method is not found, Writer will
+// default to looking up the compressor at the package level.
+func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
+	if w.compressors == nil {
+		w.compressors = make(map[uint16]Compressor)
+	}
+	w.compressors[method] = comp
+}
+
+func (w *Writer) compressor(method uint16) Compressor {
+	comp := w.compressors[method]
+	if comp == nil {
+		comp = compressor(method)
+	}
+	return comp
+}
+
+type dirWriter struct{}
+
+func (dirWriter) Write(b []byte) (int, error) {
+	if len(b) == 0 {
+		return 0, nil
+	}
+	return 0, errors.New("zip: write to directory")
+}
+
+type fileWriter struct {
+	*header
+	zipw      io.Writer
+	rawCount  *countWriter
+	comp      io.WriteCloser
+	compCount *countWriter
+	crc32     hash.Hash32
+	closed    bool
+}
+
+func (w *fileWriter) Write(p []byte) (int, error) {
+	if w.closed {
+		return 0, errors.New("zip: write to closed file")
+	}
+	if w.raw {
+		return w.zipw.Write(p)
+	}
+	w.crc32.Write(p)
+	return w.rawCount.Write(p)
+}
+
+func (w *fileWriter) close() error {
+	if w.closed {
+		return errors.New("zip: file closed twice")
+	}
+	w.closed = true
+	if w.raw {
+		return w.writeDataDescriptor()
+	}
+	if err := w.comp.Close(); err != nil {
+		return err
+	}
+
+	// update FileHeader
+	fh := w.header.FileHeader
+	fh.CRC32 = w.crc32.Sum32()
+	fh.CompressedSize64 = uint64(w.compCount.count)
+	fh.UncompressedSize64 = uint64(w.rawCount.count)
+
+	if fh.isZip64() {
+		fh.CompressedSize = uint32max
+		fh.UncompressedSize = uint32max
+		fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
+	} else {
+		fh.CompressedSize = uint32(fh.CompressedSize64)
+		fh.UncompressedSize = uint32(fh.UncompressedSize64)
+	}
+
+	return w.writeDataDescriptor()
+}
+
+func (w *fileWriter) writeDataDescriptor() error {
+	if !w.hasDataDescriptor() {
+		return nil
+	}
+	// Write data descriptor. This is more complicated than one would
+	// think; see e.g. the comments in zipfile.c:putextended() and
+	// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
+	// The approach here is to write 8 byte sizes if needed without
+	// adding a zip64 extra in the local header (too late anyway).
+	var buf []byte
+	if w.isZip64() {
+		buf = make([]byte, dataDescriptor64Len)
+	} else {
+		buf = make([]byte, dataDescriptorLen)
+	}
+	b := writeBuf(buf)
+	b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
+	b.uint32(w.CRC32)
+	if w.isZip64() {
+		b.uint64(w.CompressedSize64)
+		b.uint64(w.UncompressedSize64)
+	} else {
+		b.uint32(w.CompressedSize)
+		b.uint32(w.UncompressedSize)
+	}
+	_, err := w.zipw.Write(buf)
+	return err
+}
+
+type countWriter struct {
+	w     io.Writer
+	count int64
+}
+
+func (w *countWriter) Write(p []byte) (int, error) {
+	n, err := w.w.Write(p)
+	w.count += int64(n)
+	return n, err
+}
+
+type nopCloser struct {
+	io.Writer
+}
+
+func (w nopCloser) Close() error {
+	return nil
+}
+
+type writeBuf []byte
+
+func (b *writeBuf) uint8(v uint8) {
+	(*b)[0] = v
+	*b = (*b)[1:]
+}
+
+func (b *writeBuf) uint16(v uint16) {
+	binary.LittleEndian.PutUint16(*b, v)
+	*b = (*b)[2:]
+}
+
+func (b *writeBuf) uint32(v uint32) {
+	binary.LittleEndian.PutUint32(*b, v)
+	*b = (*b)[4:]
+}
+
+func (b *writeBuf) uint64(v uint64) {
+	binary.LittleEndian.PutUint64(*b, v)
+	*b = (*b)[8:]
+}
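
writer.go above includes the newer CreateRaw and Copy entry points alongside CreateHeader. Copy moves an entry's already-compressed bytes (via File.OpenRaw) straight into the output, so an archive can be repacked without a decompress/recompress round trip; a hedged sketch with placeholder file names:

package main

import (
	"log"
	"os"

	"golang.org/x/website/internal/backport/archive/zip"
)

// repack copies every entry of src into a new archive at dst using
// Writer.Copy, so the compressed data is passed through unchanged.
func repack(src, dst string) error {
	r, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer r.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	w := zip.NewWriter(out)
	for _, f := range r.File {
		if err := w.Copy(f); err != nil {
			return err
		}
	}
	// Close writes the central directory; it does not close out.
	return w.Close()
}

func main() {
	if err := repack("in.zip", "out.zip"); err != nil { // placeholder names
		log.Fatal(err)
	}
}
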
diff --git a/internal/backport/archive/zip/writer_test.go b/internal/backport/archive/zip/writer_test.go
new file mode 100644
index 0000000..f001529
--- /dev/null
+++ b/internal/backport/archive/zip/writer_test.go
@@ -0,0 +1,605 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bytes"
+	"compress/flate"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// TODO(adg): a more sophisticated test suite
+
+type WriteTest struct {
+	Name   string
+	Data   []byte
+	Method uint16
+	Mode   fs.FileMode
+}
+
+var writeTests = []WriteTest{
+	{
+		Name:   "foo",
+		Data:   []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."),
+		Method: Store,
+		Mode:   0666,
+	},
+	{
+		Name:   "bar",
+		Data:   nil, // large data set in the test
+		Method: Deflate,
+		Mode:   0644,
+	},
+	{
+		Name:   "setuid",
+		Data:   []byte("setuid file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSetuid,
+	},
+	{
+		Name:   "setgid",
+		Data:   []byte("setgid file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSetgid,
+	},
+	{
+		Name:   "symlink",
+		Data:   []byte("../link/target"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSymlink,
+	},
+	{
+		Name:   "device",
+		Data:   []byte("device file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeDevice,
+	},
+	{
+		Name:   "chardevice",
+		Data:   []byte("char device file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeDevice | fs.ModeCharDevice,
+	},
+}
+
+func TestWriter(t *testing.T) {
+	largeData := make([]byte, 1<<17)
+	if _, err := rand.Read(largeData); err != nil {
+		t.Fatal("rand.Read failed:", err)
+	}
+	writeTests[1].Data = largeData
+	defer func() {
+		writeTests[1].Data = nil
+	}()
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+// TestWriterComment tests EOCD comment read/write.
+func TestWriterComment(t *testing.T) {
+	var tests = []struct {
+		comment string
+		ok      bool
+	}{
+		{"hi, hello", true},
+		{"hi, こんにちわ", true},
+		{strings.Repeat("a", uint16max), true},
+		{strings.Repeat("a", uint16max+1), false},
+	}
+
+	for _, test := range tests {
+		// write a zip file
+		buf := new(bytes.Buffer)
+		w := NewWriter(buf)
+		if err := w.SetComment(test.comment); err != nil {
+			if test.ok {
+				t.Fatalf("SetComment: unexpected error %v", err)
+			}
+			continue
+		} else {
+			if !test.ok {
+				t.Fatalf("SetComment: unexpected success, want error")
+			}
+		}
+
+		if err := w.Close(); test.ok == (err != nil) {
+			t.Fatal(err)
+		}
+
+		if w.closed != test.ok {
+			t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
+		}
+
+		// skip read test in failure cases
+		if !test.ok {
+			continue
+		}
+
+		// read it back
+		r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+		if err != nil {
+			t.Fatal(err)
+		}
+		if r.Comment != test.comment {
+			t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
+		}
+	}
+}
+
+func TestWriterUTF8(t *testing.T) {
+	var utf8Tests = []struct {
+		name    string
+		comment string
+		nonUTF8 bool
+		flags   uint16
+	}{
+		{
+			name:    "hi, hello",
+			comment: "in the world",
+			flags:   0x8,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the world",
+			flags:   0x808,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the world",
+			nonUTF8: true,
+			flags:   0x8,
+		},
+		{
+			name:    "hi, hello",
+			comment: "in the 世界",
+			flags:   0x808,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the 世界",
+			flags:   0x808,
+		},
+		{
+			name:    "the replacement rune is �",
+			comment: "the replacement rune is �",
+			flags:   0x808,
+		},
+		{
+			// Name is Japanese encoded in Shift JIS.
+			name:    "\x93\xfa\x96{\x8c\xea.txt",
+			comment: "in the 世界",
+			flags:   0x008, // UTF-8 must not be set
+		},
+	}
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, test := range utf8Tests {
+		h := &FileHeader{
+			Name:    test.name,
+			Comment: test.comment,
+			NonUTF8: test.nonUTF8,
+			Method:  Deflate,
+		}
+		w, err := w.CreateHeader(h)
+		if err != nil {
+			t.Fatal(err)
+		}
+		w.Write([]byte{})
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, test := range utf8Tests {
+		flags := r.File[i].Flags
+		if flags != test.flags {
+			t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
+		}
+	}
+}
+
+func TestWriterTime(t *testing.T) {
+	var buf bytes.Buffer
+	h := &FileHeader{
+		Name:     "test.txt",
+		Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+	}
+	w := NewWriter(&buf)
+	if _, err := w.CreateHeader(h); err != nil {
+		t.Fatalf("unexpected CreateHeader error: %v", err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("unexpected Close error: %v", err)
+	}
+
+	want, err := ioutil.ReadFile("testdata/time-go.zip")
+	if err != nil {
+		t.Fatalf("unexpected ReadFile error: %v", err)
+	}
+	if got := buf.Bytes(); !bytes.Equal(got, want) {
+		fmt.Printf("%x\n%x\n", got, want)
+		t.Error("contents of time-go.zip differ")
+	}
+}
+
+func TestWriterOffset(t *testing.T) {
+	largeData := make([]byte, 1<<17)
+	if _, err := rand.Read(largeData); err != nil {
+		t.Fatal("rand.Read failed:", err)
+	}
+	writeTests[1].Data = largeData
+	defer func() {
+		writeTests[1].Data = nil
+	}()
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
+	n, _ := buf.Write(existingData)
+	w := NewWriter(buf)
+	w.SetOffset(int64(n))
+
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+func TestWriterFlush(t *testing.T) {
+	var buf bytes.Buffer
+	w := NewWriter(struct{ io.Writer }{&buf})
+	_, err := w.Create("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if buf.Len() > 0 {
+		t.Fatalf("Unexpected %d bytes already in buffer", buf.Len())
+	}
+	if err := w.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	if buf.Len() == 0 {
+		t.Fatal("No bytes written after Flush")
+	}
+}
+
+func TestWriterDir(t *testing.T) {
+	w := NewWriter(ioutil.Discard)
+	dw, err := w.Create("dir/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := dw.Write(nil); err != nil {
+		t.Errorf("Write(nil) to directory: got %v, want nil", err)
+	}
+	if _, err := dw.Write([]byte("hello")); err == nil {
+		t.Error(`Write("hello") to directory: got nil error, want non-nil`)
+	}
+}
+
+func TestWriterDirAttributes(t *testing.T) {
+	var buf bytes.Buffer
+	w := NewWriter(&buf)
+	if _, err := w.CreateHeader(&FileHeader{
+		Name:               "dir/",
+		Method:             Deflate,
+		CompressedSize64:   1234,
+		UncompressedSize64: 5678,
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+	b := buf.Bytes()
+
+	var sig [4]byte
+	binary.LittleEndian.PutUint32(sig[:], uint32(fileHeaderSignature))
+
+	idx := bytes.Index(b, sig[:])
+	if idx == -1 {
+		t.Fatal("file header not found")
+	}
+	b = b[idx:]
+
+	if !bytes.Equal(b[6:10], []byte{0, 0, 0, 0}) { // FileHeader.Flags: 0, FileHeader.Method: 0
+		t.Errorf("unexpected method and flags: %v", b[6:10])
+	}
+
+	if !bytes.Equal(b[14:26], make([]byte, 12)) { // FileHeader.{CRC32,CompressedSize,UncompressedSize} all zero.
+		t.Errorf("expected CRC32, compressed size, and uncompressed size to be 0; got: %v", b[14:26])
+	}
+
+	binary.LittleEndian.PutUint32(sig[:], uint32(dataDescriptorSignature))
+	if bytes.Index(b, sig[:]) != -1 {
+		t.Error("there should be no data descriptor")
+	}
+}
+
+func TestWriterCopy(t *testing.T) {
+	// make a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, src.File[i], &wt)
+	}
+
+	// make a new zip file copying the old compressed data.
+	buf2 := new(bytes.Buffer)
+	dst := NewWriter(buf2)
+	for _, f := range src.File {
+		if err := dst.Copy(f); err != nil {
+			t.Fatal(err)
+		}
+	}
+	if err := dst.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read the new one back
+	r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+func TestWriterCreateRaw(t *testing.T) {
+	files := []struct {
+		name             string
+		content          []byte
+		method           uint16
+		flags            uint16
+		crc32            uint32
+		uncompressedSize uint64
+		compressedSize   uint64
+	}{
+		{
+			name:    "small store w desc",
+			content: []byte("gophers"),
+			method:  Store,
+			flags:   0x8,
+		},
+		{
+			name:    "small deflate wo desc",
+			content: bytes.Repeat([]byte("abcdefg"), 2048),
+			method:  Deflate,
+		},
+	}
+
+	// write a zip file
+	archive := new(bytes.Buffer)
+	w := NewWriter(archive)
+
+	for i := range files {
+		f := &files[i]
+		f.crc32 = crc32.ChecksumIEEE(f.content)
+		size := uint64(len(f.content))
+		f.uncompressedSize = size
+		f.compressedSize = size
+
+		var compressedContent []byte
+		if f.method == Deflate {
+			var buf bytes.Buffer
+			w, err := flate.NewWriter(&buf, flate.BestSpeed)
+			if err != nil {
+				t.Fatalf("flate.NewWriter err = %v", err)
+			}
+			_, err = w.Write(f.content)
+			if err != nil {
+				t.Fatalf("flate Write err = %v", err)
+			}
+			err = w.Close()
+			if err != nil {
+				t.Fatalf("flate Writer.Close err = %v", err)
+			}
+			compressedContent = buf.Bytes()
+			f.compressedSize = uint64(len(compressedContent))
+		}
+
+		h := &FileHeader{
+			Name:               f.name,
+			Method:             f.method,
+			Flags:              f.flags,
+			CRC32:              f.crc32,
+			CompressedSize64:   f.compressedSize,
+			UncompressedSize64: f.uncompressedSize,
+		}
+		w, err := w.CreateRaw(h)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if compressedContent != nil {
+			_, err = w.Write(compressedContent)
+		} else {
+			_, err = w.Write(f.content)
+		}
+		if err != nil {
+			t.Fatalf("%s Write got %v; want nil", f.name, err)
+		}
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, want := range files {
+		got := r.File[i]
+		if got.Name != want.name {
+			t.Errorf("got Name %s; want %s", got.Name, want.name)
+		}
+		if got.Method != want.method {
+			t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method)
+		}
+		if got.Flags != want.flags {
+			t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags)
+		}
+		if got.CRC32 != want.crc32 {
+			t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32)
+		}
+		if got.CompressedSize64 != want.compressedSize {
+			t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize)
+		}
+		if got.UncompressedSize64 != want.uncompressedSize {
+			t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize)
+		}
+
+		r, err := got.Open()
+		if err != nil {
+			t.Errorf("%s: Open err = %v", got.Name, err)
+			continue
+		}
+
+		buf, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Errorf("%s: ReadAll err = %v", got.Name, err)
+			continue
+		}
+
+		if !bytes.Equal(buf, want.content) {
+			t.Errorf("%v: ReadAll returned unexpected bytes", got.Name)
+		}
+	}
+}
+
+func testCreate(t *testing.T, w *Writer, wt *WriteTest) {
+	header := &FileHeader{
+		Name:   wt.Name,
+		Method: wt.Method,
+	}
+	if wt.Mode != 0 {
+		header.SetMode(wt.Mode)
+	}
+	f, err := w.CreateHeader(header)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = f.Write(wt.Data)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testReadFile(t *testing.T, f *File, wt *WriteTest) {
+	if f.Name != wt.Name {
+		t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
+	}
+	testFileMode(t, f, wt.Mode)
+	rc, err := f.Open()
+	if err != nil {
+		t.Fatalf("opening %s: %v", f.Name, err)
+	}
+	b, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatalf("reading %s: %v", f.Name, err)
+	}
+	err = rc.Close()
+	if err != nil {
+		t.Fatalf("closing %s: %v", f.Name, err)
+	}
+	if !bytes.Equal(b, wt.Data) {
+		t.Errorf("File contents %q, want %q", b, wt.Data)
+	}
+}
+
+func BenchmarkCompressedZipGarbage(b *testing.B) {
+	bigBuf := bytes.Repeat([]byte("a"), 1<<20)
+
+	runOnce := func(buf *bytes.Buffer) {
+		buf.Reset()
+		zw := NewWriter(buf)
+		for j := 0; j < 3; j++ {
+			w, _ := zw.CreateHeader(&FileHeader{
+				Name:   "foo",
+				Method: Deflate,
+			})
+			w.Write(bigBuf)
+		}
+		zw.Close()
+	}
+
+	b.ReportAllocs()
+	// Run once and then reset the timer.
+	// This effectively discards the very large initial flate setup cost,
+	// as well as the initialization of bigBuf.
+	runOnce(&bytes.Buffer{})
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		var buf bytes.Buffer
+		for pb.Next() {
+			runOnce(&buf)
+		}
+	})
+}
diff --git a/internal/backport/archive/zip/zip_test.go b/internal/backport/archive/zip/zip_test.go
new file mode 100644
index 0000000..17f5331
--- /dev/null
+++ b/internal/backport/archive/zip/zip_test.go
@@ -0,0 +1,625 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that involve both reading and writing.
+
+package zip
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestModTime(t *testing.T) {
+	var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC)
+	fh := new(FileHeader)
+	fh.SetModTime(testTime)
+	outTime := fh.ModTime()
+	if !outTime.Equal(testTime) {
+		t.Errorf("times don't match: got %s, want %s", outTime, testTime)
+	}
+}
+
+func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) {
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.Name, fh.Name; got != want {
+		t.Errorf("Name: got %s, want %s\n", got, want)
+	}
+	if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want {
+		t.Errorf("UncompressedSize: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want {
+		t.Errorf("UncompressedSize64: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want {
+		t.Errorf("ModifiedTime: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want {
+		t.Errorf("ModifiedDate: got %d, want %d\n", got, want)
+	}
+
+	if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh {
+		t.Errorf("Sys didn't return original *FileHeader")
+	}
+}
+
+func TestFileHeaderRoundTrip(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t)
+}
+
+func TestFileHeaderRoundTrip64(t *testing.T) {
+	fh := &FileHeader{
+		Name:               "foo.txt",
+		UncompressedSize64: 9876543210,
+		ModifiedTime:       1234,
+		ModifiedDate:       5678,
+	}
+	testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
+}
+
+func TestFileHeaderRoundTripModified(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		Modified:         time.Now().Local(),
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.Modified, fh.Modified.UTC(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+	if got, want := fi.ModTime(), fh.Modified.UTC(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+}
+
+func TestFileHeaderRoundTripWithoutModified(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.ModTime(), fh.ModTime(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+	if got, want := fi.ModTime(), fh.ModTime(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+}
+
+type repeatedByte struct {
+	off int64
+	b   byte
+	n   int64
+}
+
+// rleBuffer is a run-length-encoded byte buffer.
+// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
+// allowing random-access reads.
+type rleBuffer struct {
+	buf []repeatedByte
+}
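+
+// For example (illustrative): writing "aaab" to an empty rleBuffer leaves buf
+// equal to []repeatedByte{{off: 0, b: 'a', n: 3}, {off: 3, b: 'b', n: 1}}.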
+
+func (r *rleBuffer) Size() int64 {
+	if len(r.buf) == 0 {
+		return 0
+	}
+	last := &r.buf[len(r.buf)-1]
+	return last.off + last.n
+}
+
+func (r *rleBuffer) Write(p []byte) (n int, err error) {
+	var rp *repeatedByte
+	if len(r.buf) > 0 {
+		rp = &r.buf[len(r.buf)-1]
+		// Fast path, if p is entirely the same byte repeated.
+		if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
+			if bytes.Count(p, []byte{lastByte}) == len(p) {
+				rp.n += int64(len(p))
+				return len(p), nil
+			}
+		}
+	}
+
+	for _, b := range p {
+		if rp == nil || rp.b != b {
+			r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
+			rp = &r.buf[len(r.buf)-1]
+		} else {
+			rp.n++
+		}
+	}
+	return len(p), nil
+}
+
+func min(x, y int64) int64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func memset(a []byte, b byte) {
+	if len(a) == 0 {
+		return
+	}
+	// Double, until we reach power of 2 >= len(a), same as bytes.Repeat,
+	// but without allocation.
+	a[0] = b
+	for i, l := 1, len(a); i < l; i *= 2 {
+		copy(a[i:], a[:i])
+	}
+}
+
+func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
+	if len(p) == 0 {
+		return
+	}
+	skipParts := sort.Search(len(r.buf), func(i int) bool {
+		part := &r.buf[i]
+		return part.off+part.n > off
+	})
+	parts := r.buf[skipParts:]
+	if len(parts) > 0 {
+		skipBytes := off - parts[0].off
+		for _, part := range parts {
+			repeat := int(min(part.n-skipBytes, int64(len(p)-n)))
+			memset(p[n:n+repeat], part.b)
+			n += repeat
+			if n == len(p) {
+				return
+			}
+			skipBytes = 0
+		}
+	}
+	if n != len(p) {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// Just testing the rleBuffer used in the Zip64 test below. Not used by the zip code.
+func TestRLEBuffer(t *testing.T) {
+	b := new(rleBuffer)
+	var all []byte
+	writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
+	for _, w := range writes {
+		b.Write([]byte(w))
+		all = append(all, w...)
+	}
+	if len(b.buf) != 10 {
+		t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
+	}
+
+	for i := 0; i < len(all); i++ {
+		for j := 0; j < len(all)-i; j++ {
+			buf := make([]byte, j)
+			n, err := b.ReadAt(buf, int64(i))
+			if err != nil || n != len(buf) {
+				t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
+			}
+			if !bytes.Equal(buf, all[i:i+j]) {
+				t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
+			}
+		}
+	}
+}
+
+// fakeHash32 is a dummy Hash32 that always returns 0.
+type fakeHash32 struct {
+	hash.Hash32
+}
+
+func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
+func (fakeHash32) Sum32() uint32               { return 0 }
+
+// suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
+// to 'keep' bytes of data written to it. Call Suffix to get the
+// suffix bytes.
+type suffixSaver struct {
+	keep  int
+	buf   []byte
+	start int
+	size  int64
+}
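+
+// For example (illustrative): with keep = 4, after Write([]byte("abcdef")),
+// Suffix returns "cdef", Size returns 6, and ReadAt at offsets 0 and 1
+// returns errDiscardedBytes because those bytes have been discarded.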
+
+func (ss *suffixSaver) Size() int64 { return ss.size }
+
+var errDiscardedBytes = errors.New("ReadAt of discarded bytes")
+
+func (ss *suffixSaver) ReadAt(p []byte, off int64) (n int, err error) {
+	back := ss.size - off
+	if back > int64(ss.keep) {
+		return 0, errDiscardedBytes
+	}
+	suf := ss.Suffix()
+	n = copy(p, suf[len(suf)-int(back):])
+	if n != len(p) {
+		err = io.EOF
+	}
+	return
+}
+
+func (ss *suffixSaver) Suffix() []byte {
+	if len(ss.buf) < ss.keep {
+		return ss.buf
+	}
+	buf := make([]byte, ss.keep)
+	n := copy(buf, ss.buf[ss.start:])
+	copy(buf[n:], ss.buf[:])
+	return buf
+}
+
+func (ss *suffixSaver) Write(p []byte) (n int, err error) {
+	n = len(p)
+	ss.size += int64(len(p))
+	if len(ss.buf) < ss.keep {
+		space := ss.keep - len(ss.buf)
+		add := len(p)
+		if add > space {
+			add = space
+		}
+		ss.buf = append(ss.buf, p[:add]...)
+		p = p[add:]
+	}
+	for len(p) > 0 {
+		n := copy(ss.buf[ss.start:], p)
+		p = p[n:]
+		ss.start += n
+		if ss.start == ss.keep {
+			ss.start = 0
+		}
+	}
+	return
+}
+
+// generatesZip64 reports whether f wrote a zip64 file.
+// f is also responsible for closing w.
+func generatesZip64(t *testing.T, f func(w *Writer)) bool {
+	ss := &suffixSaver{keep: 10 << 20}
+	w := NewWriter(ss)
+	f(w)
+	return suffixIsZip64(t, ss)
+}
+
+type sizedReaderAt interface {
+	io.ReaderAt
+	Size() int64
+}
+
+func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
+	d := make([]byte, 1024)
+	if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
+		t.Fatalf("ReadAt: %v", err)
+	}
+
+	sigOff := findSignatureInBlock(d)
+	if sigOff == -1 {
+		t.Errorf("failed to find signature in block")
+		return false
+	}
+
+	dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
+	if err != nil {
+		t.Fatalf("findDirectory64End: %v", err)
+	}
+	if dirOff == -1 {
+		return false
+	}
+
+	d = make([]byte, directory64EndLen)
+	if _, err := zip.ReadAt(d, dirOff); err != nil {
+		t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
+	}
+
+	b := readBuf(d)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		return false
+	}
+
+	size := b.uint64()
+	if size != directory64EndLen-12 {
+		t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
+	}
+	return true
+}
+
+func testZip64(t testing.TB, size int64) *rleBuffer {
+	const chunkSize = 1024
+	chunks := int(size / chunkSize)
+	// write size bytes plus "END\n" to a zip file
+	buf := new(rleBuffer)
+	w := NewWriter(buf)
+	f, err := w.CreateHeader(&FileHeader{
+		Name:   "huge.txt",
+		Method: Store,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.(*fileWriter).crc32 = fakeHash32{}
+	chunk := make([]byte, chunkSize)
+	for i := range chunk {
+		chunk[i] = '.'
+	}
+	for i := 0; i < chunks; i++ {
+		_, err := f.Write(chunk)
+		if err != nil {
+			t.Fatal("write chunk:", err)
+		}
+	}
+	if frag := int(size % chunkSize); frag > 0 {
+		_, err := f.Write(chunk[:frag])
+		if err != nil {
+			t.Fatal("write chunk:", err)
+		}
+	}
+	end := []byte("END\n")
+	_, err = f.Write(end)
+	if err != nil {
+		t.Fatal("write end:", err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read back zip file and check that we get to the end of it
+	r, err := NewReader(buf, int64(buf.Size()))
+	if err != nil {
+		t.Fatal("reader:", err)
+	}
+	f0 := r.File[0]
+	rc, err := f0.Open()
+	if err != nil {
+		t.Fatal("opening:", err)
+	}
+	rc.(*checksumReader).hash = fakeHash32{}
+	for i := 0; i < chunks; i++ {
+		_, err := io.ReadFull(rc, chunk)
+		if err != nil {
+			t.Fatal("read:", err)
+		}
+	}
+	if frag := int(size % chunkSize); frag > 0 {
+		_, err := io.ReadFull(rc, chunk[:frag])
+		if err != nil {
+			t.Fatal("read:", err)
+		}
+	}
+	gotEnd, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatal("read end:", err)
+	}
+	if !bytes.Equal(gotEnd, end) {
+		t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
+	}
+	err = rc.Close()
+	if err != nil {
+		t.Fatal("closing:", err)
+	}
+	if size+int64(len("END\n")) >= 1<<32-1 {
+		if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
+			t.Errorf("UncompressedSize %#x, want %#x", got, want)
+		}
+	}
+
+	if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
+		t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
+	}
+
+	return buf
+}
+
+// Issue 9857
+func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
+	if !suffixIsZip64(t, buf) {
+		t.Fatal("not a zip64")
+	}
+}
+
+func testValidHeader(h *FileHeader, t *testing.T) {
+	var buf bytes.Buffer
+	z := NewWriter(&buf)
+
+	f, err := z.CreateHeader(h)
+	if err != nil {
+		t.Fatalf("error creating header: %v", err)
+	}
+	if _, err := f.Write([]byte("hi")); err != nil {
+		t.Fatalf("error writing content: %v", err)
+	}
+	if err := z.Close(); err != nil {
+		t.Fatalf("error closing zip writer: %v", err)
+	}
+
+	b := buf.Bytes()
+	zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
+	if err != nil {
+		t.Fatalf("got %v, expected nil", err)
+	}
+	zh := zf.File[0].FileHeader
+	if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
+		t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
+	}
+}
+
+// Issue 4302.
+func TestHeaderInvalidTagAndSize(t *testing.T) {
+	const timeFormat = "20060102T150405.000.txt"
+
+	ts := time.Now()
+	filename := ts.Format(timeFormat)
+
+	h := FileHeader{
+		Name:   filename,
+		Method: Deflate,
+		Extra:  []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
+	}
+	h.SetModTime(ts)
+
+	testValidHeader(&h, t)
+}
+
+func TestHeaderTooShort(t *testing.T) {
+	h := FileHeader{
+		Name:   "foo.txt",
+		Method: Deflate,
+		Extra:  []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
+	}
+	testValidHeader(&h, t)
+}
+
+func TestHeaderTooLongErr(t *testing.T) {
+	var headerTests = []struct {
+		name    string
+		extra   []byte
+		wanterr error
+	}{
+		{
+			name:    strings.Repeat("x", 1<<16),
+			extra:   []byte{},
+			wanterr: errLongName,
+		},
+		{
+			name:    "long_extra",
+			extra:   bytes.Repeat([]byte{0xff}, 1<<16),
+			wanterr: errLongExtra,
+		},
+	}
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, test := range headerTests {
+		h := &FileHeader{
+			Name:  test.name,
+			Extra: test.extra,
+		}
+		_, err := w.CreateHeader(h)
+		if err != test.wanterr {
+			t.Errorf("error=%v, want %v", err, test.wanterr)
+		}
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestHeaderIgnoredSize(t *testing.T) {
+	h := FileHeader{
+		Name:   "foo.txt",
+		Method: Deflate,
+		Extra:  []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
+	}
+	testValidHeader(&h, t)
+}
+
+// Issue 4393. It is valid to have an extra data header
+// which contains no body.
+func TestZeroLengthHeader(t *testing.T) {
+	h := FileHeader{
+		Name:   "extadata.txt",
+		Method: Deflate,
+		Extra: []byte{
+			85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
+			85, 120, 0, 0, // tag 30805 size 0
+		},
+	}
+	testValidHeader(&h, t)
+}
+
+// Just benchmarking how fast the Zip64 test above is. Not related to
+// our zip performance, since the test above disabled CRC32 and flate.
+func BenchmarkZip64Test(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		testZip64(b, 1<<26)
+	}
+}
+
+func BenchmarkZip64TestSizes(b *testing.B) {
+	for _, size := range []int64{1 << 12, 1 << 20, 1 << 26} {
+		b.Run(fmt.Sprint(size), func(b *testing.B) {
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					testZip64(b, size)
+				}
+			})
+		})
+	}
+}
+
+func TestSuffixSaver(t *testing.T) {
+	const keep = 10
+	ss := &suffixSaver{keep: keep}
+	ss.Write([]byte("abc"))
+	if got := string(ss.Suffix()); got != "abc" {
+		t.Errorf("got = %q; want abc", got)
+	}
+	ss.Write([]byte("defghijklmno"))
+	if got := string(ss.Suffix()); got != "fghijklmno" {
+		t.Errorf("got = %q; want fghijklmno", got)
+	}
+	if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
+		t.Errorf("Size = %d; want %d", got, want)
+	}
+	buf := make([]byte, ss.Size())
+	for off := int64(0); off < ss.Size(); off++ {
+		for size := 1; size <= int(ss.Size()-off); size++ {
+			readBuf := buf[:size]
+			n, err := ss.ReadAt(readBuf, off)
+			if off < ss.Size()-keep {
+				if err != errDiscardedBytes {
+					t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
+				}
+				continue
+			}
+			want := "abcdefghijklmno"[off : off+int64(size)]
+			got := string(readBuf[:n])
+			if err != nil || got != want {
+				t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
+			}
+		}
+	}
+
+}
+
+type zeros struct{}
+
+func (zeros) Read(p []byte) (int, error) {
+	for i := range p {
+		p[i] = 0
+	}
+	return len(p), nil
+}
diff --git a/internal/backport/fmtsort/export_test.go b/internal/backport/fmtsort/export_test.go
new file mode 100644
index 0000000..25cbb5d
--- /dev/null
+++ b/internal/backport/fmtsort/export_test.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort
+
+import "reflect"
+
+func Compare(a, b reflect.Value) int {
+	return compare(a, b)
+}
diff --git a/internal/backport/fmtsort/sort.go b/internal/backport/fmtsort/sort.go
new file mode 100644
index 0000000..7127ba6
--- /dev/null
+++ b/internal/backport/fmtsort/sort.go
@@ -0,0 +1,220 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtsort provides a general stable ordering mechanism
+// for maps, on behalf of the fmt and text/template packages.
+// It is not guaranteed to be efficient and works only for types
+// that are valid map keys.
+package fmtsort
+
+import (
+	"reflect"
+	"sort"
+)
+
+// Note: Throughout this package we avoid calling reflect.Value.Interface as
+// it is not always legal to do so and it's easier to avoid the issue than to face it.
+
+// SortedMap represents a map's keys and values. The keys and values are
+// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
+type SortedMap struct {
+	Key   []reflect.Value
+	Value []reflect.Value
+}
+
+func (o *SortedMap) Len() int           { return len(o.Key) }
+func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
+func (o *SortedMap) Swap(i, j int) {
+	o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
+	o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
+}
+
+// Sort accepts a map and returns a SortedMap that has the same keys and
+// values but in a stable sorted order according to the keys, modulo issues
+// raised by unorderable key values such as NaNs.
+//
+// The ordering rules are more general than with Go's < operator:
+//
+//  - when applicable, nil compares low
+//  - ints, floats, and strings order by <
+//  - NaN compares less than non-NaN floats
+//  - bool compares false before true
+//  - complex compares real, then imag
+//  - pointers compare by machine address
+//  - channel values compare by machine address
+//  - structs compare each field in turn
+//  - arrays compare each element in turn.
+//    Otherwise identical arrays compare by length.
+//  - interface values compare first by reflect.Type describing the concrete type
+//    and then by concrete value as described in the previous rules.
+//
+func Sort(mapValue reflect.Value) *SortedMap {
+	if mapValue.Type().Kind() != reflect.Map {
+		return nil
+	}
+	// Note: this code is arranged to not panic even in the presence
+	// of a concurrent map update. The runtime is responsible for
+	// yelling loudly if that happens. See issue 33275.
+	n := mapValue.Len()
+	key := make([]reflect.Value, 0, n)
+	value := make([]reflect.Value, 0, n)
+	iter := mapValue.MapRange()
+	for iter.Next() {
+		key = append(key, iter.Key())
+		value = append(value, iter.Value())
+	}
+	sorted := &SortedMap{
+		Key:   key,
+		Value: value,
+	}
+	sort.Stable(sorted)
+	return sorted
+}
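+
+// A minimal usage sketch for a caller such as the fmt or text/template
+// packages (m is an assumed map value, e.g. map[string]int{"b": 2, "a": 1}):
+//
+//	sm := fmtsort.Sort(reflect.ValueOf(m))
+//	for i, key := range sm.Key {
+//		fmt.Println(key, sm.Value[i]) // prints "a 1", then "b 2"
+//	}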
+
+// compare compares two values of the same type. It returns -1, 0, or 1
+// according to whether a < b, a == b, or a > b.
+// If the types differ, it returns -1.
+// See the comment on Sort for the comparison rules.
+func compare(aVal, bVal reflect.Value) int {
+	aType, bType := aVal.Type(), bVal.Type()
+	if aType != bType {
+		return -1 // No good answer possible, but don't return 0: they're not equal.
+	}
+	switch aVal.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		a, b := aVal.Int(), bVal.Int()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		a, b := aVal.Uint(), bVal.Uint()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.String:
+		a, b := aVal.String(), bVal.String()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Float32, reflect.Float64:
+		return floatCompare(aVal.Float(), bVal.Float())
+	case reflect.Complex64, reflect.Complex128:
+		a, b := aVal.Complex(), bVal.Complex()
+		if c := floatCompare(real(a), real(b)); c != 0 {
+			return c
+		}
+		return floatCompare(imag(a), imag(b))
+	case reflect.Bool:
+		a, b := aVal.Bool(), bVal.Bool()
+		switch {
+		case a == b:
+			return 0
+		case a:
+			return 1
+		default:
+			return -1
+		}
+	case reflect.Ptr, reflect.UnsafePointer:
+		a, b := aVal.Pointer(), bVal.Pointer()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Chan:
+		if c, ok := nilCompare(aVal, bVal); ok {
+			return c
+		}
+		ap, bp := aVal.Pointer(), bVal.Pointer()
+		switch {
+		case ap < bp:
+			return -1
+		case ap > bp:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Struct:
+		for i := 0; i < aVal.NumField(); i++ {
+			if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
+				return c
+			}
+		}
+		return 0
+	case reflect.Array:
+		for i := 0; i < aVal.Len(); i++ {
+			if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
+				return c
+			}
+		}
+		return 0
+	case reflect.Interface:
+		if c, ok := nilCompare(aVal, bVal); ok {
+			return c
+		}
+		c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
+		if c != 0 {
+			return c
+		}
+		return compare(aVal.Elem(), bVal.Elem())
+	default:
+		// Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
+		panic("bad type in compare: " + aType.String())
+	}
+}
+
+// nilCompare checks whether either value is nil. If not, the boolean is false.
+// If either value is nil, the boolean is true and the integer is the comparison
+// value. The comparison is defined to be 0 if both are nil, otherwise the one
+// nil value compares low. Both arguments must represent a chan, func,
+// interface, map, pointer, or slice.
+func nilCompare(aVal, bVal reflect.Value) (int, bool) {
+	if aVal.IsNil() {
+		if bVal.IsNil() {
+			return 0, true
+		}
+		return -1, true
+	}
+	if bVal.IsNil() {
+		return 1, true
+	}
+	return 0, false
+}
+
+// floatCompare compares two floating-point values. NaNs compare low.
+func floatCompare(a, b float64) int {
+	switch {
+	case isNaN(a):
+		return -1 // No good answer if b is a NaN so don't bother checking.
+	case isNaN(b):
+		return 1
+	case a < b:
+		return -1
+	case a > b:
+		return 1
+	}
+	return 0
+}
+
+func isNaN(a float64) bool {
+	return a != a
+}
diff --git a/internal/backport/fmtsort/sort_test.go b/internal/backport/fmtsort/sort_test.go
new file mode 100644
index 0000000..4e249d9
--- /dev/null
+++ b/internal/backport/fmtsort/sort_test.go
@@ -0,0 +1,269 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort_test
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"testing"
+	"unsafe"
+
+	"golang.org/x/website/internal/backport/fmtsort"
+)
+
+var compareTests = [][]reflect.Value{
+	ct(reflect.TypeOf(int(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int8(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int16(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int32(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int64(0)), -1, 0, 1),
+	ct(reflect.TypeOf(uint(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint8(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint16(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint32(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint64(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uintptr(0)), 0, 1, 5),
+	ct(reflect.TypeOf(string("")), "", "a", "ab"),
+	ct(reflect.TypeOf(float32(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+	ct(reflect.TypeOf(float64(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+	ct(reflect.TypeOf(complex64(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+	ct(reflect.TypeOf(complex128(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+	ct(reflect.TypeOf(false), false, true),
+	ct(reflect.TypeOf(&ints[0]), &ints[0], &ints[1], &ints[2]),
+	ct(reflect.TypeOf(unsafe.Pointer(&ints[0])), unsafe.Pointer(&ints[0]), unsafe.Pointer(&ints[1]), unsafe.Pointer(&ints[2])),
+	ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]),
+	ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}),
+	ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}),
+	ct(reflect.TypeOf(interface{}(interface{}(0))), iFace, 1, 2, 3),
+}
+
+var iFace interface{}
+
+func ct(typ reflect.Type, args ...interface{}) []reflect.Value {
+	value := make([]reflect.Value, len(args))
+	for i, v := range args {
+		x := reflect.ValueOf(v)
+		if !x.IsValid() { // Make it a typed nil.
+			x = reflect.Zero(typ)
+		} else {
+			x = x.Convert(typ)
+		}
+		value[i] = x
+	}
+	return value
+}
+
+func TestCompare(t *testing.T) {
+	for _, test := range compareTests {
+		for i, v0 := range test {
+			for j, v1 := range test {
+				c := fmtsort.Compare(v0, v1)
+				var expect int
+				switch {
+				case i == j:
+					expect = 0
+					// NaNs are tricky.
+					if typ := v0.Type(); (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && math.IsNaN(v0.Float()) {
+						expect = -1
+					}
+				case i < j:
+					expect = -1
+				case i > j:
+					expect = 1
+				}
+				if c != expect {
+					t.Errorf("%s: compare(%v,%v)=%d; expect %d", v0.Type(), v0, v1, c, expect)
+				}
+			}
+		}
+	}
+}
+
+type sortTest struct {
+	data  interface{} // Always a map.
+	print string      // Printed result using our custom printer.
+}
+
+var sortTests = []sortTest{
+	{
+		map[int]string{7: "bar", -3: "foo"},
+		"-3:foo 7:bar",
+	},
+	{
+		map[uint8]string{7: "bar", 3: "foo"},
+		"3:foo 7:bar",
+	},
+	{
+		map[string]string{"7": "bar", "3": "foo"},
+		"3:foo 7:bar",
+	},
+	{
+		map[float64]string{7: "bar", -3: "foo", math.NaN(): "nan", math.Inf(0): "inf"},
+		"NaN:nan -3:foo 7:bar +Inf:inf",
+	},
+	{
+		map[complex128]string{7 + 2i: "bar2", 7 + 1i: "bar", -3: "foo", complex(math.NaN(), 0i): "nan", complex(math.Inf(0), 0i): "inf"},
+		"(NaN+0i):nan (-3+0i):foo (7+1i):bar (7+2i):bar2 (+Inf+0i):inf",
+	},
+	{
+		map[bool]string{true: "true", false: "false"},
+		"false:false true:true",
+	},
+	{
+		chanMap(),
+		"CHAN0:0 CHAN1:1 CHAN2:2",
+	},
+	{
+		pointerMap(),
+		"PTR0:0 PTR1:1 PTR2:2",
+	},
+	{
+		unsafePointerMap(),
+		"UNSAFEPTR0:0 UNSAFEPTR1:1 UNSAFEPTR2:2",
+	},
+	{
+		map[toy]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+		"{3 4}:34 {7 1}:71 {7 2}:72",
+	},
+	{
+		map[[2]int]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+		"[3 4]:34 [7 1]:71 [7 2]:72",
+	},
+}
+
+func sprint(data interface{}) string {
+	om := fmtsort.Sort(reflect.ValueOf(data))
+	if om == nil {
+		return "nil"
+	}
+	b := new(strings.Builder)
+	for i, key := range om.Key {
+		if i > 0 {
+			b.WriteRune(' ')
+		}
+		b.WriteString(sprintKey(key))
+		b.WriteRune(':')
+		b.WriteString(fmt.Sprint(om.Value[i]))
+	}
+	return b.String()
+}
+
+// sprintKey formats a reflect.Value but gives reproducible values for some
+// problematic types such as pointers. Note that it only does special handling
+// for the troublesome types used in the test cases; it is not a general
+// printer.
+func sprintKey(key reflect.Value) string {
+	switch str := key.Type().String(); str {
+	case "*int":
+		ptr := key.Interface().(*int)
+		for i := range ints {
+			if ptr == &ints[i] {
+				return fmt.Sprintf("PTR%d", i)
+			}
+		}
+		return "PTR???"
+	case "unsafe.Pointer":
+		ptr := key.Interface().(unsafe.Pointer)
+		for i := range ints {
+			if ptr == unsafe.Pointer(&ints[i]) {
+				return fmt.Sprintf("UNSAFEPTR%d", i)
+			}
+		}
+		return "UNSAFEPTR???"
+	case "chan int":
+		c := key.Interface().(chan int)
+		for i := range chans {
+			if c == chans[i] {
+				return fmt.Sprintf("CHAN%d", i)
+			}
+		}
+		return "CHAN???"
+	default:
+		return fmt.Sprint(key)
+	}
+}
+
+var (
+	ints  [3]int
+	chans = [3]chan int{make(chan int), make(chan int), make(chan int)}
+)
+
+func pointerMap() map[*int]string {
+	m := make(map[*int]string)
+	for i := 2; i >= 0; i-- {
+		m[&ints[i]] = fmt.Sprint(i)
+	}
+	return m
+}
+
+func unsafePointerMap() map[unsafe.Pointer]string {
+	m := make(map[unsafe.Pointer]string)
+	for i := 2; i >= 0; i-- {
+		m[unsafe.Pointer(&ints[i])] = fmt.Sprint(i)
+	}
+	return m
+}
+
+func chanMap() map[chan int]string {
+	m := make(map[chan int]string)
+	for i := 2; i >= 0; i-- {
+		m[chans[i]] = fmt.Sprint(i)
+	}
+	return m
+}
+
+type toy struct {
+	A int // Exported.
+	b int // Unexported.
+}
+
+func TestOrder(t *testing.T) {
+	for _, test := range sortTests {
+		got := sprint(test.data)
+		if got != test.print {
+			t.Errorf("%s: got %q, want %q", reflect.TypeOf(test.data), got, test.print)
+		}
+	}
+}
+
+func TestInterface(t *testing.T) {
+	// A map containing multiple concrete types should be sorted by type,
+	// then value. However, the relative ordering of types is unspecified,
+	// so test this by checking the presence of sorted subgroups.
+	m := map[interface{}]string{
+		[2]int{1, 0}:             "",
+		[2]int{0, 1}:             "",
+		true:                     "",
+		false:                    "",
+		3.1:                      "",
+		2.1:                      "",
+		1.1:                      "",
+		math.NaN():               "",
+		3:                        "",
+		2:                        "",
+		1:                        "",
+		"c":                      "",
+		"b":                      "",
+		"a":                      "",
+		struct{ x, y int }{1, 0}: "",
+		struct{ x, y int }{0, 1}: "",
+	}
+	got := sprint(m)
+	typeGroups := []string{
+		"NaN: 1.1: 2.1: 3.1:", // float64
+		"false: true:",        // bool
+		"1: 2: 3:",            // int
+		"a: b: c:",            // string
+		"[0 1]: [1 0]:",       // [2]int
+		"{0 1}: {1 0}:",       // struct{ x int; y int }
+	}
+	for _, g := range typeGroups {
+		if !strings.Contains(got, g) {
+			t.Errorf("sorted map should contain %q", g)
+		}
+	}
+}
diff --git a/internal/backport/html/template/attr.go b/internal/backport/html/template/attr.go
new file mode 100644
index 0000000..22922e6
--- /dev/null
+++ b/internal/backport/html/template/attr.go
@@ -0,0 +1,175 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"strings"
+)
+
+// attrTypeMap[n] describes the value of the given attribute.
+// If an attribute affects (or can mask) the encoding or interpretation of
+// other content, or affects the contents, idempotency, or credentials of a
+// network message, then the value in this map is contentTypeUnsafe.
+// This map is derived from HTML5, specifically
+// https://www.w3.org/TR/html5/Overview.html#attributes-1
+// as well as "%URI"-typed attributes from
+// https://www.w3.org/TR/html4/index/attributes.html
+var attrTypeMap = map[string]contentType{
+	"accept":          contentTypePlain,
+	"accept-charset":  contentTypeUnsafe,
+	"action":          contentTypeURL,
+	"alt":             contentTypePlain,
+	"archive":         contentTypeURL,
+	"async":           contentTypeUnsafe,
+	"autocomplete":    contentTypePlain,
+	"autofocus":       contentTypePlain,
+	"autoplay":        contentTypePlain,
+	"background":      contentTypeURL,
+	"border":          contentTypePlain,
+	"checked":         contentTypePlain,
+	"cite":            contentTypeURL,
+	"challenge":       contentTypeUnsafe,
+	"charset":         contentTypeUnsafe,
+	"class":           contentTypePlain,
+	"classid":         contentTypeURL,
+	"codebase":        contentTypeURL,
+	"cols":            contentTypePlain,
+	"colspan":         contentTypePlain,
+	"content":         contentTypeUnsafe,
+	"contenteditable": contentTypePlain,
+	"contextmenu":     contentTypePlain,
+	"controls":        contentTypePlain,
+	"coords":          contentTypePlain,
+	"crossorigin":     contentTypeUnsafe,
+	"data":            contentTypeURL,
+	"datetime":        contentTypePlain,
+	"default":         contentTypePlain,
+	"defer":           contentTypeUnsafe,
+	"dir":             contentTypePlain,
+	"dirname":         contentTypePlain,
+	"disabled":        contentTypePlain,
+	"draggable":       contentTypePlain,
+	"dropzone":        contentTypePlain,
+	"enctype":         contentTypeUnsafe,
+	"for":             contentTypePlain,
+	"form":            contentTypeUnsafe,
+	"formaction":      contentTypeURL,
+	"formenctype":     contentTypeUnsafe,
+	"formmethod":      contentTypeUnsafe,
+	"formnovalidate":  contentTypeUnsafe,
+	"formtarget":      contentTypePlain,
+	"headers":         contentTypePlain,
+	"height":          contentTypePlain,
+	"hidden":          contentTypePlain,
+	"high":            contentTypePlain,
+	"href":            contentTypeURL,
+	"hreflang":        contentTypePlain,
+	"http-equiv":      contentTypeUnsafe,
+	"icon":            contentTypeURL,
+	"id":              contentTypePlain,
+	"ismap":           contentTypePlain,
+	"keytype":         contentTypeUnsafe,
+	"kind":            contentTypePlain,
+	"label":           contentTypePlain,
+	"lang":            contentTypePlain,
+	"language":        contentTypeUnsafe,
+	"list":            contentTypePlain,
+	"longdesc":        contentTypeURL,
+	"loop":            contentTypePlain,
+	"low":             contentTypePlain,
+	"manifest":        contentTypeURL,
+	"max":             contentTypePlain,
+	"maxlength":       contentTypePlain,
+	"media":           contentTypePlain,
+	"mediagroup":      contentTypePlain,
+	"method":          contentTypeUnsafe,
+	"min":             contentTypePlain,
+	"multiple":        contentTypePlain,
+	"name":            contentTypePlain,
+	"novalidate":      contentTypeUnsafe,
+	// Skip handler names from
+	// https://www.w3.org/TR/html5/webappapis.html#event-handlers-on-elements,-document-objects,-and-window-objects
+	// since we have special handling in attrType.
+	"open":        contentTypePlain,
+	"optimum":     contentTypePlain,
+	"pattern":     contentTypeUnsafe,
+	"placeholder": contentTypePlain,
+	"poster":      contentTypeURL,
+	"profile":     contentTypeURL,
+	"preload":     contentTypePlain,
+	"pubdate":     contentTypePlain,
+	"radiogroup":  contentTypePlain,
+	"readonly":    contentTypePlain,
+	"rel":         contentTypeUnsafe,
+	"required":    contentTypePlain,
+	"reversed":    contentTypePlain,
+	"rows":        contentTypePlain,
+	"rowspan":     contentTypePlain,
+	"sandbox":     contentTypeUnsafe,
+	"spellcheck":  contentTypePlain,
+	"scope":       contentTypePlain,
+	"scoped":      contentTypePlain,
+	"seamless":    contentTypePlain,
+	"selected":    contentTypePlain,
+	"shape":       contentTypePlain,
+	"size":        contentTypePlain,
+	"sizes":       contentTypePlain,
+	"span":        contentTypePlain,
+	"src":         contentTypeURL,
+	"srcdoc":      contentTypeHTML,
+	"srclang":     contentTypePlain,
+	"srcset":      contentTypeSrcset,
+	"start":       contentTypePlain,
+	"step":        contentTypePlain,
+	"style":       contentTypeCSS,
+	"tabindex":    contentTypePlain,
+	"target":      contentTypePlain,
+	"title":       contentTypePlain,
+	"type":        contentTypeUnsafe,
+	"usemap":      contentTypeURL,
+	"value":       contentTypeUnsafe,
+	"width":       contentTypePlain,
+	"wrap":        contentTypePlain,
+	"xmlns":       contentTypeURL,
+}
+
+// attrType returns a conservative (upper-bound on authority) guess at the
+// type of the lowercase named attribute.
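+// For example, attrType("href") is contentTypeURL (from attrTypeMap),
+// attrType("onclick") is contentTypeJS via the "on" prefix, and
+// attrType("data-feed-url"), an illustrative custom attribute, is
+// contentTypeURL via the "url" substring heuristic.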
+func attrType(name string) contentType {
+	if strings.HasPrefix(name, "data-") {
+		// Strip data- so that custom attribute heuristics below are
+		// widely applied.
+		// Treat data-action as URL below.
+		name = name[5:]
+	} else if colon := strings.IndexRune(name, ':'); colon != -1 {
+		if name[:colon] == "xmlns" {
+			return contentTypeURL
+		}
+		// Treat svg:href and xlink:href as href below.
+		name = name[colon+1:]
+	}
+	if t, ok := attrTypeMap[name]; ok {
+		return t
+	}
+	// Treat partial event handler names as script.
+	if strings.HasPrefix(name, "on") {
+		return contentTypeJS
+	}
+
+	// Heuristics to prevent "javascript:..." injection in custom
+	// data attributes and custom attributes like g:tweetUrl.
+	// https://www.w3.org/TR/html5/dom.html#embedding-custom-non-visible-data-with-the-data-*-attributes
+	// "Custom data attributes are intended to store custom data
+	//  private to the page or application, for which there are no
+	//  more appropriate attributes or elements."
+	// Developers seem to store URL content in data URLs that start
+	// or end with "URI" or "URL".
+	if strings.Contains(name, "src") ||
+		strings.Contains(name, "uri") ||
+		strings.Contains(name, "url") {
+		return contentTypeURL
+	}
+	return contentTypePlain
+}
diff --git a/internal/backport/html/template/attr_string.go b/internal/backport/html/template/attr_string.go
new file mode 100644
index 0000000..babe70c
--- /dev/null
+++ b/internal/backport/html/template/attr_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type attr"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _attr_name = "attrNoneattrScriptattrScriptTypeattrStyleattrURLattrSrcset"
+
+var _attr_index = [...]uint8{0, 8, 18, 32, 41, 48, 58}
+
+func (i attr) String() string {
+	if i >= attr(len(_attr_index)-1) {
+		return "attr(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _attr_name[_attr_index[i]:_attr_index[i+1]]
+}
diff --git a/internal/backport/html/template/clone_test.go b/internal/backport/html/template/clone_test.go
new file mode 100644
index 0000000..07e8ef3
--- /dev/null
+++ b/internal/backport/html/template/clone_test.go
@@ -0,0 +1,280 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+	"sync"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+func TestAddParseTreeHTML(t *testing.T) {
+	root := Must(New("root").Parse(`{{define "a"}} {{.}} {{template "b"}} {{.}} "></a>{{end}}`))
+	tree, err := parse.Parse("t", `{{define "b"}}<a href="{{end}}`, "", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	added := Must(root.AddParseTree("b", tree["b"]))
+	b := new(bytes.Buffer)
+	err = added.ExecuteTemplate(b, "a", "1>0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` 1&gt;0 <a href=" 1%3e0 "></a>`; got != want {
+		t.Errorf("got %q want %q", got, want)
+	}
+}
+
+func TestClone(t *testing.T) {
+	// The {{.}} will be executed with data "<i>*/" in different contexts.
+	// In the t0 template, it will be in a text context.
+	// In the t1 template, it will be in a URL context.
+	// In the t2 template, it will be in a JavaScript context.
+	// In the t3 template, it will be in a CSS context.
+	const tmpl = `{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}`
+	b := new(bytes.Buffer)
+
+	// Create an incomplete template t0.
+	t0 := Must(New("t0").Parse(tmpl))
+
+	// Clone t0 as t1.
+	t1 := Must(t0.Clone())
+	Must(t1.Parse(`{{define "lhs"}} <a href=" {{end}}`))
+	Must(t1.Parse(`{{define "rhs"}} "></a> {{end}}`))
+
+	// Execute t1.
+	b.Reset()
+	if err := t1.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <a href=" %3ci%3e*/ "></a> `; got != want {
+		t.Errorf("t1: got %q want %q", got, want)
+	}
+
+	// Clone t0 as t2.
+	t2 := Must(t0.Clone())
+	Must(t2.Parse(`{{define "lhs"}} <p onclick="javascript: {{end}}`))
+	Must(t2.Parse(`{{define "rhs"}} "></p> {{end}}`))
+
+	// Execute t2.
+	b.Reset()
+	if err := t2.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <p onclick="javascript: &#34;\u003ci\u003e*/&#34; "></p> `; got != want {
+		t.Errorf("t2: got %q want %q", got, want)
+	}
+
+	// Clone t0 as t3, but do not execute t3 yet.
+	t3 := Must(t0.Clone())
+	Must(t3.Parse(`{{define "lhs"}} <style> {{end}}`))
+	Must(t3.Parse(`{{define "rhs"}} </style> {{end}}`))
+
+	// Complete t0.
+	Must(t0.Parse(`{{define "lhs"}} ( {{end}}`))
+	Must(t0.Parse(`{{define "rhs"}} ) {{end}}`))
+
+	// Clone t0 as t4. Redefining the "lhs" template should not fail.
+	t4 := Must(t0.Clone())
+	if _, err := t4.Parse(`{{define "lhs"}} OK {{end}}`); err != nil {
+		t.Errorf(`redefine "lhs": got err %v want nil`, err)
+	}
+	// Cloning t1 should fail as it has been executed.
+	if _, err := t1.Clone(); err == nil {
+		t.Error("cloning t1: got nil err want non-nil")
+	}
+	// Redefining the "lhs" template in t1 should fail as it has been executed.
+	if _, err := t1.Parse(`{{define "lhs"}} OK {{end}}`); err == nil {
+		t.Error(`redefine "lhs": got nil err want non-nil`)
+	}
+
+	// Execute t0.
+	b.Reset()
+	if err := t0.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` ( &lt;i&gt;*/ ) `; got != want {
+		t.Errorf("t0: got %q want %q", got, want)
+	}
+
+	// Clone t0. This should fail, as t0 has already executed.
+	if _, err := t0.Clone(); err == nil {
+		t.Error(`t0.Clone(): got nil err want non-nil`)
+	}
+
+	// Similarly, cloning sub-templates should fail.
+	if _, err := t0.Lookup("a").Clone(); err == nil {
+		t.Error(`t0.Lookup("a").Clone(): got nil err want non-nil`)
+	}
+	if _, err := t0.Lookup("lhs").Clone(); err == nil {
+		t.Error(`t0.Lookup("lhs").Clone(): got nil err want non-nil`)
+	}
+
+	// Execute t3.
+	b.Reset()
+	if err := t3.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <style> ZgotmplZ </style> `; got != want {
+		t.Errorf("t3: got %q want %q", got, want)
+	}
+}
+
+func TestTemplates(t *testing.T) {
+	names := []string{"t0", "a", "lhs", "rhs"}
+	// Some template definitions borrowed from TestClone.
+	const tmpl = `
+		{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}
+		{{define "lhs"}} <a href=" {{end}}
+		{{define "rhs"}} "></a> {{end}}`
+	t0 := Must(New("t0").Parse(tmpl))
+	templates := t0.Templates()
+	if len(templates) != len(names) {
+		t.Errorf("expected %d templates; got %d", len(names), len(templates))
+	}
+	for _, name := range names {
+		found := false
+		for _, tmpl := range templates {
+			if name == tmpl.text.Name() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			t.Error("could not find template", name)
+		}
+	}
+}
+
+// This used to crash; https://golang.org/issue/3281
+func TestCloneCrash(t *testing.T) {
+	t1 := New("all")
+	Must(t1.New("t1").Parse(`{{define "foo"}}foo{{end}}`))
+	t1.Clone()
+}
+
+// Ensure that this guarantee from the docs is upheld:
+// "Further calls to Parse in the copy will add templates
+// to the copy but not to the original."
+func TestCloneThenParse(t *testing.T) {
+	t0 := Must(New("t0").Parse(`{{define "a"}}{{template "embedded"}}{{end}}`))
+	t1 := Must(t0.Clone())
+	Must(t1.Parse(`{{define "embedded"}}t1{{end}}`))
+	if len(t0.Templates())+1 != len(t1.Templates()) {
+		t.Error("adding a template to a clone added it to the original")
+	}
+	// double check that the embedded template isn't available in the original
+	err := t0.ExecuteTemplate(ioutil.Discard, "a", nil)
+	if err == nil {
+		t.Error("expected 'no such template' error")
+	}
+}
+
+// https://golang.org/issue/5980
+func TestFuncMapWorksAfterClone(t *testing.T) {
+	funcs := FuncMap{"customFunc": func() (string, error) {
+		return "", errors.New("issue5980")
+	}}
+
+	// get the expected error output (no clone)
+	uncloned := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
+	wantErr := uncloned.Execute(ioutil.Discard, nil)
+
+	// toClone must be the same as uncloned. It has to be recreated from scratch,
+	// since cloning cannot occur after execution.
+	toClone := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
+	cloned := Must(toClone.Clone())
+	gotErr := cloned.Execute(ioutil.Discard, nil)
+
+	if wantErr.Error() != gotErr.Error() {
+		t.Errorf("clone error message mismatch want %q got %q", wantErr, gotErr)
+	}
+}
+
+// https://golang.org/issue/16101
+func TestTemplateCloneExecuteRace(t *testing.T) {
+	const (
+		input   = `<title>{{block "a" .}}a{{end}}</title><body>{{block "b" .}}b{{end}}<body>`
+		overlay = `{{define "b"}}A{{end}}`
+	)
+	outer := Must(New("outer").Parse(input))
+	tmpl := Must(Must(outer.Clone()).Parse(overlay))
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 100; i++ {
+				if err := tmpl.Execute(ioutil.Discard, "data"); err != nil {
+					panic(err)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestTemplateCloneLookup(t *testing.T) {
+	// Template.escape makes an assumption that the template associated
+	// with t.Name() is t. Check that this holds.
+	tmpl := Must(New("x").Parse("a"))
+	tmpl = Must(tmpl.Clone())
+	if tmpl.Lookup(tmpl.Name()) != tmpl {
+		t.Error("after Clone, tmpl.Lookup(tmpl.Name()) != tmpl")
+	}
+}
+
+func TestCloneGrowth(t *testing.T) {
+	tmpl := Must(New("root").Parse(`<title>{{block "B". }}Arg{{end}}</title>`))
+	tmpl = Must(tmpl.Clone())
+	Must(tmpl.Parse(`{{define "B"}}Text{{end}}`))
+	for i := 0; i < 10; i++ {
+		tmpl.Execute(ioutil.Discard, nil)
+	}
+	if len(tmpl.DefinedTemplates()) > 200 {
+		t.Fatalf("too many templates: %v", len(tmpl.DefinedTemplates()))
+	}
+}
+
+// https://golang.org/issue/17735
+func TestCloneRedefinedName(t *testing.T) {
+	const base = `
+{{ define "a" -}}<title>{{ template "b" . -}}</title>{{ end -}}
+{{ define "b" }}{{ end -}}
+`
+	const page = `{{ template "a" . }}`
+
+	t1 := Must(New("a").Parse(base))
+
+	for i := 0; i < 2; i++ {
+		t2 := Must(t1.Clone())
+		t2 = Must(t2.New(fmt.Sprintf("%d", i)).Parse(page))
+		err := t2.Execute(ioutil.Discard, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// Issue 24791.
+func TestClonePipe(t *testing.T) {
+	a := Must(New("a").Parse(`{{define "a"}}{{range $v := .A}}{{$v}}{{end}}{{end}}`))
+	data := struct{ A []string }{A: []string{"hi"}}
+	b := Must(a.Clone())
+	var buf strings.Builder
+	if err := b.Execute(&buf, &data); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := buf.String(), "hi"; got != want {
+		t.Errorf("got %q want %q", got, want)
+	}
+}
diff --git a/internal/backport/html/template/content.go b/internal/backport/html/template/content.go
new file mode 100644
index 0000000..6ba87a9
--- /dev/null
+++ b/internal/backport/html/template/content.go
@@ -0,0 +1,185 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Strings of content from a trusted source.
+type (
+	// CSS encapsulates known safe content that matches any of:
+	//   1. The CSS3 stylesheet production, such as `p { color: purple }`.
+	//   2. The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
+	//   3. CSS3 declaration productions, such as `color: red; margin: 2px`.
+	//   4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
+	// See https://www.w3.org/TR/css3-syntax/#parsing and
+	// https://web.archive.org/web/20090211114933/http://w3.org/TR/css3-syntax#style
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	CSS string
+
+	// HTML encapsulates a known safe HTML document fragment.
+	// It should not be used for HTML from a third-party, or HTML with
+	// unclosed tags or comments. The outputs of a sound HTML sanitizer
+	// and a template escaped by this package are fine for use with HTML.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	HTML string
+
+	// HTMLAttr encapsulates an HTML attribute from a trusted source,
+	// for example, ` dir="ltr"`.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	HTMLAttr string
+
+	// JS encapsulates a known safe EcmaScript5 Expression, for example,
+	// `(x + y * z())`.
+	// Template authors are responsible for ensuring that typed expressions
+	// do not break the intended precedence and that there is no
+	// statement/expression ambiguity as when passing an expression like
+	// "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
+	// valid Program with a very different meaning.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	//
+	// Using JS to include valid but untrusted JSON is not safe.
+	// A safe alternative is to parse the JSON with json.Unmarshal and then
+	// pass the resultant object into the template, where it will be
+	// converted to sanitized JSON when presented in a JavaScript context.
+	JS string
+
+	// JSStr encapsulates a sequence of characters meant to be embedded
+	// between quotes in a JavaScript expression.
+	// The string must match a series of StringCharacters:
+	//   StringCharacter :: SourceCharacter but not `\` or LineTerminator
+	//                    | EscapeSequence
+	// Note that LineContinuations are not allowed.
+	// JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	JSStr string
+
+	// URL encapsulates a known safe URL or URL substring (see RFC 3986).
+	// A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
+	// from a trusted source should go in the page, but by default dynamic
+	// `javascript:` URLs are filtered out since they are a frequently
+	// exploited injection vector.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	URL string
+
+	// Srcset encapsulates a known safe srcset attribute
+	// (see https://w3c.github.io/html/semantics-embedded-content.html#element-attrdef-img-srcset).
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	Srcset string
+)
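+
+// As a rough illustration of the effect of these types, for a template whose
+// body is just {{.}}:
+//
+//	tmpl.Execute(w, "<b>hi</b>")       // writes "&lt;b&gt;hi&lt;/b&gt;"
+//	tmpl.Execute(w, HTML("<b>hi</b>")) // writes "<b>hi</b>", unescaped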
+
+type contentType uint8
+
+const (
+	contentTypePlain contentType = iota
+	contentTypeCSS
+	contentTypeHTML
+	contentTypeHTMLAttr
+	contentTypeJS
+	contentTypeJSStr
+	contentTypeURL
+	contentTypeSrcset
+	// contentTypeUnsafe is used in attr.go for values that affect how
+	// embedded content and network messages are formed, vetted,
+	// or interpreted; or which credentials network messages carry.
+	contentTypeUnsafe
+)
+
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+		// Avoid creating a reflect.Value if it's not a pointer.
+		return a
+	}
+	v := reflect.ValueOf(a)
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+var (
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
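+
+// For instance, a pointer whose type implements fmt.Stringer is returned
+// without being dereferenced, so fmt.Sprint will use its String method
+// rather than formatting the underlying struct.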
+
+// stringify converts its arguments to a string and returns it along with the type of the content.
+// All pointers are dereferenced, as in the text/template package.
+func stringify(args ...interface{}) (string, contentType) {
+	if len(args) == 1 {
+		switch s := indirect(args[0]).(type) {
+		case string:
+			return s, contentTypePlain
+		case CSS:
+			return string(s), contentTypeCSS
+		case HTML:
+			return string(s), contentTypeHTML
+		case HTMLAttr:
+			return string(s), contentTypeHTMLAttr
+		case JS:
+			return string(s), contentTypeJS
+		case JSStr:
+			return string(s), contentTypeJSStr
+		case URL:
+			return string(s), contentTypeURL
+		case Srcset:
+			return string(s), contentTypeSrcset
+		}
+	}
+	i := 0
+	for _, arg := range args {
+		// We skip untyped nil arguments for backward compatibility.
+		// Without this they would be output as <nil>, escaped.
+		// See issue 25875.
+		if arg == nil {
+			continue
+		}
+
+		args[i] = indirectToStringerOrError(arg)
+		i++
+	}
+	return fmt.Sprint(args[:i]...), contentTypePlain
+}
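+
+// A few illustrative cases (a sketch based on the logic above, not an
+// exhaustive specification):
+//
+//	stringify(HTML("<b>")) // "<b>", contentTypeHTML
+//	stringify(42)          // "42", contentTypePlain
+//	stringify(nil, "x")    // untyped nil is skipped: "x", contentTypePlain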
diff --git a/internal/backport/html/template/content_test.go b/internal/backport/html/template/content_test.go
new file mode 100644
index 0000000..e6f47a2
--- /dev/null
+++ b/internal/backport/html/template/content_test.go
@@ -0,0 +1,458 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+)
+
+func TestTypedContent(t *testing.T) {
+	data := []interface{}{
+		`<b> "foo%" O'Reilly &bar;`,
+		CSS(`a[href =~ "//example.com"]#foo`),
+		HTML(`Hello, <b>World</b> &amp;tc!`),
+		HTMLAttr(` dir="ltr"`),
+		JS(`c && alert("Hello, World!");`),
+		JSStr(`Hello, World & O'Reilly\u0021`),
+		URL(`greeting=H%69,&addressee=(World)`),
+		Srcset(`greeting=H%69,&addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`),
+		URL(`,foo/,`),
+	}
+
+	// For each content sensitive escaper, see how it does on
+	// each of the typed strings above.
+	tests := []struct {
+		// A template containing a single {{.}}.
+		input string
+		want  []string
+	}{
+		{
+			`<style>{{.}} { color: blue }</style>`,
+			[]string{
+				`ZgotmplZ`,
+				// Allowed but not escaped.
+				`a[href =~ "//example.com"]#foo`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`<div style="{{.}}">`,
+			[]string{
+				`ZgotmplZ`,
+				// Allowed and HTML escaped.
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`{{.}}`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Not escaped.
+				`Hello, <b>World</b> &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<a{{.}}>`,
+			[]string{
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				// Allowed and HTML escaped.
+				` dir="ltr"`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`<a title={{.}}>`,
+			[]string{
+				`&lt;b&gt;&#32;&#34;foo%&#34;&#32;O&#39;Reilly&#32;&amp;bar;`,
+				`a[href&#32;&#61;~&#32;&#34;//example.com&#34;]#foo`,
+				// Tags stripped, spaces escaped, entity not re-escaped.
+				`Hello,&#32;World&#32;&amp;tc!`,
+				`&#32;dir&#61;&#34;ltr&#34;`,
+				`c&#32;&amp;&amp;&#32;alert(&#34;Hello,&#32;World!&#34;);`,
+				`Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\u0021`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<a title='{{.}}'>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Tags stripped, entity not re-escaped.
+				`Hello, World &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<textarea>{{.}}</textarea>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Angle brackets escaped to prevent injection of close tags, entity not re-escaped.
+				`Hello, &lt;b&gt;World&lt;/b&gt; &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<script>alert({{.}})</script>`,
+			[]string{
+				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
+				`"a[href =~ \"//example.com\"]#foo"`,
+				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
+				`" dir=\"ltr\""`,
+				// Not escaped.
+				`c && alert("Hello, World!");`,
+				// Escape sequence not over-escaped.
+				`"Hello, World & O'Reilly\u0021"`,
+				`"greeting=H%69,\u0026addressee=(World)"`,
+				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+				`",foo/,"`,
+			},
+		},
+		{
+			`<button onclick="alert({{.}})">`,
+			[]string{
+				`&#34;\u003cb\u003e \&#34;foo%\&#34; O&#39;Reilly \u0026bar;&#34;`,
+				`&#34;a[href =~ \&#34;//example.com\&#34;]#foo&#34;`,
+				`&#34;Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!&#34;`,
+				`&#34; dir=\&#34;ltr\&#34;&#34;`,
+				// Not JS escaped but HTML escaped.
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				// Escape sequence not over-escaped.
+				`&#34;Hello, World &amp; O&#39;Reilly\u0021&#34;`,
+				`&#34;greeting=H%69,\u0026addressee=(World)&#34;`,
+				`&#34;greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w&#34;`,
+				`&#34;,foo/,&#34;`,
+			},
+		},
+		{
+			`<script>alert("{{.}}")</script>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<script type="text/javascript">alert("{{.}}")</script>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<script type="text/javascript">alert({{.}})</script>`,
+			[]string{
+				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
+				`"a[href =~ \"//example.com\"]#foo"`,
+				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
+				`" dir=\"ltr\""`,
+				// Not escaped.
+				`c && alert("Hello, World!");`,
+				// Escape sequence not over-escaped.
+				`"Hello, World & O'Reilly\u0021"`,
+				`"greeting=H%69,\u0026addressee=(World)"`,
+				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+				`",foo/,"`,
+			},
+		},
+		{
+			// Not treated as JS. The output is the same as for <div>{{.}}</div>.
+			`<script type="golang.org/x/website/internal/backport/text/template">{{.}}</script>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Not escaped.
+				`Hello, <b>World</b> &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<button onclick='alert("{{.}}")'>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<a href="?q={{.}}">`,
+			[]string{
+				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+				`%20dir%3d%22ltr%22`,
+				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
+				`greeting=H%69,&amp;addressee=%28World%29`,
+				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<style>body { background: url('?img={{.}}') }</style>`,
+			[]string{
+				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+				`%20dir%3d%22ltr%22`,
+				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
+				`greeting=H%69,&addressee=%28World%29`,
+				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<img srcset="{{.}}">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				// Commas are not escaped.
+				`Hello,#ZgotmplZ`,
+				// Leading spaces are not percent escaped.
+				` dir=%22ltr%22`,
+				// Spaces after commas are not percent escaped.
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				// Metadata is not escaped.
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset={{.}}>`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				// Spaces are HTML escaped, not %-escaped.
+				`&#32;dir&#61;%22ltr%22`,
+				`#ZgotmplZ,&#32;World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting&#61;H%69%2c&amp;addressee&#61;%28World%29`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+				// Commas are escaped.
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="{{.}} 2x, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ {{.}}, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/?q={{.}} 2x, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ 2x, {{.}} 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ 2x, https://golang.org/ {{.}}">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		tmpl := Must(New("x").Parse(test.input))
+		pre := strings.Index(test.input, "{{.}}")
+		post := len(test.input) - (pre + 5)
+		var b bytes.Buffer
+		for i, x := range data {
+			b.Reset()
+			if err := tmpl.Execute(&b, x); err != nil {
+				t.Errorf("%q with %v: %s", test.input, x, err)
+				continue
+			}
+			if want, got := test.want[i], b.String()[pre:b.Len()-post]; want != got {
+				t.Errorf("%q with %v:\nwant\n\t%q,\ngot\n\t%q\n", test.input, x, want, got)
+				continue
+			}
+		}
+	}
+}
+
+// Test that we print using the String method. Was issue 3073.
+type myStringer struct {
+	v int
+}
+
+func (s *myStringer) String() string {
+	return fmt.Sprintf("string=%d", s.v)
+}
+
+type errorer struct {
+	v int
+}
+
+func (s *errorer) Error() string {
+	return fmt.Sprintf("error=%d", s.v)
+}
+
+func TestStringer(t *testing.T) {
+	s := &myStringer{3}
+	b := new(bytes.Buffer)
+	tmpl := Must(New("x").Parse("{{.}}"))
+	if err := tmpl.Execute(b, s); err != nil {
+		t.Fatal(err)
+	}
+	var expect = "string=3"
+	if b.String() != expect {
+		t.Errorf("expected %q got %q", expect, b.String())
+	}
+	e := &errorer{7}
+	b.Reset()
+	if err := tmpl.Execute(b, e); err != nil {
+		t.Fatal(err)
+	}
+	expect = "error=7"
+	if b.String() != expect {
+		t.Errorf("expected %q got %q", expect, b.String())
+	}
+}
+
+// https://golang.org/issue/5982
+func TestEscapingNilNonemptyInterfaces(t *testing.T) {
+	tmpl := Must(New("x").Parse("{{.E}}"))
+
+	got := new(bytes.Buffer)
+	testData := struct{ E error }{} // any non-empty interface here will do; error is just ready at hand
+	tmpl.Execute(got, testData)
+
+	// A non-empty interface should print like an empty interface.
+	want := new(bytes.Buffer)
+	data := struct{ E interface{} }{}
+	tmpl.Execute(want, data)
+
+	if !bytes.Equal(want.Bytes(), got.Bytes()) {
+		t.Errorf("expected %q got %q", string(want.Bytes()), string(got.Bytes()))
+	}
+}
diff --git a/internal/backport/html/template/context.go b/internal/backport/html/template/context.go
new file mode 100644
index 0000000..1aacec4
--- /dev/null
+++ b/internal/backport/html/template/context.go
@@ -0,0 +1,265 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// context describes the state an HTML parser must be in when it reaches the
+// portion of HTML produced by evaluating a particular template node.
+//
+// The zero value of type context is the start context for a template that
+// produces an HTML fragment as defined at
+// https://www.w3.org/TR/html5/syntax.html#the-end
+// where the context element is null.
+type context struct {
+	state   state
+	delim   delim
+	urlPart urlPart
+	jsCtx   jsCtx
+	attr    attr
+	element element
+	n       parse.Node // for range break/continue
+	err     *Error
+}
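+
+// For instance, the action in <a href="{{.}}"> is escaped in a context whose
+// state is stateURL and whose delim is delimDoubleQuote, while an action in
+// ordinary page text is escaped in the zero context (stateText).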
+
+func (c context) String() string {
+	var err error
+	if c.err != nil {
+		err = c.err
+	}
+	return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, err)
+}
+
+// eq reports whether two contexts are equal.
+func (c context) eq(d context) bool {
+	return c.state == d.state &&
+		c.delim == d.delim &&
+		c.urlPart == d.urlPart &&
+		c.jsCtx == d.jsCtx &&
+		c.attr == d.attr &&
+		c.element == d.element &&
+		c.err == d.err
+}
+
+// mangle produces an identifier that includes a suffix that distinguishes it
+// from template names mangled with different contexts.
+func (c context) mangle(templateName string) string {
+	// The mangled name for the default context is the input templateName.
+	if c.state == stateText {
+		return templateName
+	}
+	s := templateName + "$htmltemplate_" + c.state.String()
+	if c.delim != delimNone {
+		s += "_" + c.delim.String()
+	}
+	if c.urlPart != urlPartNone {
+		s += "_" + c.urlPart.String()
+	}
+	if c.jsCtx != jsCtxRegexp {
+		s += "_" + c.jsCtx.String()
+	}
+	if c.attr != attrNone {
+		s += "_" + c.attr.String()
+	}
+	if c.element != elementNone {
+		s += "_" + c.element.String()
+	}
+	return s
+}
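+
+// For example, a template named "foo" escaped in a context whose only
+// non-default field is state == stateJS is mangled to
+// "foo$htmltemplate_stateJS", while in the default stateText context the
+// name is left as "foo".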
+
+// state describes a high-level HTML parser state.
+//
+// It bounds the top of the element stack, and by extension the HTML insertion
+// mode, but also contains state that does not correspond to anything in the
+// HTML5 parsing algorithm because a single token production in the HTML
+// grammar may contain embedded actions in a template. For instance, the quoted
+// HTML attribute produced by
+//     <div title="Hello {{.World}}">
+// is a single token in HTML's grammar but in a template spans several nodes.
+type state uint8
+
+//go:generate stringer -type state
+
+const (
+	// stateText is parsed character data. An HTML parser is in
+	// this state when its parse position is outside an HTML tag,
+	// directive, comment, and special element body.
+	stateText state = iota
+	// stateTag occurs before an HTML attribute or the end of a tag.
+	stateTag
+	// stateAttrName occurs inside an attribute name.
+	// It occurs between the ^'s in ` ^name^ = value`.
+	stateAttrName
+	// stateAfterName occurs after an attr name has ended but before any
+	// equals sign. It occurs between the ^'s in ` name^ ^= value`.
+	stateAfterName
+	// stateBeforeValue occurs after the equals sign but before the value.
+	// It occurs between the ^'s in ` name =^ ^value`.
+	stateBeforeValue
+	// stateHTMLCmt occurs inside an <!-- HTML comment -->.
+	stateHTMLCmt
+	// stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
+	// as described at https://www.w3.org/TR/html5/syntax.html#elements-0
+	stateRCDATA
+	// stateAttr occurs inside an HTML attribute whose content is text.
+	stateAttr
+	// stateURL occurs inside an HTML attribute whose content is a URL.
+	stateURL
+	// stateSrcset occurs inside an HTML srcset attribute.
+	stateSrcset
+	// stateJS occurs inside an event handler or script element.
+	stateJS
+	// stateJSDqStr occurs inside a JavaScript double quoted string.
+	stateJSDqStr
+	// stateJSSqStr occurs inside a JavaScript single quoted string.
+	stateJSSqStr
+	// stateJSRegexp occurs inside a JavaScript regexp literal.
+	stateJSRegexp
+	// stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+	stateJSBlockCmt
+	// stateJSLineCmt occurs inside a JavaScript // line comment.
+	stateJSLineCmt
+	// stateCSS occurs inside a <style> element or style attribute.
+	stateCSS
+	// stateCSSDqStr occurs inside a CSS double quoted string.
+	stateCSSDqStr
+	// stateCSSSqStr occurs inside a CSS single quoted string.
+	stateCSSSqStr
+	// stateCSSDqURL occurs inside a CSS double quoted url("...").
+	stateCSSDqURL
+	// stateCSSSqURL occurs inside a CSS single quoted url('...').
+	stateCSSSqURL
+	// stateCSSURL occurs inside a CSS unquoted url(...).
+	stateCSSURL
+	// stateCSSBlockCmt occurs inside a CSS /* block comment */.
+	stateCSSBlockCmt
+	// stateCSSLineCmt occurs inside a CSS // line comment.
+	stateCSSLineCmt
+	// stateError is an infectious error state outside any valid
+	// HTML/CSS/JS construct.
+	stateError
+	// stateDead marks unreachable code after a {{break}} or {{continue}}.
+	stateDead
+)
+
+// isComment is true for any state that contains content meant for template
+// authors & maintainers, not for end-users or machines.
+func isComment(s state) bool {
+	switch s {
+	case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
+		return true
+	}
+	return false
+}
+
+// isInTag reports whether s occurs solely inside an HTML tag.
+func isInTag(s state) bool {
+	switch s {
+	case stateTag, stateAttrName, stateAfterName, stateBeforeValue, stateAttr:
+		return true
+	}
+	return false
+}
+
+// delim is the delimiter that will end the current HTML attribute.
+type delim uint8
+
+//go:generate stringer -type delim
+
+const (
+	// delimNone occurs outside any attribute.
+	delimNone delim = iota
+	// delimDoubleQuote occurs when a double quote (") closes the attribute.
+	delimDoubleQuote
+	// delimSingleQuote occurs when a single quote (') closes the attribute.
+	delimSingleQuote
+	// delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
+	// closes the attribute.
+	delimSpaceOrTagEnd
+)
+
+// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
+// encoding strategies.
+type urlPart uint8
+
+//go:generate stringer -type urlPart
+
+const (
+	// urlPartNone occurs when not in a URL, or possibly at the start:
+	// ^ in "^http://auth/path?k=v#frag".
+	urlPartNone urlPart = iota
+	// urlPartPreQuery occurs in the scheme, authority, or path; between the
+	// ^s in "h^ttp://auth/path^?k=v#frag".
+	urlPartPreQuery
+	// urlPartQueryOrFrag occurs in the query portion between the ^s in
+	// "http://auth/path?^k=v#frag^".
+	urlPartQueryOrFrag
+	// urlPartUnknown occurs due to joining of contexts both before and
+	// after the query separator.
+	urlPartUnknown
+)
+
+// jsCtx determines whether a '/' starts a regular expression literal or a
+// division operator.
+type jsCtx uint8
+
+//go:generate stringer -type jsCtx
+
+const (
+	// jsCtxRegexp occurs where a '/' would start a regexp literal.
+	jsCtxRegexp jsCtx = iota
+	// jsCtxDivOp occurs where a '/' would start a division operator.
+	jsCtxDivOp
+	// jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
+	jsCtxUnknown
+)
+
+// element identifies the HTML element when inside a start tag or special body.
+// Certain HTML elements (for example <script> and <style>) have bodies that are
+// treated differently from stateText so the element type is necessary to
+// transition into the correct context at the end of a tag and to identify the
+// end delimiter for the body.
+type element uint8
+
+//go:generate stringer -type element
+
+const (
+	// elementNone occurs outside a special tag or special element body.
+	elementNone element = iota
+	// elementScript corresponds to the raw text <script> element
+	// with JS MIME type or no type attribute.
+	elementScript
+	// elementStyle corresponds to the raw text <style> element.
+	elementStyle
+	// elementTextarea corresponds to the RCDATA <textarea> element.
+	elementTextarea
+	// elementTitle corresponds to the RCDATA <title> element.
+	elementTitle
+)
+
+//go:generate stringer -type attr
+
+// attr identifies the current HTML attribute when inside the attribute,
+// that is, starting from stateAttrName until stateTag/stateText (exclusive).
+type attr uint8
+
+const (
+	// attrNone corresponds to a normal attribute or no attribute.
+	attrNone attr = iota
+	// attrScript corresponds to an event handler attribute.
+	attrScript
+	// attrScriptType corresponds to the type attribute in a script HTML element.
+	attrScriptType
+	// attrStyle corresponds to the style attribute whose value is CSS.
+	attrStyle
+	// attrURL corresponds to an attribute whose value is a URL.
+	attrURL
+	// attrSrcset corresponds to a srcset attribute.
+	attrSrcset
+)
diff --git a/internal/backport/html/template/css.go b/internal/backport/html/template/css.go
new file mode 100644
index 0000000..eb92fc9
--- /dev/null
+++ b/internal/backport/html/template/css.go
@@ -0,0 +1,260 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// endsWithCSSKeyword reports whether b ends with an ident that
+// case-insensitively matches the lower-case kw.
+func endsWithCSSKeyword(b []byte, kw string) bool {
+	i := len(b) - len(kw)
+	if i < 0 {
+		// Too short.
+		return false
+	}
+	if i != 0 {
+		r, _ := utf8.DecodeLastRune(b[:i])
+		if isCSSNmchar(r) {
+			// Too long.
+			return false
+		}
+	}
+	// Many CSS keywords, such as "!important", can have characters encoded,
+	// but the URI production does not allow that according to
+	// https://www.w3.org/TR/css3-syntax/#TOK-URI
+	// This does not attempt to recognize encoded keywords. For example,
+	// given "\75\72\6c" and "url" this return false.
+	return string(bytes.ToLower(b[i:])) == kw
+}
+
+// isCSSNmchar reports whether rune is allowed anywhere in a CSS identifier.
+func isCSSNmchar(r rune) bool {
+	// Based on the CSS3 nmchar production but ignores multi-rune escape
+	// sequences.
+	// https://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
+	return 'a' <= r && r <= 'z' ||
+		'A' <= r && r <= 'Z' ||
+		'0' <= r && r <= '9' ||
+		r == '-' ||
+		r == '_' ||
+		// Non-ASCII cases below.
+		0x80 <= r && r <= 0xd7ff ||
+		0xe000 <= r && r <= 0xfffd ||
+		0x10000 <= r && r <= 0x10ffff
+}
+
+// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
+// If there is no change, it returns the input, otherwise it returns a slice
+// backed by a new array.
+// https://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
+func decodeCSS(s []byte) []byte {
+	i := bytes.IndexByte(s, '\\')
+	if i == -1 {
+		return s
+	}
+	// The UTF-8 sequence for a codepoint is never longer than 1 + the
+	// number of hex digits needed to represent that codepoint, so len(s) is an
+	// upper bound on the output length.
+	b := make([]byte, 0, len(s))
+	for len(s) != 0 {
+		i := bytes.IndexByte(s, '\\')
+		if i == -1 {
+			i = len(s)
+		}
+		b, s = append(b, s[:i]...), s[i:]
+		if len(s) < 2 {
+			break
+		}
+		// https://www.w3.org/TR/css3-syntax/#SUBTOK-escape
+		// escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
+		if isHex(s[1]) {
+			// https://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
+			//   unicode ::= '\' [0-9a-fA-F]{1,6} wc?
+			j := 2
+			for j < len(s) && j < 7 && isHex(s[j]) {
+				j++
+			}
+			r := hexDecode(s[1:j])
+			if r > unicode.MaxRune {
+				r, j = r/16, j-1
+			}
+			n := utf8.EncodeRune(b[len(b):cap(b)], r)
+			// The optional space at the end allows a hex
+			// sequence to be followed by a literal hex.
+			// string(decodeCSS([]byte(`\A B`))) == "\nB"
+			b, s = b[:len(b)+n], skipCSSSpace(s[j:])
+		} else {
+			// `\\` decodes to `\` and `\"` to `"`.
+			_, n := utf8.DecodeRune(s[1:])
+			b, s = append(b, s[1:1+n]...), s[1+n:]
+		}
+	}
+	return b
+}
+
+// isHex reports whether the given character is a hex digit.
+func isHex(c byte) bool {
+	return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
+}
+
+// hexDecode decodes a short hex digit sequence: "10" -> 16.
+func hexDecode(s []byte) rune {
+	n := '\x00'
+	for _, c := range s {
+		n <<= 4
+		switch {
+		case '0' <= c && c <= '9':
+			n |= rune(c - '0')
+		case 'a' <= c && c <= 'f':
+			n |= rune(c-'a') + 10
+		case 'A' <= c && c <= 'F':
+			n |= rune(c-'A') + 10
+		default:
+			panic(fmt.Sprintf("Bad hex digit in %q", s))
+		}
+	}
+	return n
+}
+
+// skipCSSSpace returns a suffix of c, skipping over a single space.
+func skipCSSSpace(c []byte) []byte {
+	if len(c) == 0 {
+		return c
+	}
+	// wc ::= #x9 | #xA | #xC | #xD | #x20
+	switch c[0] {
+	case '\t', '\n', '\f', ' ':
+		return c[1:]
+	case '\r':
+		// This differs from CSS3's wc production because it contains a
+		// probable spec error whereby wc contains all the single byte
+		// sequences in nl (newline) but not CRLF.
+		if len(c) >= 2 && c[1] == '\n' {
+			return c[2:]
+		}
+		return c[1:]
+	}
+	return c
+}
+
+// isCSSSpace reports whether b is a CSS space char as defined in wc.
+func isCSSSpace(b byte) bool {
+	switch b {
+	case '\t', '\n', '\f', '\r', ' ':
+		return true
+	}
+	return false
+}
+
+// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
+func cssEscaper(args ...interface{}) string {
+	s, _ := stringify(args...)
+	var b strings.Builder
+	r, w, written := rune(0), 0, 0
+	for i := 0; i < len(s); i += w {
+		// See comment in htmlEscaper.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		var repl string
+		switch {
+		case int(r) < len(cssReplacementTable) && cssReplacementTable[r] != "":
+			repl = cssReplacementTable[r]
+		default:
+			continue
+		}
+		if written == 0 {
+			b.Grow(len(s))
+		}
+		b.WriteString(s[written:i])
+		b.WriteString(repl)
+		written = i + w
+		if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
+			b.WriteByte(' ')
+		}
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
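+
+// For example, given the replacement table below, cssEscaper("a<b") returns
+// `a\3c b`: the trailing space keeps the following 'b' from being read as
+// part of the hex escape.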
+
+var cssReplacementTable = []string{
+	0:    `\0`,
+	'\t': `\9`,
+	'\n': `\a`,
+	'\f': `\c`,
+	'\r': `\d`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\22`,
+	'&':  `\26`,
+	'\'': `\27`,
+	'(':  `\28`,
+	')':  `\29`,
+	'+':  `\2b`,
+	'/':  `\2f`,
+	':':  `\3a`,
+	';':  `\3b`,
+	'<':  `\3c`,
+	'>':  `\3e`,
+	'\\': `\\`,
+	'{':  `\7b`,
+	'}':  `\7d`,
+}
+
+var expressionBytes = []byte("expression")
+var mozBindingBytes = []byte("mozbinding")
+
+// cssValueFilter allows innocuous CSS values in the output including CSS
+// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
+// (inherit, blue), and colors (#888).
+// It filters out unsafe values, such as those that affect token boundaries,
+// and anything that might execute scripts.
+func cssValueFilter(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeCSS {
+		return s
+	}
+	b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)
+
+	// CSS3 error handling is specified as honoring string boundaries per
+	// https://www.w3.org/TR/css3-syntax/#error-handling :
+	//     Malformed declarations. User agents must handle unexpected
+	//     tokens encountered while parsing a declaration by reading until
+	//     the end of the declaration, while observing the rules for
+	//     matching pairs of (), [], {}, "", and '', and correctly handling
+	//     escapes. For example, a malformed declaration may be missing a
+	//     property, colon (:) or value.
+	// So we need to make sure that values do not have mismatched bracket
+	// or quote characters to prevent the browser from restarting parsing
+	// inside a string that might embed JavaScript source.
+	for i, c := range b {
+		switch c {
+		case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
+			return filterFailsafe
+		case '-':
+			// Disallow <!-- or -->.
+			// -- should not appear in valid identifiers.
+			if i != 0 && b[i-1] == '-' {
+				return filterFailsafe
+			}
+		default:
+			if c < utf8.RuneSelf && isCSSNmchar(rune(c)) {
+				id = append(id, c)
+			}
+		}
+	}
+	id = bytes.ToLower(id)
+	if bytes.Contains(id, expressionBytes) || bytes.Contains(id, mozBindingBytes) {
+		return filterFailsafe
+	}
+	return string(b)
+}
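+
+// For example (mirroring the tests for this filter), cssValueFilter passes
+// values such as "color: red" and "-moz-corner-radius" through unchanged,
+// while "expression(alert(1337))" and "</style" become "ZgotmplZ".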
diff --git a/internal/backport/html/template/css_test.go b/internal/backport/html/template/css_test.go
new file mode 100644
index 0000000..a735638
--- /dev/null
+++ b/internal/backport/html/template/css_test.go
@@ -0,0 +1,281 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func TestEndsWithCSSKeyword(t *testing.T) {
+	tests := []struct {
+		css, kw string
+		want    bool
+	}{
+		{"", "url", false},
+		{"url", "url", true},
+		{"URL", "url", true},
+		{"Url", "url", true},
+		{"url", "important", false},
+		{"important", "important", true},
+		{"image-url", "url", false},
+		{"imageurl", "url", false},
+		{"image url", "url", true},
+	}
+	for _, test := range tests {
+		got := endsWithCSSKeyword([]byte(test.css), test.kw)
+		if got != test.want {
+			t.Errorf("want %t but got %t for css=%v, kw=%v", test.want, got, test.css, test.kw)
+		}
+	}
+}
+
+func TestIsCSSNmchar(t *testing.T) {
+	tests := []struct {
+		rune rune
+		want bool
+	}{
+		{0, false},
+		{'0', true},
+		{'9', true},
+		{'A', true},
+		{'Z', true},
+		{'a', true},
+		{'z', true},
+		{'_', true},
+		{'-', true},
+		{':', false},
+		{';', false},
+		{' ', false},
+		{0x7f, false},
+		{0x80, true},
+		{0x1234, true},
+		{0xd800, false},
+		{0xdc00, false},
+		{0xfffe, false},
+		{0x10000, true},
+		{0x110000, false},
+	}
+	for _, test := range tests {
+		got := isCSSNmchar(test.rune)
+		if got != test.want {
+			t.Errorf("%q: want %t but got %t", string(test.rune), test.want, got)
+		}
+	}
+}
+
+func TestDecodeCSS(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{``, ``},
+		{`foo`, `foo`},
+		{`foo\`, `foo`},
+		{`foo\\`, `foo\`},
+		{`\`, ``},
+		{`\A`, "\n"},
+		{`\a`, "\n"},
+		{`\0a`, "\n"},
+		{`\00000a`, "\n"},
+		{`\000000a`, "\u0000a"},
+		{`\1234 5`, "\u1234" + "5"},
+		{`\1234\20 5`, "\u1234" + " 5"},
+		{`\1234\A 5`, "\u1234" + "\n5"},
+		{"\\1234\t5", "\u1234" + "5"},
+		{"\\1234\n5", "\u1234" + "5"},
+		{"\\1234\r\n5", "\u1234" + "5"},
+		{`\12345`, "\U00012345"},
+		{`\\`, `\`},
+		{`\\ `, `\ `},
+		{`\"`, `"`},
+		{`\'`, `'`},
+		{`\.`, `.`},
+		{`\. .`, `. .`},
+		{
+			`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e  fox jumps\2028over the \3c canine class=\22lazy\22 \3e dog\3c/canine\3e`,
+			"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>",
+		},
+	}
+	for _, test := range tests {
+		got1 := string(decodeCSS([]byte(test.css)))
+		if got1 != test.want {
+			t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.css, test.want, got1)
+		}
+		recoded := cssEscaper(got1)
+		if got2 := string(decodeCSS([]byte(recoded))); got2 != test.want {
+			t.Errorf("%q: escape & decode not dual for %q", test.css, recoded)
+		}
+	}
+}
+
+func TestHexDecode(t *testing.T) {
+	for i := 0; i < 0x200000; i += 101 /* coprime with 16 */ {
+		s := strconv.FormatInt(int64(i), 16)
+		if got := int(hexDecode([]byte(s))); got != i {
+			t.Errorf("%s: want %d but got %d", s, i, got)
+		}
+		s = strings.ToUpper(s)
+		if got := int(hexDecode([]byte(s))); got != i {
+			t.Errorf("%s: want %d but got %d", s, i, got)
+		}
+	}
+}
+
+func TestSkipCSSSpace(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{"", ""},
+		{"foo", "foo"},
+		{"\n", ""},
+		{"\r\n", ""},
+		{"\r", ""},
+		{"\t", ""},
+		{" ", ""},
+		{"\f", ""},
+		{" foo", "foo"},
+		{"  foo", " foo"},
+		{`\20`, `\20`},
+	}
+	for _, test := range tests {
+		got := string(skipCSSSpace([]byte(test.css)))
+		if got != test.want {
+			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+		}
+	}
+}
+
+func TestCSSEscaper(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	want := ("\\0\x01\x02\x03\x04\x05\x06\x07" +
+		"\x08\\9 \\a\x0b\\c \\d\x0E\x0F" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17" +
+		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !\22#$%\26\27\28\29*\2b,-.\2f ` +
+		`0123456789\3a\3b\3c=\3e?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\\]^_` +
+		"`abcdefghijklmno" +
+		`pqrstuvwxyz\7b|\7d~` + "\u007f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	got := cssEscaper(input)
+	if got != want {
+		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+
+	got = string(decodeCSS([]byte(got)))
+	if input != got {
+		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", input, got)
+	}
+}
+
+func TestCSSValueFilter(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{"", ""},
+		{"foo", "foo"},
+		{"0", "0"},
+		{"0px", "0px"},
+		{"-5px", "-5px"},
+		{"1.25in", "1.25in"},
+		{"+.33em", "+.33em"},
+		{"100%", "100%"},
+		{"12.5%", "12.5%"},
+		{".foo", ".foo"},
+		{"#bar", "#bar"},
+		{"corner-radius", "corner-radius"},
+		{"-moz-corner-radius", "-moz-corner-radius"},
+		{"#000", "#000"},
+		{"#48f", "#48f"},
+		{"#123456", "#123456"},
+		{"U+00-FF, U+980-9FF", "U+00-FF, U+980-9FF"},
+		{"color: red", "color: red"},
+		{"<!--", "ZgotmplZ"},
+		{"-->", "ZgotmplZ"},
+		{"<![CDATA[", "ZgotmplZ"},
+		{"]]>", "ZgotmplZ"},
+		{"</style", "ZgotmplZ"},
+		{`"`, "ZgotmplZ"},
+		{`'`, "ZgotmplZ"},
+		{"`", "ZgotmplZ"},
+		{"\x00", "ZgotmplZ"},
+		{"/* foo */", "ZgotmplZ"},
+		{"//", "ZgotmplZ"},
+		{"[href=~", "ZgotmplZ"},
+		{"expression(alert(1337))", "ZgotmplZ"},
+		{"-expression(alert(1337))", "ZgotmplZ"},
+		{"expression", "ZgotmplZ"},
+		{"Expression", "ZgotmplZ"},
+		{"EXPRESSION", "ZgotmplZ"},
+		{"-moz-binding", "ZgotmplZ"},
+		{"-expr\x00ession(alert(1337))", "ZgotmplZ"},
+		{`-expr\0ession(alert(1337))`, "ZgotmplZ"},
+		{`-express\69on(alert(1337))`, "ZgotmplZ"},
+		{`-express\69 on(alert(1337))`, "ZgotmplZ"},
+		{`-exp\72 ession(alert(1337))`, "ZgotmplZ"},
+		{`-exp\52 ession(alert(1337))`, "ZgotmplZ"},
+		{`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+		{`-expre\0000073sion`, "-expre\x073sion"},
+		{`@import url evil.css`, "ZgotmplZ"},
+	}
+	for _, test := range tests {
+		got := cssValueFilter(test.css)
+		if got != test.want {
+			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+		}
+	}
+}
+
+func BenchmarkCSSEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkCSSEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssEscaper("The quick, brown fox jumps over the lazy dog.")
+	}
+}
+
+func BenchmarkDecodeCSS(b *testing.B) {
+	s := []byte(`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3edog\3c/canine\3e`)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		decodeCSS(s)
+	}
+}
+
+func BenchmarkDecodeCSSNoSpecials(b *testing.B) {
+	s := []byte("The quick, brown fox jumps over the lazy dog.")
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		decodeCSS(s)
+	}
+}
+
+func BenchmarkCSSValueFilter(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssValueFilter(`  e\78preS\0Sio/**/n(alert(1337))`)
+	}
+}
+
+func BenchmarkCSSValueFilterOk(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssValueFilter(`Times New Roman`)
+	}
+}
diff --git a/internal/backport/html/template/delim_string.go b/internal/backport/html/template/delim_string.go
new file mode 100644
index 0000000..6d80e09
--- /dev/null
+++ b/internal/backport/html/template/delim_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type delim"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _delim_name = "delimNonedelimDoubleQuotedelimSingleQuotedelimSpaceOrTagEnd"
+
+var _delim_index = [...]uint8{0, 9, 25, 41, 59}
+
+func (i delim) String() string {
+	if i >= delim(len(_delim_index)-1) {
+		return "delim(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _delim_name[_delim_index[i]:_delim_index[i+1]]
+}
diff --git a/internal/backport/html/template/doc.go b/internal/backport/html/template/doc.go
new file mode 100644
index 0000000..ed2a7e2
--- /dev/null
+++ b/internal/backport/html/template/doc.go
@@ -0,0 +1,241 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template (html/template) implements data-driven templates for
+generating HTML output safe against code injection. It provides the
+same interface as package text/template and should be used instead of
+text/template whenever the output is HTML.
+
+The documentation here focuses on the security features of the package.
+For information about how to program the templates themselves, see the
+documentation for text/template.
+
+Introduction
+
+This package wraps package text/template so you can share its template API
+to parse and execute HTML templates safely.
+
+  tmpl, err := template.New("name").Parse(...)
+  // Error checking elided
+  err = tmpl.Execute(out, data)
+
+If successful, tmpl will now be injection-safe. Otherwise, err is an error
+defined in the docs for ErrorCode.
+
+HTML templates treat data values as plain text which should be encoded so they
+can be safely embedded in an HTML document. The escaping is contextual, so
+actions can appear within JavaScript, CSS, and URI contexts.
+
+The security model used by this package assumes that template authors are
+trusted, while Execute's data parameter is not. More details are
+provided below.
+
+Example
+
+  import "golang.org/x/website/internal/backport/text/template"
+  ...
+  t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+  err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces
+
+  Hello, <script>alert('you have been pwned')</script>!
+
+but the contextual autoescaping in html/template
+
+  import "html/template"
+  ...
+  t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+  err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces safe, escaped HTML output
+
+  Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
+
+
+Contexts
+
+This package understands HTML, CSS, JavaScript, and URIs. It adds sanitizing
+functions to each simple action pipeline, so given the excerpt
+
+  <a href="/search?q={{.}}">{{.}}</a>
+
+At parse time each {{.}} is overwritten to add escaping functions as necessary.
+In this case it becomes
+
+  <a href="/search?q={{. | urlescaper | attrescaper}}">{{. | htmlescaper}}</a>
+
+where urlescaper, attrescaper, and htmlescaper are aliases for internal escaping
+functions.
+
+For these internal escaping functions, if an action pipeline evaluates to
+a nil interface value, it is treated as though it were an empty string.
+
+Namespaced and data- attributes
+
+Attributes with a namespace are treated as if they had no namespace.
+Given the excerpt
+
+  <a my:href="{{.}}"></a>
+
+At parse time the attribute will be treated as if it were just "href".
+So at parse time the template becomes:
+
+  <a my:href="{{. | urlescaper | attrescaper}}"></a>
+
+Similarly to attributes with namespaces, attributes with a "data-" prefix are
+treated as if they had no "data-" prefix. So given
+
+  <a data-href="{{.}}"></a>
+
+At parse time this becomes
+
+  <a data-href="{{. | urlescaper | attrescaper}}"></a>
+
+If an attribute has both a namespace and a "data-" prefix, only the namespace
+will be removed when determining the context. For example
+
+  <a my:data-href="{{.}}"></a>
+
+This is handled as if "my:data-href" was just "data-href" and not "href" as
+it would be if the "data-" prefix were to be ignored too. Thus at parse
+time this becomes just
+
+  <a my:data-href="{{. | attrescaper}}"></a>
+
+As a special case, attributes with the namespace "xmlns" are always treated
+as containing URLs. Given the excerpts
+
+  <a xmlns:title="{{.}}"></a>
+  <a xmlns:href="{{.}}"></a>
+  <a xmlns:onclick="{{.}}"></a>
+
+At parse time they become:
+
+  <a xmlns:title="{{. | urlescaper | attrescaper}}"></a>
+  <a xmlns:href="{{. | urlescaper | attrescaper}}"></a>
+  <a xmlns:onclick="{{. | urlescaper | attrescaper}}"></a>
+
+Errors
+
+See the documentation of ErrorCode for details.
+
+
+A fuller picture
+
+The rest of this package comment may be skipped on first reading; it includes
+details necessary to understand escaping contexts and error messages. Most users
+will not need to understand these details.
+
+
+Contexts
+
+Assuming {{.}} is `O'Reilly: How are <i>you</i>?`, the table below shows
+how {{.}} appears when used in the context to the left.
+
+  Context                          {{.}} After
+  {{.}}                            O'Reilly: How are &lt;i&gt;you&lt;/i&gt;?
+  <a title='{{.}}'>                O&#39;Reilly: How are you?
+  <a href="/{{.}}">                O&#39;Reilly: How are %3ci%3eyou%3c/i%3e?
+  <a href="?q={{.}}">              O&#39;Reilly%3a%20How%20are%3ci%3e...%3f
+  <a onx='f("{{.}}")'>             O\x27Reilly: How are \x3ci\x3eyou...?
+  <a onx='f({{.}})'>               "O\x27Reilly: How are \x3ci\x3eyou...?"
+  <a onx='pattern = /{{.}}/;'>     O\x27Reilly: How are \x3ci\x3eyou...\x3f
+
+If used in an unsafe context, then the value might be filtered out:
+
+  Context                          {{.}} After
+  <a href="{{.}}">                 #ZgotmplZ
+
+since "O'Reilly:" is not an allowed protocol like "http:".
+
+
+If {{.}} is the innocuous word, `left`, then it can appear more widely,
+
+  Context                              {{.}} After
+  {{.}}                                left
+  <a title='{{.}}'>                    left
+  <a href='{{.}}'>                     left
+  <a href='/{{.}}'>                    left
+  <a href='?dir={{.}}'>                left
+  <a style="border-{{.}}: 4px">        left
+  <a style="align: {{.}}">             left
+  <a style="background: '{{.}}'>       left
+  <a style="background: url('{{.}}')>  left
+  <style>p.{{.}} {color:red}</style>   left
+
+Non-string values can be used in JavaScript contexts.
+If {{.}} is
+
+  struct{A,B string}{ "foo", "bar" }
+
+in the escaped template
+
+  <script>var pair = {{.}};</script>
+
+then the template output is
+
+  <script>var pair = {"A": "foo", "B": "bar"};</script>
+
+See package json to understand how non-string content is marshaled for
+embedding in JavaScript contexts.
+
+
+Typed Strings
+
+By default, this package assumes that all pipelines produce a plain text string.
+It adds escaping pipeline stages necessary to correctly and safely embed that
+plain text string in the appropriate context.
+
+When a data value is not plain text, you can make sure it is not over-escaped
+by marking it with its type.
+
+Types HTML, JS, URL, and others from content.go can carry safe content that is
+exempted from escaping.
+
+The template
+
+  Hello, {{.}}!
+
+can be invoked with
+
+  tmpl.Execute(out, template.HTML(`<b>World</b>`))
+
+to produce
+
+  Hello, <b>World</b>!
+
+instead of the
+
+  Hello, &lt;b&gt;World&lt;/b&gt;!
+
+that would have been produced if {{.}} was a regular string.
+
+
+Security Model
+
+https://rawgit.com/mikesamuel/sanitized-jquery-templates/trunk/safetemplate.html#problem_definition defines "safe" as used by this package.
+
+This package assumes that template authors are trusted, that Execute's data
+parameter is not, and seeks to preserve the properties below in the face
+of untrusted data:
+
+Structure Preservation Property:
+"... when a template author writes an HTML tag in a safe templating language,
+the browser will interpret the corresponding portion of the output as a tag
+regardless of the values of untrusted data, and similarly for other structures
+such as attribute boundaries and JS and CSS string boundaries."
+
+Code Effect Property:
+"... only code specified by the template author should run as a result of
+injecting the template output into a page and all code specified by the
+template author should run as a result of the same."
+
+Least Surprise Property:
+"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript, who
+knows that contextual autoescaping happens should be able to look at a {{.}}
+and correctly infer what sanitization happens."
+*/
+package template
diff --git a/internal/backport/html/template/element_string.go b/internal/backport/html/template/element_string.go
new file mode 100644
index 0000000..4573e08
--- /dev/null
+++ b/internal/backport/html/template/element_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type element"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _element_name = "elementNoneelementScriptelementStyleelementTextareaelementTitle"
+
+var _element_index = [...]uint8{0, 11, 24, 36, 51, 63}
+
+func (i element) String() string {
+	if i >= element(len(_element_index)-1) {
+		return "element(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _element_name[_element_index[i]:_element_index[i+1]]
+}
diff --git a/internal/backport/html/template/error.go b/internal/backport/html/template/error.go
new file mode 100644
index 0000000..0eae11f
--- /dev/null
+++ b/internal/backport/html/template/error.go
@@ -0,0 +1,234 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// Error describes a problem encountered during template Escaping.
+type Error struct {
+	// ErrorCode describes the kind of error.
+	ErrorCode ErrorCode
+	// Node is the node that caused the problem, if known.
+	// If not nil, it overrides Name and Line.
+	Node parse.Node
+	// Name is the name of the template in which the error was encountered.
+	Name string
+	// Line is the line number of the error in the template source or 0.
+	Line int
+	// Description is a human-readable description of the problem.
+	Description string
+}
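+
+// Escaping happens on the first Execute or ExecuteTemplate call, so that is
+// where escaping errors are reported. A rough sketch (the template text and
+// variable names are only illustrative) of recovering the details:
+//
+//   t := Must(New("t").Parse(`<a href="{{if .C}}/path/{{else}}/search?q={{end}}{{.X}}">`))
+//   if err := t.Execute(os.Stdout, nil); err != nil {
+//       if terr, ok := err.(*Error); ok {
+//           fmt.Println(terr.ErrorCode, terr.Description)
+//       }
+//   }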
+
+// ErrorCode is a code for a kind of error.
+type ErrorCode int
+
+// We define codes for each error that manifests while escaping templates, but
+// escaped templates may also fail at runtime.
+//
+// Output: "ZgotmplZ"
+// Example:
+//   <img src="{{.X}}">
+//   where {{.X}} evaluates to `javascript:...`
+// Discussion:
+//   "ZgotmplZ" is a special value that indicates that unsafe content reached a
+//   CSS or URL context at runtime. The output of the example will be
+//     <img src="#ZgotmplZ">
+//   If the data comes from a trusted source, use content types to exempt it
+//   from filtering: URL(`javascript:...`).
+const (
+	// OK indicates the lack of an error.
+	OK ErrorCode = iota
+
+	// ErrAmbigContext: "... appears in an ambiguous context within a URL"
+	// Example:
+	//   <a href="
+	//      {{if .C}}
+	//        /path/
+	//      {{else}}
+	//        /search?q=
+	//      {{end}}
+	//      {{.X}}
+	//   ">
+	// Discussion:
+	//   {{.X}} is in an ambiguous URL context since, depending on {{.C}},
+	//   it may be either a URL suffix or a query parameter.
+	//   Moving {{.X}} into the condition removes the ambiguity:
+	//   <a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
+	ErrAmbigContext
+
+	// ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
+	//   "... in unquoted attr", "... in attribute name"
+	// Example:
+	//   <a href = /search?q=foo>
+	//   <href=foo>
+	//   <form na<e=...>
+	//   <option selected<
+	// Discussion:
+	//   This is often due to a typo in an HTML element, but some runes
+	//   are banned in tag names, attribute names, and unquoted attribute
+	//   values because they can tickle parser ambiguities.
+	//   Quoting all attributes is the best policy.
+	ErrBadHTML
+
+	// ErrBranchEnd: "{{if}} branches end in different contexts"
+	// Example:
+	//   {{if .C}}<a href="{{end}}{{.X}}
+	// Discussion:
+	//   Package html/template statically examines each path through an
+	//   {{if}}, {{range}}, or {{with}} to escape any following pipelines.
+	//   The example is ambiguous since {{.X}} might be an HTML text node,
+	//   or a URL prefix in an HTML attribute. The context of {{.X}} is
+	//   used to figure out how to escape it, but that context depends on
+	//   the run-time value of {{.C}} which is not statically known.
+	//
+	//   The problem is usually something like missing quotes or angle
+	//   brackets, or can be avoided by refactoring to put the two contexts
+	//   into different branches of an if, range or with. If the problem
+	//   is in a {{range}} over a collection that should never be empty,
+	//   adding a dummy {{else}} can help.
+	ErrBranchEnd
+
+	// ErrEndContext: "... ends in a non-text context: ..."
+	// Examples:
+	//   <div
+	//   <div title="no close quote>
+	//   <script>f()
+	// Discussion:
+	//   Executed templates should produce a DocumentFragment of HTML.
+	//   Templates that end without closing tags will trigger this error.
+	//   Templates that should not be used in an HTML context or that
+	//   produce incomplete Fragments should not be executed directly.
+	//
+	//   {{define "main"}} <script>{{template "helper"}}</script> {{end}}
+	//   {{define "helper"}} document.write(' <div title=" ') {{end}}
+	//
+	//   "helper" does not produce a valid document fragment, so should
+	//   not be Executed directly.
+	ErrEndContext
+
+	// ErrNoSuchTemplate: "no such template ..."
+	// Examples:
+	//   {{define "main"}}<div {{template "attrs"}}>{{end}}
+	//   {{define "attrs"}}href="{{.URL}}"{{end}}
+	// Discussion:
+	//   Package html/template looks through template calls to compute the
+	//   context.
+	//   Here the {{.URL}} in "attrs" must be treated as a URL when called
+	//   from "main", but you will get this error if "attrs" is not defined
+	//   when "main" is parsed.
+	ErrNoSuchTemplate
+
+	// ErrOutputContext: "cannot compute output context for template ..."
+	// Examples:
+	//   {{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
+	// Discussion:
+	//   A recursive template does not end in the same context in which it
+	//   starts, and a reliable output context cannot be computed.
+	//   Look for typos in the named template.
+	//   If the template should not be called in the named start context,
+	//   look for calls to that template in unexpected contexts.
+	//   Maybe refactor recursive templates to not be recursive.
+	ErrOutputContext
+
+	// ErrPartialCharset: "unfinished JS regexp charset in ..."
+	// Example:
+	//     <script>var pattern = /foo[{{.Chars}}]/</script>
+	// Discussion:
+	//   Package html/template does not support interpolation into regular
+	//   expression literal character sets.
+	ErrPartialCharset
+
+	// ErrPartialEscape: "unfinished escape sequence in ..."
+	// Example:
+	//   <script>alert("\{{.X}}")</script>
+	// Discussion:
+	//   Package html/template does not support actions following a
+	//   backslash.
+	//   This is usually an error and there are better solutions; for
+	//   example
+	//     <script>alert("{{.X}}")</script>
+	//   should work, and if {{.X}} is a partial escape sequence such as
+	//   "xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
+	ErrPartialEscape
+
+	// ErrRangeLoopReentry: "on range loop re-entry: ..."
+	// Example:
+	//   <script>var x = [{{range .}}'{{.}},{{end}}]</script>
+	// Discussion:
+	//   If an iteration through a range would cause it to end in a
+	//   different context than an earlier pass, there is no single context.
+	//   In the example, a quote is missing, so it is not clear
+	//   whether {{.}} is meant to be inside a JS string or in a JS value
+	//   context. The second iteration would produce something like
+	//
+	//     <script>var x = ['firstValue,'secondValue]</script>
+	ErrRangeLoopReentry
+
+	// ErrSlashAmbig: '/' could start a division or regexp.
+	// Example:
+	//   <script>
+	//     {{if .C}}var x = 1{{end}}
+	//     /-{{.N}}/i.test(x) ? doThis : doThat();
+	//   </script>
+	// Discussion:
+	//   The example above could produce `var x = 1/-2/i.test(s)...`
+	//   in which the first '/' is a mathematical division operator or it
+	//   could produce `/-2/i.test(s)` in which the first '/' starts a
+	//   regexp literal.
+	//   Look for missing semicolons inside branches, and maybe add
+	//   parentheses to make it clear which interpretation you intend.
+	ErrSlashAmbig
+
+	// ErrPredefinedEscaper: "predefined escaper ... disallowed in template"
+	// Example:
+	//   <div class={{. | html}}>Hello<div>
+	// Discussion:
+	//   Package html/template already contextually escapes all pipelines to
+	//   produce HTML output safe against code injection. Manually escaping
+	//   pipeline output using the predefined escapers "html" or "urlquery" is
+	//   unnecessary, and may affect the correctness or safety of the escaped
+	//   pipeline output in Go 1.8 and earlier.
+	//
+	//   In most cases, such as the given example, this error can be resolved by
+	//   simply removing the predefined escaper from the pipeline and letting the
+	//   contextual autoescaper handle the escaping of the pipeline. In other
+	//   instances, where the predefined escaper occurs in the middle of a
+	//   pipeline where subsequent commands expect escaped input, e.g.
+	//     {{.X | html | makeALink}}
+	//   where makeALink does
+	//     return `<a href="`+input+`">link</a>`
+	//   consider refactoring the surrounding template to make use of the
+	//   contextual autoescaper, i.e.
+	//     <a href="{{.X}}">link</a>
+	//
+	//   To ease migration to Go 1.9 and beyond, "html" and "urlquery" will
+	//   continue to be allowed as the last command in a pipeline. However, if the
+	//   pipeline occurs in an unquoted attribute value context, "html" is
+	//   disallowed. Avoid using "html" and "urlquery" entirely in new templates.
+	ErrPredefinedEscaper
+)
+
+func (e *Error) Error() string {
+	switch {
+	case e.Node != nil:
+		loc, _ := (*parse.Tree)(nil).ErrorContext(e.Node)
+		return fmt.Sprintf("html/template:%s: %s", loc, e.Description)
+	case e.Line != 0:
+		return fmt.Sprintf("html/template:%s:%d: %s", e.Name, e.Line, e.Description)
+	case e.Name != "":
+		return fmt.Sprintf("html/template:%s: %s", e.Name, e.Description)
+	}
+	return "html/template: " + e.Description
+}
+
+// errorf creates an error given a format string f and args.
+// The template Name still needs to be supplied.
+func errorf(k ErrorCode, node parse.Node, line int, f string, args ...interface{}) *Error {
+	return &Error{k, node, "", line, fmt.Sprintf(f, args...)}
+}
diff --git a/internal/backport/html/template/escape.go b/internal/backport/html/template/escape.go
new file mode 100644
index 0000000..dfdfea5
--- /dev/null
+++ b/internal/backport/html/template/escape.go
@@ -0,0 +1,962 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"html"
+	"io"
+
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// escapeTemplate rewrites the named template, which must be
+// associated with t, to guarantee that the output of any of the named
+// templates is properly escaped. If no error is returned, then the named templates have
+// been modified. Otherwise the named templates have been rendered
+// unusable.
+func escapeTemplate(tmpl *Template, node parse.Node, name string) error {
+	c, _ := tmpl.esc.escapeTree(context{}, node, name, 0)
+	var err error
+	if c.err != nil {
+		err, c.err.Name = c.err, name
+	} else if c.state != stateText {
+		err = &Error{ErrEndContext, nil, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
+	}
+	if err != nil {
+		// Prevent execution of unsafe templates.
+		if t := tmpl.set[name]; t != nil {
+			t.escapeErr = err
+			t.text.Tree = nil
+			t.Tree = nil
+		}
+		return err
+	}
+	tmpl.esc.commit()
+	if t := tmpl.set[name]; t != nil {
+		t.escapeErr = escapeOK
+		t.Tree = t.text.Tree
+	}
+	return nil
+}
+
+// evalArgs formats the list of arguments into a string. It is equivalent to
+// fmt.Sprint(args...), except that it dereferences all pointers.
+func evalArgs(args ...interface{}) string {
+	// Optimization for simple common case of a single string argument.
+	if len(args) == 1 {
+		if s, ok := args[0].(string); ok {
+			return s
+		}
+	}
+	for i, arg := range args {
+		args[i] = indirectToStringerOrError(arg)
+	}
+	return fmt.Sprint(args...)
+}
+
+// funcMap maps command names to functions that render their inputs safe.
+var funcMap = template.FuncMap{
+	"_html_template_attrescaper":     attrEscaper,
+	"_html_template_commentescaper":  commentEscaper,
+	"_html_template_cssescaper":      cssEscaper,
+	"_html_template_cssvaluefilter":  cssValueFilter,
+	"_html_template_htmlnamefilter":  htmlNameFilter,
+	"_html_template_htmlescaper":     htmlEscaper,
+	"_html_template_jsregexpescaper": jsRegexpEscaper,
+	"_html_template_jsstrescaper":    jsStrEscaper,
+	"_html_template_jsvalescaper":    jsValEscaper,
+	"_html_template_nospaceescaper":  htmlNospaceEscaper,
+	"_html_template_rcdataescaper":   rcdataEscaper,
+	"_html_template_srcsetescaper":   srcsetFilterAndEscaper,
+	"_html_template_urlescaper":      urlEscaper,
+	"_html_template_urlfilter":       urlFilter,
+	"_html_template_urlnormalizer":   urlNormalizer,
+	"_eval_args_":                    evalArgs,
+}
+
+// escaper collects type inferences about templates and changes needed to make
+// templates injection safe.
+type escaper struct {
+	// ns is the nameSpace that this escaper is associated with.
+	ns *nameSpace
+	// output[templateName] is the output context for a templateName that
+	// has been mangled to include its input context.
+	output map[string]context
+	// derived[c.mangle(name)] maps to a template derived from the template
+	// named name templateName for the start context c.
+	derived map[string]*template.Template
+	// called[templateName] is a set of called mangled template names.
+	called map[string]bool
+	// xxxNodeEdits are the accumulated edits to apply during commit.
+	// Such edits are not applied immediately in case a template set
+	// executes a given template in different escaping contexts.
+	actionNodeEdits   map[*parse.ActionNode][]string
+	templateNodeEdits map[*parse.TemplateNode]string
+	textNodeEdits     map[*parse.TextNode][]byte
+	// rangeContext holds context about the current range loop.
+	rangeContext *rangeContext
+}
+
+// rangeContext holds information about the current range loop.
+type rangeContext struct {
+	outer     *rangeContext // outer loop
+	breaks    []context     // context at each break action
+	continues []context     // context at each continue action
+}
+
+// makeEscaper creates a blank escaper for the given set.
+func makeEscaper(n *nameSpace) escaper {
+	return escaper{
+		n,
+		map[string]context{},
+		map[string]*template.Template{},
+		map[string]bool{},
+		map[*parse.ActionNode][]string{},
+		map[*parse.TemplateNode]string{},
+		map[*parse.TextNode][]byte{},
+		nil,
+	}
+}
+
+// filterFailsafe is an innocuous word that is emitted in place of unsafe values
+// by sanitizer functions. It is not a keyword in any programming language,
+// contains no special characters, is not empty, and when it appears in output
+// it is distinct enough that a developer can find the source of the problem
+// via a search engine.
+const filterFailsafe = "ZgotmplZ"
+
+// escape escapes a template node.
+func (e *escaper) escape(c context, n parse.Node) context {
+	switch n := n.(type) {
+	case *parse.ActionNode:
+		return e.escapeAction(c, n)
+	case *parse.BreakNode:
+		c.n = n
+		e.rangeContext.breaks = append(e.rangeContext.breaks, c)
+		return context{state: stateDead}
+	case *parse.CommentNode:
+		return c
+	case *parse.ContinueNode:
+		c.n = n
+		e.rangeContext.continues = append(e.rangeContext.continues, c)
+		return context{state: stateDead}
+	case *parse.IfNode:
+		return e.escapeBranch(c, &n.BranchNode, "if")
+	case *parse.ListNode:
+		return e.escapeList(c, n)
+	case *parse.RangeNode:
+		return e.escapeBranch(c, &n.BranchNode, "range")
+	case *parse.TemplateNode:
+		return e.escapeTemplate(c, n)
+	case *parse.TextNode:
+		return e.escapeText(c, n)
+	case *parse.WithNode:
+		return e.escapeBranch(c, &n.BranchNode, "with")
+	}
+	panic("escaping " + n.String() + " is unimplemented")
+}
+
+// escapeAction escapes an action template node.
+func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+	if len(n.Pipe.Decl) != 0 {
+		// A local variable assignment, not an interpolation.
+		return c
+	}
+	c = nudge(c)
+	// Check for disallowed use of predefined escapers in the pipeline.
+	for pos, idNode := range n.Pipe.Cmds {
+		node, ok := idNode.Args[0].(*parse.IdentifierNode)
+		if !ok {
+			// A predefined escaper "esc" will never be found as an identifier in a
+			// Chain or Field node, since:
+			// - "esc.x ..." is invalid, since predefined escapers return strings, and
+			//   strings do not have methods, keys or fields.
+			// - "... .esc" is invalid, since predefined escapers are global functions,
+			//   not methods or fields of any types.
+			// Therefore, it is safe to ignore these two node types.
+			continue
+		}
+		ident := node.Ident
+		if _, ok := predefinedEscapers[ident]; ok {
+			if pos < len(n.Pipe.Cmds)-1 ||
+				c.state == stateAttr && c.delim == delimSpaceOrTagEnd && ident == "html" {
+				return context{
+					state: stateError,
+					err:   errorf(ErrPredefinedEscaper, n, n.Line, "predefined escaper %q disallowed in template", ident),
+				}
+			}
+		}
+	}
+	s := make([]string, 0, 3)
+	switch c.state {
+	case stateError:
+		return c
+	case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
+		switch c.urlPart {
+		case urlPartNone:
+			s = append(s, "_html_template_urlfilter")
+			fallthrough
+		case urlPartPreQuery:
+			switch c.state {
+			case stateCSSDqStr, stateCSSSqStr:
+				s = append(s, "_html_template_cssescaper")
+			default:
+				s = append(s, "_html_template_urlnormalizer")
+			}
+		case urlPartQueryOrFrag:
+			s = append(s, "_html_template_urlescaper")
+		case urlPartUnknown:
+			return context{
+				state: stateError,
+				err:   errorf(ErrAmbigContext, n, n.Line, "%s appears in an ambiguous context within a URL", n),
+			}
+		default:
+			panic(c.urlPart.String())
+		}
+	case stateJS:
+		s = append(s, "_html_template_jsvalescaper")
+		// A slash after a value starts a div operator.
+		c.jsCtx = jsCtxDivOp
+	case stateJSDqStr, stateJSSqStr:
+		s = append(s, "_html_template_jsstrescaper")
+	case stateJSRegexp:
+		s = append(s, "_html_template_jsregexpescaper")
+	case stateCSS:
+		s = append(s, "_html_template_cssvaluefilter")
+	case stateText:
+		s = append(s, "_html_template_htmlescaper")
+	case stateRCDATA:
+		s = append(s, "_html_template_rcdataescaper")
+	case stateAttr:
+		// Handled below in delim check.
+	case stateAttrName, stateTag:
+		c.state = stateAttrName
+		s = append(s, "_html_template_htmlnamefilter")
+	case stateSrcset:
+		s = append(s, "_html_template_srcsetescaper")
+	default:
+		if isComment(c.state) {
+			s = append(s, "_html_template_commentescaper")
+		} else {
+			panic("unexpected state " + c.state.String())
+		}
+	}
+	switch c.delim {
+	case delimNone:
+		// No extra-escaping needed for raw text content.
+	case delimSpaceOrTagEnd:
+		s = append(s, "_html_template_nospaceescaper")
+	default:
+		s = append(s, "_html_template_attrescaper")
+	}
+	e.editActionNode(n, s)
+	return c
+}
+
+// ensurePipelineContains ensures that the pipeline ends with the commands with
+// the identifiers in s in order. If the pipeline ends with a predefined escaper
+// (i.e. "html" or "urlquery"), merge it with the identifiers in s.
+func ensurePipelineContains(p *parse.PipeNode, s []string) {
+	if len(s) == 0 {
+		// Do not rewrite pipeline if we have no escapers to insert.
+		return
+	}
+	// Precondition: p.Cmds contains at most one predefined escaper and the
+	// escaper will be present at p.Cmds[len(p.Cmds)-1]. This precondition is
+	// always true because of the checks in escapeAction.
+	pipelineLen := len(p.Cmds)
+	if pipelineLen > 0 {
+		lastCmd := p.Cmds[pipelineLen-1]
+		if idNode, ok := lastCmd.Args[0].(*parse.IdentifierNode); ok {
+			if esc := idNode.Ident; predefinedEscapers[esc] {
+				// Pipeline ends with a predefined escaper.
+				if len(p.Cmds) == 1 && len(lastCmd.Args) > 1 {
+					// Special case: pipeline is of the form {{ esc arg1 arg2 ... argN }},
+					// where esc is the predefined escaper, and arg1...argN are its arguments.
+					// Convert this into the equivalent form
+					// {{ _eval_args_ arg1 arg2 ... argN | esc }}, so that esc can be easily
+					// merged with the escapers in s.
+					lastCmd.Args[0] = parse.NewIdentifier("_eval_args_").SetTree(nil).SetPos(lastCmd.Args[0].Position())
+					p.Cmds = appendCmd(p.Cmds, newIdentCmd(esc, p.Position()))
+					pipelineLen++
+				}
+				// If any of the commands in s that we are about to insert is equivalent
+				// to the predefined escaper, use the predefined escaper instead.
+				dup := false
+				for i, escaper := range s {
+					if escFnsEq(esc, escaper) {
+						s[i] = idNode.Ident
+						dup = true
+					}
+				}
+				if dup {
+					// The predefined escaper will already be inserted along with the
+					// escapers in s, so do not copy it to the rewritten pipeline.
+					pipelineLen--
+				}
+			}
+		}
+	}
+	// Rewrite the pipeline, creating the escapers in s at the end of the pipeline.
+	newCmds := make([]*parse.CommandNode, pipelineLen, pipelineLen+len(s))
+	insertedIdents := make(map[string]bool)
+	for i := 0; i < pipelineLen; i++ {
+		cmd := p.Cmds[i]
+		newCmds[i] = cmd
+		if idNode, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
+			insertedIdents[normalizeEscFn(idNode.Ident)] = true
+		}
+	}
+	for _, name := range s {
+		if !insertedIdents[normalizeEscFn(name)] {
+			// When two templates share an underlying parse tree via the use of
+			// AddParseTree and one template is executed after the other, this check
+			// ensures that escapers that were already inserted into the pipeline on
+			// the first escaping pass do not get inserted again.
+			newCmds = appendCmd(newCmds, newIdentCmd(name, p.Position()))
+		}
+	}
+	p.Cmds = newCmds
+}
+
+// predefinedEscapers contains template predefined escapers that are equivalent
+// to some contextual escapers. Keep in sync with equivEscapers.
+var predefinedEscapers = map[string]bool{
+	"html":     true,
+	"urlquery": true,
+}
+
+// equivEscapers matches contextual escapers to equivalent predefined
+// template escapers.
+var equivEscapers = map[string]string{
+	// The following pairs of HTML escapers provide equivalent security
+	// guarantees, since they all escape '\000', '\'', '"', '&', '<', and '>'.
+	"_html_template_attrescaper":   "html",
+	"_html_template_htmlescaper":   "html",
+	"_html_template_rcdataescaper": "html",
+	// These two URL escapers produce URLs safe for embedding in a URL query by
+	// percent-encoding all the reserved characters specified in RFC 3986 Section
+	// 2.2
+	"_html_template_urlescaper": "urlquery",
+	// These two functions are not actually equivalent; urlquery is stricter as it
+	// escapes reserved characters (e.g. '#'), while _html_template_urlnormalizer
+	// does not. It is therefore only safe to replace _html_template_urlnormalizer
+	// with urlquery (this happens in ensurePipelineContains), but not the other
+	// way around. We keep this entry around to preserve the behavior of templates
+	// written before Go 1.9, which might depend on this substitution taking place.
+	"_html_template_urlnormalizer": "urlquery",
+}
+
+// escFnsEq reports whether the two escaping functions are equivalent.
+func escFnsEq(a, b string) bool {
+	return normalizeEscFn(a) == normalizeEscFn(b)
+}
+
+// normalizeEscFn(a) is equal to normalizeEscFn(b) for any pair of names of
+// escaper functions a and b that are equivalent.
+func normalizeEscFn(e string) string {
+	if norm := equivEscapers[e]; norm != "" {
+		return norm
+	}
+	return e
+}
+
+// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
+// for all x.
+var redundantFuncs = map[string]map[string]bool{
+	"_html_template_commentescaper": {
+		"_html_template_attrescaper":    true,
+		"_html_template_nospaceescaper": true,
+		"_html_template_htmlescaper":    true,
+	},
+	"_html_template_cssescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_jsregexpescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_jsstrescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_urlescaper": {
+		"_html_template_urlnormalizer": true,
+	},
+}
+
+// appendCmd appends the given command to the end of the command pipeline
+// unless it is redundant with the last command.
+func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
+	if n := len(cmds); n != 0 {
+		last, okLast := cmds[n-1].Args[0].(*parse.IdentifierNode)
+		next, okNext := cmd.Args[0].(*parse.IdentifierNode)
+		if okLast && okNext && redundantFuncs[last.Ident][next.Ident] {
+			return cmds
+		}
+	}
+	return append(cmds, cmd)
+}
+
+// newIdentCmd produces a command containing a single identifier node.
+func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode {
+	return &parse.CommandNode{
+		NodeType: parse.NodeCommand,
+		Args:     []parse.Node{parse.NewIdentifier(identifier).SetTree(nil).SetPos(pos)}, // TODO: SetTree.
+	}
+}
+
+// nudge returns the context that would result from following empty string
+// transitions from the input context.
+// For example, parsing:
+//     `<a href=`
+// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
+//     `<a href=x`
+// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
+// There are two transitions that happen when the 'x' is seen:
+// (1) Transition from a before-value state to a start-of-value state without
+//     consuming any character.
+// (2) Consume 'x' and transition past the first value character.
+// In this case, nudging produces the context after (1) happens.
+func nudge(c context) context {
+	switch c.state {
+	case stateTag:
+		// In `<foo {{.}}`, the action should emit an attribute.
+		c.state = stateAttrName
+	case stateBeforeValue:
+		// In `<foo bar={{.}}`, the action is an undelimited value.
+		c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
+	case stateAfterName:
+		// In `<foo bar {{.}}`, the action is an attribute name.
+		c.state, c.attr = stateAttrName, attrNone
+	}
+	return c
+}
+
+// join joins the two contexts of a branch template node. The result is an
+// error context if either of the input contexts are error contexts, or if the
+// input contexts differ.
+func join(a, b context, node parse.Node, nodeName string) context {
+	if a.state == stateError {
+		return a
+	}
+	if b.state == stateError {
+		return b
+	}
+	if a.state == stateDead {
+		return b
+	}
+	if b.state == stateDead {
+		return a
+	}
+	if a.eq(b) {
+		return a
+	}
+
+	c := a
+	c.urlPart = b.urlPart
+	if c.eq(b) {
+		// The contexts differ only by urlPart.
+		c.urlPart = urlPartUnknown
+		return c
+	}
+
+	c = a
+	c.jsCtx = b.jsCtx
+	if c.eq(b) {
+		// The contexts differ only by jsCtx.
+		c.jsCtx = jsCtxUnknown
+		return c
+	}
+
+	// Allow a nudged context to join with an unnudged one.
+	// This means that
+	//   <p title={{if .C}}{{.}}{{end}}
+	// ends in an unquoted value state even though the else branch
+	// ends in stateBeforeValue.
+	if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
+		if e := join(c, d, node, nodeName); e.state != stateError {
+			return e
+		}
+	}
+
+	return context{
+		state: stateError,
+		err:   errorf(ErrBranchEnd, node, 0, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
+	}
+}
+
+// escapeBranch escapes a branch template node: "if", "range" and "with".
+func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
+	if nodeName == "range" {
+		e.rangeContext = &rangeContext{outer: e.rangeContext}
+	}
+	c0 := e.escapeList(c, n.List)
+	if nodeName == "range" {
+		if c0.state != stateError {
+			c0 = joinRange(c0, e.rangeContext)
+		}
+		e.rangeContext = e.rangeContext.outer
+		if c0.state == stateError {
+			return c0
+		}
+
+		// The "true" branch of a "range" node can execute multiple times.
+		// We check that executing n.List once results in the same context
+		// as executing n.List twice.
+		e.rangeContext = &rangeContext{outer: e.rangeContext}
+		c1, _ := e.escapeListConditionally(c0, n.List, nil)
+		c0 = join(c0, c1, n, nodeName)
+		if c0.state == stateError {
+			e.rangeContext = e.rangeContext.outer
+			// Make clear that this is a problem on loop re-entry
+			// since developers tend to overlook that branch when
+			// debugging templates.
+			c0.err.Line = n.Line
+			c0.err.Description = "on range loop re-entry: " + c0.err.Description
+			return c0
+		}
+		c0 = joinRange(c0, e.rangeContext)
+		e.rangeContext = e.rangeContext.outer
+		if c0.state == stateError {
+			return c0
+		}
+	}
+	c1 := e.escapeList(c, n.ElseList)
+	return join(c0, c1, n, nodeName)
+}
+
+func joinRange(c0 context, rc *rangeContext) context {
+	// Merge contexts at break and continue statements into overall body context.
+	// In theory we could treat breaks differently from continues, but for now it is
+	// enough to treat them both as going back to the start of the loop (which may then stop).
+	for _, c := range rc.breaks {
+		c0 = join(c0, c, c.n, "range")
+		if c0.state == stateError {
+			c0.err.Line = c.n.(*parse.BreakNode).Line
+			c0.err.Description = "at range loop break: " + c0.err.Description
+			return c0
+		}
+	}
+	for _, c := range rc.continues {
+		c0 = join(c0, c, c.n, "range")
+		if c0.state == stateError {
+			c0.err.Line = c.n.(*parse.ContinueNode).Line
+			c0.err.Description = "at range loop continue: " + c0.err.Description
+			return c0
+		}
+	}
+	return c0
+}
+
+// escapeList escapes a list template node.
+func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+	if n == nil {
+		return c
+	}
+	for _, m := range n.Nodes {
+		c = e.escape(c, m)
+		if c.state == stateDead {
+			break
+		}
+	}
+	return c
+}
+
+// escapeListConditionally escapes a list node but only preserves edits and
+// inferences in e if the inferences and output context satisfy filter.
+// It returns the best guess at an output context, and the result of the filter
+// which is the same as whether e was updated.
+func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+	e1 := makeEscaper(e.ns)
+	e1.rangeContext = e.rangeContext
+	// Make type inferences available to filter.
+	for k, v := range e.output {
+		e1.output[k] = v
+	}
+	c = e1.escapeList(c, n)
+	ok := filter != nil && filter(&e1, c)
+	if ok {
+		// Copy inferences and edits from e1 back into e.
+		for k, v := range e1.output {
+			e.output[k] = v
+		}
+		for k, v := range e1.derived {
+			e.derived[k] = v
+		}
+		for k, v := range e1.called {
+			e.called[k] = v
+		}
+		for k, v := range e1.actionNodeEdits {
+			e.editActionNode(k, v)
+		}
+		for k, v := range e1.templateNodeEdits {
+			e.editTemplateNode(k, v)
+		}
+		for k, v := range e1.textNodeEdits {
+			e.editTextNode(k, v)
+		}
+	}
+	return c, ok
+}
+
+// escapeTemplate escapes a {{template}} call node.
+func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
+	c, name := e.escapeTree(c, n, n.Name, n.Line)
+	if name != n.Name {
+		e.editTemplateNode(n, name)
+	}
+	return c
+}
+
+// escapeTree escapes the named template starting in the given context as
+// necessary and returns its output context.
+func (e *escaper) escapeTree(c context, node parse.Node, name string, line int) (context, string) {
+	// Mangle the template name with the input context to produce a reliable
+	// identifier.
+	dname := c.mangle(name)
+	e.called[dname] = true
+	if out, ok := e.output[dname]; ok {
+		// Already escaped.
+		return out, dname
+	}
+	t := e.template(name)
+	if t == nil {
+		// Two cases: The template exists but is empty, or has never been mentioned at
+		// all. Distinguish the cases in the error messages.
+		if e.ns.set[name] != nil {
+			return context{
+				state: stateError,
+				err:   errorf(ErrNoSuchTemplate, node, line, "%q is an incomplete or empty template", name),
+			}, dname
+		}
+		return context{
+			state: stateError,
+			err:   errorf(ErrNoSuchTemplate, node, line, "no such template %q", name),
+		}, dname
+	}
+	if dname != name {
+		// Use any template derived during an earlier call to escapeTemplate
+		// with different top level templates, or clone if necessary.
+		dt := e.template(dname)
+		if dt == nil {
+			dt = template.New(dname)
+			dt.Tree = &parse.Tree{Name: dname, Root: t.Root.CopyList()}
+			e.derived[dname] = dt
+		}
+		t = dt
+	}
+	return e.computeOutCtx(c, t), dname
+}
+
+// computeOutCtx takes a template and its start context and computes the output
+// context while storing any inferences in e.
+func (e *escaper) computeOutCtx(c context, t *template.Template) context {
+	// Propagate context over the body.
+	c1, ok := e.escapeTemplateBody(c, t)
+	if !ok {
+		// Look for a fixed point by assuming c1 as the output context.
+		if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
+			c1, ok = c2, true
+		}
+		// Use c1 as the error context if neither assumption worked.
+	}
+	if !ok && c1.state != stateError {
+		return context{
+			state: stateError,
+			err:   errorf(ErrOutputContext, t.Tree.Root, 0, "cannot compute output context for template %s", t.Name()),
+		}
+	}
+	return c1
+}
+
+// escapeTemplateBody escapes the given template assuming the given output
+// context, and returns the best guess at the output context and whether the
+// assumption was correct.
+func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
+	filter := func(e1 *escaper, c1 context) bool {
+		if c1.state == stateError {
+			// Do not update the input escaper, e.
+			return false
+		}
+		if !e1.called[t.Name()] {
+			// If t is not recursively called, then c1 is an
+			// accurate output context.
+			return true
+		}
+		// c1 is accurate if it matches our assumed output context.
+		return c.eq(c1)
+	}
+	// We need to assume an output context so that recursive template calls
+	// take the fast path out of escapeTree instead of infinitely recursing.
+	// Naively assuming that the input context is the same as the output
+	// works >90% of the time.
+	e.output[t.Name()] = c
+	return e.escapeListConditionally(c, t.Tree.Root, filter)
+}
+
+// delimEnds maps each delim to a string of characters that terminate it.
+var delimEnds = [...]string{
+	delimDoubleQuote: `"`,
+	delimSingleQuote: "'",
+	// Determined empirically by running the below in various browsers.
+	// var div = document.createElement("DIV");
+	// for (var i = 0; i < 0x10000; ++i) {
+	//   div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
+	//   if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
+	//     document.write("<p>U+" + i.toString(16));
+	// }
+	delimSpaceOrTagEnd: " \t\n\f\r>",
+}
+
+var doctypeBytes = []byte("<!DOCTYPE")
+
+// escapeText escapes a text template node.
+func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+	s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
+	for i != len(s) {
+		c1, nread := contextAfterText(c, s[i:])
+		i1 := i + nread
+		if c.state == stateText || c.state == stateRCDATA {
+			end := i1
+			if c1.state != c.state {
+				for j := end - 1; j >= i; j-- {
+					if s[j] == '<' {
+						end = j
+						break
+					}
+				}
+			}
+			for j := i; j < end; j++ {
+				if s[j] == '<' && !bytes.HasPrefix(bytes.ToUpper(s[j:]), doctypeBytes) {
+					b.Write(s[written:j])
+					b.WriteString("&lt;")
+					written = j + 1
+				}
+			}
+		} else if isComment(c.state) && c.delim == delimNone {
+			switch c.state {
+			case stateJSBlockCmt:
+				// https://es5.github.com/#x7.4:
+				// "Comments behave like white space and are
+				// discarded except that, if a MultiLineComment
+				// contains a line terminator character, then
+				// the entire comment is considered to be a
+				// LineTerminator for purposes of parsing by
+				// the syntactic grammar."
+				if bytes.ContainsAny(s[written:i1], "\n\r\u2028\u2029") {
+					b.WriteByte('\n')
+				} else {
+					b.WriteByte(' ')
+				}
+			case stateCSSBlockCmt:
+				b.WriteByte(' ')
+			}
+			written = i1
+		}
+		if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+			// Preserve the portion between written and the comment start.
+			cs := i1 - 2
+			if c1.state == stateHTMLCmt {
+				// "<!--" instead of "/*" or "//"
+				cs -= 2
+			}
+			b.Write(s[written:cs])
+			written = i1
+		}
+		if i == i1 && c.state == c1.state {
+			panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+		}
+		c, i = c1, i1
+	}
+
+	if written != 0 && c.state != stateError {
+		if !isComment(c.state) || c.delim != delimNone {
+			b.Write(n.Text[written:])
+		}
+		e.editTextNode(n, b.Bytes())
+	}
+	return c
+}
+
+// contextAfterText starts in context c, consumes some tokens from the front of
+// s, then returns the context after those tokens and the unprocessed suffix.
+func contextAfterText(c context, s []byte) (context, int) {
+	if c.delim == delimNone {
+		c1, i := tSpecialTagEnd(c, s)
+		if i == 0 {
+			// A special end tag (`</script>`) has been seen and
+			// all content preceding it has been consumed.
+			return c1, 0
+		}
+		// Consider all content up to any end tag.
+		return transitionFunc[c.state](c, s[:i])
+	}
+
+	// We are at the beginning of an attribute value.
+
+	i := bytes.IndexAny(s, delimEnds[c.delim])
+	if i == -1 {
+		i = len(s)
+	}
+	if c.delim == delimSpaceOrTagEnd {
+		// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
+		// lists the runes below as error characters.
+		// Error out because HTML parsers may differ on whether
+		// "<a id= onclick=f("     ends inside id's or onclick's value,
+		// "<a class=`foo "        ends inside a value,
+		// "<a style=font:'Arial'" needs open-quote fixup.
+		// IE treats '`' as a quotation character.
+		if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
+			return context{
+				state: stateError,
+				err:   errorf(ErrBadHTML, nil, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
+			}, len(s)
+		}
+	}
+	if i == len(s) {
+		// Remain inside the attribute.
+		// Decode the value so non-HTML rules can easily handle
+		//     <button onclick="alert(&quot;Hi!&quot;)">
+		// without having to entity decode token boundaries.
+		for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
+			c1, i1 := transitionFunc[c.state](c, u)
+			c, u = c1, u[i1:]
+		}
+		return c, len(s)
+	}
+
+	element := c.element
+
+	// If this is a non-JS "type" attribute inside "script" tag, do not treat the contents as JS.
+	if c.state == stateAttr && c.element == elementScript && c.attr == attrScriptType && !isJSType(string(s[:i])) {
+		element = elementNone
+	}
+
+	if c.delim != delimSpaceOrTagEnd {
+		// Consume any quote.
+		i++
+	}
+	// On exiting an attribute, we discard all state information
+	// except the state and element.
+	return context{state: stateTag, element: element}, i
+}
+
+// editActionNode records a change to an action pipeline for later commit.
+func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
+	if _, ok := e.actionNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.actionNodeEdits[n] = cmds
+}
+
+// editTemplateNode records a change to a {{template}} callee for later commit.
+func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
+	if _, ok := e.templateNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.templateNodeEdits[n] = callee
+}
+
+// editTextNode records a change to a text node for later commit.
+func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
+	if _, ok := e.textNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.textNodeEdits[n] = text
+}
+
+// commit applies changes to actions and template calls needed to contextually
+// autoescape content and adds any derived templates to the set.
+func (e *escaper) commit() {
+	for name := range e.output {
+		e.template(name).Funcs(funcMap)
+	}
+	// Any template from the name space associated with this escaper can be used
+	// to add derived templates to the underlying text/template name space.
+	tmpl := e.arbitraryTemplate()
+	for _, t := range e.derived {
+		if _, err := tmpl.text.AddParseTree(t.Name(), t.Tree); err != nil {
+			panic("error adding derived template")
+		}
+	}
+	for n, s := range e.actionNodeEdits {
+		ensurePipelineContains(n.Pipe, s)
+	}
+	for n, name := range e.templateNodeEdits {
+		n.Name = name
+	}
+	for n, s := range e.textNodeEdits {
+		n.Text = s
+	}
+	// Reset state that is specific to this commit so that the same changes are
+	// not re-applied to the template on subsequent calls to commit.
+	e.called = make(map[string]bool)
+	e.actionNodeEdits = make(map[*parse.ActionNode][]string)
+	e.templateNodeEdits = make(map[*parse.TemplateNode]string)
+	e.textNodeEdits = make(map[*parse.TextNode][]byte)
+}
+
+// template returns the named template given a mangled template name.
+func (e *escaper) template(name string) *template.Template {
+	// Any template from the name space associated with this escaper can be used
+	// to look up templates in the underlying text/template name space.
+	t := e.arbitraryTemplate().text.Lookup(name)
+	if t == nil {
+		t = e.derived[name]
+	}
+	return t
+}
+
+// arbitraryTemplate returns an arbitrary template from the name space
+// associated with e and panics if no templates are found.
+func (e *escaper) arbitraryTemplate() *Template {
+	for _, t := range e.ns.set {
+		return t
+	}
+	panic("no templates in name space")
+}
+
+// Forwarding functions so that clients need only import this package
+// to reach the general escaping functions of text/template.
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	template.HTMLEscape(w, b)
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	return template.HTMLEscapeString(s)
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return template.HTMLEscaper(args...)
+}
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	template.JSEscape(w, b)
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	return template.JSEscapeString(s)
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return template.JSEscaper(args...)
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return template.URLQueryEscaper(args...)
+}
diff --git a/internal/backport/html/template/escape_test.go b/internal/backport/html/template/escape_test.go
new file mode 100644
index 0000000..8145269
--- /dev/null
+++ b/internal/backport/html/template/escape_test.go
@@ -0,0 +1,1994 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+type badMarshaler struct{}
+
+func (x *badMarshaler) MarshalJSON() ([]byte, error) {
+	// Keys in valid JSON must be double quoted as must all strings.
+	return []byte("{ foo: 'not quite valid JSON' }"), nil
+}
+
+type goodMarshaler struct{}
+
+func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
+	return []byte(`{ "<foo>": "O'Reilly" }`), nil
+}
+
+func TestEscape(t *testing.T) {
+	data := struct {
+		F, T    bool
+		C, G, H string
+		A, E    []string
+		B, M    json.Marshaler
+		N       int
+		U       interface{} // untyped nil
+		Z       *int        // typed nil
+		W       HTML
+	}{
+		F: false,
+		T: true,
+		C: "<Cincinnati>",
+		G: "<Goodbye>",
+		H: "<Hello>",
+		A: []string{"<a>", "<b>"},
+		E: []string{},
+		N: 42,
+		B: &badMarshaler{},
+		M: &goodMarshaler{},
+		U: nil,
+		Z: nil,
+		W: HTML(`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
+	}
+	pdata := &data
+
+	tests := []struct {
+		name   string
+		input  string
+		output string
+	}{
+		{
+			"if",
+			"{{if .T}}Hello{{end}}, {{.C}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"else",
+			"{{if .F}}{{.H}}{{else}}{{.G}}{{end}}!",
+			"&lt;Goodbye&gt;!",
+		},
+		{
+			"overescaping1",
+			"Hello, {{.C | html}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"overescaping2",
+			"Hello, {{html .C}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"overescaping3",
+			"{{with .C}}{{$msg := .}}Hello, {{$msg}}!{{end}}",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"assignment",
+			"{{if $x := .H}}{{$x}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"withBody",
+			"{{with .H}}{{.}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"withElse",
+			"{{with .E}}{{.}}{{else}}{{.H}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"rangeBody",
+			"{{range .A}}{{.}}{{end}}",
+			"&lt;a&gt;&lt;b&gt;",
+		},
+		{
+			"rangeElse",
+			"{{range .E}}{{.}}{{else}}{{.H}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"nonStringValue",
+			"{{.T}}",
+			"true",
+		},
+		{
+			"untypedNilValue",
+			"{{.U}}",
+			"",
+		},
+		{
+			"typedNilValue",
+			"{{.Z}}",
+			"&lt;nil&gt;",
+		},
+		{
+			"constant",
+			`<a href="/search?q={{"'a<b'"}}">`,
+			`<a href="/search?q=%27a%3cb%27">`,
+		},
+		{
+			"multipleAttrs",
+			"<a b=1 c={{.H}}>",
+			"<a b=1 c=&lt;Hello&gt;>",
+		},
+		{
+			"urlStartRel",
+			`<a href='{{"/foo/bar?a=b&c=d"}}'>`,
+			`<a href='/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"urlStartAbsOk",
+			`<a href='{{"http://example.com/foo/bar?a=b&c=d"}}'>`,
+			`<a href='http://example.com/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"protocolRelativeURLStart",
+			`<a href='{{"//example.com:8000/foo/bar?a=b&c=d"}}'>`,
+			`<a href='//example.com:8000/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"pathRelativeURLStart",
+			`<a href="{{"/javascript:80/foo/bar"}}">`,
+			`<a href="/javascript:80/foo/bar">`,
+		},
+		{
+			"dangerousURLStart",
+			`<a href='{{"javascript:alert(%22pwned%22)"}}'>`,
+			`<a href='#ZgotmplZ'>`,
+		},
+		{
+			"dangerousURLStart2",
+			`<a href='  {{"javascript:alert(%22pwned%22)"}}'>`,
+			`<a href='  #ZgotmplZ'>`,
+		},
+		{
+			"nonHierURL",
+			`<a href={{"mailto:Muhammed \"The Greatest\" Ali <m.ali@example.com>"}}>`,
+			`<a href=mailto:Muhammed%20%22The%20Greatest%22%20Ali%20%3cm.ali@example.com%3e>`,
+		},
+		{
+			"urlPath",
+			`<a href='http://{{"javascript:80"}}/foo'>`,
+			`<a href='http://javascript:80/foo'>`,
+		},
+		{
+			"urlQuery",
+			`<a href='/search?q={{.H}}'>`,
+			`<a href='/search?q=%3cHello%3e'>`,
+		},
+		{
+			"urlFragment",
+			`<a href='/faq#{{.H}}'>`,
+			`<a href='/faq#%3cHello%3e'>`,
+		},
+		{
+			"urlBranch",
+			`<a href="{{if .F}}/foo?a=b{{else}}/bar{{end}}">`,
+			`<a href="/bar">`,
+		},
+		{
+			"urlBranchConflictMoot",
+			`<a href="{{if .T}}/foo?a={{else}}/bar#{{end}}{{.C}}">`,
+			`<a href="/foo?a=%3cCincinnati%3e">`,
+		},
+		{
+			"jsStrValue",
+			"<button onclick='alert({{.H}})'>",
+			`<button onclick='alert(&#34;\u003cHello\u003e&#34;)'>`,
+		},
+		{
+			"jsNumericValue",
+			"<button onclick='alert({{.N}})'>",
+			`<button onclick='alert( 42 )'>`,
+		},
+		{
+			"jsBoolValue",
+			"<button onclick='alert({{.T}})'>",
+			`<button onclick='alert( true )'>`,
+		},
+		{
+			"jsNilValueTyped",
+			"<button onclick='alert(typeof{{.Z}})'>",
+			`<button onclick='alert(typeof null )'>`,
+		},
+		{
+			"jsNilValueUntyped",
+			"<button onclick='alert(typeof{{.U}})'>",
+			`<button onclick='alert(typeof null )'>`,
+		},
+		{
+			"jsObjValue",
+			"<button onclick='alert({{.A}})'>",
+			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+		},
+		{
+			"jsObjValueScript",
+			"<script>alert({{.A}})</script>",
+			`<script>alert(["\u003ca\u003e","\u003cb\u003e"])</script>`,
+		},
+		{
+			"jsObjValueNotOverEscaped",
+			"<button onclick='alert({{.A | html}})'>",
+			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+		},
+		{
+			"jsStr",
+			"<button onclick='alert(&quot;{{.H}}&quot;)'>",
+			`<button onclick='alert(&quot;\u003cHello\u003e&quot;)'>`,
+		},
+		{
+			"badMarshaler",
+			`<button onclick='alert(1/{{.B}}in numbers)'>`,
+			`<button onclick='alert(1/ /* json: error calling MarshalJSON for type *template.badMarshaler: invalid character &#39;f&#39; looking for beginning of object key string */null in numbers)'>`,
+		},
+		{
+			"jsMarshaler",
+			`<button onclick='alert({{.M}})'>`,
+			`<button onclick='alert({&#34;\u003cfoo\u003e&#34;:&#34;O&#39;Reilly&#34;})'>`,
+		},
+		{
+			"jsStrNotUnderEscaped",
+			"<button onclick='alert({{.C | urlquery}})'>",
+			// URL escaped, then quoted for JS.
+			`<button onclick='alert(&#34;%3CCincinnati%3E&#34;)'>`,
+		},
+		{
+			"jsRe",
+			`<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
+			`<button onclick='alert(/foo\u002bbar/.test(""))'>`,
+		},
+		{
+			"jsReBlank",
+			`<script>alert(/{{""}}/.test(""));</script>`,
+			`<script>alert(/(?:)/.test(""));</script>`,
+		},
+		{
+			"jsReAmbigOk",
+			`<script>{{if true}}var x = 1{{end}}</script>`,
+			// The {if} ends in an ambiguous jsCtx but there is
+			// no slash following so we shouldn't care.
+			`<script>var x = 1</script>`,
+		},
+		{
+			"styleBidiKeywordPassed",
+			`<p style="dir: {{"ltr"}}">`,
+			`<p style="dir: ltr">`,
+		},
+		{
+			"styleBidiPropNamePassed",
+			`<p style="border-{{"left"}}: 0; border-{{"right"}}: 1in">`,
+			`<p style="border-left: 0; border-right: 1in">`,
+		},
+		{
+			"styleExpressionBlocked",
+			`<p style="width: {{"expression(alert(1337))"}}">`,
+			`<p style="width: ZgotmplZ">`,
+		},
+		{
+			"styleTagSelectorPassed",
+			`<style>{{"p"}} { color: pink }</style>`,
+			`<style>p { color: pink }</style>`,
+		},
+		{
+			"styleIDPassed",
+			`<style>p{{"#my-ID"}} { font: Arial }</style>`,
+			`<style>p#my-ID { font: Arial }</style>`,
+		},
+		{
+			"styleClassPassed",
+			`<style>p{{".my_class"}} { font: Arial }</style>`,
+			`<style>p.my_class { font: Arial }</style>`,
+		},
+		{
+			"styleQuantityPassed",
+			`<a style="left: {{"2em"}}; top: {{0}}">`,
+			`<a style="left: 2em; top: 0">`,
+		},
+		{
+			"stylePctPassed",
+			`<table style=width:{{"100%"}}>`,
+			`<table style=width:100%>`,
+		},
+		{
+			"styleColorPassed",
+			`<p style="color: {{"#8ff"}}; background: {{"#000"}}">`,
+			`<p style="color: #8ff; background: #000">`,
+		},
+		{
+			"styleObfuscatedExpressionBlocked",
+			`<p style="width: {{"  e\\78preS\x00Sio/**/n(alert(1337))"}}">`,
+			`<p style="width: ZgotmplZ">`,
+		},
+		{
+			"styleMozBindingBlocked",
+			`<p style="{{"-moz-binding(alert(1337))"}}: ...">`,
+			`<p style="ZgotmplZ: ...">`,
+		},
+		{
+			"styleObfuscatedMozBindingBlocked",
+			`<p style="{{"  -mo\\7a-B\x00I/**/nding(alert(1337))"}}: ...">`,
+			`<p style="ZgotmplZ: ...">`,
+		},
+		{
+			"styleFontNameString",
+			`<p style='font-family: "{{"Times New Roman"}}"'>`,
+			`<p style='font-family: "Times New Roman"'>`,
+		},
+		{
+			"styleFontNameString",
+			`<p style='font-family: "{{"Times New Roman"}}", "{{"sans-serif"}}"'>`,
+			`<p style='font-family: "Times New Roman", "sans-serif"'>`,
+		},
+		{
+			"styleFontNameUnquoted",
+			`<p style='font-family: {{"Times New Roman"}}'>`,
+			`<p style='font-family: Times New Roman'>`,
+		},
+		{
+			"styleURLQueryEncoded",
+			`<p style="background: url(/img?name={{"O'Reilly Animal(1)<2>.png"}})">`,
+			`<p style="background: url(/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png)">`,
+		},
+		{
+			"styleQuotedURLQueryEncoded",
+			`<p style="background: url('/img?name={{"O'Reilly Animal(1)<2>.png"}}')">`,
+			`<p style="background: url('/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png')">`,
+		},
+		{
+			"styleStrQueryEncoded",
+			`<p style="background: '/img?name={{"O'Reilly Animal(1)<2>.png"}}'">`,
+			`<p style="background: '/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png'">`,
+		},
+		{
+			"styleURLBadProtocolBlocked",
+			`<a style="background: url('{{"javascript:alert(1337)"}}')">`,
+			`<a style="background: url('#ZgotmplZ')">`,
+		},
+		{
+			"styleStrBadProtocolBlocked",
+			`<a style="background: '{{"vbscript:alert(1337)"}}'">`,
+			`<a style="background: '#ZgotmplZ'">`,
+		},
+		{
+			"styleStrEncodedProtocolEncoded",
+			`<a style="background: '{{"javascript\\3a alert(1337)"}}'">`,
+			// The CSS string 'javascript\\3a alert(1337)' does not contain a colon.
+			`<a style="background: 'javascript\\3a alert\28 1337\29 '">`,
+		},
+		{
+			"styleURLGoodProtocolPassed",
+			`<a style="background: url('{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}')">`,
+			`<a style="background: url('http://oreilly.com/O%27Reilly%20Animals%281%29%3c2%3e;%7b%7d.html')">`,
+		},
+		{
+			"styleStrGoodProtocolPassed",
+			`<a style="background: '{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}'">`,
+			`<a style="background: 'http\3a\2f\2foreilly.com\2fO\27Reilly Animals\28 1\29\3c 2\3e\3b\7b\7d.html'">`,
+		},
+		{
+			"styleURLEncodedForHTMLInAttr",
+			`<a style="background: url('{{"/search?img=foo&size=icon"}}')">`,
+			`<a style="background: url('/search?img=foo&amp;size=icon')">`,
+		},
+		{
+			"styleURLNotEncodedForHTMLInCdata",
+			`<style>body { background: url('{{"/search?img=foo&size=icon"}}') }</style>`,
+			`<style>body { background: url('/search?img=foo&size=icon') }</style>`,
+		},
+		{
+			"styleURLMixedCase",
+			`<p style="background: URL(#{{.H}})">`,
+			`<p style="background: URL(#%3cHello%3e)">`,
+		},
+		{
+			"stylePropertyPairPassed",
+			`<a style='{{"color: red"}}'>`,
+			`<a style='color: red'>`,
+		},
+		{
+			"styleStrSpecialsEncoded",
+			`<a style="font-family: '{{"/**/'\";:// \\"}}', &quot;{{"/**/'\";:// \\"}}&quot;">`,
+			`<a style="font-family: '\2f**\2f\27\22\3b\3a\2f\2f  \\', &quot;\2f**\2f\27\22\3b\3a\2f\2f  \\&quot;">`,
+		},
+		{
+			"styleURLSpecialsEncoded",
+			`<a style="border-image: url({{"/**/'\";:// \\"}}), url(&quot;{{"/**/'\";:// \\"}}&quot;), url('{{"/**/'\";:// \\"}}'), 'http://www.example.com/?q={{"/**/'\";:// \\"}}''">`,
+			`<a style="border-image: url(/**/%27%22;://%20%5c), url(&quot;/**/%27%22;://%20%5c&quot;), url('/**/%27%22;://%20%5c'), 'http://www.example.com/?q=%2f%2a%2a%2f%27%22%3b%3a%2f%2f%20%5c''">`,
+		},
+		{
+			"HTML comment",
+			"<b>Hello, <!-- name of world -->{{.C}}</b>",
+			"<b>Hello, &lt;Cincinnati&gt;</b>",
+		},
+		{
+			"HTML comment not first < in text node.",
+			"<<!-- -->!--",
+			"&lt;!--",
+		},
+		{
+			"HTML normalization 1",
+			"a < b",
+			"a &lt; b",
+		},
+		{
+			"HTML normalization 2",
+			"a << b",
+			"a &lt;&lt; b",
+		},
+		{
+			"HTML normalization 3",
+			"a<<!-- --><!-- -->b",
+			"a&lt;b",
+		},
+		{
+			"HTML doctype not normalized",
+			"<!DOCTYPE html>Hello, World!",
+			"<!DOCTYPE html>Hello, World!",
+		},
+		{
+			"HTML doctype not case-insensitive",
+			"<!doCtYPE htMl>Hello, World!",
+			"<!doCtYPE htMl>Hello, World!",
+		},
+		{
+			"No doctype injection",
+			`<!{{"DOCTYPE"}}`,
+			"&lt;!DOCTYPE",
+		},
+		{
+			"Split HTML comment",
+			"<b>Hello, <!-- name of {{if .T}}city -->{{.C}}{{else}}world -->{{.W}}{{end}}</b>",
+			"<b>Hello, &lt;Cincinnati&gt;</b>",
+		},
+		{
+			"JS line comment",
+			"<script>for (;;) { if (c()) break// foo not a label\n" +
+				"foo({{.T}});}</script>",
+			"<script>for (;;) { if (c()) break\n" +
+				"foo( true );}</script>",
+		},
+		{
+			"JS multiline block comment",
+			"<script>for (;;) { if (c()) break/* foo not a label\n" +
+				" */foo({{.T}});}</script>",
+			// Newline separates break from call. If newline
+			// removed, then break will consume label leaving
+			// code invalid.
+			"<script>for (;;) { if (c()) break\n" +
+				"foo( true );}</script>",
+		},
+		{
+			"JS single-line block comment",
+			"<script>for (;;) {\n" +
+				"if (c()) break/* foo a label */foo;" +
+				"x({{.T}});}</script>",
+			// Newline separates break from call. If newline
+			// removed, then break will consume label leaving
+			// code invalid.
+			"<script>for (;;) {\n" +
+				"if (c()) break foo;" +
+				"x( true );}</script>",
+		},
+		{
+			"JS block comment flush with mathematical division",
+			"<script>var a/*b*//c\nd</script>",
+			"<script>var a /c\nd</script>",
+		},
+		{
+			"JS mixed comments",
+			"<script>var a/*b*///c\nd</script>",
+			"<script>var a \nd</script>",
+		},
+		{
+			"CSS comments",
+			"<style>p// paragraph\n" +
+				`{border: 1px/* color */{{"#00f"}}}</style>`,
+			"<style>p\n" +
+				"{border: 1px #00f}</style>",
+		},
+		{
+			"JS attr block comment",
+			`<a onclick="f(&quot;&quot;); /* alert({{.H}}) */">`,
+			// Attribute comment tests should pass if the comments
+			// are successfully elided.
+			`<a onclick="f(&quot;&quot;); /* alert() */">`,
+		},
+		{
+			"JS attr line comment",
+			`<a onclick="// alert({{.G}})">`,
+			`<a onclick="// alert()">`,
+		},
+		{
+			"CSS attr block comment",
+			`<a style="/* color: {{.H}} */">`,
+			`<a style="/* color:  */">`,
+		},
+		{
+			"CSS attr line comment",
+			`<a style="// color: {{.G}}">`,
+			`<a style="// color: ">`,
+		},
+		{
+			"HTML substitution commented out",
+			"<p><!-- {{.H}} --></p>",
+			"<p></p>",
+		},
+		{
+			"Comment ends flush with start",
+			"<!--{{.}}--><script>/*{{.}}*///{{.}}\n</script><style>/*{{.}}*///{{.}}\n</style><a onclick='/*{{.}}*///{{.}}' style='/*{{.}}*///{{.}}'>",
+			"<script> \n</script><style> \n</style><a onclick='/**///' style='/**///'>",
+		},
+		{
+			"typed HTML in text",
+			`{{.W}}`,
+			`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`,
+		},
+		{
+			"typed HTML in attribute",
+			`<div title="{{.W}}">`,
+			`<div title="&iexcl;Hello, O&#39;World!">`,
+		},
+		{
+			"typed HTML in script",
+			`<button onclick="alert({{.W}})">`,
+			`<button onclick="alert(&#34;\u0026iexcl;\u003cb class=\&#34;foo\&#34;\u003eHello\u003c/b\u003e, \u003ctextarea\u003eO&#39;World\u003c/textarea\u003e!&#34;)">`,
+		},
+		{
+			"typed HTML in RCDATA",
+			`<textarea>{{.W}}</textarea>`,
+			`<textarea>&iexcl;&lt;b class=&#34;foo&#34;&gt;Hello&lt;/b&gt;, &lt;textarea&gt;O&#39;World&lt;/textarea&gt;!</textarea>`,
+		},
+		{
+			"range in textarea",
+			"<textarea>{{range .A}}{{.}}{{end}}</textarea>",
+			"<textarea>&lt;a&gt;&lt;b&gt;</textarea>",
+		},
+		{
+			"No tag injection",
+			`{{"10$"}}<{{"script src,evil.org/pwnd.js"}}...`,
+			`10$&lt;script src,evil.org/pwnd.js...`,
+		},
+		{
+			"No comment injection",
+			`<{{"!--"}}`,
+			`&lt;!--`,
+		},
+		{
+			"No RCDATA end tag injection",
+			`<textarea><{{"/textarea "}}...</textarea>`,
+			`<textarea>&lt;/textarea ...</textarea>`,
+		},
+		{
+			"optional attrs",
+			`<img class="{{"iconClass"}}"` +
+				`{{if .T}} id="{{"<iconId>"}}"{{end}}` +
+				// Double quotes inside if/else.
+				` src=` +
+				`{{if .T}}"?{{"<iconPath>"}}"` +
+				`{{else}}"images/cleardot.gif"{{end}}` +
+				// Missing space before title, but it is not a
+				// part of the src attribute.
+				`{{if .T}}title="{{"<title>"}}"{{end}}` +
+				// Quotes outside if/else.
+				` alt="` +
+				`{{if .T}}{{"<alt>"}}` +
+				`{{else}}{{if .F}}{{"<title>"}}{{end}}` +
+				`{{end}}"` +
+				`>`,
+			`<img class="iconClass" id="&lt;iconId&gt;" src="?%3ciconPath%3e"title="&lt;title&gt;" alt="&lt;alt&gt;">`,
+		},
+		{
+			"conditional valueless attr name",
+			`<input{{if .T}} checked{{end}} name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"conditional dynamic valueless attr name 1",
+			`<input{{if .T}} {{"checked"}}{{end}} name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"conditional dynamic valueless attr name 2",
+			`<input {{if .T}}{{"checked"}} {{end}}name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"dynamic attribute name",
+			`<img on{{"load"}}="alert({{"loaded"}})">`,
+			// Treated as JS since quotes are inserted.
+			`<img onload="alert(&#34;loaded&#34;)">`,
+		},
+		{
+			"bad dynamic attribute name 1",
+			// Allow checked, selected, disabled, but not JS or
+			// CSS attributes.
+			`<input {{"onchange"}}="{{"doEvil()"}}">`,
+			`<input ZgotmplZ="doEvil()">`,
+		},
+		{
+			"bad dynamic attribute name 2",
+			`<div {{"sTyle"}}="{{"color: expression(alert(1337))"}}">`,
+			`<div ZgotmplZ="color: expression(alert(1337))">`,
+		},
+		{
+			"bad dynamic attribute name 3",
+			// Allow title or alt, but not a URL.
+			`<img {{"src"}}="{{"javascript:doEvil()"}}">`,
+			`<img ZgotmplZ="javascript:doEvil()">`,
+		},
+		{
+			"bad dynamic attribute name 4",
+			// Structure preservation requires values to associate
+			// with a consistent attribute.
+			`<input checked {{""}}="Whose value am I?">`,
+			`<input checked ZgotmplZ="Whose value am I?">`,
+		},
+		{
+			"dynamic element name",
+			`<h{{3}}><table><t{{"head"}}>...</h{{3}}>`,
+			`<h3><table><thead>...</h3>`,
+		},
+		{
+			"bad dynamic element name",
+			// Dynamic element names are typically used to switch
+			// between (thead, tfoot, tbody), (ul, ol), (th, td),
+			// and other replaceable sets.
+			// We do not currently easily support (ul, ol).
+			// If we do change to support that, this test should
+			// catch failures to filter out special tag names which
+			// would violate the structure preservation property --
+			// if any special tag name could be substituted, then
+			// the content could be raw text/RCDATA for some inputs
+			// and regular HTML content for others.
+			`<{{"script"}}>{{"doEvil()"}}</{{"script"}}>`,
+			`&lt;script>doEvil()&lt;/script>`,
+		},
+		{
+			"srcset bad URL in second position",
+			`<img srcset="{{"/not-an-image#,javascript:alert(1)"}}">`,
+			// The second URL is also filtered.
+			`<img srcset="/not-an-image#,#ZgotmplZ">`,
+		},
+		{
+			"srcset buffer growth",
+			`<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
+			`<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
+		},
+	}
+
+	for _, test := range tests {
+		tmpl := New(test.name)
+		tmpl = Must(tmpl.Parse(test.input))
+		// Check for bug 6459: Tree field was not set in Parse.
+		if tmpl.Tree != tmpl.text.Tree {
+			t.Errorf("%s: tree not set properly", test.name)
+			continue
+		}
+		b := new(bytes.Buffer)
+		if err := tmpl.Execute(b, data); err != nil {
+			t.Errorf("%s: template execution failed: %s", test.name, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+			continue
+		}
+		b.Reset()
+		if err := tmpl.Execute(b, pdata); err != nil {
+			t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
+			continue
+		}
+		if tmpl.Tree != tmpl.text.Tree {
+			t.Errorf("%s: tree mismatch", test.name)
+			continue
+		}
+	}
+}
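+
+// ExampleTemplate_zgotmplz is a compact illustration of the URL filtering
+// exercised in TestEscape above: a dynamic value carrying an unsafe scheme
+// is replaced with "#ZgotmplZ". The template text and data here are
+// illustrative only.
+func ExampleTemplate_zgotmplz() {
+	t := Must(New("z").Parse(`<a href="{{.}}">link</a>`))
+	if err := t.Execute(os.Stdout, "javascript:alert(1)"); err != nil {
+		panic(err)
+	}
+	// Output:
+	// <a href="#ZgotmplZ">link</a>
+}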
+
+func TestEscapeMap(t *testing.T) {
+	data := map[string]string{
+		"html":     `<h1>Hi!</h1>`,
+		"urlquery": `http://www.foo.com/index.html?title=main`,
+	}
+	for _, test := range [...]struct {
+		desc, input, output string
+	}{
+		// covering issue 20323
+		{
+			"field with predefined escaper name 1",
+			`{{.html | print}}`,
+			`&lt;h1&gt;Hi!&lt;/h1&gt;`,
+		},
+		// covering issue 20323
+		{
+			"field with predefined escaper name 2",
+			`{{.urlquery | print}}`,
+			`http://www.foo.com/index.html?title=main`,
+		},
+	} {
+		tmpl := Must(New("").Parse(test.input))
+		b := new(bytes.Buffer)
+		if err := tmpl.Execute(b, data); err != nil {
+			t.Errorf("%s: template execution failed: %s", test.desc, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.desc, w, g)
+			continue
+		}
+	}
+}
+
+func TestEscapeSet(t *testing.T) {
+	type dataItem struct {
+		Children []*dataItem
+		X        string
+	}
+
+	data := dataItem{
+		Children: []*dataItem{
+			{X: "foo"},
+			{X: "<bar>"},
+			{
+				Children: []*dataItem{
+					{X: "baz"},
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		inputs map[string]string
+		want   string
+	}{
+		// The trivial set.
+		{
+			map[string]string{
+				"main": ``,
+			},
+			``,
+		},
+		// A template called in the start context.
+		{
+			map[string]string{
+				"main":   `Hello, {{template "helper"}}!`,
+				"helper": `{{"<World>"}}`,
+			},
+			`Hello, &lt;World&gt;!`,
+		},
+		// A template called in a context other than the start.
+		{
+			map[string]string{
+				"main": `<a onclick='a = {{template "helper"}};'>`,
+				// Not a valid top level HTML template.
+				// "<b" is not a full tag.
+				"helper": `{{"<a>"}}<b`,
+			},
+			`<a onclick='a = &#34;\u003ca\u003e&#34;<b;'>`,
+		},
+		// A recursive template that ends in its start context.
+		{
+			map[string]string{
+				"main": `{{range .Children}}{{template "main" .}}{{else}}{{.X}} {{end}}`,
+			},
+			`foo &lt;bar&gt; baz `,
+		},
+		// A recursive helper template that ends in its start context.
+		{
+			map[string]string{
+				"main":   `{{template "helper" .}}`,
+				"helper": `{{if .Children}}<ul>{{range .Children}}<li>{{template "main" .}}</li>{{end}}</ul>{{else}}{{.X}}{{end}}`,
+			},
+			`<ul><li>foo</li><li>&lt;bar&gt;</li><li><ul><li>baz</li></ul></li></ul>`,
+		},
+		// Co-recursive templates that end in its start context.
+		{
+			map[string]string{
+				"main":   `<blockquote>{{range .Children}}{{template "helper" .}}{{end}}</blockquote>`,
+				"helper": `{{if .Children}}{{template "main" .}}{{else}}{{.X}}<br>{{end}}`,
+			},
+			`<blockquote>foo<br>&lt;bar&gt;<br><blockquote>baz<br></blockquote></blockquote>`,
+		},
+		// A template that is called in two different contexts.
+		{
+			map[string]string{
+				"main":   `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
+				"helper": `{{11}} of {{"<100>"}}`,
+			},
+			`<button onclick="title='11 of \u003c100\u003e'; ...">11 of &lt;100&gt;</button>`,
+		},
+		// A non-recursive template that ends in a different context.
+		// helper starts in jsCtxRegexp and ends in jsCtxDivOp.
+		{
+			map[string]string{
+				"main":   `<script>var x={{template "helper"}}/{{"42"}};</script>`,
+				"helper": "{{126}}",
+			},
+			`<script>var x= 126 /"42";</script>`,
+		},
+		// A recursive template that ends in a similar context.
+		{
+			map[string]string{
+				"main":      `<script>var x=[{{template "countdown" 4}}];</script>`,
+				"countdown": `{{.}}{{if .}},{{template "countdown" . | pred}}{{end}}`,
+			},
+			`<script>var x=[ 4 , 3 , 2 , 1 , 0 ];</script>`,
+		},
+		// A recursive template that ends in a different context.
+		/*
+			{
+				map[string]string{
+					"main":   `<a href="/foo{{template "helper" .}}">`,
+					"helper": `{{if .Children}}{{range .Children}}{{template "helper" .}}{{end}}{{else}}?x={{.X}}{{end}}`,
+				},
+				`<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
+			},
+		*/
+	}
+
+	// pred is a template function that returns the predecessor of a
+	// natural number for testing recursive templates.
+	fns := FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
+		if len(a) == 1 {
+			if i, _ := a[0].(int); i > 0 {
+				return i - 1, nil
+			}
+		}
+		return nil, fmt.Errorf("undefined pred(%v)", a)
+	}}
+
+	for _, test := range tests {
+		source := ""
+		for name, body := range test.inputs {
+			source += fmt.Sprintf("{{define %q}}%s{{end}} ", name, body)
+		}
+		tmpl, err := New("root").Funcs(fns).Parse(source)
+		if err != nil {
+			t.Errorf("error parsing %q: %v", source, err)
+			continue
+		}
+		var b bytes.Buffer
+
+		if err := tmpl.ExecuteTemplate(&b, "main", data); err != nil {
+			t.Errorf("%q executing %v", err.Error(), tmpl.Lookup("main"))
+			continue
+		}
+		if got := b.String(); test.want != got {
+			t.Errorf("want\n\t%q\ngot\n\t%q", test.want, got)
+		}
+	}
+
+}
+
+func TestErrors(t *testing.T) {
+	tests := []struct {
+		input string
+		err   string
+	}{
+		// Non-error cases.
+		{
+			"{{if .Cond}}<a>{{else}}<b>{{end}}",
+			"",
+		},
+		{
+			"{{if .Cond}}<a>{{end}}",
+			"",
+		},
+		{
+			"{{if .Cond}}{{else}}<b>{{end}}",
+			"",
+		},
+		{
+			"{{with .Cond}}<div>{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a>{{end}}",
+			"",
+		},
+		{
+			"<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
+			"",
+		},
+		// Error cases.
+		{
+			"{{if .Cond}}<a{{end}}",
+			"z:1:5: {{if}} branches",
+		},
+		{
+			"{{if .Cond}}\n{{else}}\n<a{{end}}",
+			"z:1:5: {{if}} branches",
+		},
+		{
+			// Missing quote in the else branch.
+			`{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`,
+			"z:1:5: {{if}} branches",
+		},
+		{
+			// Different kind of attribute: href implies a URL.
+			"<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>",
+			"z:1:8: {{if}} branches",
+		},
+		{
+			"\n{{with .X}}<a{{end}}",
+			"z:2:7: {{with}} branches",
+		},
+		{
+			"\n{{with .X}}<a>{{else}}<a{{end}}",
+			"z:2:7: {{with}} branches",
+		},
+		{
+			"{{range .Items}}<a{{end}}",
+			`z:1: on range loop re-entry: "<" in attribute name: "<a"`,
+		},
+		{
+			"\n{{range .Items}} x='<a{{end}}",
+			"z:2:8: on range loop re-entry: {{range}} branches",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
+			"z:1:29: at range loop break: {{range}} branches end in different contexts",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
+			"z:1:29: at range loop continue: {{range}} branches end in different contexts",
+		},
+		{
+			"<a b=1 c={{.H}}",
+			"z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+		},
+		{
+			"<script>foo();",
+			"z: ends in a non-text context: {stateJS",
+		},
+		{
+			`<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
+			"z:1:47: {{.H}} appears in an ambiguous context within a URL",
+		},
+		{
+			`<a onclick="alert('Hello \`,
+			`unfinished escape sequence in JS string: "Hello \\"`,
+		},
+		{
+			`<a onclick='alert("Hello\, World\`,
+			`unfinished escape sequence in JS string: "Hello\\, World\\"`,
+		},
+		{
+			`<a onclick='alert(/x+\`,
+			`unfinished escape sequence in JS string: "x+\\"`,
+		},
+		{
+			`<a onclick="/foo[\]/`,
+			`unfinished JS regexp charset: "foo[\\]/"`,
+		},
+		{
+			// It is ambiguous whether 1.5 should be 1\.5 or 1.5.
+			// Either `var x = 1/- 1.5 /i.test(x)`
+			// where `i.test(x)` is a method call of reference i,
+			// or `/-1\.5/i.test(x)` which is a method call on a
+			// case insensitive regular expression.
+			`<script>{{if false}}var x = 1{{end}}/-{{"1.5"}}/i.test(x)</script>`,
+			`'/' could start a division or regexp: "/-"`,
+		},
+		{
+			`{{template "foo"}}`,
+			"z:1:11: no such template \"foo\"",
+		},
+		{
+			`<div{{template "y"}}>` +
+				// Illegal starting in stateTag but not in stateText.
+				`{{define "y"}} foo<b{{end}}`,
+			`"<" in attribute name: " foo<b"`,
+		},
+		{
+			`<script>reverseList = [{{template "t"}}]</script>` +
+				// Missing " after recursive call.
+				`{{define "t"}}{{if .Tail}}{{template "t" .Tail}}{{end}}{{.Head}}",{{end}}`,
+			`: cannot compute output context for template t$htmltemplate_stateJS_elementScript`,
+		},
+		{
+			`<input type=button value=onclick=>`,
+			`html/template:z: "=" in unquoted attr: "onclick="`,
+		},
+		{
+			`<input type=button value= onclick=>`,
+			`html/template:z: "=" in unquoted attr: "onclick="`,
+		},
+		{
+			`<input type=button value= 1+1=2>`,
+			`html/template:z: "=" in unquoted attr: "1+1=2"`,
+		},
+		{
+			"<a class=`foo>",
+			"html/template:z: \"`\" in unquoted attr: \"`foo\"",
+		},
+		{
+			`<a style=font:'Arial'>`,
+			`html/template:z: "'" in unquoted attr: "font:'Arial'"`,
+		},
+		{
+			`<a=foo>`,
+			`: expected space, attr name, or end of tag, but got "=foo>"`,
+		},
+		{
+			`Hello, {{. | urlquery | print}}!`,
+			// urlquery is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "urlquery" disallowed in template`,
+		},
+		{
+			`Hello, {{. | html | print}}!`,
+			// html is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`Hello, {{html . | print}}!`,
+			// A direct call to html is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`<div class={{. | html}}>Hello<div>`,
+			// html is disallowed in a pipeline that is in an unquoted attribute context,
+			// even if it is the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`Hello, {{. | urlquery | html}}!`,
+			// html is allowed since it is the last command in the pipeline, but urlquery is not.
+			`predefined escaper "urlquery" disallowed in template`,
+		},
+	}
+	for _, test := range tests {
+		buf := new(bytes.Buffer)
+		tmpl, err := New("z").Parse(test.input)
+		if err != nil {
+			t.Errorf("input=%q: unexpected parse error %s\n", test.input, err)
+			continue
+		}
+		err = tmpl.Execute(buf, nil)
+		var got string
+		if err != nil {
+			got = err.Error()
+		}
+		if test.err == "" {
+			if got != "" {
+				t.Errorf("input=%q: unexpected error %q", test.input, got)
+			}
+			continue
+		}
+		if !strings.Contains(got, test.err) {
+			t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err)
+			continue
+		}
+		// Check that we get the same error if we call Execute again.
+		if err := tmpl.Execute(buf, nil); err == nil || err.Error() != got {
+			t.Errorf("input=%q: unexpected error on second call %q", test.input, err)
+
+		}
+	}
+}
+
+func TestEscapeText(t *testing.T) {
+	tests := []struct {
+		input  string
+		output context
+	}{
+		{
+			``,
+			context{},
+		},
+		{
+			`Hello, World!`,
+			context{},
+		},
+		{
+			// An orphaned "<" is OK.
+			`I <3 Ponies!`,
+			context{},
+		},
+		{
+			`<a`,
+			context{state: stateTag},
+		},
+		{
+			`<a `,
+			context{state: stateTag},
+		},
+		{
+			`<a>`,
+			context{state: stateText},
+		},
+		{
+			`<a href`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a on`,
+			context{state: stateAttrName, attr: attrScript},
+		},
+		{
+			`<a href `,
+			context{state: stateAfterName, attr: attrURL},
+		},
+		{
+			`<a style  =  `,
+			context{state: stateBeforeValue, attr: attrStyle},
+		},
+		{
+			`<a href=`,
+			context{state: stateBeforeValue, attr: attrURL},
+		},
+		{
+			`<a href=x`,
+			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href=x `,
+			context{state: stateTag},
+		},
+		{
+			`<a href=>`,
+			context{state: stateText},
+		},
+		{
+			`<a href=x>`,
+			context{state: stateText},
+		},
+		{
+			`<a href ='`,
+			context{state: stateURL, delim: delimSingleQuote, attr: attrURL},
+		},
+		{
+			`<a href=''`,
+			context{state: stateTag},
+		},
+		{
+			`<a href= "`,
+			context{state: stateURL, delim: delimDoubleQuote, attr: attrURL},
+		},
+		{
+			`<a href=""`,
+			context{state: stateTag},
+		},
+		{
+			`<a title="`,
+			context{state: stateAttr, delim: delimDoubleQuote},
+		},
+		{
+			`<a HREF='http:`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a Href='/`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href='"`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="'`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href='&apos;`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="&quot;`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="&#34;`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href=&quot;`,
+			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<img alt="1">`,
+			context{state: stateText},
+		},
+		{
+			`<img alt="1>"`,
+			context{state: stateTag},
+		},
+		{
+			`<img alt="1>">`,
+			context{state: stateText},
+		},
+		{
+			`<input checked type="checkbox"`,
+			context{state: stateTag},
+		},
+		{
+			`<a onclick="`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="//foo`,
+			context{state: stateJSLineCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\n",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\r\n",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\u2028",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/*`,
+			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/*/`,
+			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/**/`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onkeypress="&quot;`,
+			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick='&quot;foo&quot;`,
+			context{state: stateJS, delim: delimSingleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick=&#39;foo&#39;`,
+			context{state: stateJS, delim: delimSpaceOrTagEnd, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick=&#39;foo`,
+			context{state: stateJSSqStr, delim: delimSpaceOrTagEnd, attr: attrScript},
+		},
+		{
+			`<a onclick="&quot;foo'`,
+			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo&quot;`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<A ONCLICK="'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo'`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo\'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo\'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo/`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<script>/foo/ /=`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<a onclick="1 /foo`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="1 /*c*/ /foo`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo[/]`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo\/`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo/`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<input checked style="`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="//`,
+			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="//</script>`,
+			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			"<a style='//\n",
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			"<a style='//\r",
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/*`,
+			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/*/`,
+			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/**/`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: '`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: &quot;`,
+			context{state: stateCSSDqStr, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: '/foo?img=`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
+		},
+		{
+			`<a style="background: '/`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(&#x22;/`,
+			context{state: stateCSSDqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/`,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/)`,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/ `,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(/`,
+			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( `,
+			context{state: stateCSSURL, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( /image?name=`,
+			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(x)`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('x'`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( x `,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<!-- foo`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!-->`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!--->`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!-- foo -->`,
+			context{state: stateText},
+		},
+		{
+			`<script`,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script src="foo.js" `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script src='foo.js' `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script type=text/javascript `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script>`,
+			context{state: stateJS, jsCtx: jsCtxRegexp, element: elementScript},
+		},
+		{
+			`<script>foo`,
+			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+		},
+		{
+			`<script>foo</script>`,
+			context{state: stateText},
+		},
+		{
+			`<script>foo</script><!--`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<script>document.write("<p>foo</p>");`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<script>document.write("<p>foo<\/script>");`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<script>document.write("<script>alert(1)</script>");`,
+			context{state: stateText},
+		},
+		{
+			`<script type="text/template">`,
+			context{state: stateText},
+		},
+		// covering issue 19968
+		{
+			`<script type="TEXT/JAVASCRIPT">`,
+			context{state: stateJS, element: elementScript},
+		},
+		// covering issue 19965
+		{
+			`<script TYPE="text/template">`,
+			context{state: stateText},
+		},
+		{
+			`<script type="notjs">`,
+			context{state: stateText},
+		},
+		{
+			`<Script>`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<SCRIPT>foo`,
+			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+		},
+		{
+			`<textarea>value`,
+			context{state: stateRCDATA, element: elementTextarea},
+		},
+		{
+			`<textarea>value</TEXTAREA>`,
+			context{state: stateText},
+		},
+		{
+			`<textarea name=html><b`,
+			context{state: stateRCDATA, element: elementTextarea},
+		},
+		{
+			`<title>value`,
+			context{state: stateRCDATA, element: elementTitle},
+		},
+		{
+			`<style>value`,
+			context{state: stateCSS, element: elementStyle},
+		},
+		{
+			`<a xlink:href`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlns`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlns:foo`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlnsxyz`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a data-url`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a data-iconUri`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a data-urlItem`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a g:url`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:iconUri`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:urlItem`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:value`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a svg:style='`,
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			`<svg:font-face`,
+			context{state: stateTag},
+		},
+		{
+			`<svg:a svg:onclick="`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<svg:a svg:onclick="x()">`,
+			context{},
+		},
+	}
+
+	for _, test := range tests {
+		b, e := []byte(test.input), makeEscaper(nil)
+		c := e.escapeText(context{}, &parse.TextNode{NodeType: parse.NodeText, Text: b})
+		if !test.output.eq(c) {
+			t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
+			continue
+		}
+		if test.input != string(b) {
+			t.Errorf("input %q: text node was modified: want %q got %q", test.input, test.input, b)
+			continue
+		}
+	}
+}
+
+func TestEnsurePipelineContains(t *testing.T) {
+	tests := []struct {
+		input, output string
+		ids           []string
+	}{
+		{
+			"{{.X}}",
+			".X",
+			[]string{},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{},
+		},
+		{
+			"{{.X}}",
+			".X | html",
+			[]string{"html"},
+		},
+		{
+			"{{html .X}}",
+			"_eval_args_ .X | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{html .X .Y .Z}}",
+			"_eval_args_ .X .Y .Z | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{.X | print}}",
+			".X | print | urlquery",
+			[]string{"urlquery"},
+		},
+		{
+			"{{.X | print | urlquery}}",
+			".X | print | urlquery",
+			[]string{"urlquery"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{.X | print 2 | .f 3}}",
+			".X | print 2 | .f 3 | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		{
+			// covering issue 10801
+			"{{.X | println.x }}",
+			".X | println.x | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		{
+			// covering issue 10801
+			"{{.X | (print 12 | println).x }}",
+			".X | (print 12 | println).x | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		// The following test cases ensure that the merging of internal escapers
+		// with the predefined "html" and "urlquery" escapers is correct.
+		{
+			"{{.X | urlquery}}",
+			".X | _html_template_urlfilter | urlquery",
+			[]string{"_html_template_urlfilter", "_html_template_urlnormalizer"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery | _html_template_urlfilter | _html_template_cssescaper",
+			[]string{"_html_template_urlfilter", "_html_template_cssescaper"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery",
+			[]string{"_html_template_urlnormalizer"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery",
+			[]string{"_html_template_urlescaper"},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{"_html_template_htmlescaper"},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{"_html_template_rcdataescaper"},
+		},
+	}
+	for i, test := range tests {
+		tmpl := template.Must(template.New("test").Parse(test.input))
+		action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
+		if !ok {
+			t.Errorf("First node is not an action: %s", test.input)
+			continue
+		}
+		pipe := action.Pipe
+		originalIDs := make([]string, len(test.ids))
+		copy(originalIDs, test.ids)
+		ensurePipelineContains(pipe, test.ids)
+		got := pipe.String()
+		if got != test.output {
+			t.Errorf("#%d: %s, %v: want\n\t%s\ngot\n\t%s", i, test.input, originalIDs, test.output, got)
+		}
+	}
+}
+
+func TestEscapeMalformedPipelines(t *testing.T) {
+	tests := []string{
+		"{{ 0 | $ }}",
+		"{{ 0 | $ | urlquery }}",
+		"{{ 0 | (nil) }}",
+		"{{ 0 | (nil) | html }}",
+	}
+	for _, test := range tests {
+		var b bytes.Buffer
+		tmpl, err := New("test").Parse(test)
+		if err != nil {
+			t.Errorf("failed to parse set: %q", err)
+		}
+		err = tmpl.Execute(&b, nil)
+		if err == nil {
+			t.Errorf("Expected error for %q", test)
+		}
+	}
+}
+
+func TestEscapeErrorsNotIgnorable(t *testing.T) {
+	var b bytes.Buffer
+	tmpl, _ := New("dangerous").Parse("<a")
+	err := tmpl.Execute(&b, nil)
+	if err == nil {
+		t.Errorf("Expected error")
+	} else if b.Len() != 0 {
+		t.Errorf("Emitted output despite escaping failure")
+	}
+}
+
+func TestEscapeSetErrorsNotIgnorable(t *testing.T) {
+	var b bytes.Buffer
+	tmpl, err := New("root").Parse(`{{define "t"}}<a{{end}}`)
+	if err != nil {
+		t.Errorf("failed to parse set: %q", err)
+	}
+	err = tmpl.ExecuteTemplate(&b, "t", nil)
+	if err == nil {
+		t.Errorf("Expected error")
+	} else if b.Len() != 0 {
+		t.Errorf("Emitted output despite escaping failure")
+	}
+}
+
+func TestRedundantFuncs(t *testing.T) {
+	inputs := []interface{}{
+		"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+			"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+			` !"#$%&'()*+,-./` +
+			`0123456789:;<=>?` +
+			`@ABCDEFGHIJKLMNO` +
+			`PQRSTUVWXYZ[\]^_` +
+			"`abcdefghijklmno" +
+			"pqrstuvwxyz{|}~\x7f" +
+			"\u00A0\u0100\u2028\u2029\ufeff\ufdec\ufffd\uffff\U0001D11E" +
+			"&amp;%22\\",
+		CSS(`a[href =~ "//example.com"]#foo`),
+		HTML(`Hello, <b>World</b> &amp;tc!`),
+		HTMLAttr(` dir="ltr"`),
+		JS(`c && alert("Hello, World!");`),
+		JSStr(`Hello, World & O'Reilly\x21`),
+		URL(`greeting=H%69&addressee=(World)`),
+	}
+
+	for n0, m := range redundantFuncs {
+		f0 := funcMap[n0].(func(...interface{}) string)
+		for n1 := range m {
+			f1 := funcMap[n1].(func(...interface{}) string)
+			for _, input := range inputs {
+				want := f0(input)
+				if got := f1(want); want != got {
+					t.Errorf("%s %s with %T %q: want\n\t%q,\ngot\n\t%q", n0, n1, input, input, want, got)
+				}
+			}
+		}
+	}
+}
+
+func TestIndirectPrint(t *testing.T) {
+	a := 3
+	ap := &a
+	b := "hello"
+	bp := &b
+	bpp := &bp
+	tmpl := Must(New("t").Parse(`{{.}}`))
+	var buf bytes.Buffer
+	err := tmpl.Execute(&buf, ap)
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	} else if buf.String() != "3" {
+		t.Errorf(`Expected "3"; got %q`, buf.String())
+	}
+	buf.Reset()
+	err = tmpl.Execute(&buf, bpp)
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	} else if buf.String() != "hello" {
+		t.Errorf(`Expected "hello"; got %q`, buf.String())
+	}
+}
+
+// This is a test for issue 3272.
+func TestEmptyTemplateHTML(t *testing.T) {
+	page := Must(New("page").ParseFiles(os.DevNull))
+	if err := page.ExecuteTemplate(os.Stdout, "page", "nothing"); err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+type Issue7379 int
+
+func (Issue7379) SomeMethod(x int) string {
+	return fmt.Sprintf("<%d>", x)
+}
+
+// This is a test for issue 7379: a type assertion error caused a panic, and
+// then the code handling the panic broke escaping. It's hard to see the
+// second problem once the first is fixed, but its fix is trivial, so we let
+// that go. See the discussion for issue 7379.
+func TestPipeToMethodIsEscaped(t *testing.T) {
+	tmpl := Must(New("x").Parse("<html>{{0 | .SomeMethod}}</html>\n"))
+	tryExec := func() string {
+		defer func() {
+			panicValue := recover()
+			if panicValue != nil {
+				t.Errorf("panicked: %v\n", panicValue)
+			}
+		}()
+		var b bytes.Buffer
+		tmpl.Execute(&b, Issue7379(0))
+		return b.String()
+	}
+	for i := 0; i < 3; i++ {
+		str := tryExec()
+		const expect = "<html>&lt;0&gt;</html>\n"
+		if str != expect {
+			t.Errorf("expected %q got %q", expect, str)
+		}
+	}
+}
+
+// Unlike text/template, html/template crashed if given an incomplete
+// template, that is, a template that had been named but not given any content.
+// This is issue #10204.
+func TestErrorOnUndefined(t *testing.T) {
+	tmpl := New("undefined")
+
+	err := tmpl.Execute(nil, nil)
+	if err == nil {
+		t.Error("expected error")
+	} else if !strings.Contains(err.Error(), "incomplete") {
+		t.Errorf("expected error about incomplete template; got %s", err)
+	}
+}
+
+// This covers issue #20842.
+func TestIdempotentExecute(t *testing.T) {
+	tmpl := Must(New("").
+		Parse(`{{define "main"}}<body>{{template "hello"}}</body>{{end}}`))
+	Must(tmpl.
+		Parse(`{{define "hello"}}Hello, {{"Ladies & Gentlemen!"}}{{end}}`))
+	got := new(bytes.Buffer)
+	var err error
+	// Ensure that "hello" produces the same output when executed twice.
+	want := "Hello, Ladies &amp; Gentlemen!"
+	for i := 0; i < 2; i++ {
+		err = tmpl.ExecuteTemplate(got, "hello", nil)
+		if err != nil {
+			t.Errorf("unexpected error: %s", err)
+		}
+		if got.String() != want {
+			t.Errorf("after executing template \"hello\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
+		}
+		got.Reset()
+	}
+	// Ensure that the implicit re-execution of "hello" during the execution of
+	// "main" does not cause the output of "hello" to change.
+	err = tmpl.ExecuteTemplate(got, "main", nil)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	// If the HTML escaper is added again to the action {{"Ladies & Gentlemen!"}},
+	// we would expect to see the ampersand overescaped to "&amp;amp;".
+	want = "<body>Hello, Ladies &amp; Gentlemen!</body>"
+	if got.String() != want {
+		t.Errorf("after executing template \"main\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
+	}
+}
+
+func BenchmarkEscapedExecute(b *testing.B) {
+	tmpl := Must(New("t").Parse(`<a onclick="alert('{{.}}')">{{.}}</a>`))
+	var buf bytes.Buffer
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		tmpl.Execute(&buf, "foo & 'bar' & baz")
+		buf.Reset()
+	}
+}
+
+// Covers issue 22780.
+func TestOrphanedTemplate(t *testing.T) {
+	t1 := Must(New("foo").Parse(`<a href="{{.}}">link1</a>`))
+	t2 := Must(t1.New("foo").Parse(`bar`))
+
+	var b bytes.Buffer
+	const wantError = `template: "foo" is an incomplete or empty template`
+	if err := t1.Execute(&b, "javascript:alert(1)"); err == nil {
+		t.Fatal("expected error executing t1")
+	} else if gotError := err.Error(); gotError != wantError {
+		t.Fatalf("got t1 execution error:\n\t%s\nwant:\n\t%s", gotError, wantError)
+	}
+	b.Reset()
+	if err := t2.Execute(&b, nil); err != nil {
+		t.Fatalf("error executing t2: %s", err)
+	}
+	const want = "bar"
+	if got := b.String(); got != want {
+		t.Fatalf("t2 rendered %q, want %q", got, want)
+	}
+}
+
+// Covers issue 21844.
+func TestAliasedParseTreeDoesNotOverescape(t *testing.T) {
+	const (
+		tmplText = `{{.}}`
+		data     = `<baz>`
+		want     = `&lt;baz&gt;`
+	)
+	// Templates "foo" and "bar" both alias the same underlying parse tree.
+	tpl := Must(New("foo").Parse(tmplText))
+	if _, err := tpl.AddParseTree("bar", tpl.Tree); err != nil {
+		t.Fatalf("AddParseTree error: %v", err)
+	}
+	var b1, b2 bytes.Buffer
+	if err := tpl.ExecuteTemplate(&b1, "foo", data); err != nil {
+		t.Fatalf(`ExecuteTemplate failed for "foo": %v`, err)
+	}
+	if err := tpl.ExecuteTemplate(&b2, "bar", data); err != nil {
+		t.Fatalf(`ExecuteTemplate failed for "bar": %v`, err)
+	}
+	got1, got2 := b1.String(), b2.String()
+	if got1 != want {
+		t.Fatalf(`Template "foo" rendered %q, want %q`, got1, want)
+	}
+	if got1 != got2 {
+		t.Fatalf(`Template "foo" and "bar" rendered %q and %q respectively, expected equal values`, got1, got2)
+	}
+}
diff --git a/internal/backport/html/template/example_test.go b/internal/backport/html/template/example_test.go
new file mode 100644
index 0000000..dbb6511
--- /dev/null
+++ b/internal/backport/html/template/example_test.go
@@ -0,0 +1,182 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"golang.org/x/website/internal/backport/html/template"
+)
+
+func Example() {
+	const tpl = `
+<!DOCTYPE html>
+<html>
+	<head>
+		<meta charset="UTF-8">
+		<title>{{.Title}}</title>
+	</head>
+	<body>
+		{{range .Items}}<div>{{ . }}</div>{{else}}<div><strong>no rows</strong></div>{{end}}
+	</body>
+</html>`
+
+	check := func(err error) {
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	t, err := template.New("webpage").Parse(tpl)
+	check(err)
+
+	data := struct {
+		Title string
+		Items []string
+	}{
+		Title: "My page",
+		Items: []string{
+			"My photos",
+			"My blog",
+		},
+	}
+
+	err = t.Execute(os.Stdout, data)
+	check(err)
+
+	noItems := struct {
+		Title string
+		Items []string
+	}{
+		Title: "My another page",
+		Items: []string{},
+	}
+
+	err = t.Execute(os.Stdout, noItems)
+	check(err)
+
+	// Output:
+	// <!DOCTYPE html>
+	// <html>
+	// 	<head>
+	// 		<meta charset="UTF-8">
+	// 		<title>My page</title>
+	// 	</head>
+	// 	<body>
+	// 		<div>My photos</div><div>My blog</div>
+	// 	</body>
+	// </html>
+	// <!DOCTYPE html>
+	// <html>
+	// 	<head>
+	// 		<meta charset="UTF-8">
+	// 		<title>My another page</title>
+	// 	</head>
+	// 	<body>
+	// 		<div><strong>no rows</strong></div>
+	// 	</body>
+	// </html>
+
+}
+
+func Example_autoescaping() {
+	check := func(err error) {
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+	check(err)
+	err = t.ExecuteTemplate(os.Stdout, "T", "<script>alert('you have been pwned')</script>")
+	check(err)
+	// Output:
+	// Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
+}
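+
+// By contrast, content wrapped in the template.HTML type is treated as
+// trusted markup and is emitted without escaping. This is a minimal
+// companion sketch; the template text and data are illustrative only.
+func Example_trustedHTML() {
+	t, err := template.New("trusted").Parse(`{{.}}`)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := t.Execute(os.Stdout, template.HTML("<b>World</b>")); err != nil {
+		log.Fatal(err)
+	}
+	// Output:
+	// <b>World</b>
+}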
+
+func Example_escape() {
+	const s = `"Fran & Freddie's Diner" <tasty@example.com>`
+	v := []interface{}{`"Fran & Freddie's Diner"`, ' ', `<tasty@example.com>`}
+
+	fmt.Println(template.HTMLEscapeString(s))
+	template.HTMLEscape(os.Stdout, []byte(s))
+	fmt.Fprintln(os.Stdout, "")
+	fmt.Println(template.HTMLEscaper(v...))
+
+	fmt.Println(template.JSEscapeString(s))
+	template.JSEscape(os.Stdout, []byte(s))
+	fmt.Fprintln(os.Stdout, "")
+	fmt.Println(template.JSEscaper(v...))
+
+	fmt.Println(template.URLQueryEscaper(v...))
+
+	// Output:
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34;32&lt;tasty@example.com&gt;
+	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
+	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
+	// \"Fran \u0026 Freddie\'s Diner\"32\u003Ctasty@example.com\u003E
+	// %22Fran+%26+Freddie%27s+Diner%2232%3Ctasty%40example.com%3E
+
+}
+
+func ExampleTemplate_Delims() {
+	const text = "<<.Greeting>> {{.Name}}"
+
+	data := struct {
+		Greeting string
+		Name     string
+	}{
+		Greeting: "Hello",
+		Name:     "Joe",
+	}
+
+	t := template.Must(template.New("tpl").Delims("<<", ">>").Parse(text))
+
+	err := t.Execute(os.Stdout, data)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Output:
+	// Hello {{.Name}}
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+func ExampleTemplate_block() {
+	const (
+		master  = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
+		overlay = `{{define "list"}} {{join . ", "}}{{end}} `
+	)
+	var (
+		funcs     = template.FuncMap{"join": strings.Join}
+		guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
+	)
+	masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
+	if err != nil {
+		log.Fatal(err)
+	}
+	overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	// Output:
+	// Names:
+	// - Gamora
+	// - Groot
+	// - Nebula
+	// - Rocket
+	// - Star-Lord
+	// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
+}
diff --git a/internal/backport/html/template/examplefiles_test.go b/internal/backport/html/template/examplefiles_test.go
new file mode 100644
index 0000000..2efc8e5
--- /dev/null
+++ b/internal/backport/html/template/examplefiles_test.go
@@ -0,0 +1,227 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+// templateFile defines the contents of a template to be stored in a file, for testing.
+type templateFile struct {
+	name     string
+	contents string
+}
+
+func createTestDir(files []templateFile) string {
+	dir, err := ioutil.TempDir("", "template")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, file := range files {
+		f, err := os.Create(filepath.Join(dir, file.name))
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer f.Close()
+		_, err = io.WriteString(f, file.contents)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	return dir
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// Here we demonstrate loading a set of templates from a directory.
+func ExampleTemplate_glob() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
+		// T1.tmpl defines a template, T1 that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// T0.tmpl is the first name matched, so it becomes the starting template,
+	// the value returned by ParseGlob.
+	tmpl := template.Must(template.ParseGlob(pattern))
+
+	err := tmpl.Execute(os.Stdout, nil)
+	if err != nil {
+		log.Fatalf("template execution: %s", err)
+	}
+	// Output:
+	// T0 invokes T1: (T1 invokes T2: (This is T2))
+}
+
+// Here we demonstrate loading a set of templates from files in different directories.
+func ExampleTemplate_parsefiles() {
+	// Here we create different temporary directories and populate them with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir1 := createTestDir([]templateFile{
+		// T1.tmpl is a plain template file that just invokes T2.
+		{"T1.tmpl", `T1 invokes T2: ({{template "T2"}})`},
+	})
+
+	dir2 := createTestDir([]templateFile{
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+
+	// Clean up after the test; another quirk of running as an example.
+	defer func(dirs ...string) {
+		for _, dir := range dirs {
+			os.RemoveAll(dir)
+		}
+	}(dir1, dir2)
+
+	// Here starts the example proper.
+	// Let's just parse only dir1/T1 and dir2/T2.
+	paths := []string{
+		filepath.Join(dir1, "T1.tmpl"),
+		filepath.Join(dir2, "T2.tmpl"),
+	}
+	tmpl := template.Must(template.ParseFiles(paths...))
+
+	err := tmpl.Execute(os.Stdout, nil)
+	if err != nil {
+		log.Fatalf("template execution: %s", err)
+	}
+	// Output:
+	// T1 invokes T2: (This is T2)
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// This example demonstrates one way to share some templates
+// and use them in different contexts. In this variant we add multiple driver
+// templates by hand to an existing bundle of templates.
+func ExampleTemplate_helpers() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T1.tmpl defines a template, T1 that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the helpers.
+	templates := template.Must(template.ParseGlob(pattern))
+	// Add one driver template to the bunch; we do this with an explicit template definition.
+	_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver1: ", err)
+	}
+	// Add another driver template.
+	_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver2: ", err)
+	}
+	// We load all the templates before execution. This package does not require
+	// that behavior but html/template's escaping does, so it's a good habit.
+	err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
+	if err != nil {
+		log.Fatalf("driver1 execution: %s", err)
+	}
+	err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
+	if err != nil {
+		log.Fatalf("driver2 execution: %s", err)
+	}
+	// Output:
+	// Driver 1 calls T1: (T1 invokes T2: (This is T2))
+	// Driver 2 calls T2: (This is T2)
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// This example demonstrates how to use one group of driver
+// templates with distinct sets of helper templates.
+func ExampleTemplate_share() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
+		// T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the drivers.
+	drivers := template.Must(template.ParseGlob(pattern))
+
+	// We must define an implementation of the T2 template. First we clone
+	// the drivers, then add a definition of T2 to the template name space.
+
+	// 1. Clone the helper set to create a new name space from which to run them.
+	first, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning helpers: ", err)
+	}
+	// 2. Define T2, version A, and parse it.
+	_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Now repeat the whole thing, using a different version of T2.
+	// 1. Clone the drivers.
+	second, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning drivers: ", err)
+	}
+	// 2. Define T2, version B, and parse it.
+	_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Execute the templates in the reverse order to verify the
+	// first is unaffected by the second.
+	err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
+	if err != nil {
+		log.Fatalf("second execution: %s", err)
+	}
+	err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
+	if err != nil {
+		log.Fatalf("first execution: %s", err)
+	}
+
+	// Output:
+	// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
+	// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
+}
diff --git a/internal/backport/html/template/exec_test.go b/internal/backport/html/template/exec_test.go
new file mode 100644
index 0000000..4342aa5
--- /dev/null
+++ b/internal/backport/html/template/exec_test.go
@@ -0,0 +1,1834 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for template execution, copied from text/template.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+	// Basics
+	True        bool
+	I           int
+	U16         uint16
+	X, S        string
+	FloatZero   float64
+	ComplexZero complex128
+	// Nested structs.
+	U *U
+	// Struct with String method.
+	V0     V
+	V1, V2 *V
+	// Struct with Error method.
+	W0     W
+	W1, W2 *W
+	// Slices
+	SI      []int
+	SICap   []int
+	SIEmpty []int
+	SB      []bool
+	// Arrays
+	AI [3]int
+	// Maps
+	MSI      map[string]int
+	MSIone   map[string]int // one element, for deterministic output
+	MSIEmpty map[string]int
+	MXI      map[interface{}]int
+	MII      map[int]int
+	MI32S    map[int32]string
+	MI64S    map[int64]string
+	MUI32S   map[uint32]string
+	MUI64S   map[uint64]string
+	MI8S     map[int8]string
+	MUI8S    map[uint8]string
+	SMSI     []map[string]int
+	// Empty interfaces; used to see if we can dig inside one.
+	Empty0 interface{} // nil
+	Empty1 interface{}
+	Empty2 interface{}
+	Empty3 interface{}
+	Empty4 interface{}
+	// Non-empty interfaces.
+	NonEmptyInterface         I
+	NonEmptyInterfacePtS      *I
+	NonEmptyInterfaceNil      I
+	NonEmptyInterfaceTypedNil I
+	// Stringer.
+	Str fmt.Stringer
+	Err error
+	// Pointers
+	PI  *int
+	PS  *string
+	PSI *[]int
+	NIL *int
+	// Function (not method)
+	BinaryFunc      func(string, string) string
+	VariadicFunc    func(...string) string
+	VariadicFuncInt func(int, ...string) string
+	NilOKFunc       func(*int) bool
+	ErrFunc         func() (string, error)
+	PanicFunc       func() string
+	// Template to test evaluation of templates.
+	Tmpl *Template
+	// Unexported field; cannot be accessed by template.
+	unexported int
+}
+
+type S []string
+
+func (S) Method0() string {
+	return "M0"
+}
+
+type U struct {
+	V string
+}
+
+type V struct {
+	j int
+}
+
+func (v *V) String() string {
+	if v == nil {
+		return "nilV"
+	}
+	return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+	k int
+}
+
+func (w *W) Error() string {
+	if w == nil {
+		return "nilW"
+	}
+	return fmt.Sprintf("[%d]", w.k)
+}
+
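+// siVal wraps an S value in the non-empty interface I; S satisfies I through
+// its Method0 method. It is stored by address in tVal.NonEmptyInterfacePtS.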
+var siVal = I(S{"a", "b"})
+
+var tVal = &T{
+	True:   true,
+	I:      17,
+	U16:    16,
+	X:      "x",
+	S:      "xyz",
+	U:      &U{"v"},
+	V0:     V{6666},
+	V1:     &V{7777}, // leave V2 as nil
+	W0:     W{888},
+	W1:     &W{999}, // leave W2 as nil
+	SI:     []int{3, 4, 5},
+	SICap:  make([]int, 5, 10),
+	AI:     [3]int{3, 4, 5},
+	SB:     []bool{true, false},
+	MSI:    map[string]int{"one": 1, "two": 2, "three": 3},
+	MSIone: map[string]int{"one": 1},
+	MXI:    map[interface{}]int{"one": 1},
+	MII:    map[int]int{1: 1},
+	MI32S:  map[int32]string{1: "one", 2: "two"},
+	MI64S:  map[int64]string{2: "i642", 3: "i643"},
+	MUI32S: map[uint32]string{2: "u322", 3: "u323"},
+	MUI64S: map[uint64]string{2: "ui642", 3: "ui643"},
+	MI8S:   map[int8]string{2: "i82", 3: "i83"},
+	MUI8S:  map[uint8]string{2: "u82", 3: "u83"},
+	SMSI: []map[string]int{
+		{"one": 1, "two": 2},
+		{"eleven": 11, "twelve": 12},
+	},
+	Empty1:                    3,
+	Empty2:                    "empty2",
+	Empty3:                    []int{7, 8},
+	Empty4:                    &U{"UinEmpty"},
+	NonEmptyInterface:         &T{X: "x"},
+	NonEmptyInterfacePtS:      &siVal,
+	NonEmptyInterfaceTypedNil: (*T)(nil),
+	Str:                       bytes.NewBuffer([]byte("foozle")),
+	Err:                       errors.New("erroozle"),
+	PI:                        newInt(23),
+	PS:                        newString("a string"),
+	PSI:                       newIntSlice(21, 22, 23),
+	BinaryFunc:                func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
+	VariadicFunc:              func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
+	VariadicFuncInt:           func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
+	NilOKFunc:                 func(s *int) bool { return s == nil },
+	ErrFunc:                   func() (string, error) { return "bla", nil },
+	PanicFunc:                 func() string { panic("test panic") },
+	Tmpl:                      Must(New("x").Parse("test template")), // "x" is the value of .X
+}
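+
+// Fields of tVal that are not listed above (such as V2, W2, NIL, Empty0,
+// SIEmpty, and MSIEmpty) are deliberately left at their zero values so the
+// tests can exercise nil and empty-value handling.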
+
+var tSliceOfNil = []*T{nil}
+
+// A non-empty interface.
+type I interface {
+	Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+	return &n
+}
+
+func newString(s string) *string {
+	return &s
+}
+
+func newIntSlice(n ...int) *[]int {
+	p := new([]int)
+	*p = make([]int, len(n))
+	copy(*p, n)
+	return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+	return "M0"
+}
+
+func (t *T) Method1(a int) int {
+	return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+	return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) Method3(v interface{}) string {
+	return fmt.Sprintf("Method3: %v", v)
+}
+
+func (t *T) Copy() *T {
+	n := new(T)
+	*n = *t
+	return n
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+	v := make([]int, len(b))
+	for i, x := range b {
+		v[i] = x + a
+	}
+	return v
+}
+
+var myError = errors.New("my error")
+
+// MyError returns a value and an error according to its argument.
+func (t *T) MyError(error bool) (bool, error) {
+	if error {
+		return true, myError
+	}
+	return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+	return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+	if b {
+		return "true"
+	}
+	return ""
+}
+
+func typeOf(arg interface{}) string {
+	return fmt.Sprintf("%T", arg)
+}
+
+type execTest struct {
+	name   string
+	input  string
+	output string
+	data   interface{}
+	ok     bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
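+// On a 64-bit platform, for example, bigInt is "0x7fffffffffffffff" and
+// bigUint is "0x8000000000000000".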
+var (
+	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+	// Trivial cases.
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"nil action", "{{nil}}", "", nil, false},
+
+	// Ideal constants.
+	{"ideal int", "{{typeOf 3}}", "int", 0, true},
+	{"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+	{"ideal nil without type", "{{nil}}", "", 0, false},
+
+	// Fields of structs.
+	{".X", "-{{.X}}-", "-x-", tVal, true},
+	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+	{".unexported", "{{.unexported}}", "", tVal, false},
+
+	// Fields on maps.
+	{"map .one", "{{.MSI.one}}", "1", tVal, true},
+	{"map .two", "{{.MSI.two}}", "2", tVal, true},
+	{"map .NO", "{{.MSI.NO}}", "", tVal, true}, // NOTE: <no value> in text/template
+	{"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+	{"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+	{"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+	// Dots of all kinds to test basic evaluation.
+	{"dot int", "<{{.}}>", "&lt;13>", 13, true},
+	{"dot uint", "<{{.}}>", "&lt;14>", uint(14), true},
+	{"dot float", "<{{.}}>", "&lt;15.1>", 15.1, true},
+	{"dot bool", "<{{.}}>", "&lt;true>", true, true},
+	{"dot complex", "<{{.}}>", "&lt;(16.2-17i)>", 16.2 - 17i, true},
+	{"dot string", "<{{.}}>", "&lt;hello>", "hello", true},
+	{"dot slice", "<{{.}}>", "&lt;[-1 -2 -3]>", []int{-1, -2, -3}, true},
+	{"dot map", "<{{.}}>", "&lt;map[two:22]>", map[string]int{"two": 22}, true},
+	{"dot struct", "<{{.}}>", "&lt;{7 seven}>", struct {
+		a int
+		b string
+	}{7, "seven"}, true},
+
+	// Variables.
+	{"$ int", "{{$}}", "123", 123, true},
+	{"$.I", "{{$.I}}", "17", tVal, true},
+	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
+	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+	{"simple assignment", "{{$x := 2}}{{$x = 3}}{{$x}}", "3", tVal, true},
+	{"nested assignment",
+		"{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{$x}}",
+		"3", tVal, true},
+	{"nested assignment changes the last declaration",
+		"{{$x := 1}}{{if true}}{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{end}}{{$x}}",
+		"1", tVal, true},
+
+	// Type with String method.
+	{"V{6666}.String()", "-{{.V0}}-", "-{6666}-", tVal, true}, //  NOTE: -<6666>- in text/template
+	{"&V{7777}.String()", "-{{.V1}}-", "-&lt;7777&gt;-", tVal, true},
+	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+	// Type with Error method.
+	{"W{888}.Error()", "-{{.W0}}-", "-{888}-", tVal, true}, // NOTE: -[888] in text/template
+	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+	// Pointers.
+	{"*int", "{{.PI}}", "23", tVal, true},
+	{"*string", "{{.PS}}", "a string", tVal, true},
+	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+	{"NIL", "{{.NIL}}", "&lt;nil&gt;", tVal, true},
+
+	// Empty interfaces holding values.
+	{"empty nil", "{{.Empty0}}", "", tVal, true}, // NOTE: <no value> in text/template
+	{"empty with int", "{{.Empty1}}", "3", tVal, true},
+	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+	// Edge cases with <no value> with an interface value
+	{"field on interface", "{{.foo}}", "", nil, true},                  // NOTE: <no value> in text/template
+	{"field on parenthesized interface", "{{(.).foo}}", "", nil, true}, // NOTE: <no value> in text/template
+
+	// Issue 31810: Parenthesized first element of pipeline with arguments.
+	// See also TestIssue31810.
+	{"unparenthesized non-function", "{{1 2}}", "", nil, false},
+	{"parenthesized non-function", "{{(1) 2}}", "", nil, false},
+	{"parenthesized non-function with no args", "{{(1)}}", "1", nil, true}, // This is fine.
+
+	// Method calls.
+	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: &lt;nil&gt;-", tVal, true},
+	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: &lt;nil&gt;-", tVal, true},
+	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{"method on chained var",
+		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method",
+		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method on variable",
+		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+		"true", tVal, true},
+	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
+	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
+	{"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
+	{"method on typed nil interface value", "{{.NonEmptyInterfaceTypedNil.Method0}}", "M0", tVal, true},
+
+	// Function call builtin.
+	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
+	{".VariadicFunc0", "{{call .VariadicFunc}}", "&lt;&gt;", tVal, true},
+	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "&lt;he&#43;llo&gt;", tVal, true},
+	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=&lt;he&#43;llo&gt;", tVal, true},
+	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
+	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
+	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
+	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
+	{"call nil", "{{call nil}}", "", tVal, false},
+
+	// Erroneous function calls (check args).
+	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
+	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
+	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
+	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
+	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
+	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
+	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
+	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
+
+	// Pipelines.
+	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-&lt;he&#43;&lt;llo&gt;&gt;-", tVal, true},
+
+	// Nil values aren't missing arguments.
+	{"nil pipeline", "{{ .Empty0 | call .NilOKFunc }}", "true", tVal, true},
+	{"nil call arg", "{{ call .NilOKFunc .Empty0 }}", "true", tVal, true},
+	{"bad nil pipeline", "{{ .Empty0 | .VariadicFunc }}", "", tVal, false},
+
+	// Parenthesized expressions
+	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
+
+	// Parenthesized expressions with field accesses
+	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
+	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
+	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
+	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
+
+	// If.
+	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
+	{"if on typed nil interface value", "{{if .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
+	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
+
+	// Print etc.
+	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+	{"print nil", `{{print nil}}`, "&lt;nil&gt;", tVal, true},
+	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+	{"printf complex", `{{printf "%g" 1+7i}}`, "(1&#43;7i)", tVal, true},
+	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+	// HTML.
+	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html", `{{html .PS}}`, "a string", tVal, true},
+	{"html typed nil", `{{html .NIL}}`, "&lt;nil&gt;", tVal, true},
+	{"html untyped nil", `{{html .Empty0}}`, "&lt;nil&gt;", tVal, true}, // NOTE: "&lt;no value&gt;" in text/template
+
+	// JavaScript.
+	{"js", `{{js .}}`, `It\&#39;d be nice.`, `It'd be nice.`, true},
+
+	// URL query.
+	{"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+	// Booleans
+	{"not", "{{not true}} {{not false}}", "false true", nil, true},
+	{"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+	{"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+	{"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+	{"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+	// Indexing.
+	{"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+	{"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+	{"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+	{"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+	{"slice[nil]", "{{index .SI nil}}", "", tVal, false},
+	{"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+	{"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+	{"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
+	{"map[nil]", "{{index .MSI nil}}", "", tVal, false},
+	{"map[``]", "{{index .MSI ``}}", "0", tVal, true},
+	{"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+	{"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+	{"nil[1]", "{{index nil 1}}", "", tVal, false},
+	{"map MI64S", "{{index .MI64S 2}}", "i642", tVal, true},
+	{"map MI32S", "{{index .MI32S 2}}", "two", tVal, true},
+	{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
+	{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
+	{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
+	{"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
+
+	// Slicing.
+	{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
+	{"slice[1:]", "{{slice .SI 1}}", "[4 5]", tVal, true},
+	{"slice[1:2]", "{{slice .SI 1 2}}", "[4]", tVal, true},
+	{"slice[-1:]", "{{slice .SI -1}}", "", tVal, false},
+	{"slice[1:-2]", "{{slice .SI 1 -2}}", "", tVal, false},
+	{"slice[1:2:-1]", "{{slice .SI 1 2 -1}}", "", tVal, false},
+	{"slice[2:1]", "{{slice .SI 2 1}}", "", tVal, false},
+	{"slice[2:2:1]", "{{slice .SI 2 2 1}}", "", tVal, false},
+	{"out of range", "{{slice .SI 4 5}}", "", tVal, false},
+	{"out of range", "{{slice .SI 2 2 5}}", "", tVal, false},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10}}", "[0 0 0 0]", tVal, true},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10 10}}", "[0 0 0 0]", tVal, true},
+	{"indexes > cap(s)", "{{slice .SICap 10 11}}", "", tVal, false},
+	{"indexes > cap(s)", "{{slice .SICap 6 10 11}}", "", tVal, false},
+	{"array[:]", "{{slice .AI}}", "[3 4 5]", tVal, true},
+	{"array[1:]", "{{slice .AI 1}}", "[4 5]", tVal, true},
+	{"array[1:2]", "{{slice .AI 1 2}}", "[4]", tVal, true},
+	{"string[:]", "{{slice .S}}", "xyz", tVal, true},
+	{"string[0:1]", "{{slice .S 0 1}}", "x", tVal, true},
+	{"string[1:]", "{{slice .S 1}}", "yz", tVal, true},
+	{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
+	{"out of range", "{{slice .S 1 5}}", "", tVal, false},
+	{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
+	{"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
+
+	// Len.
+	{"slice", "{{len .SI}}", "3", tVal, true},
+	{"map", "{{len .MSI }}", "3", tVal, true},
+	{"len of int", "{{len 3}}", "", tVal, false},
+	{"len of nothing", "{{len .Empty0}}", "", tVal, false},
+	{"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
+
+	// With.
+	{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+	{"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+	{"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+	{"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0&#43;1.5i)", tVal, true},
+	{"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+	{"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+	{"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+	{"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+	{"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+	{"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+	{"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+	{"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+
+	// Range.
+	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
+	{"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "&lt;3>&lt;4>&lt;5>", tVal, true},
+	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "&lt;0=3>&lt;1=4>&lt;2=5>", tVal, true},
+	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "&lt;1>", tVal, true},
+	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "&lt;one=1>", tVal, true},
+	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
+	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
+	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+	// Cute examples.
+	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+	// Error handling.
+	{"error method, error", "{{.MyError true}}", "", tVal, false},
+	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
+
+	// Numbers
+	{"decimal", "{{print 1234}}", "1234", tVal, true},
+	{"decimal _", "{{print 12_34}}", "1234", tVal, true},
+	{"binary", "{{print 0b101}}", "5", tVal, true},
+	{"binary _", "{{print 0b_1_0_1}}", "5", tVal, true},
+	{"BINARY", "{{print 0B101}}", "5", tVal, true},
+	{"octal0", "{{print 0377}}", "255", tVal, true},
+	{"octal", "{{print 0o377}}", "255", tVal, true},
+	{"octal _", "{{print 0o_3_7_7}}", "255", tVal, true},
+	{"OCTAL", "{{print 0O377}}", "255", tVal, true},
+	{"hex", "{{print 0x123}}", "291", tVal, true},
+	{"hex _", "{{print 0x1_23}}", "291", tVal, true},
+	{"HEX", "{{print 0X123ABC}}", "1194684", tVal, true},
+	{"float", "{{print 123.4}}", "123.4", tVal, true},
+	{"float _", "{{print 0_0_1_2_3.4}}", "123.4", tVal, true},
+	{"hex float", "{{print +0x1.ep+2}}", "7.5", tVal, true},
+	{"hex float _", "{{print +0x_1.e_0p+0_2}}", "7.5", tVal, true},
+	{"HEX float", "{{print +0X1.EP+2}}", "7.5", tVal, true},
+	{"print multi", "{{print 1_2_3_4 7.5_00_00_00}}", "1234 7.5", tVal, true},
+	{"print multi2", "{{print 1234 0x0_1.e_0p+02}}", "1234 7.5", tVal, true},
+
+	// Fixed bugs.
+	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+	// Do not loop endlessly in indirect for non-empty interfaces.
+	// The bug appears with *interface only; looped forever.
+	{"bug1", "{{.Method0}}", "M0", &iVal, true},
+	// Was taking address of interface field, so method set was empty.
+	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+	// Struct values were not legal in with - mere oversight.
+	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+	// Nil interface values in if.
+	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+	// Stringer.
+	{"bug5", "{{.Str}}", "foozle", tVal, true},
+	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
+	// Args need to be indirected and dereferenced sometimes.
+	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+	// Legal parse but illegal execution: non-function should have no arguments.
+	{"bug7a", "{{3 2}}", "", tVal, false},
+	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
+	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
+	// Pipelined arg was not being type-checked.
+	{"bug8a", "{{3|oneArg}}", "", tVal, false},
+	{"bug8b", "{{4|dddArg 3}}", "", tVal, false},
+	// A bug was introduced that broke map lookups for lower-case names.
+	{"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
+	// Field chain starting with function did not work.
+	{"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
+	// Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
+	{"bug11", "{{valueString .PS}}", "", T{}, false},
+	// 0xef gave constant type float64. Issue 8622.
+	{"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
+	{"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
+	{"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
+	{"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
+	// Chained nodes did not work as arguments. Issue 8473.
+	{"bug13", "{{print (.Copy).I}}", "17", tVal, true},
+	// Didn't protect against nil or literal values in field chains.
+	{"bug14a", "{{(nil).True}}", "", tVal, false},
+	{"bug14b", "{{$x := nil}}{{$x.anything}}", "", tVal, false},
+	{"bug14c", `{{$x := (1.0)}}{{$y := ("hello")}}{{$x.anything}}{{$y.true}}`, "", tVal, false},
+	// Didn't call validateType on function results. Issue 10800.
+	{"bug15", "{{valueString returnInt}}", "", tVal, false},
+	// Variadic function corner cases. Issue 10946.
+	{"bug16a", "{{true|printf}}", "", tVal, false},
+	{"bug16b", "{{1|printf}}", "", tVal, false},
+	{"bug16c", "{{1.1|printf}}", "", tVal, false},
+	{"bug16d", "{{'x'|printf}}", "", tVal, false},
+	{"bug16e", "{{0i|printf}}", "", tVal, false},
+	{"bug16f", "{{true|twoArgs \"xxx\"}}", "", tVal, false},
+	{"bug16g", "{{\"aaa\" |twoArgs \"bbb\"}}", "twoArgs=bbbaaa", tVal, true},
+	{"bug16h", "{{1|oneArg}}", "", tVal, false},
+	{"bug16i", "{{\"aaa\"|oneArg}}", "oneArg=aaa", tVal, true},
+	{"bug16j", "{{1+2i|printf \"%v\"}}", "(1&#43;2i)", tVal, true},
+	{"bug16k", "{{\"aaa\"|printf }}", "aaa", tVal, true},
+	{"bug17a", "{{.NonEmptyInterface.X}}", "x", tVal, true},
+	{"bug17b", "-{{.NonEmptyInterface.Method1 1234}}-", "-1234-", tVal, true},
+	{"bug17c", "{{len .NonEmptyInterfacePtS}}", "2", tVal, true},
+	{"bug17d", "{{index .NonEmptyInterfacePtS 0}}", "a", tVal, true},
+	{"bug17e", "{{range .NonEmptyInterfacePtS}}-{{.}}-{{end}}", "-a--b-", tVal, true},
+
+	// More variadic function corner cases. Some runes would get evaluated
+	// as constant floats instead of ints. Issue 34483.
+	{"bug18a", "{{eq . '.'}}", "true", '.', true},
+	{"bug18b", "{{eq . 'e'}}", "true", 'e', true},
+	{"bug18c", "{{eq . 'P'}}", "true", 'P', true},
+}
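+
+// The helper functions referenced by the templates above (add, count, dddArg,
+// echo, makemap, mapOfThree, oneArg, returnInt, stringer, twoArgs, typeOf,
+// valueString, vfunc, and zeroArgs) are ordinary Go functions defined in this
+// file and registered in testExecute's FuncMap.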
+
+func zeroArgs() string {
+	return "zeroArgs"
+}
+
+func oneArg(a string) string {
+	return "oneArg=" + a
+}
+
+func twoArgs(a, b string) string {
+	return "twoArgs=" + a + b
+}
+
+func dddArg(a int, b ...string) string {
+	return fmt.Sprintln(a, b)
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
+func count(n int) chan string {
+	if n == 0 {
+		return nil
+	}
+	c := make(chan string)
+	go func() {
+		for i := 0; i < n; i++ {
+			c <- "abcdefghijklmnop"[i : i+1]
+		}
+		close(c)
+	}()
+	return c
+}
+
+// vfunc takes a V and a *V.
+func vfunc(V, *V) string {
+	return "vfunc"
+}
+
+// valueString takes a string, not a pointer.
+func valueString(v string) string {
+	return "value is ignored"
+}
+
+// returnInt returns an int
+func returnInt() int {
+	return 7
+}
+
+func add(args ...int) int {
+	sum := 0
+	for _, x := range args {
+		sum += x
+	}
+	return sum
+}
+
+func echo(arg interface{}) interface{} {
+	return arg
+}
+
+func makemap(arg ...string) map[string]string {
+	if len(arg)%2 != 0 {
+		panic("bad makemap")
+	}
+	m := make(map[string]string)
+	for i := 0; i < len(arg); i += 2 {
+		m[arg[i]] = arg[i+1]
+	}
+	return m
+}
+
+func stringer(s fmt.Stringer) string {
+	return s.String()
+}
+
+func mapOfThree() interface{} {
+	return map[string]int{"three": 3}
+}
+
+func testExecute(execTests []execTest, template *Template, t *testing.T) {
+	b := new(bytes.Buffer)
+	funcs := FuncMap{
+		"add":         add,
+		"count":       count,
+		"dddArg":      dddArg,
+		"echo":        echo,
+		"makemap":     makemap,
+		"mapOfThree":  mapOfThree,
+		"oneArg":      oneArg,
+		"returnInt":   returnInt,
+		"stringer":    stringer,
+		"twoArgs":     twoArgs,
+		"typeOf":      typeOf,
+		"valueString": valueString,
+		"vfunc":       vfunc,
+		"zeroArgs":    zeroArgs,
+	}
+	for _, test := range execTests {
+		var tmpl *Template
+		var err error
+		if template == nil {
+			tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
+		} else {
+			tmpl, err = template.Clone()
+			if err != nil {
+				t.Errorf("%s: clone error: %s", test.name, err)
+				continue
+			}
+			tmpl, err = tmpl.New(test.name).Funcs(funcs).Parse(test.input)
+		}
+		if err != nil {
+			t.Errorf("%s: parse error: %s", test.name, err)
+			continue
+		}
+		b.Reset()
+		err = tmpl.Execute(b, test.data)
+		switch {
+		case !test.ok && err == nil:
+			t.Errorf("%s: expected error; got none", test.name)
+			continue
+		case test.ok && err != nil:
+			t.Errorf("%s: unexpected execute error: %s", test.name, err)
+			continue
+		case !test.ok && err != nil:
+			// expected error, got one
+			if *debug {
+				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+			}
+		}
+		result := b.String()
+		if result != test.output {
+			t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+		}
+	}
+}
+
+func TestExecute(t *testing.T) {
+	testExecute(execTests, nil, t)
+}
+
+var delimPairs = []string{
+	"", "", // default
+	"{{", "}}", // same as default
+	"|", "|", // same
+	"(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+	const hello = "Hello, world"
+	var value = struct{ Str string }{hello}
+	for i := 0; i < len(delimPairs); i += 2 {
+		text := ".Str"
+		left := delimPairs[i+0]
+		trueLeft := left
+		right := delimPairs[i+1]
+		trueRight := right
+		if left == "" { // default case
+			trueLeft = "{{"
+		}
+		if right == "" { // default case
+			trueRight = "}}"
+		}
+		text = trueLeft + text + trueRight
+		// Now add a comment
+		text += trueLeft + "/*comment*/" + trueRight
+		// Now add an action containing a string.
+		text += trueLeft + `"` + trueLeft + `"` + trueRight
+		// At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+		tmpl, err := New("delims").Delims(left, right).Parse(text)
+		if err != nil {
+			t.Fatalf("delim %q text %q parse err %s", left, text, err)
+		}
+		var b = new(bytes.Buffer)
+		err = tmpl.Execute(b, value)
+		if err != nil {
+			t.Fatalf("delim %q exec err %s", left, err)
+		}
+		if b.String() != hello+trueLeft {
+			t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+		}
+	}
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+	b := new(bytes.Buffer)
+	tmpl := New("error")
+	_, err := tmpl.Parse("{{.MyError true}}")
+	if err != nil {
+		t.Fatalf("parse error: %s", err)
+	}
+	err = tmpl.Execute(b, tVal)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	} else if !strings.Contains(err.Error(), myError.Error()) {
+		if *debug {
+			fmt.Printf("test execute error: %s\n", err)
+		}
+		t.Errorf("expected myError; got %s", err)
+	}
+}
+
+const execErrorText = `line 1
+line 2
+line 3
+{{template "one" .}}
+{{define "one"}}{{template "two" .}}{{end}}
+{{define "two"}}{{template "three" .}}{{end}}
+{{define "three"}}{{index "hi" $}}{{end}}`
+
+// Check that an error from a nested template contains all the relevant information.
+func TestExecError(t *testing.T) {
+	tmpl, err := New("top").Parse(execErrorText)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
+	got := err.Error()
+	if got != want {
+		t.Errorf("expected\n%q\ngot\n%q", want, got)
+	}
+}
+
+func TestJSEscaping(t *testing.T) {
+	testCases := []struct {
+		in, exp string
+	}{
+		{`a`, `a`},
+		{`'foo`, `\'foo`},
+		{`Go "jump" \`, `Go \"jump\" \\`},
+		{`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+		{"unprintable \uFDFF", `unprintable \uFDFF`},
+		{`<html>`, `\u003Chtml\u003E`},
+		{`no = in attributes`, `no \u003D in attributes`},
+		{`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
+	}
+	for _, tc := range testCases {
+		s := JSEscapeString(tc.in)
+		if s != tc.exp {
+			t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+		}
+	}
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+	Val         int
+	Left, Right *Tree
+}
+
+// Use different delimiters to test Template.Delims.
+// Also test the trimming of leading and trailing spaces.
+const treeTemplate = `
+	(- define "tree" -)
+	[
+		(- .Val -)
+		(- with .Left -)
+			(template "tree" . -)
+		(- end -)
+		(- with .Right -)
+			(- template "tree" . -)
+		(- end -)
+	]
+	(- end -)
+`
+
+func TestTree(t *testing.T) {
+	var tree = &Tree{
+		1,
+		&Tree{
+			2, &Tree{
+				3,
+				&Tree{
+					4, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				5,
+				&Tree{
+					6, nil, nil,
+				},
+				nil,
+			},
+		},
+		&Tree{
+			7,
+			&Tree{
+				8,
+				&Tree{
+					9, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				10,
+				&Tree{
+					11, nil, nil,
+				},
+				nil,
+			},
+		},
+	}
+	tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+	// First by looking up the template.
+	err = tmpl.Lookup("tree").Execute(&b, tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result := b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+	// Then direct to execution.
+	b.Reset()
+	err = tmpl.ExecuteTemplate(&b, "tree", tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result = b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+}
+
+func TestExecuteOnNewTemplate(t *testing.T) {
+	// This is issue 3872.
+	New("Name").Templates()
+	// This is issue 11379.
+	// new(Template).Templates() // TODO: crashes
+	// new(Template).Parse("") // TODO: crashes
+	// new(Template).New("abc").Parse("") // TODO: crashes
+	// new(Template).Execute(nil, nil)                // TODO: crashes; returns an error (but does not crash)
+	// new(Template).ExecuteTemplate(nil, "XXX", nil) // TODO: crashes; returns an error (but does not crash)
+}
+
+const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
+
+func TestMessageForExecuteEmpty(t *testing.T) {
+	// Test a truly empty template.
+	tmpl := New("empty")
+	var b bytes.Buffer
+	err := tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected initial error")
+	}
+	got := err.Error()
+	want := `template: "empty" is an incomplete or empty template` // NOTE: text/template has extra "empty: " in message
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+
+	// Add a non-empty template to check that the error is helpful.
+	tmpl = New("empty")
+	tests, err := New("").Parse(testTemplates)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl.AddParseTree("secondary", tests.Tree)
+	err = tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected second error")
+	}
+	got = err.Error()
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+	// Make sure we can execute the secondary.
+	err = tmpl.ExecuteTemplate(&b, "secondary", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFinalForPrintf(t *testing.T) {
+	tmpl, err := New("").Parse(`{{"x" | printf}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+type cmpTest struct {
+	expr  string
+	truth string
+	ok    bool
+}
+
+var cmpTests = []cmpTest{
+	{"eq true true", "true", true},
+	{"eq true false", "false", true},
+	{"eq 1+2i 1+2i", "true", true},
+	{"eq 1+2i 1+3i", "false", true},
+	{"eq 1.5 1.5", "true", true},
+	{"eq 1.5 2.5", "false", true},
+	{"eq 1 1", "true", true},
+	{"eq 1 2", "false", true},
+	{"eq `xy` `xy`", "true", true},
+	{"eq `xy` `xyz`", "false", true},
+	{"eq .Uthree .Uthree", "true", true},
+	{"eq .Uthree .Ufour", "false", true},
+	{"eq 3 4 5 6 3", "true", true},
+	{"eq 3 4 5 6 7", "false", true},
+	{"ne true true", "false", true},
+	{"ne true false", "true", true},
+	{"ne 1+2i 1+2i", "false", true},
+	{"ne 1+2i 1+3i", "true", true},
+	{"ne 1.5 1.5", "false", true},
+	{"ne 1.5 2.5", "true", true},
+	{"ne 1 1", "false", true},
+	{"ne 1 2", "true", true},
+	{"ne `xy` `xy`", "false", true},
+	{"ne `xy` `xyz`", "true", true},
+	{"ne .Uthree .Uthree", "false", true},
+	{"ne .Uthree .Ufour", "true", true},
+	{"lt 1.5 1.5", "false", true},
+	{"lt 1.5 2.5", "true", true},
+	{"lt 1 1", "false", true},
+	{"lt 1 2", "true", true},
+	{"lt `xy` `xy`", "false", true},
+	{"lt `xy` `xyz`", "true", true},
+	{"lt .Uthree .Uthree", "false", true},
+	{"lt .Uthree .Ufour", "true", true},
+	{"le 1.5 1.5", "true", true},
+	{"le 1.5 2.5", "true", true},
+	{"le 2.5 1.5", "false", true},
+	{"le 1 1", "true", true},
+	{"le 1 2", "true", true},
+	{"le 2 1", "false", true},
+	{"le `xy` `xy`", "true", true},
+	{"le `xy` `xyz`", "true", true},
+	{"le `xyz` `xy`", "false", true},
+	{"le .Uthree .Uthree", "true", true},
+	{"le .Uthree .Ufour", "true", true},
+	{"le .Ufour .Uthree", "false", true},
+	{"gt 1.5 1.5", "false", true},
+	{"gt 1.5 2.5", "false", true},
+	{"gt 1 1", "false", true},
+	{"gt 2 1", "true", true},
+	{"gt 1 2", "false", true},
+	{"gt `xy` `xy`", "false", true},
+	{"gt `xy` `xyz`", "false", true},
+	{"gt .Uthree .Uthree", "false", true},
+	{"gt .Uthree .Ufour", "false", true},
+	{"gt .Ufour .Uthree", "true", true},
+	{"ge 1.5 1.5", "true", true},
+	{"ge 1.5 2.5", "false", true},
+	{"ge 2.5 1.5", "true", true},
+	{"ge 1 1", "true", true},
+	{"ge 1 2", "false", true},
+	{"ge 2 1", "true", true},
+	{"ge `xy` `xy`", "true", true},
+	{"ge `xy` `xyz`", "false", true},
+	{"ge `xyz` `xy`", "true", true},
+	{"ge .Uthree .Uthree", "true", true},
+	{"ge .Uthree .Ufour", "false", true},
+	{"ge .Ufour .Uthree", "true", true},
+	// Mixing signed and unsigned integers.
+	{"eq .Uthree .Three", "true", true},
+	{"eq .Three .Uthree", "true", true},
+	{"le .Uthree .Three", "true", true},
+	{"le .Three .Uthree", "true", true},
+	{"ge .Uthree .Three", "true", true},
+	{"ge .Three .Uthree", "true", true},
+	{"lt .Uthree .Three", "false", true},
+	{"lt .Three .Uthree", "false", true},
+	{"gt .Uthree .Three", "false", true},
+	{"gt .Three .Uthree", "false", true},
+	{"eq .Ufour .Three", "false", true},
+	{"lt .Ufour .Three", "false", true},
+	{"gt .Ufour .Three", "true", true},
+	{"eq .NegOne .Uthree", "false", true},
+	{"eq .Uthree .NegOne", "false", true},
+	{"ne .NegOne .Uthree", "true", true},
+	{"ne .Uthree .NegOne", "true", true},
+	{"lt .NegOne .Uthree", "true", true},
+	{"lt .Uthree .NegOne", "false", true},
+	{"le .NegOne .Uthree", "true", true},
+	{"le .Uthree .NegOne", "false", true},
+	{"gt .NegOne .Uthree", "false", true},
+	{"gt .Uthree .NegOne", "true", true},
+	{"ge .NegOne .Uthree", "false", true},
+	{"ge .Uthree .NegOne", "true", true},
+	{"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule.
+	{"eq (index `x` 0) 'y'", "false", true},
+	{"eq .V1 .V2", "true", true},
+	{"eq .Ptr .Ptr", "true", true},
+	{"eq .Ptr .NilPtr", "false", true},
+	{"eq .NilPtr .NilPtr", "true", true},
+	{"eq .Iface1 .Iface1", "true", true},
+	{"eq .Iface1 .Iface2", "false", true},
+	{"eq .Iface2 .Iface2", "true", true},
+	// Errors
+	{"eq `xy` 1", "", false},       // Different types.
+	{"eq 2 2.0", "", false},        // Different types.
+	{"lt true true", "", false},    // Unordered types.
+	{"lt 1+0i 1+0i", "", false},    // Unordered types.
+	{"eq .Ptr 1", "", false},       // Incompatible types.
+	{"eq .Ptr .NegOne", "", false}, // Incompatible types.
+	{"eq .Map .Map", "", false},    // Uncomparable types.
+	{"eq .Map .V1", "", false},     // Uncomparable types.
+}
+
+func TestComparison(t *testing.T) {
+	b := new(bytes.Buffer)
+	var cmpStruct = struct {
+		Uthree, Ufour  uint
+		NegOne, Three  int
+		Ptr, NilPtr    *int
+		Map            map[int]int
+		V1, V2         V
+		Iface1, Iface2 fmt.Stringer
+	}{
+		Uthree: 3,
+		Ufour:  4,
+		NegOne: -1,
+		Three:  3,
+		Ptr:    new(int),
+		Iface1: b,
+	}
+	for _, test := range cmpTests {
+		text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
+		tmpl, err := New("empty").Parse(text)
+		if err != nil {
+			t.Fatalf("%q: %s", test.expr, err)
+		}
+		b.Reset()
+		err = tmpl.Execute(b, &cmpStruct)
+		if test.ok && err != nil {
+			t.Errorf("%s errored incorrectly: %s", test.expr, err)
+			continue
+		}
+		if !test.ok && err == nil {
+			t.Errorf("%s did not error", test.expr)
+			continue
+		}
+		if b.String() != test.truth {
+			t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
+		}
+	}
+}
+
+func TestMissingMapKey(t *testing.T) {
+	data := map[string]int{
+		"x": 99,
+	}
+	tmpl, err := New("t1").Parse("{{.x}} {{.y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	// By default, a missing key just renders as the empty string. // NOTE: text/template renders "<no value>" here
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := "99 "
+	got := b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Same if we set the option explicitly to the default.
+	tmpl.Option("missingkey=default")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("default:", err)
+	}
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Next we ask for a zero value
+	tmpl.Option("missingkey=zero")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("zero:", err)
+	}
+	want = "99 0"
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Now we ask for an error.
+	tmpl.Option("missingkey=error")
+	err = tmpl.Execute(&b, data)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	}
+	// same Option, but now a nil interface: ask for an error
+	err = tmpl.Execute(&b, nil)
+	t.Log(err)
+	if err == nil {
+		t.Errorf("expected error for nil-interface; got none")
+	}
+}
+
+// Test that the error message for a multiline unterminated string
+// refers to the line number of the opening quote.
+func TestUnterminatedStringError(t *testing.T) {
+	_, err := New("X").Parse("hello\n\n{{`unterminated\n\n\n\n}}\n some more\n\n")
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	str := err.Error()
+	if !strings.Contains(str, "X:3: unterminated raw quoted string") {
+		t.Fatalf("unexpected error: %s", str)
+	}
+}
+
+const alwaysErrorText = "always be failing"
+
+var alwaysError = errors.New(alwaysErrorText)
+
+type ErrorWriter int
+
+func (e ErrorWriter) Write(p []byte) (int, error) {
+	return 0, alwaysError
+}
+
+func TestExecuteGivesExecError(t *testing.T) {
+	// First, a non-execution error shouldn't be an ExecError.
+	tmpl, err := New("X").Parse("hello")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ErrorWriter(0), 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	if err.Error() != alwaysErrorText {
+		t.Errorf("expected %q error; got %q", alwaysErrorText, err)
+	}
+	// This one should be an ExecError.
+	tmpl, err = New("X").Parse("hello, {{.X.Y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ioutil.Discard, 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	if _, ok := err.(template.ExecError); !ok {
+		t.Fatalf("expected an ExecError, got %T: %s", err, err)
+	}
+	expect := "field X in type int"
+	if !strings.Contains(err.Error(), expect) {
+		t.Errorf("expected %q; got %q", expect, err)
+	}
+}
+
+func funcNameTestFunc() int {
+	return 0
+}
+
+func TestGoodFuncNames(t *testing.T) {
+	names := []string{
+		"_",
+		"a",
+		"a1",
+		"a1",
+		"Ӵ",
+	}
+	for _, name := range names {
+		tmpl := New("X").Funcs(
+			FuncMap{
+				name: funcNameTestFunc,
+			},
+		)
+		if tmpl == nil {
+			t.Fatalf("nil result for %q", name)
+		}
+	}
+}
+
+func TestBadFuncNames(t *testing.T) {
+	names := []string{
+		"",
+		"2",
+		"a-b",
+	}
+	for _, name := range names {
+		testBadFuncName(name, t)
+	}
+}
+
+func testBadFuncName(name string, t *testing.T) {
+	t.Helper()
+	defer func() {
+		recover()
+	}()
+	New("X").Funcs(
+		FuncMap{
+			name: funcNameTestFunc,
+		},
+	)
+	// If we get here, the name did not cause a panic, which is how Funcs
+	// reports an error.
+	t.Errorf("%q succeeded incorrectly as function name", name)
+}
+
+func TestBlock(t *testing.T) {
+	const (
+		input   = `a({{block "inner" .}}bar({{.}})baz{{end}})b`
+		want    = `a(bar(hello)baz)b`
+		overlay = `{{define "inner"}}foo({{.}})bar{{end}}`
+		want2   = `a(foo(goodbye)bar)b`
+	)
+	tmpl, err := New("outer").Parse(input)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl2, err := Must(tmpl.Clone()).Parse(overlay)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, "hello"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+
+	buf.Reset()
+	if err := tmpl2.Execute(&buf, "goodbye"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want2 {
+		t.Errorf("got %q, want %q", got, want2)
+	}
+}
+
+func TestEvalFieldErrors(t *testing.T) {
+	tests := []struct {
+		name, src string
+		value     interface{}
+		want      string
+	}{
+		{
+			// Check that calling an invalid field on a nil pointer
+			// prints a field error instead of a distracting nil
+			// pointer error. https://golang.org/issue/15125
+			"MissingFieldOnNil",
+			"{{.MissingField}}",
+			(*T)(nil),
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"MissingFieldOnNonNil",
+			"{{.MissingField}}",
+			&T{},
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"ExistingFieldOnNil",
+			"{{.X}}",
+			(*T)(nil),
+			"nil pointer evaluating *template.T.X",
+		},
+		{
+			"MissingKeyOnNilMap",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnNilMapPtr",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnMapPtrToNil",
+			"{{.MissingKey}}",
+			&map[string]string{},
+			"<nil>",
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tmpl := Must(New("tmpl").Parse(tc.src))
+			err := tmpl.Execute(ioutil.Discard, tc.value)
+			got := "<nil>"
+			if err != nil {
+				got = err.Error()
+			}
+			if !strings.HasSuffix(got, tc.want) {
+				t.Fatalf("got error %q, want %q", got, tc.want)
+			}
+		})
+	}
+}
+
+func TestMaxExecDepth(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in -short mode")
+	}
+	tmpl := Must(New("tmpl").Parse(`{{template "tmpl" .}}`))
+	err := tmpl.Execute(ioutil.Discard, nil)
+	got := "<nil>"
+	if err != nil {
+		got = err.Error()
+	}
+	const want = "exceeded maximum template depth"
+	if !strings.Contains(got, want) {
+		t.Errorf("got error %q; want %q", got, want)
+	}
+}
+
+func TestAddrOfIndex(t *testing.T) {
+	// golang.org/issue/14916.
+	// Before index worked on reflect.Values, the .String could not be
+	// found on the (incorrectly unaddressable) V value,
+	// in contrast to range, which worked fine.
+	// Also testing that passing a reflect.Value to tmpl.Execute works.
+	texts := []string{
+		`{{range .}}{{.String}}{{end}}`,
+		`{{with index . 0}}{{.String}}{{end}}`,
+	}
+	for _, text := range texts {
+		tmpl := Must(New("tmpl").Parse(text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, reflect.ValueOf([]V{{1}}))
+		if err != nil {
+			t.Fatalf("%s: Execute: %v", text, err)
+		}
+		if buf.String() != "&lt;1&gt;" {
+			t.Fatalf("%s: template output = %q, want %q", text, &buf, "&lt;1&gt;")
+		}
+	}
+}
+
+func TestInterfaceValues(t *testing.T) {
+	// golang.org/issue/17714.
+	// Before index worked on reflect.Values, interface values
+	// were always implicitly promoted to the underlying value,
+	// except that nil interfaces were promoted to the zero reflect.Value.
+	// Eliminating a round trip to interface{} and back to reflect.Value
+	// eliminated this promotion, breaking these cases.
+	tests := []struct {
+		text string
+		out  string
+	}{
+		{`{{index .Nil 1}}`, "ERROR: index of untyped nil"},
+		{`{{index .Slice 2}}`, "2"},
+		{`{{index .Slice .Two}}`, "2"},
+		{`{{call .Nil 1}}`, "ERROR: call of nil"},
+		{`{{call .PlusOne 1}}`, "2"},
+		{`{{call .PlusOne .One}}`, "2"},
+		{`{{and (index .Slice 0) true}}`, "0"},
+		{`{{and .Zero true}}`, "0"},
+		{`{{and (index .Slice 1) false}}`, "false"},
+		{`{{and .One false}}`, "false"},
+		{`{{or (index .Slice 0) false}}`, "false"},
+		{`{{or .Zero false}}`, "false"},
+		{`{{or (index .Slice 1) true}}`, "1"},
+		{`{{or .One true}}`, "1"},
+		{`{{not (index .Slice 0)}}`, "true"},
+		{`{{not .Zero}}`, "true"},
+		{`{{not (index .Slice 1)}}`, "false"},
+		{`{{not .One}}`, "false"},
+		{`{{eq (index .Slice 0) .Zero}}`, "true"},
+		{`{{eq (index .Slice 1) .One}}`, "true"},
+		{`{{ne (index .Slice 0) .Zero}}`, "false"},
+		{`{{ne (index .Slice 1) .One}}`, "false"},
+		{`{{ge (index .Slice 0) .One}}`, "false"},
+		{`{{ge (index .Slice 1) .Zero}}`, "true"},
+		{`{{gt (index .Slice 0) .One}}`, "false"},
+		{`{{gt (index .Slice 1) .Zero}}`, "true"},
+		{`{{le (index .Slice 0) .One}}`, "true"},
+		{`{{le (index .Slice 1) .Zero}}`, "false"},
+		{`{{lt (index .Slice 0) .One}}`, "true"},
+		{`{{lt (index .Slice 1) .Zero}}`, "false"},
+	}
+
+	for _, tt := range tests {
+		tmpl := Must(New("tmpl").Parse(tt.text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, map[string]interface{}{
+			"PlusOne": func(n int) int {
+				return n + 1
+			},
+			"Slice": []int{0, 1, 2, 3},
+			"One":   1,
+			"Two":   2,
+			"Nil":   nil,
+			"Zero":  0,
+		})
+		if strings.HasPrefix(tt.out, "ERROR:") {
+			e := strings.TrimSpace(strings.TrimPrefix(tt.out, "ERROR:"))
+			if err == nil || !strings.Contains(err.Error(), e) {
+				t.Errorf("%s: Execute: %v, want error %q", tt.text, err, e)
+			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("%s: Execute: %v", tt.text, err)
+			continue
+		}
+		if buf.String() != tt.out {
+			t.Errorf("%s: template output = %q, want %q", tt.text, &buf, tt.out)
+		}
+	}
+}
+
+// Check that panics during calls are recovered and returned as errors.
+func TestExecutePanicDuringCall(t *testing.T) {
+	funcs := map[string]interface{}{
+		"doPanic": func() string {
+			panic("custom panic string")
+		},
+	}
+	tests := []struct {
+		name    string
+		input   string
+		data    interface{}
+		wantErr string
+	}{
+		{
+			"direct func call panics",
+			"{{doPanic}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"indirect func call panics",
+			"{{call doPanic}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"direct method call panics",
+			"{{.GetU}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"indirect method call panics",
+			"{{call .GetU}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"func field call panics",
+			"{{call .PanicFunc}}", tVal,
+			`template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
+		},
+		{
+			"method call on nil interface",
+			"{{.NonEmptyInterfaceNil.Method0}}", tVal,
+			`template: t:1:23: executing "t" at <.NonEmptyInterfaceNil.Method0>: nil pointer evaluating template.I.Method0`,
+		},
+	}
+	for _, tc := range tests {
+		b := new(bytes.Buffer)
+		tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
+		if err != nil {
+			t.Fatalf("parse error: %s", err)
+		}
+		err = tmpl.Execute(b, tc.data)
+		if err == nil {
+			t.Errorf("%s: expected error; got none", tc.name)
+		} else if !strings.Contains(err.Error(), tc.wantErr) {
+			if *debug {
+				fmt.Printf("%s: test execute error: %s\n", tc.name, err)
+			}
+			t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
+		}
+	}
+}
+
+// Issue 31810. Check that a parenthesized first argument behaves properly.
+func TestIssue31810(t *testing.T) {
+	t.Skip("broken in html/template")
+
+	// A simple value with no arguments is fine.
+	var b bytes.Buffer
+	const text = "{{ (.)  }}"
+	tmpl, err := New("").Parse(text)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tmpl.Execute(&b, "result")
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", text, b.String(), "result")
+	}
+
+	// Even a plain function fails - need to use call.
+	f := func() string { return "result" }
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err == nil {
+		t.Error("expected error with no call, got none")
+	}
+
+	// Works if the function is explicitly called.
+	const textCall = "{{ (call .)  }}"
+	tmpl, err = New("").Parse(textCall)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", textCall, b.String(), "result")
+	}
+}
+
+// Issue 39807. There was a race applying escapeTemplate.
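+// The test is most effective when run with the race detector enabled
+// (go test -race).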
+
+const raceText = `
+{{- define "jstempl" -}}
+var v = "v";
+{{- end -}}
+<script type="application/javascript">
+{{ template "jstempl" $ }}
+</script>
+`
+
+func TestEscapeRace(t *testing.T) {
+	tmpl := New("")
+	_, err := tmpl.New("templ.html").Parse(raceText)
+	if err != nil {
+		t.Fatal(err)
+	}
+	const count = 20
+	for i := 0; i < count; i++ {
+		_, err := tmpl.New(fmt.Sprintf("x%d.html", i)).Parse(`{{ template "templ.html" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < count; j++ {
+				sub := tmpl.Lookup(fmt.Sprintf("x%d.html", j))
+				if err := sub.Execute(ioutil.Discard, nil); err != nil {
+					t.Error(err)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestRecursiveExecute(t *testing.T) {
+	tmpl := New("")
+
+	recur := func() (HTML, error) {
+		var sb strings.Builder
+		if err := tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
+			t.Fatal(err)
+		}
+		return HTML(sb.String()), nil
+	}
+
+	m := FuncMap{
+		"recur": recur,
+	}
+
+	top, err := tmpl.New("x.html").Funcs(m).Parse(`{{recur}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := top.Execute(ioutil.Discard, nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// recursiveInvoker is for TestRecursiveExecuteViaMethod.
+type recursiveInvoker struct {
+	t    *testing.T
+	tmpl *Template
+}
+
+func (r *recursiveInvoker) Recur() (string, error) {
+	var sb strings.Builder
+	if err := r.tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
+		r.t.Fatal(err)
+	}
+	return sb.String(), nil
+}
+
+func TestRecursiveExecuteViaMethod(t *testing.T) {
+	tmpl := New("")
+	top, err := tmpl.New("x.html").Parse(`{{.Recur}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	r := &recursiveInvoker{
+		t:    t,
+		tmpl: tmpl,
+	}
+	if err := top.Execute(ioutil.Discard, r); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Issue 43295.
+func TestTemplateFuncsAfterClone(t *testing.T) {
+	s := `{{ f . }}`
+	want := "test"
+	orig := New("orig").Funcs(map[string]interface{}{
+		"f": func(in string) string {
+			return in
+		},
+	}).New("child")
+
+	overviewTmpl := Must(Must(orig.Clone()).Parse(s))
+	var out strings.Builder
+	if err := overviewTmpl.Execute(&out, want); err != nil {
+		t.Fatal(err)
+	}
+	if got := out.String(); got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
diff --git a/internal/backport/html/template/html.go b/internal/backport/html/template/html.go
new file mode 100644
index 0000000..356b829
--- /dev/null
+++ b/internal/backport/html/template/html.go
@@ -0,0 +1,265 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+// htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
+func htmlNospaceEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+	}
+	return htmlReplacer(s, htmlNospaceReplacementTable, false)
+}
+
+// attrEscaper escapes for inclusion in quoted attribute values.
+func attrEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(stripTags(s), htmlNormReplacementTable, true)
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// rcdataEscaper escapes for inclusion in an RCDATA element body.
+func rcdataEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(s, htmlNormReplacementTable, true)
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlEscaper escapes for inclusion in HTML text.
+func htmlEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return s
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlReplacementTable contains the runes that need to be escaped
+// inside a quoted attribute value or in a text node.
+var htmlReplacementTable = []string{
+	// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
+	// U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
+	// CHARACTER character to the current attribute's value.
+	// "
+	// and similarly
+	// https://www.w3.org/TR/html5/syntax.html#before-attribute-value-state
+	0:    "\uFFFD",
+	'"':  "&#34;",
+	'&':  "&amp;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'>':  "&gt;",
+}
+
+// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
+// avoid over-encoding existing entities.
+var htmlNormReplacementTable = []string{
+	0:    "\uFFFD",
+	'"':  "&#34;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'>':  "&gt;",
+}
+
+// htmlNospaceReplacementTable contains the runes that need to be escaped
+// inside an unquoted attribute value.
+// The set of runes escaped is the union of the HTML specials and
+// those determined by running the JS below in browsers:
+// <div id=d></div>
+// <script>(function () {
+// var a = [], d = document.getElementById("d"), i, c, s;
+// for (i = 0; i < 0x10000; ++i) {
+//   c = String.fromCharCode(i);
+//   d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
+//   s = d.getElementsByTagName("SPAN")[0];
+//   if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
+// }
+// document.write(a.join(", "));
+// })()</script>
+var htmlNospaceReplacementTable = []string{
+	0:    "&#xfffd;",
+	'\t': "&#9;",
+	'\n': "&#10;",
+	'\v': "&#11;",
+	'\f': "&#12;",
+	'\r': "&#13;",
+	' ':  "&#32;",
+	'"':  "&#34;",
+	'&':  "&amp;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'=':  "&#61;",
+	'>':  "&gt;",
+	// A parse error in the attribute value (unquoted) and
+	// before attribute value states.
+	// Treated as a quoting character by IE.
+	'`': "&#96;",
+}
+
+// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
+// without '&' to avoid over-encoding existing entities.
+var htmlNospaceNormReplacementTable = []string{
+	0:    "&#xfffd;",
+	'\t': "&#9;",
+	'\n': "&#10;",
+	'\v': "&#11;",
+	'\f': "&#12;",
+	'\r': "&#13;",
+	' ':  "&#32;",
+	'"':  "&#34;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'=':  "&#61;",
+	'>':  "&gt;",
+	// A parse error in the attribute value (unquoted) and
+	// before attribute value states.
+	// Treated as a quoting character by IE.
+	'`': "&#96;",
+}
+
+// htmlReplacer returns s with runes replaced according to replacementTable.
+// When badRunes is true, certain bad runes are allowed through unescaped.
+func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
+	written, b := 0, new(strings.Builder)
+	r, w := rune(0), 0
+	for i := 0; i < len(s); i += w {
+		// Cannot use 'for range s' because we need to preserve the width
+		// of the runes in the input. If we see a decoding error, the input
+	// width will not be utf8.RuneLen(r) and we will overrun the buffer.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		if int(r) < len(replacementTable) {
+			if repl := replacementTable[r]; len(repl) != 0 {
+				if written == 0 {
+					b.Grow(len(s))
+				}
+				b.WriteString(s[written:i])
+				b.WriteString(repl)
+				written = i + w
+			}
+		} else if badRunes {
+			// No-op.
+			// IE does not allow these ranges in unquoted attrs.
+		} else if 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff {
+			if written == 0 {
+				b.Grow(len(s))
+			}
+			fmt.Fprintf(b, "%s&#x%x;", s[written:i], r)
+			written = i + w
+		}
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
+
+// stripTags takes a snippet of HTML and returns only the text content.
+// For example, `<b>&iexcl;Hi!</b> <script>...</script>` -> `&iexcl;Hi! `.
+func stripTags(html string) string {
+	var b bytes.Buffer
+	s, c, i, allText := []byte(html), context{}, 0, true
+	// Using the transition funcs helps us avoid mangling
+	// `<div title="1>2">` or `I <3 Ponies!`.
+	for i != len(s) {
+		if c.delim == delimNone {
+			st := c.state
+			// Use RCDATA instead of parsing into JS or CSS styles.
+			if c.element != elementNone && !isInTag(st) {
+				st = stateRCDATA
+			}
+			d, nread := transitionFunc[st](c, s[i:])
+			i1 := i + nread
+			if c.state == stateText || c.state == stateRCDATA {
+				// Emit text up to the start of the tag or comment.
+				j := i1
+				if d.state != c.state {
+					for j1 := j - 1; j1 >= i; j1-- {
+						if s[j1] == '<' {
+							j = j1
+							break
+						}
+					}
+				}
+				b.Write(s[i:j])
+			} else {
+				allText = false
+			}
+			c, i = d, i1
+			continue
+		}
+		i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
+		if i1 < i {
+			break
+		}
+		if c.delim != delimSpaceOrTagEnd {
+			// Consume any quote.
+			i1++
+		}
+		c, i = context{state: stateTag, element: c.element}, i1
+	}
+	if allText {
+		return html
+	} else if c.state == stateText || c.state == stateRCDATA {
+		b.Write(s[i:])
+	}
+	return b.String()
+}
+
+// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
+// a known-safe HTML attribute.
+func htmlNameFilter(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTMLAttr {
+		return s
+	}
+	if len(s) == 0 {
+		// Avoid violation of structure preservation.
+		// <input checked {{.K}}={{.V}}>.
+		// Without this, if .K is empty then .V is the value of
+		// checked, but otherwise .V is the value of the attribute
+		// named .K.
+		return filterFailsafe
+	}
+	s = strings.ToLower(s)
+	if t := attrType(s); t != contentTypePlain {
+		// TODO: Split attr and element name part filters so we can recognize known attributes.
+		return filterFailsafe
+	}
+	for _, r := range s {
+		switch {
+		case '0' <= r && r <= '9':
+		case 'a' <= r && r <= 'z':
+		default:
+			return filterFailsafe
+		}
+	}
+	return s
+}
+
+// commentEscaper returns the empty string regardless of input.
+// Comment content does not correspond to any parsed structure or
+// human-readable content, so the simplest and most secure policy is to drop
+// content interpolated into comments.
+// This approach is equally valid whether or not static comment content is
+// removed from the template.
+func commentEscaper(args ...interface{}) string {
+	return ""
+}
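
For orientation, here is a minimal sketch of what the text and quoted-attribute escapers above produce for plain input, written as a hypothetical in-package test (it assumes it sits next to html.go in package template with "testing" imported; it is not part of the patch):

	func TestEscaperSketch(t *testing.T) {
		// Plain strings go through htmlReplacementTable, which encodes
		// <, >, &, ", ' and + (and NUL as U+FFFD).
		if got, want := htmlEscaper("a < b & c"), "a &lt; b &amp; c"; got != want {
			t.Errorf("htmlEscaper: got %q, want %q", got, want)
		}
		// attrEscaper handles quoted attribute values with the same table.
		if got, want := attrEscaper(`say "hi"`), "say &#34;hi&#34;"; got != want {
			t.Errorf("attrEscaper: got %q, want %q", got, want)
		}
	}
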
diff --git a/internal/backport/html/template/html_test.go b/internal/backport/html/template/html_test.go
new file mode 100644
index 0000000..f04ee04
--- /dev/null
+++ b/internal/backport/html/template/html_test.go
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"html"
+	"strings"
+	"testing"
+)
+
+func TestHTMLNospaceEscaper(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\ufdec\U0001D11E" +
+		"erroneous\x960") // keep at the end
+
+	want := ("&#xfffd;\x01\x02\x03\x04\x05\x06\x07" +
+		"\x08&#9;&#10;&#11;&#12;&#13;\x0E\x0F" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17" +
+		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		`&#32;!&#34;#$%&amp;&#39;()*&#43;,-./` +
+		`0123456789:;&lt;&#61;&gt;?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		`&#96;abcdefghijklmno` +
+		`pqrstuvwxyz{|}~` + "\u007f" +
+		"\u00A0\u0100\u2028\u2029\ufeff&#xfdec;\U0001D11E" +
+		"erroneous&#xfffd;0") // keep at the end
+
+	got := htmlNospaceEscaper(input)
+	if got != want {
+		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+
+	r := strings.NewReplacer("\x00", "\ufffd", "\x96", "\ufffd")
+	got, want = html.UnescapeString(got), r.Replace(input)
+	if want != got {
+		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+}
+
+func TestStripTags(t *testing.T) {
+	tests := []struct {
+		input, want string
+	}{
+		{"", ""},
+		{"Hello, World!", "Hello, World!"},
+		{"foo&amp;bar", "foo&amp;bar"},
+		{`Hello <a href="www.example.com/">World</a>!`, "Hello World!"},
+		{"Foo <textarea>Bar</textarea> Baz", "Foo Bar Baz"},
+		{"Foo <!-- Bar --> Baz", "Foo  Baz"},
+		{"<", "<"},
+		{"foo < bar", "foo < bar"},
+		{`Foo<script type="text/javascript">alert(1337)</script>Bar`, "FooBar"},
+		{`Foo<div title="1>2">Bar`, "FooBar"},
+		{`I <3 Ponies!`, `I <3 Ponies!`},
+		{`<script>foo()</script>`, ``},
+	}
+
+	for _, test := range tests {
+		if got := stripTags(test.input); got != test.want {
+			t.Errorf("%q: want %q, got %q", test.input, test.want, got)
+		}
+	}
+}
+
+func BenchmarkHTMLNospaceEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		htmlNospaceEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkHTMLNospaceEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		htmlNospaceEscaper("The_quick,_brown_fox_jumps_over_the_lazy_dog.")
+	}
+}
+
+func BenchmarkStripTags(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stripTags("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkStripTagsNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stripTags("The quick, brown fox jumps over the lazy dog.")
+	}
+}
diff --git a/internal/backport/html/template/js.go b/internal/backport/html/template/js.go
new file mode 100644
index 0000000..ea9c183
--- /dev/null
+++ b/internal/backport/html/template/js.go
@@ -0,0 +1,431 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// nextJSCtx returns the context that determines whether a slash after the
+// given run of tokens starts a regular expression instead of a division
+// operator: / or /=.
+//
+// This assumes that the token run does not include any string tokens, comment
+// tokens, regular expression literal tokens, or division operators.
+//
+// This fails on some valid but nonsensical JavaScript programs like
+// "x = ++/foo/i" which is quite different than "x++/foo/i", but is not known to
+// fail on any known useful programs. It is based on the draft
+// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+// https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
+func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+	s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
+	if len(s) == 0 {
+		return preceding
+	}
+
+	// All cases below are in the single-byte UTF-8 group.
+	switch c, n := s[len(s)-1], len(s); c {
+	case '+', '-':
+		// ++ and -- are not regexp preceders, but + and - are, whether
+		// they are used as infix or prefix operators.
+		start := n - 1
+		// Count the number of adjacent dashes or pluses.
+		for start > 0 && s[start-1] == c {
+			start--
+		}
+		if (n-start)&1 == 1 {
+			// Reached for trailing minus signs since "---" is the
+			// same as "-- -".
+			return jsCtxRegexp
+		}
+		return jsCtxDivOp
+	case '.':
+		// Handle "42."
+		if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
+			return jsCtxDivOp
+		}
+		return jsCtxRegexp
+	// Suffixes for all punctuators from section 7.7 of the language spec
+	// that only end binary operators not handled above.
+	case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':
+		return jsCtxRegexp
+	// Suffixes for all punctuators from section 7.7 of the language spec
+	// that are prefix operators not handled above.
+	case '!', '~':
+		return jsCtxRegexp
+	// Matches all the punctuators from section 7.7 of the language spec
+	// that are open brackets not handled above.
+	case '(', '[':
+		return jsCtxRegexp
+	// Matches all the punctuators from section 7.7 of the language spec
+	// that precede expression starts.
+	case ':', ';', '{':
+		return jsCtxRegexp
+	// CAVEAT: the close punctuators ('}', ']', ')') precede div ops and
+	// are handled in the default except for '}' which can precede a
+	// division op as in
+	//    ({ valueOf: function () { return 42 } } / 2
+	// which is valid, but, in practice, developers don't divide object
+	// literals, so our heuristic works well for code like
+	//    function () { ... }  /foo/.test(x) && sideEffect();
+	// The ')' punctuator can precede a regular expression as in
+	//     if (b) /foo/.test(x) && ...
+	// but this is much less likely than
+	//     (a + b) / c
+	case '}':
+		return jsCtxRegexp
+	default:
+		// Look for an IdentifierName and see if it is a keyword that
+		// can precede a regular expression.
+		j := n
+		for j > 0 && isJSIdentPart(rune(s[j-1])) {
+			j--
+		}
+		if regexpPrecederKeywords[string(s[j:])] {
+			return jsCtxRegexp
+		}
+	}
+	// Otherwise it is a punctuator not listed above, or
+	// a string which precedes a div op, or an identifier
+	// which precedes a div op.
+	return jsCtxDivOp
+}
+
+// regexpPrecederKeywords is a set of reserved JS keywords that can precede a
+// regular expression in JS source.
+var regexpPrecederKeywords = map[string]bool{
+	"break":      true,
+	"case":       true,
+	"continue":   true,
+	"delete":     true,
+	"do":         true,
+	"else":       true,
+	"finally":    true,
+	"in":         true,
+	"instanceof": true,
+	"return":     true,
+	"throw":      true,
+	"try":        true,
+	"typeof":     true,
+	"void":       true,
+}
+
+var jsonMarshalType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+
+// indirectToJSONMarshaler returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of json.Marshaler.
+func indirectToJSONMarshaler(a interface{}) interface{} {
+	// text/template now supports passing untyped nil as a func call
+	// argument, so we must support it. Otherwise we'd panic below, as one
+	// cannot call the Type or Interface methods on an invalid
+	// reflect.Value. See golang.org/issue/18716.
+	if a == nil {
+		return nil
+	}
+
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(jsonMarshalType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
+// neither side effects nor free variables other than NaN and Infinity.
+func jsValEscaper(args ...interface{}) string {
+	var a interface{}
+	if len(args) == 1 {
+		a = indirectToJSONMarshaler(args[0])
+		switch t := a.(type) {
+		case JS:
+			return string(t)
+		case JSStr:
+			// TODO: normalize quotes.
+			return `"` + string(t) + `"`
+		case json.Marshaler:
+			// Do not treat as a Stringer.
+		case fmt.Stringer:
+			a = t.String()
+		}
+	} else {
+		for i, arg := range args {
+			args[i] = indirectToJSONMarshaler(arg)
+		}
+		a = fmt.Sprint(args...)
+	}
+	// TODO: detect cycles before calling Marshal which loops infinitely on
+	// cyclic data. This may be an unacceptable DoS risk.
+	b, err := json.Marshal(a)
+	if err != nil {
+		// Put a space before comment so that if it is flush against
+		// a division operator it is not turned into a line comment:
+		//     x/{{y}}
+		// turning into
+		//     x//* error marshaling y:
+		//          second line of error message */null
+		return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
+	}
+
+	// TODO: maybe post-process output to prevent it from containing
+	// "<!--", "-->", "<![CDATA[", "]]>", or "</script"
+	// in case custom marshalers produce output containing those.
+	// Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
+	// supports ld+json content-type.
+	if len(b) == 0 {
+		// In `x=y/{{.}}*z`, a json.Marshaler that produces "" should
+		// not cause the output `x=y/*z`.
+		return " null "
+	}
+	first, _ := utf8.DecodeRune(b)
+	last, _ := utf8.DecodeLastRune(b)
+	var buf strings.Builder
+	// Prevent IdentifierNames and NumericLiterals from running into
+	// keywords: in, instanceof, typeof, void
+	pad := isJSIdentPart(first) || isJSIdentPart(last)
+	if pad {
+		buf.WriteByte(' ')
+	}
+	written := 0
+	// Make sure that json.Marshal escapes codepoints U+2028 & U+2029
+	// so it falls within the subset of JSON which is valid JS.
+	for i := 0; i < len(b); {
+		rune, n := utf8.DecodeRune(b[i:])
+		repl := ""
+		if rune == 0x2028 {
+			repl = `\u2028`
+		} else if rune == 0x2029 {
+			repl = `\u2029`
+		}
+		if repl != "" {
+			buf.Write(b[written:i])
+			buf.WriteString(repl)
+			written = i + n
+		}
+		i += n
+	}
+	if buf.Len() != 0 {
+		buf.Write(b[written:])
+		if pad {
+			buf.WriteByte(' ')
+		}
+		return buf.String()
+	}
+	return string(b)
+}
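
A small sketch of the padding behavior jsValEscaper implements above, again as a hypothetical in-package test (the expected values mirror entries in the table in js_test.go; not part of the patch):

	func TestJSValEscaperSketch(t *testing.T) {
		// Numbers are padded with spaces so they cannot fuse with an
		// adjacent identifier or keyword in the surrounding JS.
		if got := jsValEscaper(42); got != " 42 " {
			t.Errorf("number: got %q", got)
		}
		// Untyped nil marshals to the keyword null, which is padded for
		// the same reason.
		if got := jsValEscaper(nil); got != " null " {
			t.Errorf("nil: got %q", got)
		}
		// String literals begin and end with quotes, so no padding is needed.
		if got := jsValEscaper("foo"); got != `"foo"` {
			t.Errorf("string: got %q", got)
		}
	}
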
+
+// jsStrEscaper produces a string that can be included between quotes in
+// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
+// or in an HTML5 event handler attribute such as onclick.
+func jsStrEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeJSStr {
+		return replace(s, jsStrNormReplacementTable)
+	}
+	return replace(s, jsStrReplacementTable)
+}
+
+// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
+// specials so the result is treated literally when included in a regular
+// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
+// the literal text of {{.X}} followed by the string "bar".
+func jsRegexpEscaper(args ...interface{}) string {
+	s, _ := stringify(args...)
+	s = replace(s, jsRegexpReplacementTable)
+	if s == "" {
+		// /{{.X}}/ should not produce a line comment when .X == "".
+		return "(?:)"
+	}
+	return s
+}
+
+// replace replaces each rune r of s with replacementTable[r], provided that
+// r < len(replacementTable). If replacementTable[r] is the empty string then
+// no replacement is made.
+// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
+// `\u2029`.
+func replace(s string, replacementTable []string) string {
+	var b strings.Builder
+	r, w, written := rune(0), 0, 0
+	for i := 0; i < len(s); i += w {
+		// See comment in htmlReplacer.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		var repl string
+		switch {
+		case int(r) < len(lowUnicodeReplacementTable):
+			repl = lowUnicodeReplacementTable[r]
+		case int(r) < len(replacementTable) && replacementTable[r] != "":
+			repl = replacementTable[r]
+		case r == '\u2028':
+			repl = `\u2028`
+		case r == '\u2029':
+			repl = `\u2029`
+		default:
+			continue
+		}
+		if written == 0 {
+			b.Grow(len(s))
+		}
+		b.WriteString(s[written:i])
+		b.WriteString(repl)
+		written = i + w
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
+
+var lowUnicodeReplacementTable = []string{
+	0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
+	'\a': `\u0007`,
+	'\b': `\u0008`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	0xe:  `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
+	0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
+	0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
+}
+
+var jsStrReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'+':  `\u002b`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+	'\\': `\\`,
+}
+
+// jsStrNormReplacementTable is like jsStrReplacementTable but does not
+// overencode existing escapes since this table has no entry for `\`.
+var jsStrNormReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'+':  `\u002b`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+}
+var jsRegexpReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'$':  `\$`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'(':  `\(`,
+	')':  `\)`,
+	'*':  `\*`,
+	'+':  `\u002b`,
+	'-':  `\-`,
+	'.':  `\.`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+	'?':  `\?`,
+	'[':  `\[`,
+	'\\': `\\`,
+	']':  `\]`,
+	'^':  `\^`,
+	'{':  `\{`,
+	'|':  `\|`,
+	'}':  `\}`,
+}
+
+// isJSIdentPart reports whether the given rune is a JS identifier part.
+// It does not handle all the non-Latin letters, joiners, and combining marks,
+// but it does handle every codepoint that can occur in a numeric literal or
+// a keyword.
+func isJSIdentPart(r rune) bool {
+	switch {
+	case r == '$':
+		return true
+	case '0' <= r && r <= '9':
+		return true
+	case 'A' <= r && r <= 'Z':
+		return true
+	case r == '_':
+		return true
+	case 'a' <= r && r <= 'z':
+		return true
+	}
+	return false
+}
+
+// isJSType reports whether the given MIME type should be considered JavaScript.
+//
+// It is used to determine whether a script tag with a type attribute is a javascript container.
+func isJSType(mimeType string) bool {
+	// per
+	//   https://www.w3.org/TR/html5/scripting-1.html#attr-script-type
+	//   https://tools.ietf.org/html/rfc7231#section-3.1.1
+	//   https://tools.ietf.org/html/rfc4329#section-3
+	//   https://www.ietf.org/rfc/rfc4627.txt
+	// discard parameters
+	if i := strings.Index(mimeType, ";"); i >= 0 {
+		mimeType = mimeType[:i]
+	}
+	mimeType = strings.ToLower(mimeType)
+	mimeType = strings.TrimSpace(mimeType)
+	switch mimeType {
+	case
+		"application/ecmascript",
+		"application/javascript",
+		"application/json",
+		"application/ld+json",
+		"application/x-ecmascript",
+		"application/x-javascript",
+		"module",
+		"text/ecmascript",
+		"text/javascript",
+		"text/javascript1.0",
+		"text/javascript1.1",
+		"text/javascript1.2",
+		"text/javascript1.3",
+		"text/javascript1.4",
+		"text/javascript1.5",
+		"text/jscript",
+		"text/livescript",
+		"text/x-ecmascript",
+		"text/x-javascript":
+		return true
+	default:
+		return false
+	}
+}
diff --git a/internal/backport/html/template/js_test.go b/internal/backport/html/template/js_test.go
new file mode 100644
index 0000000..d7ee47b
--- /dev/null
+++ b/internal/backport/html/template/js_test.go
@@ -0,0 +1,423 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"math"
+	"strings"
+	"testing"
+)
+
+func TestNextJsCtx(t *testing.T) {
+	tests := []struct {
+		jsCtx jsCtx
+		s     string
+	}{
+		// Statement terminators precede regexps.
+		{jsCtxRegexp, ";"},
+		// This is not airtight.
+		//     ({ valueOf: function () { return 1 } } / 2)
+		// is valid JavaScript but in practice, devs do not do this.
+		// A block followed by a statement starting with a RegExp is
+		// much more common:
+		//     while (x) {...} /foo/.test(x) || panic()
+		{jsCtxRegexp, "}"},
+		// But member, call, grouping, and array expression terminators
+		// precede div ops.
+		{jsCtxDivOp, ")"},
+		{jsCtxDivOp, "]"},
+		// At the start of a primary expression, array, or expression
+		// statement, expect a regexp.
+		{jsCtxRegexp, "("},
+		{jsCtxRegexp, "["},
+		{jsCtxRegexp, "{"},
+		// Assignment operators precede regexps as do all exclusively
+		// prefix and binary operators.
+		{jsCtxRegexp, "="},
+		{jsCtxRegexp, "+="},
+		{jsCtxRegexp, "*="},
+		{jsCtxRegexp, "*"},
+		{jsCtxRegexp, "!"},
+		// Whether the + or - is infix or prefix, it cannot precede a
+		// div op.
+		{jsCtxRegexp, "+"},
+		{jsCtxRegexp, "-"},
+		// An incr/decr op precedes a div operator.
+		// This is not airtight. In (g = ++/h/i) a regexp follows a
+		// pre-increment operator, but in practice devs do not try to
+		// increment or decrement regular expressions.
+		// (g++/h/i) where ++ is a postfix operator on g is much more
+		// common.
+		{jsCtxDivOp, "--"},
+		{jsCtxDivOp, "++"},
+		{jsCtxDivOp, "x--"},
+		// When we have many dashes or pluses, then they are grouped
+		// left to right.
+		{jsCtxRegexp, "x---"}, // A postfix -- then a -.
+		// return followed by a slash returns the regexp literal or the
+		// slash starts a regexp literal in an expression statement that
+		// is dead code.
+		{jsCtxRegexp, "return"},
+		{jsCtxRegexp, "return "},
+		{jsCtxRegexp, "return\t"},
+		{jsCtxRegexp, "return\n"},
+		{jsCtxRegexp, "return\u2028"},
+		// Identifiers can be divided and cannot validly be preceded by
+		// a regular expressions. Semicolon insertion cannot happen
+		// between an identifier and a regular expression on a new line
+		// because the one token lookahead for semicolon insertion has
+		// to conclude that it could be a div binary op and treat it as
+		// such.
+		{jsCtxDivOp, "x"},
+		{jsCtxDivOp, "x "},
+		{jsCtxDivOp, "x\t"},
+		{jsCtxDivOp, "x\n"},
+		{jsCtxDivOp, "x\u2028"},
+		{jsCtxDivOp, "preturn"},
+		// Numbers precede div ops.
+		{jsCtxDivOp, "0"},
+		// Dots that are part of a number are div preceders.
+		{jsCtxDivOp, "0."},
+	}
+
+	for _, test := range tests {
+		if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+			t.Errorf("want %s got %q", test.jsCtx, test.s)
+		}
+		if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+			t.Errorf("want %s got %q", test.jsCtx, test.s)
+		}
+	}
+
+	if nextJSCtx([]byte("   "), jsCtxRegexp) != jsCtxRegexp {
+		t.Error("Blank tokens")
+	}
+
+	if nextJSCtx([]byte("   "), jsCtxDivOp) != jsCtxDivOp {
+		t.Error("Blank tokens")
+	}
+}
+
+func TestJSValEscaper(t *testing.T) {
+	tests := []struct {
+		x  interface{}
+		js string
+	}{
+		{int(42), " 42 "},
+		{uint(42), " 42 "},
+		{int16(42), " 42 "},
+		{uint16(42), " 42 "},
+		{int32(-42), " -42 "},
+		{uint32(42), " 42 "},
+		{int16(-42), " -42 "},
+		{uint16(42), " 42 "},
+		{int64(-42), " -42 "},
+		{uint64(42), " 42 "},
+		{uint64(1) << 53, " 9007199254740992 "},
+		// ulp(1 << 53) > 1 so this loses precision in JS
+		// but it is still a representable integer literal.
+		{uint64(1)<<53 + 1, " 9007199254740993 "},
+		{float32(1.0), " 1 "},
+		{float32(-1.0), " -1 "},
+		{float32(0.5), " 0.5 "},
+		{float32(-0.5), " -0.5 "},
+		{float32(1.0) / float32(256), " 0.00390625 "},
+		{float32(0), " 0 "},
+		{math.Copysign(0, -1), " -0 "},
+		{float64(1.0), " 1 "},
+		{float64(-1.0), " -1 "},
+		{float64(0.5), " 0.5 "},
+		{float64(-0.5), " -0.5 "},
+		{float64(0), " 0 "},
+		{math.Copysign(0, -1), " -0 "},
+		{"", `""`},
+		{"foo", `"foo"`},
+		// Newlines.
+		{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+		// "\v" == "v" on IE 6 so use "\u000b" instead.
+		{"\t\x0b", `"\t\u000b"`},
+		{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+		{[]interface{}{}, "[]"},
+		{[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+		{[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+		{"<!--", `"\u003c!--"`},
+		{"-->", `"--\u003e"`},
+		{"<![CDATA[", `"\u003c![CDATA["`},
+		{"]]>", `"]]\u003e"`},
+		{"</script", `"\u003c/script"`},
+		{"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+		{nil, " null "},
+	}
+
+	for _, test := range tests {
+		if js := jsValEscaper(test.x); js != test.js {
+			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+		}
+		// Make sure that escaping corner cases are not broken
+		// by nesting.
+		a := []interface{}{test.x}
+		want := "[" + strings.TrimSpace(test.js) + "]"
+		if js := jsValEscaper(a); js != want {
+			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", a, want, js)
+		}
+	}
+}
+
+func TestJSStrEscaper(t *testing.T) {
+	tests := []struct {
+		x   interface{}
+		esc string
+	}{
+		{"", ``},
+		{"foo", `foo`},
+		{"\u0000", `\u0000`},
+		{"\t", `\t`},
+		{"\n", `\n`},
+		{"\r", `\r`},
+		{"\u2028", `\u2028`},
+		{"\u2029", `\u2029`},
+		{"\\", `\\`},
+		{"\\n", `\\n`},
+		{"foo\r\nbar", `foo\r\nbar`},
+		// Preserve attribute boundaries.
+		{`"`, `\u0022`},
+		{`'`, `\u0027`},
+		// Allow embedding in HTML without further escaping.
+		{`&amp;`, `\u0026amp;`},
+		// Prevent breaking out of text node and element boundaries.
+		{"</script>", `\u003c\/script\u003e`},
+		{"<![CDATA[", `\u003c![CDATA[`},
+		{"]]>", `]]\u003e`},
+		// https://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
+		//   "The text in style, script, title, and textarea elements
+		//   must not have an escaping text span start that is not
+		//   followed by an escaping text span end."
+		// Furthermore, spoofing an escaping text span end could lead
+		// to different interpretation of a </script> sequence otherwise
+		// masked by the escaping text span, and spoofing a start could
+		// allow regular text content to be interpreted as script
+		// allowing script execution via a combination of a JS string
+		// injection followed by an HTML text injection.
+		{"<!--", `\u003c!--`},
+		{"-->", `--\u003e`},
+		// From https://code.google.com/p/doctype/wiki/ArticleUtf7
+		{"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
+			`\u002bADw-script\u002bAD4-alert(1)\u002bADw-\/script\u002bAD4-`,
+		},
+		// Invalid UTF-8 sequence
+		{"foo\xA0bar", "foo\xA0bar"},
+		// Invalid unicode scalar value.
+		{"foo\xed\xa0\x80bar", "foo\xed\xa0\x80bar"},
+	}
+
+	for _, test := range tests {
+		esc := jsStrEscaper(test.x)
+		if esc != test.esc {
+			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+		}
+	}
+}
+
+func TestJSRegexpEscaper(t *testing.T) {
+	tests := []struct {
+		x   interface{}
+		esc string
+	}{
+		{"", `(?:)`},
+		{"foo", `foo`},
+		{"\u0000", `\u0000`},
+		{"\t", `\t`},
+		{"\n", `\n`},
+		{"\r", `\r`},
+		{"\u2028", `\u2028`},
+		{"\u2029", `\u2029`},
+		{"\\", `\\`},
+		{"\\n", `\\n`},
+		{"foo\r\nbar", `foo\r\nbar`},
+		// Preserve attribute boundaries.
+		{`"`, `\u0022`},
+		{`'`, `\u0027`},
+		// Allow embedding in HTML without further escaping.
+		{`&amp;`, `\u0026amp;`},
+		// Prevent breaking out of text node and element boundaries.
+		{"</script>", `\u003c\/script\u003e`},
+		{"<![CDATA[", `\u003c!\[CDATA\[`},
+		{"]]>", `\]\]\u003e`},
+		// Escaping text spans.
+		{"<!--", `\u003c!\-\-`},
+		{"-->", `\-\-\u003e`},
+		{"*", `\*`},
+		{"+", `\u002b`},
+		{"?", `\?`},
+		{"[](){}", `\[\]\(\)\{\}`},
+		{"$foo|x.y", `\$foo\|x\.y`},
+		{"x^y", `x\^y`},
+	}
+
+	for _, test := range tests {
+		esc := jsRegexpEscaper(test.x)
+		if esc != test.esc {
+			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+		}
+	}
+}
+
+func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	tests := []struct {
+		name    string
+		escaper func(...interface{}) string
+		escaped string
+	}{
+		{
+			"jsStrEscaper",
+			jsStrEscaper,
+			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
+				`\u0008\t\n\u000b\f\r\u000e\u000f` +
+				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
+				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
+				` !\u0022#$%\u0026\u0027()*\u002b,-.\/` +
+				`0123456789:;\u003c=\u003e?` +
+				`@ABCDEFGHIJKLMNO` +
+				`PQRSTUVWXYZ[\\]^_` +
+				"`abcdefghijklmno" +
+				"pqrstuvwxyz{|}~\u007f" +
+				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+		},
+		{
+			"jsRegexpEscaper",
+			jsRegexpEscaper,
+			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
+				`\u0008\t\n\u000b\f\r\u000e\u000f` +
+				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
+				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
+				` !\u0022#\$%\u0026\u0027\(\)\*\u002b,\-\.\/` +
+				`0123456789:;\u003c=\u003e\?` +
+				`@ABCDEFGHIJKLMNO` +
+				`PQRSTUVWXYZ\[\\\]\^_` +
+				"`abcdefghijklmno" +
+				`pqrstuvwxyz\{\|\}~` + "\u007f" +
+				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+		},
+	}
+
+	for _, test := range tests {
+		if s := test.escaper(input); s != test.escaped {
+			t.Errorf("%s once: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+			continue
+		}
+
+		// Escape it rune by rune to make sure that any
+		// fast-path checking does not break escaping.
+		var buf bytes.Buffer
+		for _, c := range input {
+			buf.WriteString(test.escaper(string(c)))
+		}
+
+		if s := buf.String(); s != test.escaped {
+			t.Errorf("%s rune-wise: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+			continue
+		}
+	}
+}
+
+func TestIsJsMimeType(t *testing.T) {
+	tests := []struct {
+		in  string
+		out bool
+	}{
+		{"application/javascript;version=1.8", true},
+		{"application/javascript;version=1.8;foo=bar", true},
+		{"application/javascript/version=1.8", false},
+		{"text/javascript", true},
+		{"application/json", true},
+		{"application/ld+json", true},
+		{"module", true},
+	}
+
+	for _, test := range tests {
+		if isJSType(test.in) != test.out {
+			t.Errorf("isJSType(%q) = %v, want %v", test.in, !test.out, test.out)
+		}
+	}
+}
+
+func BenchmarkJSValEscaperWithNum(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(3.141592654)
+	}
+}
+
+func BenchmarkJSValEscaperWithStr(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkJSValEscaperWithStrNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper("The quick, brown fox jumps over the lazy dog")
+	}
+}
+
+func BenchmarkJSValEscaperWithObj(b *testing.B) {
+	o := struct {
+		S string
+		N int
+	}{
+		"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>\u2028",
+		42,
+	}
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(o)
+	}
+}
+
+func BenchmarkJSValEscaperWithObjNoSpecials(b *testing.B) {
+	o := struct {
+		S string
+		N int
+	}{
+		"The quick, brown fox jumps over the lazy dog",
+		42,
+	}
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(o)
+	}
+}
+
+func BenchmarkJSStrEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsStrEscaper("The quick, brown fox jumps over the lazy dog.")
+	}
+}
+
+func BenchmarkJSStrEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsStrEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkJSRegexpEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsRegexpEscaper("The quick, brown fox jumps over the lazy dog")
+	}
+}
+
+func BenchmarkJSRegexpEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsRegexpEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
diff --git a/internal/backport/html/template/jsctx_string.go b/internal/backport/html/template/jsctx_string.go
new file mode 100644
index 0000000..dd1d87e
--- /dev/null
+++ b/internal/backport/html/template/jsctx_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type jsCtx"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
+
+var _jsCtx_index = [...]uint8{0, 11, 21, 33}
+
+func (i jsCtx) String() string {
+	if i >= jsCtx(len(_jsCtx_index)-1) {
+		return "jsCtx(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _jsCtx_name[_jsCtx_index[i]:_jsCtx_index[i+1]]
+}
diff --git a/internal/backport/html/template/multi_test.go b/internal/backport/html/template/multi_test.go
new file mode 100644
index 0000000..23d9bbc
--- /dev/null
+++ b/internal/backport/html/template/multi_test.go
@@ -0,0 +1,277 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for multiple-template execution, copied from text/template.
+
+package template
+
+import (
+	"bytes"
+	"testing"
+
+	"golang.org/x/website/internal/backport/osfs"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+var multiExecTests = []execTest{
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+	{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+	{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+	{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+	{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+	{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+	{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+	// User-defined function: test argument evaluator.
+	{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+	{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const multiText1 = `
+	{{define "x"}}TEXT{{end}}
+	{{define "dotV"}}{{.V}}{{end}}
+`
+
+const multiText2 = `
+	{{define "dot"}}{{.}}{{end}}
+	{{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestMultiExecute(t *testing.T) {
+	// Declare a couple of templates first.
+	template, err := New("root").Parse(multiText1)
+	if err != nil {
+		t.Fatalf("parse error for 1: %s", err)
+	}
+	_, err = template.Parse(multiText2)
+	if err != nil {
+		t.Fatalf("parse error for 2: %s", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFiles(t *testing.T) {
+	_, err := ParseFiles("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	template := New("root")
+	_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseGlob(t *testing.T) {
+	_, err := ParseGlob("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	_, err = New("error").ParseGlob("[x")
+	if err == nil {
+		t.Error("expected error for bad pattern; got none")
+	}
+	template := New("root")
+	_, err = template.ParseGlob("testdata/file*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFS(t *testing.T) {
+	fs := osfs.DirFS("testdata")
+
+	{
+		_, err := ParseFS(fs, "DOES NOT EXIST")
+		if err == nil {
+			t.Error("expected error for non-existent file; got none")
+		}
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file*.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+}
+
+// In these tests, actual content (not just template definitions) comes from the parsed files.
+
+var templateFileExecTests = []execTest{
+	{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
+}
+
+func TestParseFilesWithData(t *testing.T) {
+	template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+func TestParseGlobWithData(t *testing.T) {
+	template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+const (
+	cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
+	cloneText2 = `{{define "b"}}b{{end}}`
+	cloneText3 = `{{define "c"}}root{{end}}`
+	cloneText4 = `{{define "c"}}clone{{end}}`
+)
+
+// Issue 7032
+func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
+	master := "{{define \"master\"}}{{end}}"
+	tmpl := New("master")
+	tree, err := parse.Parse("master", master, "", "", nil)
+	if err != nil {
+		t.Fatalf("unexpected parse err: %v", err)
+	}
+	masterTree := tree["master"]
+	tmpl.AddParseTree("master", masterTree) // used to panic
+}
+
+func TestRedefinition(t *testing.T) {
+	var tmpl *Template
+	var err error
+	if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parse 1: %v", err)
+	}
+	if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+	if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+}
+
+// Issue 10879
+func TestEmptyTemplateCloneCrash(t *testing.T) {
+	t1 := New("base")
+	t1.Clone() // used to panic
+}
+
+// Issue 10910, 10926
+func TestTemplateLookUp(t *testing.T) {
+	t.Skip("broken on html/template") // TODO
+	t1 := New("foo")
+	if t1.Lookup("foo") != nil {
+		t.Error("Lookup returned non-nil value for undefined template foo")
+	}
+	t1.New("bar")
+	if t1.Lookup("bar") != nil {
+		t.Error("Lookup returned non-nil value for undefined template bar")
+	}
+	t1.Parse(`{{define "foo"}}test{{end}}`)
+	if t1.Lookup("foo") == nil {
+		t.Error("Lookup returned nil value for defined template")
+	}
+}
+
+func TestParse(t *testing.T) {
+	// In multiple calls to Parse with the same receiver template, only one call
+	// can contain text other than space, comments, and template definitions
+	t1 := New("test")
+	if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+}
+
+func TestEmptyTemplate(t *testing.T) {
+	cases := []struct {
+		defn []string
+		in   string
+		want string
+	}{
+		{[]string{"x", "y"}, "", "y"},
+		{[]string{""}, "once", ""},
+		{[]string{"", ""}, "twice", ""},
+		{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
+		{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
+		{[]string{"{{.}}", ""}, "twice", "twice"}, // TODO: should want "" not "twice"
+	}
+
+	for i, c := range cases {
+		root := New("root")
+
+		var (
+			m   *Template
+			err error
+		)
+		for _, d := range c.defn {
+			m, err = root.New(c.in).Parse(d)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		buf := &bytes.Buffer{}
+		if err := m.Execute(buf, c.in); err != nil {
+			t.Error(i, err)
+			continue
+		}
+		if buf.String() != c.want {
+			t.Errorf("expected string %q: got %q", c.want, buf.String())
+		}
+	}
+}
+
+// Issue 19294 was a regression in 1.8 caused by the handling of empty
+// templates added in that release, which got different answers depending
+// on the order templates appeared in the internal map.
+func TestIssue19294(t *testing.T) {
+	// The empty block in "xhtml" should be replaced during execution
+	// by the contents of "stylesheet", but if the internal map associating
+	// names with templates is built in the wrong order, the empty block
+	// looks non-empty and this doesn't happen.
+	var inlined = map[string]string{
+		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
+		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
+	}
+	all := []string{"stylesheet", "xhtml"}
+	for i := 0; i < 100; i++ {
+		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, name := range all {
+			_, err := res.New(name).Parse(inlined[name])
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		var buf bytes.Buffer
+		res.Execute(&buf, 0)
+		if buf.String() != "stylesheet" {
+			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
+		}
+	}
+}
diff --git a/internal/backport/html/template/state_string.go b/internal/backport/html/template/state_string.go
new file mode 100644
index 0000000..05104be
--- /dev/null
+++ b/internal/backport/html/template/state_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type state"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
+
+var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
+
+func (i state) String() string {
+	if i >= state(len(_state_index)-1) {
+		return "state(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _state_name[_state_index[i]:_state_index[i+1]]
+}
diff --git a/internal/backport/html/template/template.go b/internal/backport/html/template/template.go
new file mode 100644
index 0000000..4156ed4
--- /dev/null
+++ b/internal/backport/html/template/template.go
@@ -0,0 +1,538 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"sync"
+
+	"golang.org/x/website/internal/backport/path"
+
+	"golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// Template is a specialized Template from "golang.org/x/website/internal/backport/text/template" that produces a safe
+// HTML document fragment.
+type Template struct {
+	// Sticky error if escaping fails, or escapeOK if succeeded.
+	escapeErr error
+	// We could embed the text/template field, but it's safer not to because
+	// we need to keep our version of the name space and the underlying
+	// template's in sync.
+	text *template.Template
+	// The underlying template's parse tree, updated to be HTML-safe.
+	Tree       *parse.Tree
+	*nameSpace // common to all associated templates
+}
+
+// escapeOK is a sentinel value used to indicate valid escaping.
+var escapeOK = fmt.Errorf("template escaped correctly")
+
+// nameSpace is the data structure shared by all templates in an association.
+type nameSpace struct {
+	mu      sync.Mutex
+	set     map[string]*Template
+	escaped bool
+	esc     escaper
+}
+
+// Templates returns a slice of the templates associated with t, including t
+// itself.
+func (t *Template) Templates() []*Template {
+	ns := t.nameSpace
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	// Return a slice so we don't expose the map.
+	m := make([]*Template, 0, len(ns.set))
+	for _, v := range ns.set {
+		m = append(m, v)
+	}
+	return m
+}
+
+// Option sets options for the template. Options are described by
+// strings, either a simple string or "key=value". There can be at
+// most one equals sign in an option string. If the option string
+// is unrecognized or otherwise invalid, Option panics.
+//
+// Known options:
+//
+// missingkey: Control the behavior during execution if a map is
+// indexed with a key that is not present in the map.
+//	"missingkey=default" or "missingkey=invalid"
+//		The default behavior: Do nothing and continue execution.
+//		If printed, the result of the index operation is the string
+//		"<no value>".
+//	"missingkey=zero"
+//		The operation returns the zero value for the map type's element.
+//	"missingkey=error"
+//		Execution stops immediately with an error.
+//
+func (t *Template) Option(opt ...string) *Template {
+	t.text.Option(opt...)
+	return t
+}
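
A minimal usage sketch of Option with the backported package (illustrative only; the import path follows the directory layout added in this CL):

	package main

	import (
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t := template.Must(template.New("t").Parse("Hello, {{.name}}!"))
		// With the default option a missing map key does not stop execution;
		// "missingkey=error" makes Execute return an error instead.
		t.Option("missingkey=error")
		if err := t.Execute(os.Stdout, map[string]string{}); err != nil {
			// err reports that the map has no entry for key "name".
			os.Exit(1)
		}
	}
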
+
+// checkCanParse checks whether it is OK to parse templates.
+// If not, it returns an error.
+func (t *Template) checkCanParse() error {
+	if t == nil {
+		return nil
+	}
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	if t.nameSpace.escaped {
+		return fmt.Errorf("html/template: cannot Parse after Execute")
+	}
+	return nil
+}
+
+// escape escapes all associated templates.
+func (t *Template) escape() error {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	t.nameSpace.escaped = true
+	if t.escapeErr == nil {
+		if t.Tree == nil {
+			return fmt.Errorf("template: %q is an incomplete or empty template", t.Name())
+		}
+		if err := escapeTemplate(t, t.text.Root, t.Name()); err != nil {
+			return err
+		}
+	} else if t.escapeErr != escapeOK {
+		return t.escapeErr
+	}
+	return nil
+}
+
+// Execute applies a parsed template to the specified data object,
+// writing the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) Execute(wr io.Writer, data interface{}) error {
+	if err := t.escape(); err != nil {
+		return err
+	}
+	return t.text.Execute(wr, data)
+}
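
Escaping happens on the first Execute, after which the shared namespace is sealed against further parsing; a sketch of that restriction (illustrative only):

	package main

	import (
		"io/ioutil"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t := template.Must(template.New("t").Parse("<p>{{.}}</p>"))
		t.Execute(ioutil.Discard, "hi") // first Execute escapes the whole set
		// Any later Parse on the set is rejected with
		// "html/template: cannot Parse after Execute".
		if _, err := t.Parse(`{{define "x"}}x{{end}}`); err == nil {
			panic("expected an error from Parse after Execute")
		}
	}
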
+
+// ExecuteTemplate applies the template associated with t that has the given
+// name to the specified data object and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
+	tmpl, err := t.lookupAndEscapeTemplate(name)
+	if err != nil {
+		return err
+	}
+	return tmpl.text.Execute(wr, data)
+}
+
+// lookupAndEscapeTemplate guarantees that the template with the given name
+// is escaped, or returns an error if it cannot be. It returns the named
+// template.
+func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err error) {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	t.nameSpace.escaped = true
+	tmpl = t.set[name]
+	if tmpl == nil {
+		return nil, fmt.Errorf("html/template: %q is undefined", name)
+	}
+	if tmpl.escapeErr != nil && tmpl.escapeErr != escapeOK {
+		return nil, tmpl.escapeErr
+	}
+	if tmpl.text.Tree == nil || tmpl.text.Root == nil {
+		return nil, fmt.Errorf("html/template: %q is an incomplete template", name)
+	}
+	if t.text.Lookup(name) == nil {
+		panic("html/template internal error: template escaping out of sync")
+	}
+	if tmpl.escapeErr == nil {
+		err = escapeTemplate(tmpl, tmpl.text.Root, name)
+	}
+	return tmpl, err
+}
+
+// DefinedTemplates returns a string listing the defined templates,
+// prefixed by the string "; defined templates are: ". If there are none,
+// it returns the empty string. Used to generate an error message.
+func (t *Template) DefinedTemplates() string {
+	return t.text.DefinedTemplates()
+}
+
+// Parse parses text as a template body for t.
+// Named template definitions ({{define ...}} or {{block ...}} statements) in text
+// define additional templates associated with t and are removed from the
+// definition of t itself.
+//
+// Templates can be redefined in successive calls to Parse,
+// before the first use of Execute on t or any associated template.
+// A template definition with a body containing only white space and comments
+// is considered empty and will not replace an existing template's body.
+// This allows using Parse to add new named template definitions without
+// overwriting the main template body.
+func (t *Template) Parse(text string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	ret, err := t.text.Parse(text)
+	if err != nil {
+		return nil, err
+	}
+
+	// In general, all the named templates might have changed underfoot.
+	// Regardless, some new ones may have been defined.
+	// The template.Template set has been updated; update ours.
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	for _, v := range ret.Templates() {
+		name := v.Name()
+		tmpl := t.set[name]
+		if tmpl == nil {
+			tmpl = t.new(name)
+		}
+		tmpl.text = v
+		tmpl.Tree = v.Tree
+	}
+	return t, nil
+}
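
A sketch of the Parse semantics described above: define blocks become associated templates and may be redefined until the first Execute (illustrative only):

	package main

	import (
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t := template.New("page")
		// The define block becomes an associated template named "greeting".
		template.Must(t.Parse(`{{define "greeting"}}Hello{{end}}<p>{{template "greeting"}}</p>`))
		// Before the first Execute, a later Parse may still redefine it; the
		// empty body of "page" in this call does not replace the one above.
		template.Must(t.Parse(`{{define "greeting"}}Howdy{{end}}`))
		t.Execute(os.Stdout, nil) // <p>Howdy</p>
	}
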
+
+// AddParseTree creates a new template with the name and parse tree
+// and associates it with t.
+//
+// It returns an error if t or any associated template has already been executed.
+func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	text, err := t.text.AddParseTree(name, tree)
+	if err != nil {
+		return nil, err
+	}
+	ret := &Template{
+		nil,
+		text,
+		text.Tree,
+		t.nameSpace,
+	}
+	t.set[name] = ret
+	return ret, nil
+}
+
+// Clone returns a duplicate of the template, including all associated
+// templates. The actual representation is not copied, but the name space of
+// associated templates is, so further calls to Parse in the copy will add
+// templates to the copy but not to the original. Clone can be used to prepare
+// common templates and use them with variant definitions for other templates
+// by adding the variants after the clone is made.
+//
+// It returns an error if t has already been executed.
+func (t *Template) Clone() (*Template, error) {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	if t.escapeErr != nil {
+		return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
+	}
+	textClone, err := t.text.Clone()
+	if err != nil {
+		return nil, err
+	}
+	ns := &nameSpace{set: make(map[string]*Template)}
+	ns.esc = makeEscaper(ns)
+	ret := &Template{
+		nil,
+		textClone,
+		textClone.Tree,
+		ns,
+	}
+	ret.set[ret.Name()] = ret
+	for _, x := range textClone.Templates() {
+		name := x.Name()
+		src := t.set[name]
+		if src == nil || src.escapeErr != nil {
+			return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
+		}
+		x.Tree = x.Tree.Copy()
+		ret.set[name] = &Template{
+			nil,
+			x,
+			x.Tree,
+			ret.nameSpace,
+		}
+	}
+	// Return the template associated with the name of this template.
+	return ret.set[ret.Name()], nil
+}
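
The Clone documentation above describes preparing a common template and attaching per-variant definitions to each copy. A hedged sketch of that pattern; the names "base" and "content" and the variant bodies are illustrative only:

	base := template.Must(template.New("base").Parse(`<div>{{template "content" .}}</div>`))
	a := template.Must(template.Must(base.Clone()).Parse(`{{define "content"}}A: {{.}}{{end}}`))
	b := template.Must(template.Must(base.Clone()).Parse(`{{define "content"}}B: {{.}}{{end}}`))
	// Each clone gets its own "content"; neither Parse call affects the original base.
	_ = a.Execute(os.Stdout, "x") // <div>A: x</div>
	_ = b.Execute(os.Stdout, "y") // <div>B: y</div>
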
+
+// New allocates a new HTML template with the given name.
+func New(name string) *Template {
+	ns := &nameSpace{set: make(map[string]*Template)}
+	ns.esc = makeEscaper(ns)
+	tmpl := &Template{
+		nil,
+		template.New(name),
+		nil,
+		ns,
+	}
+	tmpl.set[name] = tmpl
+	return tmpl
+}
+
+// New allocates a new HTML template associated with the given one
+// and with the same delimiters. The association, which is transitive,
+// allows one template to invoke another with a {{template}} action.
+//
+// If a template with the given name already exists, the new HTML template
+// will replace it. The existing template will be reset and disassociated from
+// t.
+func (t *Template) New(name string) *Template {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	return t.new(name)
+}
+
+// new is the implementation of New, without the lock.
+func (t *Template) new(name string) *Template {
+	tmpl := &Template{
+		nil,
+		t.text.New(name),
+		nil,
+		t.nameSpace,
+	}
+	if existing, ok := tmpl.set[name]; ok {
+		emptyTmpl := New(existing.Name())
+		*existing = *emptyTmpl
+	}
+	tmpl.set[name] = tmpl
+	return tmpl
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+	return t.text.Name()
+}
+
+// FuncMap is the type of the map defining the mapping from names to
+// functions. Each function must have either a single return value, or two
+// return values of which the second has type error. In that case, if the
+// second (error) argument evaluates to non-nil during execution, execution
+// terminates and Execute returns that error. FuncMap has the same base type
+// as FuncMap in "golang.org/x/website/internal/backport/text/template", copied here so clients need not import
+// "golang.org/x/website/internal/backport/text/template".
+type FuncMap map[string]interface{}
+
+// Funcs adds the elements of the argument map to the template's function map.
+// It must be called before the template is parsed.
+// It panics if a value in the map is not a function with appropriate return
+// type. However, it is legal to overwrite elements of the map. The return
+// value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+	t.text.Funcs(template.FuncMap(funcMap))
+	return t
+}
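
Because Funcs must run before parsing, a typical call chain registers the map on a fresh template. A minimal sketch, with a made-up helper name "upper", assuming strings and os are imported:

	t := template.Must(template.New("t").
		Funcs(template.FuncMap{"upper": strings.ToUpper}).
		Parse(`{{upper .}}`))
	_ = t.Execute(os.Stdout, "go") // writes GO
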
+
+// Delims sets the action delimiters to the specified strings, to be used in
+// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
+// definitions will inherit the settings. An empty delimiter stands for the
+// corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+	t.text.Delims(left, right)
+	return t
+}
+
+// Lookup returns the template with the given name that is associated with t,
+// or nil if there is no such template.
+func (t *Template) Lookup(name string) *Template {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	return t.set[name]
+}
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+//	var t = template.Must(template.New("name").Parse("html"))
+func Must(t *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the (base) name and
+// (parsed) contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
+// named "foo", while "a/foo" is unavailable.
+func ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(nil, readFileOS, filenames...)
+}
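
A short sketch of the duplicate-name rule described above, under the assumption that files a/foo and b/foo both exist on disk:

	t := template.Must(template.ParseFiles("a/foo", "b/foo"))
	// t is named "foo" and holds the contents of b/foo, the file listed last;
	// a/foo is not retained under a separate name.
	_ = t.Execute(os.Stdout, nil)
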
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+//
+// ParseFiles returns an error if t or any associated template has already been executed.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(t, readFileOS, filenames...)
+}
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	if len(filenames) == 0 {
+		// Not really a problem, but be consistent.
+		return nil, fmt.Errorf("html/template: no files named in call to ParseFiles")
+	}
+	for _, filename := range filenames {
+		name, b, err := readFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		s := string(b)
+		// First template becomes return value if not already defined,
+		// and we use that one for subsequent New calls to associate
+		// all the templates together. Also, if this file has the same name
+		// as t, this file becomes the contents of t, so
+		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
+		// works. Otherwise we create a new template associated with t.
+		var tmpl *Template
+		if t == nil {
+			t = New(name)
+		}
+		if name == t.Name() {
+			tmpl = t
+		} else {
+			tmpl = t.New(name)
+		}
+		_, err = tmpl.Parse(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from
+// the files identified by the pattern. The files are matched according to the
+// semantics of filepath.Match, and the pattern must match at least one file.
+// The returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(nil, pattern)
+}
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The files are matched
+// according to the semantics of filepath.Match, and the pattern must match at
+// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
+// list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+//
+// ParseGlob returns an error if t or any associated template has already been executed.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+	filenames, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, err
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("html/template: pattern matches no files: %#q", pattern)
+	}
+	return parseFiles(t, readFileOS, filenames...)
+}
+
+// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value. This is the definition of
+// truth used by if and other such actions.
+func IsTrue(val interface{}) (truth, ok bool) {
+	return template.IsTrue(val)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
+	return parseFS(nil, fs, patterns)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func (t *Template) ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
+	return parseFS(t, fs, patterns)
+}
+
+func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
+	var filenames []string
+	for _, pattern := range patterns {
+		list, err := fs.Glob(fsys, pattern)
+		if err != nil {
+			return nil, err
+		}
+		if len(list) == 0 {
+			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+		}
+		filenames = append(filenames, list...)
+	}
+	return parseFiles(t, readFileFS(fsys), filenames...)
+}
+
+func readFileOS(file string) (name string, b []byte, err error) {
+	name = filepath.Base(file)
+	b, err = ioutil.ReadFile(file)
+	return
+}
+
+func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
+	return func(file string) (name string, b []byte, err error) {
+		name = path.Base(file)
+		b, err = fs.ReadFile(fsys, file)
+		return
+	}
+}
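
ParseFS above is what ties this package to the backported io/fs abstraction: templates can come from any fs.FS value rather than only the host file system. A hedged sketch; loadTemplates and the "*.tmpl" pattern are made up, and the import paths follow this backport:

	import (
		"golang.org/x/website/internal/backport/html/template"
		"golang.org/x/website/internal/backport/io/fs"
	)

	// loadTemplates is an illustrative helper, not part of this change.
	func loadTemplates(fsys fs.FS) (*template.Template, error) {
		// Every top-level .tmpl file in fsys becomes an associated template.
		return template.ParseFS(fsys, "*.tmpl")
	}
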
diff --git a/internal/backport/html/template/template_test.go b/internal/backport/html/template/template_test.go
new file mode 100644
index 0000000..0f68e72
--- /dev/null
+++ b/internal/backport/html/template/template_test.go
@@ -0,0 +1,219 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"testing"
+
+	. "golang.org/x/website/internal/backport/html/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+func TestTemplateClone(t *testing.T) {
+	// https://golang.org/issue/12996
+	orig := New("name")
+	clone, err := orig.Clone()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(clone.Templates()) != len(orig.Templates()) {
+		t.Fatalf("Invalid length of t.Clone().Templates()")
+	}
+
+	const want = "stuff"
+	parsed := Must(clone.Parse(want))
+	var buf bytes.Buffer
+	err = parsed.Execute(&buf, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
+
+func TestRedefineNonEmptyAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `foo`)
+	c.mustExecute(c.root, nil, "foo")
+	c.mustNotParse(c.root, `bar`)
+}
+
+func TestRedefineEmptyAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, ``)
+	c.mustExecute(c.root, nil, "")
+	c.mustNotParse(c.root, `foo`)
+	c.mustExecute(c.root, nil, "")
+}
+
+func TestRedefineAfterNonExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{if .}}<{{template "X"}}>{{end}}{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.root, 0, "")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.root, 1, "&lt;foo>")
+}
+
+func TestRedefineAfterNamedExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `<{{template "X" .}}>{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.root, nil, "&lt;foo>")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.root, nil, "&lt;foo>")
+}
+
+func TestRedefineNestedByNameAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+}
+
+func TestRedefineNestedByTemplateAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+	c.mustNotParse(c.lookup("X"), `bar`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+}
+
+func TestRedefineSafety(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `<html><a href="{{template "X"}}">{{define "X"}}{{end}}`)
+	c.mustExecute(c.root, nil, `<html><a href="">`)
+	// Note: Every version of Go prior to Go 1.8 accepted the redefinition of "X"
+	// on the next line, but luckily kept it from being used in the outer template.
+	// Now we reject it, which makes clearer that we're not going to use it.
+	c.mustNotParse(c.root, `{{define "X"}}" bar="baz{{end}}`)
+	c.mustExecute(c.root, nil, `<html><a href="">`)
+}
+
+func TestRedefineTopUse(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{template "X"}}{{.}}{{define "X"}}{{end}}`)
+	c.mustExecute(c.root, 42, `42`)
+	c.mustNotParse(c.root, `{{define "X"}}<script>{{end}}`)
+	c.mustExecute(c.root, 42, `42`)
+}
+
+func TestRedefineOtherParsers(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, ``)
+	c.mustExecute(c.root, nil, ``)
+	if _, err := c.root.ParseFiles("no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("ParseFiles: %v\nwanted error about already having Executed", err)
+	}
+	if _, err := c.root.ParseGlob("*.no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("ParseGlob: %v\nwanted error about already having Executed", err)
+	}
+	if _, err := c.root.AddParseTree("t1", c.root.Tree); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("AddParseTree: %v\nwanted error about already having Executed", err)
+	}
+}
+
+func TestNumbers(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{print 1_2.3_4}} {{print 0x0_1.e_0p+02}}`)
+	c.mustExecute(c.root, nil, "12.34 7.5")
+}
+
+func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
+	// See #33671 and #37634 for more context on this.
+	tests := []struct{ name, in string }{
+		{"empty", ""},
+		{"invalid", string(rune(-1))},
+		{"null", "\u0000"},
+		{"unit separator", "\u001F"},
+		{"tab", "\t"},
+		{"gt and lt", "<>"},
+		{"quotes", `'"`},
+		{"ASCII letters", "ASCII letters"},
+		{"Unicode", "ʕ⊙ϖ⊙ʔ"},
+		{"Pizza", "🍕"},
+	}
+	const (
+		prefix = `<script type="application/ld+json">`
+		suffix = `</script>`
+		templ  = prefix + `"{{.}}"` + suffix
+	)
+	tpl := Must(New("JS string is JSON string").Parse(templ))
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var buf bytes.Buffer
+			if err := tpl.Execute(&buf, tt.in); err != nil {
+				t.Fatalf("Cannot render template: %v", err)
+			}
+			trimmed := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
+			var got string
+			if err := json.Unmarshal(trimmed, &got); err != nil {
+				t.Fatalf("Cannot parse JS string %q as JSON: %v", trimmed[1:len(trimmed)-1], err)
+			}
+			if got != tt.in {
+				t.Errorf("Serialization changed the string value: got %q want %q", got, tt.in)
+			}
+		})
+	}
+}
+
+func TestSkipEscapeComments(t *testing.T) {
+	c := newTestCase(t)
+	tr := parse.New("root")
+	tr.Mode = parse.ParseComments
+	newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree))
+	if err != nil {
+		t.Fatalf("Cannot parse template text: %v", err)
+	}
+	c.root, err = c.root.AddParseTree("root", newT)
+	if err != nil {
+		t.Fatalf("Cannot add parse tree to template: %v", err)
+	}
+	c.mustExecute(c.root, nil, "1")
+}
+
+type testCase struct {
+	t    *testing.T
+	root *Template
+}
+
+func newTestCase(t *testing.T) *testCase {
+	return &testCase{
+		t:    t,
+		root: New("root"),
+	}
+}
+
+func (c *testCase) lookup(name string) *Template {
+	return c.root.Lookup(name)
+}
+
+func (c *testCase) mustParse(t *Template, text string) {
+	_, err := t.Parse(text)
+	if err != nil {
+		c.t.Fatalf("parse: %v", err)
+	}
+}
+
+func (c *testCase) mustNotParse(t *Template, text string) {
+	_, err := t.Parse(text)
+	if err == nil {
+		c.t.Fatalf("parse: unexpected success")
+	}
+}
+
+func (c *testCase) mustExecute(t *Template, val interface{}, want string) {
+	var buf bytes.Buffer
+	err := t.Execute(&buf, val)
+	if err != nil {
+		c.t.Fatalf("execute: %v", err)
+	}
+	if buf.String() != want {
+		c.t.Fatalf("template output:\n%s\nwant:\n%s", buf.String(), want)
+	}
+}
diff --git a/internal/backport/html/template/testdata/file1.tmpl b/internal/backport/html/template/testdata/file1.tmpl
new file mode 100644
index 0000000..febf9d9
--- /dev/null
+++ b/internal/backport/html/template/testdata/file1.tmpl
@@ -0,0 +1,2 @@
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
diff --git a/internal/backport/html/template/testdata/file2.tmpl b/internal/backport/html/template/testdata/file2.tmpl
new file mode 100644
index 0000000..39bf6fb
--- /dev/null
+++ b/internal/backport/html/template/testdata/file2.tmpl
@@ -0,0 +1,2 @@
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/internal/backport/html/template/testdata/fs.zip b/internal/backport/html/template/testdata/fs.zip
new file mode 100644
index 0000000..8581313
--- /dev/null
+++ b/internal/backport/html/template/testdata/fs.zip
Binary files differ
diff --git a/internal/backport/html/template/testdata/tmpl1.tmpl b/internal/backport/html/template/testdata/tmpl1.tmpl
new file mode 100644
index 0000000..b72b3a3
--- /dev/null
+++ b/internal/backport/html/template/testdata/tmpl1.tmpl
@@ -0,0 +1,3 @@
+template1
+{{define "x"}}x{{end}}
+{{template "y"}}
diff --git a/internal/backport/html/template/testdata/tmpl2.tmpl b/internal/backport/html/template/testdata/tmpl2.tmpl
new file mode 100644
index 0000000..16beba6
--- /dev/null
+++ b/internal/backport/html/template/testdata/tmpl2.tmpl
@@ -0,0 +1,3 @@
+template2
+{{define "y"}}y{{end}}
+{{template "x"}}
diff --git a/internal/backport/html/template/transition.go b/internal/backport/html/template/transition.go
new file mode 100644
index 0000000..06df679
--- /dev/null
+++ b/internal/backport/html/template/transition.go
@@ -0,0 +1,592 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"strings"
+)
+
+// transitionFunc is the array of context transition functions for text nodes.
+// A transition function takes a context and template text input, and returns
+// the updated context and the number of bytes consumed from the front of the
+// input.
+var transitionFunc = [...]func(context, []byte) (context, int){
+	stateText:        tText,
+	stateTag:         tTag,
+	stateAttrName:    tAttrName,
+	stateAfterName:   tAfterName,
+	stateBeforeValue: tBeforeValue,
+	stateHTMLCmt:     tHTMLCmt,
+	stateRCDATA:      tSpecialTagEnd,
+	stateAttr:        tAttr,
+	stateURL:         tURL,
+	stateSrcset:      tURL,
+	stateJS:          tJS,
+	stateJSDqStr:     tJSDelimited,
+	stateJSSqStr:     tJSDelimited,
+	stateJSRegexp:    tJSDelimited,
+	stateJSBlockCmt:  tBlockCmt,
+	stateJSLineCmt:   tLineCmt,
+	stateCSS:         tCSS,
+	stateCSSDqStr:    tCSSStr,
+	stateCSSSqStr:    tCSSStr,
+	stateCSSDqURL:    tCSSStr,
+	stateCSSSqURL:    tCSSStr,
+	stateCSSURL:      tCSSStr,
+	stateCSSBlockCmt: tBlockCmt,
+	stateCSSLineCmt:  tLineCmt,
+	stateError:       tError,
+}
+
+var commentStart = []byte("<!--")
+var commentEnd = []byte("-->")
+
+// tText is the context transition function for the text state.
+func tText(c context, s []byte) (context, int) {
+	k := 0
+	for {
+		i := k + bytes.IndexByte(s[k:], '<')
+		if i < k || i+1 == len(s) {
+			return c, len(s)
+		} else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) {
+			return context{state: stateHTMLCmt}, i + 4
+		}
+		i++
+		end := false
+		if s[i] == '/' {
+			if i+1 == len(s) {
+				return c, len(s)
+			}
+			end, i = true, i+1
+		}
+		j, e := eatTagName(s, i)
+		if j != i {
+			if end {
+				e = elementNone
+			}
+			// We've found an HTML tag.
+			return context{state: stateTag, element: e}, j
+		}
+		k = j
+	}
+}
+
+var elementContentType = [...]state{
+	elementNone:     stateText,
+	elementScript:   stateJS,
+	elementStyle:    stateCSS,
+	elementTextarea: stateRCDATA,
+	elementTitle:    stateRCDATA,
+}
+
+// tTag is the context transition function for the tag state.
+func tTag(c context, s []byte) (context, int) {
+	// Find the attribute name.
+	i := eatWhiteSpace(s, 0)
+	if i == len(s) {
+		return c, len(s)
+	}
+	if s[i] == '>' {
+		return context{
+			state:   elementContentType[c.element],
+			element: c.element,
+		}, i + 1
+	}
+	j, err := eatAttrName(s, i)
+	if err != nil {
+		return context{state: stateError, err: err}, len(s)
+	}
+	state, attr := stateTag, attrNone
+	if i == j {
+		return context{
+			state: stateError,
+			err:   errorf(ErrBadHTML, nil, 0, "expected space, attr name, or end of tag, but got %q", s[i:]),
+		}, len(s)
+	}
+
+	attrName := strings.ToLower(string(s[i:j]))
+	if c.element == elementScript && attrName == "type" {
+		attr = attrScriptType
+	} else {
+		switch attrType(attrName) {
+		case contentTypeURL:
+			attr = attrURL
+		case contentTypeCSS:
+			attr = attrStyle
+		case contentTypeJS:
+			attr = attrScript
+		case contentTypeSrcset:
+			attr = attrSrcset
+		}
+	}
+
+	if j == len(s) {
+		state = stateAttrName
+	} else {
+		state = stateAfterName
+	}
+	return context{state: state, element: c.element, attr: attr}, j
+}
+
+// tAttrName is the context transition function for stateAttrName.
+func tAttrName(c context, s []byte) (context, int) {
+	i, err := eatAttrName(s, 0)
+	if err != nil {
+		return context{state: stateError, err: err}, len(s)
+	} else if i != len(s) {
+		c.state = stateAfterName
+	}
+	return c, i
+}
+
+// tAfterName is the context transition function for stateAfterName.
+func tAfterName(c context, s []byte) (context, int) {
+	// Look for the start of the value.
+	i := eatWhiteSpace(s, 0)
+	if i == len(s) {
+		return c, len(s)
+	} else if s[i] != '=' {
+		// Occurs due to tag ending '>', and valueless attribute.
+		c.state = stateTag
+		return c, i
+	}
+	c.state = stateBeforeValue
+	// Consume the "=".
+	return c, i + 1
+}
+
+var attrStartStates = [...]state{
+	attrNone:       stateAttr,
+	attrScript:     stateJS,
+	attrScriptType: stateAttr,
+	attrStyle:      stateCSS,
+	attrURL:        stateURL,
+	attrSrcset:     stateSrcset,
+}
+
+// tBeforeValue is the context transition function for stateBeforeValue.
+func tBeforeValue(c context, s []byte) (context, int) {
+	i := eatWhiteSpace(s, 0)
+	if i == len(s) {
+		return c, len(s)
+	}
+	// Find the attribute delimiter.
+	delim := delimSpaceOrTagEnd
+	switch s[i] {
+	case '\'':
+		delim, i = delimSingleQuote, i+1
+	case '"':
+		delim, i = delimDoubleQuote, i+1
+	}
+	c.state, c.delim = attrStartStates[c.attr], delim
+	return c, i
+}
+
+// tHTMLCmt is the context transition function for stateHTMLCmt.
+func tHTMLCmt(c context, s []byte) (context, int) {
+	if i := bytes.Index(s, commentEnd); i != -1 {
+		return context{}, i + 3
+	}
+	return c, len(s)
+}
+
+// specialTagEndMarkers maps element types to the character sequence that
+// case-insensitively signals the end of the special tag body.
+var specialTagEndMarkers = [...][]byte{
+	elementScript:   []byte("script"),
+	elementStyle:    []byte("style"),
+	elementTextarea: []byte("textarea"),
+	elementTitle:    []byte("title"),
+}
+
+var (
+	specialTagEndPrefix = []byte("</")
+	tagEndSeparators    = []byte("> \t\n\f/")
+)
+
+// tSpecialTagEnd is the context transition function for raw text and RCDATA
+// element states.
+func tSpecialTagEnd(c context, s []byte) (context, int) {
+	if c.element != elementNone {
+		if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
+			return context{}, i
+		}
+	}
+	return c, len(s)
+}
+
+// indexTagEnd finds the index of a special tag end in a case insensitive way, or returns -1
+func indexTagEnd(s []byte, tag []byte) int {
+	res := 0
+	plen := len(specialTagEndPrefix)
+	for len(s) > 0 {
+		// Try to find the tag end prefix first
+		i := bytes.Index(s, specialTagEndPrefix)
+		if i == -1 {
+			return i
+		}
+		s = s[i+plen:]
+		// Try to match the actual tag if there is still space for it
+		if len(tag) <= len(s) && bytes.EqualFold(tag, s[:len(tag)]) {
+			s = s[len(tag):]
+			// Check the tag is followed by a proper separator
+			if len(s) > 0 && bytes.IndexByte(tagEndSeparators, s[0]) != -1 {
+				return res + i
+			}
+			res += len(tag)
+		}
+		res += i + plen
+	}
+	return -1
+}
+
+// tAttr is the context transition function for the attribute state.
+func tAttr(c context, s []byte) (context, int) {
+	return c, len(s)
+}
+
+// tURL is the context transition function for the URL state.
+func tURL(c context, s []byte) (context, int) {
+	if bytes.ContainsAny(s, "#?") {
+		c.urlPart = urlPartQueryOrFrag
+	} else if len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone {
+		// HTML5 uses "Valid URL potentially surrounded by spaces" for
+		// attrs: https://www.w3.org/TR/html5/index.html#attributes-1
+		c.urlPart = urlPartPreQuery
+	}
+	return c, len(s)
+}
+
+// tJS is the context transition function for the JS state.
+func tJS(c context, s []byte) (context, int) {
+	i := bytes.IndexAny(s, `"'/`)
+	if i == -1 {
+		// Entire input is non string, comment, regexp tokens.
+		c.jsCtx = nextJSCtx(s, c.jsCtx)
+		return c, len(s)
+	}
+	c.jsCtx = nextJSCtx(s[:i], c.jsCtx)
+	switch s[i] {
+	case '"':
+		c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
+	case '\'':
+		c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
+	case '/':
+		switch {
+		case i+1 < len(s) && s[i+1] == '/':
+			c.state, i = stateJSLineCmt, i+1
+		case i+1 < len(s) && s[i+1] == '*':
+			c.state, i = stateJSBlockCmt, i+1
+		case c.jsCtx == jsCtxRegexp:
+			c.state = stateJSRegexp
+		case c.jsCtx == jsCtxDivOp:
+			c.jsCtx = jsCtxRegexp
+		default:
+			return context{
+				state: stateError,
+				err:   errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
+			}, len(s)
+		}
+	default:
+		panic("unreachable")
+	}
+	return c, i + 1
+}
+
+// tJSDelimited is the context transition function for the JS string and regexp
+// states.
+func tJSDelimited(c context, s []byte) (context, int) {
+	specials := `\"`
+	switch c.state {
+	case stateJSSqStr:
+		specials = `\'`
+	case stateJSRegexp:
+		specials = `\/[]`
+	}
+
+	k, inCharset := 0, false
+	for {
+		i := k + bytes.IndexAny(s[k:], specials)
+		if i < k {
+			break
+		}
+		switch s[i] {
+		case '\\':
+			i++
+			if i == len(s) {
+				return context{
+					state: stateError,
+					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s),
+				}, len(s)
+			}
+		case '[':
+			inCharset = true
+		case ']':
+			inCharset = false
+		default:
+			// end delimiter
+			if !inCharset {
+				c.state, c.jsCtx = stateJS, jsCtxDivOp
+				return c, i + 1
+			}
+		}
+		k = i + 1
+	}
+
+	if inCharset {
+		// This can be fixed by making context richer if interpolation
+		// into charsets is desired.
+		return context{
+			state: stateError,
+			err:   errorf(ErrPartialCharset, nil, 0, "unfinished JS regexp charset: %q", s),
+		}, len(s)
+	}
+
+	return c, len(s)
+}
+
+var blockCommentEnd = []byte("*/")
+
+// tBlockCmt is the context transition function for /*comment*/ states.
+func tBlockCmt(c context, s []byte) (context, int) {
+	i := bytes.Index(s, blockCommentEnd)
+	if i == -1 {
+		return c, len(s)
+	}
+	switch c.state {
+	case stateJSBlockCmt:
+		c.state = stateJS
+	case stateCSSBlockCmt:
+		c.state = stateCSS
+	default:
+		panic(c.state.String())
+	}
+	return c, i + 2
+}
+
+// tLineCmt is the context transition function for //comment states.
+func tLineCmt(c context, s []byte) (context, int) {
+	var lineTerminators string
+	var endState state
+	switch c.state {
+	case stateJSLineCmt:
+		lineTerminators, endState = "\n\r\u2028\u2029", stateJS
+	case stateCSSLineCmt:
+		lineTerminators, endState = "\n\f\r", stateCSS
+		// Line comments are not part of any published CSS standard but
+		// are supported by the 4 major browsers.
+		// This defines line comments as
+		//     LINECOMMENT ::= "//" [^\n\f\d]*
+		// since https://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines
+		// newlines:
+		//     nl ::= #xA | #xD #xA | #xD | #xC
+	default:
+		panic(c.state.String())
+	}
+
+	i := bytes.IndexAny(s, lineTerminators)
+	if i == -1 {
+		return c, len(s)
+	}
+	c.state = endState
+	// Per section 7.4 of EcmaScript 5 : https://es5.github.com/#x7.4
+	// "However, the LineTerminator at the end of the line is not
+	// considered to be part of the single-line comment; it is
+	// recognized separately by the lexical grammar and becomes part
+	// of the stream of input elements for the syntactic grammar."
+	return c, i
+}
+
+// tCSS is the context transition function for the CSS state.
+func tCSS(c context, s []byte) (context, int) {
+	// CSS quoted strings are almost never used except for:
+	// (1) URLs as in background: "/foo.png"
+	// (2) Multiword font-names as in font-family: "Times New Roman"
+	// (3) List separators in content values as in inline-lists:
+	//    <style>
+	//    ul.inlineList { list-style: none; padding:0 }
+	//    ul.inlineList > li { display: inline }
+	//    ul.inlineList > li:before { content: ", " }
+	//    ul.inlineList > li:first-child:before { content: "" }
+	//    </style>
+	//    <ul class=inlineList><li>One<li>Two<li>Three</ul>
+	// (4) Attribute value selectors as in a[href="http://example.com/"]
+	//
+	// We conservatively treat all strings as URLs, but make some
+	// allowances to avoid confusion.
+	//
+	// In (1), our conservative assumption is justified.
+	// In (2), valid font names do not contain ':', '?', or '#', so our
+	// conservative assumption is fine since we will never transition past
+	// urlPartPreQuery.
+	// In (3), our protocol heuristic should not be tripped, and there
+	// should not be non-space content after a '?' or '#', so as long as
+	// we only %-encode RFC 3986 reserved characters we are ok.
+	// In (4), we should URL escape for URL attributes, and for others we
+	// have the attribute name available if our conservative assumption
+	// proves problematic for real code.
+
+	k := 0
+	for {
+		i := k + bytes.IndexAny(s[k:], `("'/`)
+		if i < k {
+			return c, len(s)
+		}
+		switch s[i] {
+		case '(':
+			// Look for url to the left.
+			p := bytes.TrimRight(s[:i], "\t\n\f\r ")
+			if endsWithCSSKeyword(p, "url") {
+				j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r "))
+				switch {
+				case j != len(s) && s[j] == '"':
+					c.state, j = stateCSSDqURL, j+1
+				case j != len(s) && s[j] == '\'':
+					c.state, j = stateCSSSqURL, j+1
+				default:
+					c.state = stateCSSURL
+				}
+				return c, j
+			}
+		case '/':
+			if i+1 < len(s) {
+				switch s[i+1] {
+				case '/':
+					c.state = stateCSSLineCmt
+					return c, i + 2
+				case '*':
+					c.state = stateCSSBlockCmt
+					return c, i + 2
+				}
+			}
+		case '"':
+			c.state = stateCSSDqStr
+			return c, i + 1
+		case '\'':
+			c.state = stateCSSSqStr
+			return c, i + 1
+		}
+		k = i + 1
+	}
+}
+
+// tCSSStr is the context transition function for the CSS string and URL states.
+func tCSSStr(c context, s []byte) (context, int) {
+	var endAndEsc string
+	switch c.state {
+	case stateCSSDqStr, stateCSSDqURL:
+		endAndEsc = `\"`
+	case stateCSSSqStr, stateCSSSqURL:
+		endAndEsc = `\'`
+	case stateCSSURL:
+		// Unquoted URLs end with a newline or close parenthesis.
+		// The below includes the wc (whitespace character) and nl.
+		endAndEsc = "\\\t\n\f\r )"
+	default:
+		panic(c.state.String())
+	}
+
+	k := 0
+	for {
+		i := k + bytes.IndexAny(s[k:], endAndEsc)
+		if i < k {
+			c, nread := tURL(c, decodeCSS(s[k:]))
+			return c, k + nread
+		}
+		if s[i] == '\\' {
+			i++
+			if i == len(s) {
+				return context{
+					state: stateError,
+					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in CSS string: %q", s),
+				}, len(s)
+			}
+		} else {
+			c.state = stateCSS
+			return c, i + 1
+		}
+		c, _ = tURL(c, decodeCSS(s[:i+1]))
+		k = i + 1
+	}
+}
+
+// tError is the context transition function for the error state.
+func tError(c context, s []byte) (context, int) {
+	return c, len(s)
+}
+
+// eatAttrName returns the largest j such that s[i:j] is an attribute name.
+// It returns an error if s[i:] does not look like it begins with an
+// attribute name, such as encountering a quote mark without a preceding
+// equals sign.
+func eatAttrName(s []byte, i int) (int, *Error) {
+	for j := i; j < len(s); j++ {
+		switch s[j] {
+		case ' ', '\t', '\n', '\f', '\r', '=', '>':
+			return j, nil
+		case '\'', '"', '<':
+			// These result in a parse warning in HTML5 and are
+			// indicative of serious problems if seen in an attr
+			// name in a template.
+			return -1, errorf(ErrBadHTML, nil, 0, "%q in attribute name: %.32q", s[j:j+1], s)
+		default:
+			// No-op.
+		}
+	}
+	return len(s), nil
+}
+
+var elementNameMap = map[string]element{
+	"script":   elementScript,
+	"style":    elementStyle,
+	"textarea": elementTextarea,
+	"title":    elementTitle,
+}
+
+// asciiAlpha reports whether c is an ASCII letter.
+func asciiAlpha(c byte) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+// asciiAlphaNum reports whether c is an ASCII letter or digit.
+func asciiAlphaNum(c byte) bool {
+	return asciiAlpha(c) || '0' <= c && c <= '9'
+}
+
+// eatTagName returns the largest j such that s[i:j] is a tag name and the tag type.
+func eatTagName(s []byte, i int) (int, element) {
+	if i == len(s) || !asciiAlpha(s[i]) {
+		return i, elementNone
+	}
+	j := i + 1
+	for j < len(s) {
+		x := s[j]
+		if asciiAlphaNum(x) {
+			j++
+			continue
+		}
+		// Allow "x-y" or "x:y" but not "x-", "-y", or "x--y".
+		if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) {
+			j += 2
+			continue
+		}
+		break
+	}
+	return j, elementNameMap[strings.ToLower(string(s[i:j]))]
+}
+
+// eatWhiteSpace returns the largest j such that s[i:j] is white space.
+func eatWhiteSpace(s []byte, i int) int {
+	for j := i; j < len(s); j++ {
+		switch s[j] {
+		case ' ', '\t', '\n', '\f', '\r':
+			// No-op.
+		default:
+			return j
+		}
+	}
+	return len(s)
+}
diff --git a/internal/backport/html/template/transition_test.go b/internal/backport/html/template/transition_test.go
new file mode 100644
index 0000000..412a4c7
--- /dev/null
+++ b/internal/backport/html/template/transition_test.go
@@ -0,0 +1,60 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestFindEndTag(t *testing.T) {
+	tests := []struct {
+		s, tag string
+		want   int
+	}{
+		{"", "tag", -1},
+		{"hello </textarea> hello", "textarea", 6},
+		{"hello </TEXTarea> hello", "textarea", 6},
+		{"hello </textAREA>", "textarea", 6},
+		{"hello </textarea", "textareax", -1},
+		{"hello </textarea>", "tag", -1},
+		{"hello tag </textarea", "tag", -1},
+		{"hello </tag> </other> </textarea> <other>", "textarea", 22},
+		{"</textarea> <other>", "textarea", 0},
+		{"<div> </div> </TEXTAREA>", "textarea", 13},
+		{"<div> </div> </TEXTAREA\t>", "textarea", 13},
+		{"<div> </div> </TEXTAREA >", "textarea", 13},
+		{"<div> </div> </TEXTAREAfoo", "textarea", -1},
+		{"</TEXTAREAfoo </textarea>", "textarea", 14},
+		{"<</script >", "script", 1},
+		{"</script>", "textarea", -1},
+	}
+	for _, test := range tests {
+		if got := indexTagEnd([]byte(test.s), []byte(test.tag)); test.want != got {
+			t.Errorf("%q/%q: want\n\t%d\nbut got\n\t%d", test.s, test.tag, test.want, got)
+		}
+	}
+}
+
+func BenchmarkTemplateSpecialTags(b *testing.B) {
+
+	r := struct {
+		Name, Gift string
+	}{"Aunt Mildred", "bone china tea set"}
+
+	h1 := "<textarea> Hello Hello Hello </textarea> "
+	h2 := "<textarea> <p> Dear {{.Name}},\n{{with .Gift}}Thank you for the lovely {{.}}. {{end}}\nBest wishes. </p>\n</textarea>"
+	html := strings.Repeat(h1, 100) + h2 + strings.Repeat(h1, 100) + h2
+
+	var buf bytes.Buffer
+	for i := 0; i < b.N; i++ {
+		tmpl := Must(New("foo").Parse(html))
+		if err := tmpl.Execute(&buf, r); err != nil {
+			b.Fatal(err)
+		}
+		buf.Reset()
+	}
+}
diff --git a/internal/backport/html/template/url.go b/internal/backport/html/template/url.go
new file mode 100644
index 0000000..6f8185a
--- /dev/null
+++ b/internal/backport/html/template/url.go
@@ -0,0 +1,219 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// urlFilter returns its input unless it contains an unsafe scheme in which
+// case it defangs the entire URL.
+//
+// Schemes that cause unintended side effects that are irreversible without user
+// interaction are considered unsafe. For example, clicking on a "javascript:"
+// link can immediately trigger JavaScript code execution.
+//
+// This filter conservatively assumes that all schemes other than the following
+// are unsafe:
+//    * http:   Navigates to a new website, and may open a new window or tab.
+//              These side effects can be reversed by navigating back to the
+//              previous website, or closing the window or tab. No irreversible
+//              changes will take place without further user interaction with
+//              the new website.
+//    * https:  Same as http.
+//    * mailto: Opens an email program and starts a new draft. This side effect
+//              is not irreversible until the user explicitly clicks send; it
+//              can be undone by closing the email program.
+//
+// To allow URLs containing other schemes to bypass this filter, developers must
+// explicitly indicate that such a URL is expected and safe by encapsulating it
+// in a template.URL value.
+func urlFilter(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeURL {
+		return s
+	}
+	if !isSafeURL(s) {
+		return "#" + filterFailsafe
+	}
+	return s
+}
+
+// isSafeURL is true if s is a relative URL or if URL has a protocol in
+// (http, https, mailto).
+func isSafeURL(s string) bool {
+	if i := strings.IndexRune(s, ':'); i >= 0 && !strings.ContainsRune(s[:i], '/') {
+
+		protocol := s[:i]
+		if !strings.EqualFold(protocol, "http") && !strings.EqualFold(protocol, "https") && !strings.EqualFold(protocol, "mailto") {
+			return false
+		}
+	}
+	return true
+}
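
At execution time this policy is what turns rejected URLs into the "#" + filterFailsafe placeholder. A minimal usage-level sketch; the template text is made up and os is assumed to be imported:

	t := template.Must(template.New("a").Parse(`<a href="{{.}}">link</a>`))
	_ = t.Execute(os.Stdout, "javascript:alert(1)") // href becomes "#ZgotmplZ"
	_ = t.Execute(os.Stdout, "https://example.com/") // href passes through intact
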
+
+// urlEscaper produces an output that can be embedded in a URL query.
+// The output can be embedded in an HTML attribute without further escaping.
+func urlEscaper(args ...interface{}) string {
+	return urlProcessor(false, args...)
+}
+
+// urlNormalizer normalizes URL content so it can be embedded in a quote-delimited
+// string or parenthesis delimited url(...).
+// The normalizer does not encode all HTML specials. Specifically, it does not
+// encode '&' so correct embedding in an HTML attribute requires escaping of
+// '&' to '&amp;'.
+func urlNormalizer(args ...interface{}) string {
+	return urlProcessor(true, args...)
+}
+
+// urlProcessor normalizes (when norm is true) or escapes its input to produce
+// a valid hierarchical or opaque URL part.
+func urlProcessor(norm bool, args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeURL {
+		norm = true
+	}
+	var b bytes.Buffer
+	if processURLOnto(s, norm, &b) {
+		return b.String()
+	}
+	return s
+}
+
+// processURLOnto appends a normalized URL corresponding to its input to b
+// and reports whether the appended content differs from s.
+func processURLOnto(s string, norm bool, b *bytes.Buffer) bool {
+	b.Grow(len(s) + 16)
+	written := 0
+	// The byte loop below assumes that all URLs use UTF-8 as the
+	// content-encoding. This is similar to the URI to IRI encoding scheme
+	// defined in section 3.1 of RFC 3987, and behaves the same as the
+	// EcmaScript builtin encodeURIComponent.
+	// It should not cause any misencoding of URLs in pages with
+	// Content-type: text/html;charset=UTF-8.
+	for i, n := 0, len(s); i < n; i++ {
+		c := s[i]
+		switch c {
+		// Single quote and parens are sub-delims in RFC 3986, but we
+		// escape them so the output can be embedded in single
+		// quoted attributes and unquoted CSS url(...) constructs.
+		// Single quotes are reserved in URLs, but are only used in
+		// the obsolete "mark" rule in an appendix in RFC 3986
+		// so can be safely encoded.
+		case '!', '#', '$', '&', '*', '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+			if norm {
+				continue
+			}
+		// Unreserved according to RFC 3986 sec 2.3
+		// "For consistency, percent-encoded octets in the ranges of
+		// ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D),
+		// period (%2E), underscore (%5F), or tilde (%7E) should not be
+		// created by URI producers
+		case '-', '.', '_', '~':
+			continue
+		case '%':
+			// When normalizing do not re-encode valid escapes.
+			if norm && i+2 < len(s) && isHex(s[i+1]) && isHex(s[i+2]) {
+				continue
+			}
+		default:
+			// Unreserved according to RFC 3986 sec 2.3
+			if 'a' <= c && c <= 'z' {
+				continue
+			}
+			if 'A' <= c && c <= 'Z' {
+				continue
+			}
+			if '0' <= c && c <= '9' {
+				continue
+			}
+		}
+		b.WriteString(s[written:i])
+		fmt.Fprintf(b, "%%%02x", c)
+		written = i + 1
+	}
+	b.WriteString(s[written:])
+	return written != 0
+}
+
+// Filters and normalizes srcset values which are comma separated
+// URLs followed by metadata.
+func srcsetFilterAndEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	switch t {
+	case contentTypeSrcset:
+		return s
+	case contentTypeURL:
+		// Normalizing gets rid of all HTML whitespace
+		// which separate the image URL from its metadata.
+		var b bytes.Buffer
+		if processURLOnto(s, true, &b) {
+			s = b.String()
+		}
+		// Additionally, commas separate one source from another.
+		return strings.ReplaceAll(s, ",", "%2c")
+	}
+
+	var b bytes.Buffer
+	written := 0
+	for i := 0; i < len(s); i++ {
+		if s[i] == ',' {
+			filterSrcsetElement(s, written, i, &b)
+			b.WriteString(",")
+			written = i + 1
+		}
+	}
+	filterSrcsetElement(s, written, len(s), &b)
+	return b.String()
+}
+
+// Derived from https://play.golang.org/p/Dhmj7FORT5
+const htmlSpaceAndASCIIAlnumBytes = "\x00\x36\x00\x00\x01\x00\xff\x03\xfe\xff\xff\x07\xfe\xff\xff\x07"
+
+// isHTMLSpace is true iff c is a whitespace character per
+// https://infra.spec.whatwg.org/#ascii-whitespace
+func isHTMLSpace(c byte) bool {
+	return (c <= 0x20) && 0 != (htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)))
+}
+
+func isHTMLSpaceOrASCIIAlnum(c byte) bool {
+	return (c < 0x80) && 0 != (htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)))
+}
+
+func filterSrcsetElement(s string, left int, right int, b *bytes.Buffer) {
+	start := left
+	for start < right && isHTMLSpace(s[start]) {
+		start++
+	}
+	end := right
+	for i := start; i < right; i++ {
+		if isHTMLSpace(s[i]) {
+			end = i
+			break
+		}
+	}
+	if url := s[start:end]; isSafeURL(url) {
+		// If image metadata is only spaces or alnums then
+		// we don't need to URL normalize it.
+		metadataOk := true
+		for i := end; i < right; i++ {
+			if !isHTMLSpaceOrASCIIAlnum(s[i]) {
+				metadataOk = false
+				break
+			}
+		}
+		if metadataOk {
+			b.WriteString(s[left:start])
+			processURLOnto(url, true, b)
+			b.WriteString(s[end:right])
+			return
+		}
+	}
+	b.WriteString("#")
+	b.WriteString(filterFailsafe)
+}
diff --git a/internal/backport/html/template/url_test.go b/internal/backport/html/template/url_test.go
new file mode 100644
index 0000000..75c354e
--- /dev/null
+++ b/internal/backport/html/template/url_test.go
@@ -0,0 +1,169 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"testing"
+)
+
+func TestURLNormalizer(t *testing.T) {
+	tests := []struct {
+		url, want string
+	}{
+		{"", ""},
+		{
+			"http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
+			"http://example.com:80/foo/bar?q=foo%20&bar=x+y#frag",
+		},
+		{" ", "%20"},
+		{"%7c", "%7c"},
+		{"%7C", "%7C"},
+		{"%2", "%252"},
+		{"%", "%25"},
+		{"%z", "%25z"},
+		{"/foo|bar/%5c\u1234", "/foo%7cbar/%5c%e1%88%b4"},
+	}
+	for _, test := range tests {
+		if got := urlNormalizer(test.url); test.want != got {
+			t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.url, test.want, got)
+		}
+		if test.want != urlNormalizer(test.want) {
+			t.Errorf("not idempotent: %q", test.want)
+		}
+	}
+}
+
+func TestURLFilters(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	tests := []struct {
+		name    string
+		escaper func(...interface{}) string
+		escaped string
+	}{
+		{
+			"urlEscaper",
+			urlEscaper,
+			"%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
+				"%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
+				"%20%21%22%23%24%25%26%27%28%29%2a%2b%2c-.%2f" +
+				"0123456789%3a%3b%3c%3d%3e%3f" +
+				"%40ABCDEFGHIJKLMNO" +
+				"PQRSTUVWXYZ%5b%5c%5d%5e_" +
+				"%60abcdefghijklmno" +
+				"pqrstuvwxyz%7b%7c%7d~%7f" +
+				"%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
+		},
+		{
+			"urlNormalizer",
+			urlNormalizer,
+			"%00%01%02%03%04%05%06%07%08%09%0a%0b%0c%0d%0e%0f" +
+				"%10%11%12%13%14%15%16%17%18%19%1a%1b%1c%1d%1e%1f" +
+				"%20!%22#$%25&%27%28%29*+,-./" +
+				"0123456789:;%3c=%3e?" +
+				"@ABCDEFGHIJKLMNO" +
+				"PQRSTUVWXYZ[%5c]%5e_" +
+				"%60abcdefghijklmno" +
+				"pqrstuvwxyz%7b%7c%7d~%7f" +
+				"%c2%a0%c4%80%e2%80%a8%e2%80%a9%ef%bb%bf%f0%9d%84%9e",
+		},
+	}
+
+	for _, test := range tests {
+		if s := test.escaper(input); s != test.escaped {
+			t.Errorf("%s: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+			continue
+		}
+	}
+}
+
+func TestSrcsetFilter(t *testing.T) {
+	tests := []struct {
+		name  string
+		input string
+		want  string
+	}{
+		{
+			"one ok",
+			"http://example.com/img.png",
+			"http://example.com/img.png",
+		},
+		{
+			"one ok with metadata",
+			" /img.png 200w",
+			" /img.png 200w",
+		},
+		{
+			"one bad",
+			"javascript:alert(1) 200w",
+			"#ZgotmplZ",
+		},
+		{
+			"two ok",
+			"foo.png, bar.png",
+			"foo.png, bar.png",
+		},
+		{
+			"left bad",
+			"javascript:alert(1), /foo.png",
+			"#ZgotmplZ, /foo.png",
+		},
+		{
+			"right bad",
+			"/bogus#, javascript:alert(1)",
+			"/bogus#,#ZgotmplZ",
+		},
+	}
+
+	for _, test := range tests {
+		if got := srcsetFilterAndEscaper(test.input); got != test.want {
+			t.Errorf("%s: srcsetFilterAndEscaper(%q) want %q != %q", test.name, test.input, test.want, got)
+		}
+	}
+}
+
+func BenchmarkURLEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		urlEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
+	}
+}
+
+func BenchmarkURLEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		urlEscaper("TheQuickBrownFoxJumpsOverTheLazyDog.")
+	}
+}
+
+func BenchmarkURLNormalizer(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		urlNormalizer("The quick brown fox jumps over the lazy dog.\n")
+	}
+}
+
+func BenchmarkURLNormalizerNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		urlNormalizer("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
+	}
+}
+
+func BenchmarkSrcsetFilter(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		srcsetFilterAndEscaper(" /foo/bar.png 200w, /baz/boo(1).png")
+	}
+}
+
+func BenchmarkSrcsetFilterNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		srcsetFilterAndEscaper("http://example.com:80/foo?q=bar%20&baz=x+y#frag")
+	}
+}
diff --git a/internal/backport/html/template/urlpart_string.go b/internal/backport/html/template/urlpart_string.go
new file mode 100644
index 0000000..813eea9
--- /dev/null
+++ b/internal/backport/html/template/urlpart_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type urlPart"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _urlPart_name = "urlPartNoneurlPartPreQueryurlPartQueryOrFragurlPartUnknown"
+
+var _urlPart_index = [...]uint8{0, 11, 26, 44, 58}
+
+func (i urlPart) String() string {
+	if i >= urlPart(len(_urlPart_index)-1) {
+		return "urlPart(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _urlPart_name[_urlPart_index[i]:_urlPart_index[i+1]]
+}
diff --git a/internal/backport/httpfs/fs.go b/internal/backport/httpfs/fs.go
new file mode 100644
index 0000000..ab6dbac
--- /dev/null
+++ b/internal/backport/httpfs/fs.go
@@ -0,0 +1,94 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP file system request handler
+
+package httpfs
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"strings"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+func isSlashRune(r rune) bool { return r == '/' || r == '\\' }
+
+type ioFS struct {
+	fsys fs.FS
+}
+
+type ioFile struct {
+	file fs.File
+}
+
+func (f ioFS) Open(name string) (http.File, error) {
+	if name == "/" {
+		name = "."
+	} else {
+		name = strings.TrimPrefix(name, "/")
+	}
+	file, err := f.fsys.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return ioFile{file}, nil
+}
+
+func (f ioFile) Close() error               { return f.file.Close() }
+func (f ioFile) Read(b []byte) (int, error) { return f.file.Read(b) }
+func (f ioFile) Stat() (fs.FileInfo, error) { return f.file.Stat() }
+
+var errMissingSeek = errors.New("io.File missing Seek method")
+var errMissingReadDir = errors.New("io.File directory missing ReadDir method")
+
+func (f ioFile) Seek(offset int64, whence int) (int64, error) {
+	s, ok := f.file.(io.Seeker)
+	if !ok {
+		return 0, errMissingSeek
+	}
+	return s.Seek(offset, whence)
+}
+
+func (f ioFile) ReadDir(count int) ([]fs.DirEntry, error) {
+	d, ok := f.file.(fs.ReadDirFile)
+	if !ok {
+		return nil, errMissingReadDir
+	}
+	return d.ReadDir(count)
+}
+
+func (f ioFile) Readdir(count int) ([]fs.FileInfo, error) {
+	d, ok := f.file.(fs.ReadDirFile)
+	if !ok {
+		return nil, errMissingReadDir
+	}
+	var list []fs.FileInfo
+	for {
+		dirs, err := d.ReadDir(count - len(list))
+		for _, dir := range dirs {
+			info, err := dir.Info()
+			if err != nil {
+				// Pretend it doesn't exist, like (*os.File).Readdir does.
+				continue
+			}
+			list = append(list, info)
+		}
+		if err != nil {
+			return list, err
+		}
+		if count < 0 || len(list) >= count {
+			break
+		}
+	}
+	return list, nil
+}
+
+// FS converts fsys to a FileSystem implementation,
+// for use with FileServer and NewFileTransport.
+func FS(fsys fs.FS) http.FileSystem {
+	return ioFS{fsys}
+}
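
FS is the bridge that lets a backported fs.FS be served by net/http, which otherwise only accepts an http.FileSystem. A hedged sketch; the serve helper, route, and port are made up, and fsys stands for any fs.FS value:

	import (
		"log"
		"net/http"

		"golang.org/x/website/internal/backport/httpfs"
		"golang.org/x/website/internal/backport/io/fs"
	)

	func serve(fsys fs.FS) {
		// Expose the contents of fsys under /static/ with the standard file server.
		http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(httpfs.FS(fsys))))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}
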
diff --git a/internal/backport/io/fs/fs.go b/internal/backport/io/fs/fs.go
new file mode 100644
index 0000000..b9eae56
--- /dev/null
+++ b/internal/backport/io/fs/fs.go
@@ -0,0 +1,143 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fs defines basic interfaces to a file system.
+// A file system can be provided by the host operating system
+// but also by other packages.
+package fs
+
+import (
+	"os"
+	"unicode/utf8"
+)
+
+// An FS provides access to a hierarchical file system.
+//
+// The FS interface is the minimum implementation required of the file system.
+// A file system may implement additional interfaces,
+// such as ReadFileFS, to provide additional or optimized functionality.
+type FS interface {
+	// Open opens the named file.
+	//
+	// When Open returns an error, it should be of type *PathError
+	// with the Op field set to "open", the Path field set to name,
+	// and the Err field describing the problem.
+	//
+	// Open should reject attempts to open names that do not satisfy
+	// ValidPath(name), returning a *PathError with Err set to
+	// ErrInvalid or ErrNotExist.
+	Open(name string) (File, error)
+}
+
+// ValidPath reports whether the given path name
+// is valid for use in a call to Open.
+//
+// Path names passed to open are UTF-8-encoded,
+// unrooted, slash-separated sequences of path elements, like “x/y/z”.
+// Path names must not contain an element that is “.” or “..” or the empty string,
+// except for the special case that the root directory is named “.”.
+// Paths must not start or end with a slash: “/x” and “x/” are invalid.
+//
+// Note that paths are slash-separated on all systems, even Windows.
+// Paths containing other characters such as backslash and colon
+// are accepted as valid, but those characters must never be
+// interpreted by an FS implementation as path element separators.
+func ValidPath(name string) bool {
+	if !utf8.ValidString(name) {
+		return false
+	}
+
+	if name == "." {
+		// special case
+		return true
+	}
+
+	// Iterate over elements in name, checking each.
+	for {
+		i := 0
+		for i < len(name) && name[i] != '/' {
+			i++
+		}
+		elem := name[:i]
+		if elem == "" || elem == "." || elem == ".." {
+			return false
+		}
+		if i == len(name) {
+			return true // reached clean ending
+		}
+		name = name[i+1:]
+	}
+}
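
The Open contract above asks implementations to reject names that fail ValidPath. A sketch of a wrapper that enforces this; pathCheckFS is a made-up name, not part of this package, and the backported package is assumed to be imported as fs:

	// pathCheckFS rejects invalid names before delegating to an inner FS.
	type pathCheckFS struct{ inner fs.FS }

	func (f pathCheckFS) Open(name string) (fs.File, error) {
		if !fs.ValidPath(name) {
			return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
		}
		return f.inner.Open(name)
	}
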
+
+// A ReadDirFile is a directory file whose entries can be read with the ReadDir method.
+// Every directory file should implement this interface.
+// (It is permissible for any file to implement this interface,
+// but if so ReadDir should return an error for non-directories.)
+type ReadDirFile interface {
+	File
+
+	// ReadDir reads the contents of the directory and returns
+	// a slice of up to n DirEntry values in directory order.
+	// Subsequent calls on the same file will yield further DirEntry values.
+	//
+	// If n > 0, ReadDir returns at most n DirEntry structures.
+	// In this case, if ReadDir returns an empty slice, it will return
+	// a non-nil error explaining why.
+	// At the end of a directory, the error is io.EOF.
+	//
+	// If n <= 0, ReadDir returns all the DirEntry values from the directory
+	// in a single slice. In this case, if ReadDir succeeds (reads all the way
+	// to the end of the directory), it returns the slice and a nil error.
+	// If it encounters an error before the end of the directory,
+	// ReadDir returns the DirEntry list read until that point and a non-nil error.
+	ReadDir(n int) ([]DirEntry, error)
+}
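
A short sketch of the n > 0 mode described above: draining a directory in fixed-size batches until io.EOF. listAll is illustrative only; dir stands for any ReadDirFile, and fmt and io are assumed imports:

	func listAll(dir fs.ReadDirFile) error {
		for {
			entries, err := dir.ReadDir(16)
			for _, e := range entries {
				fmt.Println(e.Name(), e.IsDir())
			}
			if err == io.EOF {
				return nil // end of directory reached
			}
			if err != nil {
				return err
			}
		}
	}
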
+
+// Generic file system errors.
+// Errors returned by file systems can be tested against these errors
+// using errors.Is.
+var (
+	ErrInvalid    = os.ErrInvalid    // "invalid argument"
+	ErrPermission = os.ErrPermission // "permission denied"
+	ErrExist      = os.ErrExist      // "file already exists"
+	ErrNotExist   = os.ErrNotExist   // "file does not exist"
+	ErrClosed     = os.ErrClosed     // "file already closed"
+)
+
+func errInvalid() error    { return os.ErrInvalid }
+func errPermission() error { return os.ErrPermission }
+func errExist() error      { return os.ErrExist }
+func errNotExist() error   { return os.ErrNotExist }
+func errClosed() error     { return os.ErrClosed }
+
+// A FileInfo describes a file and is returned by Stat.
+type FileInfo = os.FileInfo
+
+// A FileMode represents a file's mode and permission bits.
+// The bits have the same definition on all systems, so that
+// information about files can be moved from one system
+// to another portably. Not all bits apply to all systems.
+// The only required bit is ModeDir for directories.
+type FileMode = os.FileMode
+
+const (
+	ModeDir        = os.ModeDir
+	ModeAppend     = os.ModeAppend
+	ModeExclusive  = os.ModeExclusive
+	ModeTemporary  = os.ModeTemporary
+	ModeSymlink    = os.ModeSymlink
+	ModeDevice     = os.ModeDevice
+	ModeNamedPipe  = os.ModeNamedPipe
+	ModeSocket     = os.ModeSocket
+	ModeSetuid     = os.ModeSetuid
+	ModeSetgid     = os.ModeSetgid
+	ModeCharDevice = os.ModeCharDevice
+	ModeSticky     = os.ModeSticky
+	ModeIrregular  = os.ModeIrregular
+	ModeType       = os.ModeType
+	ModePerm       = os.ModePerm
+)
+
+// PathError records an error and the operation and file path that caused it.
+type PathError = os.PathError
diff --git a/internal/backport/io/fs/fs_test.go b/internal/backport/io/fs/fs_test.go
new file mode 100644
index 0000000..9570c04
--- /dev/null
+++ b/internal/backport/io/fs/fs_test.go
@@ -0,0 +1,50 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"testing"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+)
+
+var isValidPathTests = []struct {
+	name string
+	ok   bool
+}{
+	{".", true},
+	{"x", true},
+	{"x/y", true},
+
+	{"", false},
+	{"..", false},
+	{"/", false},
+	{"x/", false},
+	{"/x", false},
+	{"x/y/", false},
+	{"/x/y", false},
+	{"./", false},
+	{"./x", false},
+	{"x/.", false},
+	{"x/./y", false},
+	{"../", false},
+	{"../x", false},
+	{"x/..", false},
+	{"x/../y", false},
+	{"x//y", false},
+	{`x\`, true},
+	{`x\y`, true},
+	{`x:y`, true},
+	{`\x`, true},
+}
+
+func TestValidPath(t *testing.T) {
+	for _, tt := range isValidPathTests {
+		ok := ValidPath(tt.name)
+		if ok != tt.ok {
+			t.Errorf("ValidPath(%q) = %v, want %v", tt.name, ok, tt.ok)
+		}
+	}
+}
diff --git a/internal/backport/io/fs/glob.go b/internal/backport/io/fs/glob.go
new file mode 100644
index 0000000..7caa6ad
--- /dev/null
+++ b/internal/backport/io/fs/glob.go
@@ -0,0 +1,119 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+	"golang.org/x/website/internal/backport/path"
+)
+
+// A GlobFS is a file system with a Glob method.
+type GlobFS interface {
+	FS
+
+	// Glob returns the names of all files matching pattern,
+	// providing an implementation of the top-level
+	// Glob function.
+	Glob(pattern string) ([]string, error)
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in path.Match. The pattern may describe hierarchical names such as
+// usr/*/bin/ed.
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is path.ErrBadPattern, reporting that
+// the pattern is malformed.
+//
+// If fs implements GlobFS, Glob calls fs.Glob.
+// Otherwise, Glob uses ReadDir to traverse the directory tree
+// and look for matches for the pattern.
+func Glob(fsys FS, pattern string) (matches []string, err error) {
+	if fsys, ok := fsys.(GlobFS); ok {
+		return fsys.Glob(pattern)
+	}
+
+	// Check pattern is well-formed.
+	if _, err := path.Match(pattern, ""); err != nil {
+		return nil, err
+	}
+	if !hasMeta(pattern) {
+		if _, err = Stat(fsys, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := path.Split(pattern)
+	dir = cleanGlobPath(dir)
+
+	if !hasMeta(dir) {
+		return glob(fsys, dir, file, nil)
+	}
+
+	// Prevent infinite recursion. See issue 15879.
+	if dir == pattern {
+		return nil, path.ErrBadPattern
+	}
+
+	var m []string
+	m, err = Glob(fsys, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fsys, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
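+
+// Editorial sketch (not part of the upstream io/fs source): typical use of
+// Glob, assuming fsys is any FS (for example osfs.DirFS(".") from this
+// backport); the "static/*.css" pattern is purely illustrative.
+//
+//	matches, err := fs.Glob(fsys, "static/*.css")
+//	if err != nil {
+//		log.Fatal(err) // only possible error: path.ErrBadPattern
+//	}
+//	for _, name := range matches {
+//		fmt.Println(name)
+//	}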
+
+// cleanGlobPath prepares path for glob matching.
+func cleanGlobPath(path string) string {
+	switch path {
+	case "":
+		return "."
+	default:
+		return path[0 : len(path)-1] // chop off trailing separator
+	}
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches, returning the updated slice.
+// If the directory cannot be opened, glob returns the existing matches.
+// New matches are added in lexicographical order.
+func glob(fs FS, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	infos, err := ReadDir(fs, dir)
+	if err != nil {
+		return // ignore I/O error
+	}
+
+	for _, info := range infos {
+		n := info.Name()
+		matched, err := path.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, path.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by path.Match.
+func hasMeta(path string) bool {
+	for i := 0; i < len(path); i++ {
+		switch path[i] {
+		case '*', '?', '[', '\\':
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/backport/io/fs/glob_test.go b/internal/backport/io/fs/glob_test.go
new file mode 100644
index 0000000..6e8b68a
--- /dev/null
+++ b/internal/backport/io/fs/glob_test.go
@@ -0,0 +1,88 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"testing"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/osfs"
+	"golang.org/x/website/internal/backport/path"
+)
+
+var globTests = []struct {
+	fs              FS
+	pattern, result string
+}{
+	{osfs.DirFS("."), "glob.go", "glob.go"},
+	{osfs.DirFS("."), "gl?b.go", "glob.go"},
+	{osfs.DirFS("."), `gl\ob.go`, "glob.go"},
+	{osfs.DirFS("."), "*", "glob.go"},
+	{osfs.DirFS(".."), "*/glob.go", "fs/glob.go"},
+}
+
+func TestGlob(t *testing.T) {
+	for _, tt := range globTests {
+		matches, err := Glob(tt.fs, tt.pattern)
+		if err != nil {
+			t.Errorf("Glob error for %q: %s", tt.pattern, err)
+			continue
+		}
+		if !contains(matches, tt.result) {
+			t.Errorf("Glob(%#q) = %#v want %v", tt.pattern, matches, tt.result)
+		}
+	}
+	for _, pattern := range []string{"no_match", "../*/no_match", `\*`} {
+		matches, err := Glob(osfs.DirFS("."), pattern)
+		if err != nil {
+			t.Errorf("Glob error for %q: %s", pattern, err)
+			continue
+		}
+		if len(matches) != 0 {
+			t.Errorf("Glob(%#q) = %#v want []", pattern, matches)
+		}
+	}
+}
+
+func TestGlobError(t *testing.T) {
+	bad := []string{`[]`, `nonexist/[]`}
+	for _, pattern := range bad {
+		_, err := Glob(osfs.DirFS("."), pattern)
+		if err != path.ErrBadPattern {
+			t.Errorf("Glob(fs, %#q) returned err=%v, want path.ErrBadPattern", pattern, err)
+		}
+	}
+}
+
+// contains reports whether vector contains the string s.
+func contains(vector []string, s string) bool {
+	for _, elem := range vector {
+		if elem == s {
+			return true
+		}
+	}
+	return false
+}
+
+type globOnly struct{ GlobFS }
+
+func (globOnly) Open(name string) (File, error) { return nil, ErrNotExist }
+
+func TestGlobMethod(t *testing.T) {
+	check := func(desc string, names []string, err error) {
+		t.Helper()
+		if err != nil || len(names) != 1 || names[0] != "hello.txt" {
+			t.Errorf("Glob(%s) = %v, %v, want %v, nil", desc, names, err, []string{"hello.txt"})
+		}
+	}
+
+	// Test that Glob uses the method when present.
+	names, err := Glob(globOnly{testFsys}, "*.txt")
+	check("globOnly", names, err)
+
+	// Test that Glob uses Open when the method is not present.
+	names, err = Glob(openOnly{testFsys}, "*.txt")
+	check("openOnly", names, err)
+}
diff --git a/internal/backport/io/fs/go115.go b/internal/backport/io/fs/go115.go
new file mode 100644
index 0000000..b5e74c5
--- /dev/null
+++ b/internal/backport/io/fs/go115.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+// +build !go1.16
+
+package fs
+
+// A File provides access to a single file.
+// The File interface is the minimum implementation required of the file.
+// Directory files should also implement ReadDirFile.
+// A file may implement io.ReaderAt or io.Seeker as optimizations.
+type File interface {
+	Stat() (FileInfo, error)
+	Read([]byte) (int, error)
+	Close() error
+}
+
+// A DirEntry is an entry read from a directory
+// (using the ReadDir function or a ReadDirFile's ReadDir method).
+type DirEntry interface {
+	// Name returns the name of the file (or subdirectory) described by the entry.
+	// This name is only the final element of the path (the base name), not the entire path.
+	// For example, Name would return "hello.go" not "/home/gopher/hello.go".
+	Name() string
+
+	// IsDir reports whether the entry describes a directory.
+	IsDir() bool
+
+	// Type returns the type bits for the entry.
+	// The type bits are a subset of the usual FileMode bits, those returned by the FileMode.Type method.
+	Type() FileMode
+
+	// Info returns the FileInfo for the file or subdirectory described by the entry.
+	// The returned FileInfo may be from the time of the original directory read
+	// or from the time of the call to Info. If the file has been removed or renamed
+	// since the directory read, Info may return an error satisfying errors.Is(err, ErrNotExist).
+	// If the entry denotes a symbolic link, Info reports the information about the link itself,
+	// not the link's target.
+	Info() (FileInfo, error)
+}
diff --git a/internal/backport/io/fs/go116.go b/internal/backport/io/fs/go116.go
new file mode 100644
index 0000000..d4b302a
--- /dev/null
+++ b/internal/backport/io/fs/go116.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package fs
+
+import "io/fs"
+
+type File = fs.File
+type DirEntry = fs.DirEntry
diff --git a/internal/backport/io/fs/readdir.go b/internal/backport/io/fs/readdir.go
new file mode 100644
index 0000000..0375101
--- /dev/null
+++ b/internal/backport/io/fs/readdir.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+	"errors"
+	"sort"
+)
+
+// ReadDirFS is the interface implemented by a file system
+// that provides an optimized implementation of ReadDir.
+type ReadDirFS interface {
+	FS
+
+	// ReadDir reads the named directory
+	// and returns a list of directory entries sorted by filename.
+	ReadDir(name string) ([]DirEntry, error)
+}
+
+// ReadDir reads the named directory
+// and returns a list of directory entries sorted by filename.
+//
+// If fs implements ReadDirFS, ReadDir calls fs.ReadDir.
+// Otherwise ReadDir calls fs.Open and uses ReadDir and Close
+// on the returned file.
+func ReadDir(fsys FS, name string) ([]DirEntry, error) {
+	if fsys, ok := fsys.(ReadDirFS); ok {
+		return fsys.ReadDir(name)
+	}
+
+	file, err := fsys.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	dir, ok := file.(ReadDirFile)
+	if !ok {
+		return nil, &PathError{Op: "readdir", Path: name, Err: errors.New("not implemented")}
+	}
+
+	list, err := dir.ReadDir(-1)
+	sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
+	return list, err
+}
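+
+// Editorial sketch (not part of the upstream io/fs source): listing a
+// directory through the generic helper, assuming fsys is an FS; the "docs"
+// name is purely illustrative.
+//
+//	entries, err := fs.ReadDir(fsys, "docs")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, e := range entries {
+//		fmt.Println(e.Name(), e.IsDir())
+//	}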
+
+// dirInfo is a DirEntry based on a FileInfo.
+type dirInfo struct {
+	fileInfo FileInfo
+}
+
+func (di dirInfo) IsDir() bool {
+	return di.fileInfo.IsDir()
+}
+
+func (di dirInfo) Type() FileMode {
+	return di.fileInfo.Mode() & ModeType
+}
+
+func (di dirInfo) Info() (FileInfo, error) {
+	return di.fileInfo, nil
+}
+
+func (di dirInfo) Name() string {
+	return di.fileInfo.Name()
+}
+
+// FileInfoToDirEntry returns a DirEntry that returns information from info.
+// If info is nil, FileInfoToDirEntry returns nil.
+func FileInfoToDirEntry(info FileInfo) DirEntry {
+	if info == nil {
+		return nil
+	}
+	return dirInfo{fileInfo: info}
+}
diff --git a/internal/backport/io/fs/readdir_test.go b/internal/backport/io/fs/readdir_test.go
new file mode 100644
index 0000000..a30b9a3
--- /dev/null
+++ b/internal/backport/io/fs/readdir_test.go
@@ -0,0 +1,94 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/testing/fstest"
+)
+
+type readDirOnly struct{ ReadDirFS }
+
+func (readDirOnly) Open(name string) (File, error) { return nil, ErrNotExist }
+
+func TestReadDir(t *testing.T) {
+	check := func(desc string, dirs []DirEntry, err error) {
+		t.Helper()
+		if err != nil || len(dirs) != 2 || dirs[0].Name() != "hello.txt" || dirs[1].Name() != "sub" {
+			var names []string
+			for _, d := range dirs {
+				names = append(names, d.Name())
+			}
+			t.Errorf("ReadDir(%s) = %v, %v, want %v, nil", desc, names, err, []string{"hello.txt", "sub"})
+		}
+	}
+
+	// Test that ReadDir uses the method when present.
+	dirs, err := ReadDir(readDirOnly{testFsys}, ".")
+	check("readDirOnly", dirs, err)
+
+	// Test that ReadDir uses Open when the method is not present.
+	dirs, err = ReadDir(openOnly{testFsys}, ".")
+	check("openOnly", dirs, err)
+
+	// Test that ReadDir on Sub of . works (sub_test checks non-trivial subs).
+	sub, err := Sub(testFsys, ".")
+	if err != nil {
+		t.Fatal(err)
+	}
+	dirs, err = ReadDir(sub, ".")
+	check("sub(.)", dirs, err)
+}
+
+func TestFileInfoToDirEntry(t *testing.T) {
+	testFs := fstest.MapFS{
+		"notadir.txt": {
+			Data:    []byte("hello, world"),
+			Mode:    0,
+			ModTime: time.Now(),
+			Sys:     &sysValue,
+		},
+		"adir": {
+			Data:    nil,
+			Mode:    os.ModeDir,
+			ModTime: time.Now(),
+			Sys:     &sysValue,
+		},
+	}
+
+	tests := []struct {
+		path     string
+		wantMode FileMode
+		wantDir  bool
+	}{
+		{path: "notadir.txt", wantMode: 0, wantDir: false},
+		{path: "adir", wantMode: os.ModeDir, wantDir: true},
+	}
+
+	for _, test := range tests {
+		test := test
+		t.Run(test.path, func(t *testing.T) {
+			fi, err := Stat(testFs, test.path)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			dirEntry := FileInfoToDirEntry(fi)
+			if g, w := dirEntry.Type(), test.wantMode; g != w {
+				t.Errorf("FileMode mismatch: got=%v, want=%v", g, w)
+			}
+			if g, w := dirEntry.Name(), test.path; g != w {
+				t.Errorf("Name mismatch: got=%v, want=%v", g, w)
+			}
+			if g, w := dirEntry.IsDir(), test.wantDir; g != w {
+				t.Errorf("IsDir mismatch: got=%v, want=%v", g, w)
+			}
+		})
+	}
+}
diff --git a/internal/backport/io/fs/readfile.go b/internal/backport/io/fs/readfile.go
new file mode 100644
index 0000000..d3c181c
--- /dev/null
+++ b/internal/backport/io/fs/readfile.go
@@ -0,0 +1,66 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import "io"
+
+// ReadFileFS is the interface implemented by a file system
+// that provides an optimized implementation of ReadFile.
+type ReadFileFS interface {
+	FS
+
+	// ReadFile reads the named file and returns its contents.
+	// A successful call returns a nil error, not io.EOF.
+	// (Because ReadFile reads the whole file, the expected EOF
+	// from the final Read is not treated as an error to be reported.)
+	//
+	// The caller is permitted to modify the returned byte slice.
+	// This method should return a copy of the underlying data.
+	ReadFile(name string) ([]byte, error)
+}
+
+// ReadFile reads the named file from the file system fs and returns its contents.
+// A successful call returns a nil error, not io.EOF.
+// (Because ReadFile reads the whole file, the expected EOF
+// from the final Read is not treated as an error to be reported.)
+//
+// If fs implements ReadFileFS, ReadFile calls fs.ReadFile.
+// Otherwise ReadFile calls fs.Open and uses Read and Close
+// on the returned file.
+func ReadFile(fsys FS, name string) ([]byte, error) {
+	if fsys, ok := fsys.(ReadFileFS); ok {
+		return fsys.ReadFile(name)
+	}
+
+	file, err := fsys.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	var size int
+	if info, err := file.Stat(); err == nil {
+		size64 := info.Size()
+		if int64(int(size64)) == size64 {
+			size = int(size64)
+		}
+	}
+
+	data := make([]byte, 0, size+1)
+	for {
+		if len(data) >= cap(data) {
+			d := append(data[:cap(data)], 0)
+			data = d[:len(data)]
+		}
+		n, err := file.Read(data[len(data):cap(data)])
+		data = data[:len(data)+n]
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return data, err
+		}
+	}
+}
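+
+// Editorial sketch (not part of the upstream io/fs source): the loop above
+// grows data in place so Read fills spare capacity directly; callers only see
+// the final slice. Assuming fsys is an FS and "hello.txt" exists:
+//
+//	data, err := fs.ReadFile(fsys, "hello.txt")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s\n", data)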
diff --git a/internal/backport/io/fs/readfile_test.go b/internal/backport/io/fs/readfile_test.go
new file mode 100644
index 0000000..dbc0b53
--- /dev/null
+++ b/internal/backport/io/fs/readfile_test.go
@@ -0,0 +1,60 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"testing"
+	"time"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/testing/fstest"
+)
+
+var testFsys = fstest.MapFS{
+	"hello.txt": {
+		Data:    []byte("hello, world"),
+		Mode:    0456,
+		ModTime: time.Now(),
+		Sys:     &sysValue,
+	},
+	"sub/goodbye.txt": {
+		Data:    []byte("goodbye, world"),
+		Mode:    0456,
+		ModTime: time.Now(),
+		Sys:     &sysValue,
+	},
+}
+
+var sysValue int
+
+type readFileOnly struct{ ReadFileFS }
+
+func (readFileOnly) Open(name string) (File, error) { return nil, ErrNotExist }
+
+type openOnly struct{ FS }
+
+func TestReadFile(t *testing.T) {
+	// Test that ReadFile uses the method when present.
+	data, err := ReadFile(readFileOnly{testFsys}, "hello.txt")
+	if string(data) != "hello, world" || err != nil {
+		t.Fatalf(`ReadFile(readFileOnly, "hello.txt") = %q, %v, want %q, nil`, data, err, "hello, world")
+	}
+
+	// Test that ReadFile uses Open when the method is not present.
+	data, err = ReadFile(openOnly{testFsys}, "hello.txt")
+	if string(data) != "hello, world" || err != nil {
+		t.Fatalf(`ReadFile(openOnly, "hello.txt") = %q, %v, want %q, nil`, data, err, "hello, world")
+	}
+
+	// Test that ReadFile on Sub of . works (sub_test checks non-trivial subs).
+	sub, err := Sub(testFsys, ".")
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err = ReadFile(sub, "hello.txt")
+	if string(data) != "hello, world" || err != nil {
+		t.Fatalf(`ReadFile(sub(.), "hello.txt") = %q, %v, want %q, nil`, data, err, "hello, world")
+	}
+}
diff --git a/internal/backport/io/fs/stat.go b/internal/backport/io/fs/stat.go
new file mode 100644
index 0000000..735a6e3
--- /dev/null
+++ b/internal/backport/io/fs/stat.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+// A StatFS is a file system with a Stat method.
+type StatFS interface {
+	FS
+
+	// Stat returns a FileInfo describing the file.
+	// If there is an error, it should be of type *PathError.
+	Stat(name string) (FileInfo, error)
+}
+
+// Stat returns a FileInfo describing the named file from the file system.
+//
+// If fs implements StatFS, Stat calls fs.Stat.
+// Otherwise, Stat opens the file to stat it.
+func Stat(fsys FS, name string) (FileInfo, error) {
+	if fsys, ok := fsys.(StatFS); ok {
+		return fsys.Stat(name)
+	}
+
+	file, err := fsys.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return file.Stat()
+}
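+
+// Editorial sketch (not part of the upstream io/fs source): the StatFS fast
+// path and the Open fallback yield the same kind of FileInfo, so callers need
+// not care which was used. Assuming fsys is an FS:
+//
+//	info, err := fs.Stat(fsys, "hello.txt")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(info.Name(), info.Size(), info.Mode())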
diff --git a/internal/backport/io/fs/stat_test.go b/internal/backport/io/fs/stat_test.go
new file mode 100644
index 0000000..4085e36
--- /dev/null
+++ b/internal/backport/io/fs/stat_test.go
@@ -0,0 +1,37 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"fmt"
+	"testing"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+)
+
+type statOnly struct{ StatFS }
+
+func (statOnly) Open(name string) (File, error) { return nil, ErrNotExist }
+
+func TestStat(t *testing.T) {
+	check := func(desc string, info FileInfo, err error) {
+		t.Helper()
+		if err != nil || info == nil || info.Mode() != 0456 {
+			infoStr := "<nil>"
+			if info != nil {
+				infoStr = fmt.Sprintf("FileInfo(Mode: %#o)", info.Mode())
+			}
+			t.Fatalf("Stat(%s) = %v, %v, want Mode:0456, nil", desc, infoStr, err)
+		}
+	}
+
+	// Test that Stat uses the method when present.
+	info, err := Stat(statOnly{testFsys}, "hello.txt")
+	check("statOnly", info, err)
+
+	// Test that Stat uses Open when the method is not present.
+	info, err = Stat(openOnly{testFsys}, "hello.txt")
+	check("openOnly", info, err)
+}
diff --git a/internal/backport/io/fs/sub.go b/internal/backport/io/fs/sub.go
new file mode 100644
index 0000000..e8a65b5
--- /dev/null
+++ b/internal/backport/io/fs/sub.go
@@ -0,0 +1,139 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+	"errors"
+
+	"golang.org/x/website/internal/backport/path"
+)
+
+// A SubFS is a file system with a Sub method.
+type SubFS interface {
+	FS
+
+	// Sub returns an FS corresponding to the subtree rooted at dir.
+	Sub(dir string) (FS, error)
+}
+
+// Sub returns an FS corresponding to the subtree rooted at fsys's dir.
+//
+// If fs implements SubFS, Sub returns fsys.Sub(dir).
+// Otherwise, if dir is ".", Sub returns fsys unchanged.
+// Otherwise, Sub returns a new FS implementation sub that,
+// in effect, implements sub.Open(dir) as fsys.Open(path.Join(dir, name)).
+// The implementation also translates calls to ReadDir, ReadFile, and Glob appropriately.
+//
+// Note that Sub(os.DirFS("/"), "prefix") is equivalent to os.DirFS("/prefix")
+// and that neither of them guarantees to avoid operating system
+// accesses outside "/prefix", because the implementation of os.DirFS
+// does not check for symbolic links inside "/prefix" that point to
+// other directories. That is, os.DirFS is not a general substitute for a
+// chroot-style security mechanism, and Sub does not change that fact.
+func Sub(fsys FS, dir string) (FS, error) {
+	if !ValidPath(dir) {
+		return nil, &PathError{Op: "sub", Path: dir, Err: errors.New("invalid name")}
+	}
+	if dir == "." {
+		return fsys, nil
+	}
+	if fsys, ok := fsys.(SubFS); ok {
+		return fsys.Sub(dir)
+	}
+	return &subFS{fsys, dir}, nil
+}
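+
+// Editorial sketch (not part of the upstream io/fs source): scoping an FS to
+// a subdirectory, assuming fsys contains a hypothetical "static/css/site.css".
+//
+//	static, err := fs.Sub(fsys, "static")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Equivalent to fsys.Open("static/css/site.css") under the hood.
+//	data, err := fs.ReadFile(static, "css/site.css")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s\n", data)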
+
+type subFS struct {
+	fsys FS
+	dir  string
+}
+
+// fullName maps name to the fully-qualified name dir/name.
+func (f *subFS) fullName(op string, name string) (string, error) {
+	if !ValidPath(name) {
+		return "", &PathError{Op: op, Path: name, Err: errors.New("invalid name")}
+	}
+	return path.Join(f.dir, name), nil
+}
+
+// shorten maps name, which should start with f.dir, back to the suffix after f.dir.
+func (f *subFS) shorten(name string) (rel string, ok bool) {
+	if name == f.dir {
+		return ".", true
+	}
+	if len(name) >= len(f.dir)+2 && name[len(f.dir)] == '/' && name[:len(f.dir)] == f.dir {
+		return name[len(f.dir)+1:], true
+	}
+	return "", false
+}
+
+// fixErr shortens any reported names in PathErrors by stripping f.dir.
+func (f *subFS) fixErr(err error) error {
+	if e, ok := err.(*PathError); ok {
+		if short, ok := f.shorten(e.Path); ok {
+			e.Path = short
+		}
+	}
+	return err
+}
+
+func (f *subFS) Open(name string) (File, error) {
+	full, err := f.fullName("open", name)
+	if err != nil {
+		return nil, err
+	}
+	file, err := f.fsys.Open(full)
+	return file, f.fixErr(err)
+}
+
+func (f *subFS) ReadDir(name string) ([]DirEntry, error) {
+	full, err := f.fullName("read", name)
+	if err != nil {
+		return nil, err
+	}
+	dir, err := ReadDir(f.fsys, full)
+	return dir, f.fixErr(err)
+}
+
+func (f *subFS) ReadFile(name string) ([]byte, error) {
+	full, err := f.fullName("read", name)
+	if err != nil {
+		return nil, err
+	}
+	data, err := ReadFile(f.fsys, full)
+	return data, f.fixErr(err)
+}
+
+func (f *subFS) Glob(pattern string) ([]string, error) {
+	// Check pattern is well-formed.
+	if _, err := path.Match(pattern, ""); err != nil {
+		return nil, err
+	}
+	if pattern == "." {
+		return []string{"."}, nil
+	}
+
+	full := f.dir + "/" + pattern
+	list, err := Glob(f.fsys, full)
+	for i, name := range list {
+		name, ok := f.shorten(name)
+		if !ok {
+			return nil, errors.New("invalid result from inner fsys Glob: " + name + " not in " + f.dir) // can't use fmt in this package
+		}
+		list[i] = name
+	}
+	return list, f.fixErr(err)
+}
+
+func (f *subFS) Sub(dir string) (FS, error) {
+	if dir == "." {
+		return f, nil
+	}
+	full, err := f.fullName("sub", dir)
+	if err != nil {
+		return nil, err
+	}
+	return &subFS{f.fsys, full}, nil
+}
diff --git a/internal/backport/io/fs/sub_test.go b/internal/backport/io/fs/sub_test.go
new file mode 100644
index 0000000..0a8e7fb
--- /dev/null
+++ b/internal/backport/io/fs/sub_test.go
@@ -0,0 +1,58 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"testing"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+)
+
+type subOnly struct{ SubFS }
+
+func (subOnly) Open(name string) (File, error) { return nil, ErrNotExist }
+
+func TestSub(t *testing.T) {
+	check := func(desc string, sub FS, err error) {
+		t.Helper()
+		if err != nil {
+			t.Errorf("Sub(%s): %v", desc, err)
+			return
+		}
+		data, err := ReadFile(sub, "goodbye.txt")
+		if string(data) != "goodbye, world" || err != nil {
+			t.Errorf(`ReadFile(%s, "goodbye.txt") = %q, %v, want %q, nil`, desc, string(data), err, "goodbye, world")
+		}
+
+		dirs, err := ReadDir(sub, ".")
+		if err != nil || len(dirs) != 1 || dirs[0].Name() != "goodbye.txt" {
+			var names []string
+			for _, d := range dirs {
+				names = append(names, d.Name())
+			}
+			t.Errorf(`ReadDir(%s, ".") = %v, %v, want %v, nil`, desc, names, err, []string{"goodbye.txt"})
+		}
+	}
+
+	// Test that Sub uses the method when present.
+	sub, err := Sub(subOnly{testFsys}, "sub")
+	check("subOnly", sub, err)
+
+	// Test that Sub uses Open when the method is not present.
+	sub, err = Sub(openOnly{testFsys}, "sub")
+	check("openOnly", sub, err)
+
+	_, err = sub.Open("nonexist")
+	if err == nil {
+		t.Fatal("Open(nonexist): succeeded")
+	}
+	pe, ok := err.(*PathError)
+	if !ok {
+		t.Fatalf("Open(nonexist): error is %T, want *PathError", err)
+	}
+	if pe.Path != "nonexist" {
+		t.Fatalf("Open(nonexist): err.Path = %q, want %q", pe.Path, "nonexist")
+	}
+}
diff --git a/internal/backport/io/fs/walk.go b/internal/backport/io/fs/walk.go
new file mode 100644
index 0000000..2b09026
--- /dev/null
+++ b/internal/backport/io/fs/walk.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+	"errors"
+
+	"golang.org/x/website/internal/backport/path"
+)
+
+// SkipDir is used as a return value from WalkDirFuncs to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir = errors.New("skip this directory")
+
+// WalkDirFunc is the type of the function called by WalkDir to visit
+// each file or directory.
+//
+// The path argument contains the argument to WalkDir as a prefix.
+// That is, if WalkDir is called with root argument "dir" and finds a file
+// named "a" in that directory, the walk function will be called with
+// argument "dir/a".
+//
+// The d argument is the fs.DirEntry for the named path.
+//
+// The error result returned by the function controls how WalkDir
+// continues. If the function returns the special value SkipDir, WalkDir
+// skips the current directory (path if d.IsDir() is true, otherwise
+// path's parent directory). Otherwise, if the function returns a non-nil
+// error, WalkDir stops entirely and returns that error.
+//
+// The err argument reports an error related to path, signaling that
+// WalkDir will not walk into that directory. The function can decide how
+// to handle that error; as described earlier, returning the error will
+// cause WalkDir to stop walking the entire tree.
+//
+// WalkDir calls the function with a non-nil err argument in two cases.
+//
+// First, if the initial fs.Stat on the root directory fails, WalkDir
+// calls the function with path set to root, d set to nil, and err set to
+// the error from fs.Stat.
+//
+// Second, if a directory's ReadDir method fails, WalkDir calls the
+// function with path set to the directory's path, d set to an
+// fs.DirEntry describing the directory, and err set to the error from
+// ReadDir. In this second case, the function is called twice with the
+// path of the directory: the first call is before the directory read is
+// attempted and has err set to nil, giving the function a chance to
+// return SkipDir and avoid the ReadDir entirely. The second call is
+// after a failed ReadDir and reports the error from ReadDir.
+// (If ReadDir succeeds, there is no second call.)
+//
+// The differences between WalkDirFunc and filepath.WalkFunc are:
+//
+//   - The second argument has type fs.DirEntry instead of fs.FileInfo.
+//   - The function is called before reading a directory, to allow SkipDir
+//     to bypass the directory read entirely.
+//   - If a directory read fails, the function is called a second time
+//     for that directory to report the error.
+//
+type WalkDirFunc func(path string, d DirEntry, err error) error
+
+// walkDir recursively descends path, calling walkDirFn.
+func walkDir(fsys FS, name string, d DirEntry, walkDirFn WalkDirFunc) error {
+	if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
+		if err == SkipDir && d.IsDir() {
+			// Successfully skipped directory.
+			err = nil
+		}
+		return err
+	}
+
+	dirs, err := ReadDir(fsys, name)
+	if err != nil {
+		// Second call, to report ReadDir error.
+		err = walkDirFn(name, d, err)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, d1 := range dirs {
+		name1 := path.Join(name, d1.Name())
+		if err := walkDir(fsys, name1, d1, walkDirFn); err != nil {
+			if err == SkipDir {
+				break
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// WalkDir walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the fs.WalkDirFunc documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires WalkDir to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// WalkDir does not follow symbolic links found in directories,
+// but if root itself is a symbolic link, its target will be walked.
+func WalkDir(fsys FS, root string, fn WalkDirFunc) error {
+	info, err := Stat(fsys, root)
+	if err != nil {
+		err = fn(root, nil, err)
+	} else {
+		err = walkDir(fsys, root, &statDirEntry{info}, fn)
+	}
+	if err == SkipDir {
+		return nil
+	}
+	return err
+}
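+
+// Editorial sketch (not part of the upstream io/fs source): a typical
+// WalkDirFunc that skips a hypothetical "vendor" subtree and otherwise
+// propagates errors, assuming fsys is an FS.
+//
+//	err := fs.WalkDir(fsys, ".", func(p string, d fs.DirEntry, err error) error {
+//		if err != nil {
+//			return err // stop on the first error reported for p
+//		}
+//		if d.IsDir() && d.Name() == "vendor" {
+//			return fs.SkipDir // skip the whole directory
+//		}
+//		fmt.Println(p)
+//		return nil
+//	})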
+
+type statDirEntry struct {
+	info FileInfo
+}
+
+func (d *statDirEntry) Name() string            { return d.info.Name() }
+func (d *statDirEntry) IsDir() bool             { return d.info.IsDir() }
+func (d *statDirEntry) Type() FileMode          { return d.info.Mode() & ModeType }
+func (d *statDirEntry) Info() (FileInfo, error) { return d.info, nil }
diff --git a/internal/backport/io/fs/walk_test.go b/internal/backport/io/fs/walk_test.go
new file mode 100644
index 0000000..e8d4be6
--- /dev/null
+++ b/internal/backport/io/fs/walk_test.go
@@ -0,0 +1,126 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs_test
+
+import (
+	"os"
+	"testing"
+
+	pathpkg "golang.org/x/website/internal/backport/path"
+
+	. "golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/testing/fstest"
+)
+
+type Node struct {
+	name    string
+	entries []*Node // nil if the entry is a file
+	mark    int
+}
+
+var tree = &Node{
+	"testdata",
+	[]*Node{
+		{"a", nil, 0},
+		{"b", []*Node{}, 0},
+		{"c", nil, 0},
+		{
+			"d",
+			[]*Node{
+				{"x", nil, 0},
+				{"y", []*Node{}, 0},
+				{
+					"z",
+					[]*Node{
+						{"u", nil, 0},
+						{"v", nil, 0},
+					},
+					0,
+				},
+			},
+			0,
+		},
+	},
+	0,
+}
+
+func walkTree(n *Node, path string, f func(path string, n *Node)) {
+	f(path, n)
+	for _, e := range n.entries {
+		walkTree(e, pathpkg.Join(path, e.name), f)
+	}
+}
+
+func makeTree(t *testing.T) FS {
+	fsys := fstest.MapFS{}
+	walkTree(tree, tree.name, func(path string, n *Node) {
+		if n.entries == nil {
+			fsys[path] = &fstest.MapFile{}
+		} else {
+			fsys[path] = &fstest.MapFile{Mode: ModeDir}
+		}
+	})
+	return fsys
+}
+
+func markTree(n *Node) { walkTree(n, "", func(path string, n *Node) { n.mark++ }) }
+
+func checkMarks(t *testing.T, report bool) {
+	walkTree(tree, tree.name, func(path string, n *Node) {
+		if n.mark != 1 && report {
+			t.Errorf("node %s mark = %d; expected 1", path, n.mark)
+		}
+		n.mark = 0
+	})
+}
+
+// Assumes that each node name is unique. Good enough for a test.
+// If clear is true, any incoming error is cleared before return. The errors
+// are always accumulated, though.
+func mark(entry DirEntry, err error, errors *[]error, clear bool) error {
+	name := entry.Name()
+	walkTree(tree, tree.name, func(path string, n *Node) {
+		if n.name == name {
+			n.mark++
+		}
+	})
+	if err != nil {
+		*errors = append(*errors, err)
+		if clear {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+func TestWalkDir(t *testing.T) {
+	tmpDir := t.TempDir()
+
+	origDir, err := os.Getwd()
+	if err != nil {
+		t.Fatal("finding working dir:", err)
+	}
+	if err = os.Chdir(tmpDir); err != nil {
+		t.Fatal("entering temp dir:", err)
+	}
+	defer os.Chdir(origDir)
+
+	fsys := makeTree(t)
+	errors := make([]error, 0, 10)
+	clear := true
+	markFn := func(path string, entry DirEntry, err error) error {
+		return mark(entry, err, &errors, clear)
+	}
+	// Expect no errors.
+	err = WalkDir(fsys, ".", markFn)
+	if err != nil {
+		t.Fatalf("no error expected, found: %s", err)
+	}
+	if len(errors) != 0 {
+		t.Fatalf("unexpected errors: %s", errors)
+	}
+	checkMarks(t, true)
+}
diff --git a/internal/backport/obscuretestdata/obscuretestdata.go b/internal/backport/obscuretestdata/obscuretestdata.go
new file mode 100644
index 0000000..512f375
--- /dev/null
+++ b/internal/backport/obscuretestdata/obscuretestdata.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package obscuretestdata contains functionality used by tests to more easily
+// work with testdata that must be obscured primarily due to
+// golang.org/issue/34986.
+package obscuretestdata
+
+import (
+	"encoding/base64"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+// DecodeToTempFile decodes the named file to a temporary location.
+// If successful, it returns the path of the decoded file.
+// The caller is responsible for ensuring that the temporary file is removed.
+func DecodeToTempFile(name string) (path string, err error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	tmp, err := ioutil.TempFile("", "obscuretestdata-decoded-")
+	if err != nil {
+		return "", err
+	}
+	if _, err := io.Copy(tmp, base64.NewDecoder(base64.StdEncoding, f)); err != nil {
+		tmp.Close()
+		os.Remove(tmp.Name())
+		return "", err
+	}
+	if err := tmp.Close(); err != nil {
+		os.Remove(tmp.Name())
+		return "", err
+	}
+	return tmp.Name(), nil
+}
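+
+// Editorial sketch (not part of the upstream source): the intended call
+// pattern inside a test, with the caller removing the temporary file; the
+// testdata name is hypothetical.
+//
+//	path, err := obscuretestdata.DecodeToTempFile("testdata/example.zip.base64")
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer os.Remove(path)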
+
+// ReadFile reads the named file and returns its decoded contents.
+func ReadFile(name string) ([]byte, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return ioutil.ReadAll(base64.NewDecoder(base64.StdEncoding, f))
+}
diff --git a/internal/backport/osfs/file.go b/internal/backport/osfs/file.go
new file mode 100644
index 0000000..8cba1bc
--- /dev/null
+++ b/internal/backport/osfs/file.go
@@ -0,0 +1,72 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osfs
+
+import (
+	"os"
+	"runtime"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// DirFS returns a file system (an fs.FS) for the tree of files rooted at the directory dir.
+//
+// Note that DirFS("/prefix") only guarantees that the Open calls it makes to the
+// operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the
+// same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside
+// the /prefix tree, then using DirFS does not stop the access any more than using
+// os.Open does. DirFS is therefore not a general substitute for a chroot-style security
+// mechanism when the directory tree contains arbitrary content.
+func DirFS(dir string) fs.FS {
+	return dirFS(dir)
+}
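+
+// Editorial sketch (not part of the upstream source): wiring this backport's
+// DirFS into the fs helpers, mirroring os.DirFS in Go 1.16+; the directory
+// and file names are hypothetical.
+//
+//	fsys := osfs.DirFS("/srv/site")
+//	data, err := fs.ReadFile(fsys, "index.html") // reads /srv/site/index.html
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s\n", data)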
+
+func containsAny(s, chars string) bool {
+	for i := 0; i < len(s); i++ {
+		for j := 0; j < len(chars); j++ {
+			if s[i] == chars[j] {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+type dirFS string
+
+func (dir dirFS) Open(name string) (fs.File, error) {
+	if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+		return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrInvalid}
+	}
+	f, err := os.Open(string(dir) + "/" + name)
+	if err != nil {
+		return nil, err // nil fs.File
+	}
+	return dirFSFile{f}, nil
+}
+
+func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
+	if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+		return nil, &os.PathError{Op: "stat", Path: name, Err: os.ErrInvalid}
+	}
+	f, err := os.Stat(string(dir) + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+type dirFSFile struct {
+	*os.File
+}
+
+func (f dirFSFile) ReadDir(n int) ([]fs.DirEntry, error) {
+	infos, err := f.Readdir(n)
+	var dirs []fs.DirEntry
+	for _, info := range infos {
+		dirs = append(dirs, fs.FileInfoToDirEntry(info))
+	}
+	return dirs, err
+}
diff --git a/internal/backport/path/example_test.go b/internal/backport/path/example_test.go
new file mode 100644
index 0000000..55c8536
--- /dev/null
+++ b/internal/backport/path/example_test.go
@@ -0,0 +1,122 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path_test
+
+import (
+	"fmt"
+
+	"golang.org/x/website/internal/backport/path"
+)
+
+func ExampleBase() {
+	fmt.Println(path.Base("/a/b"))
+	fmt.Println(path.Base("/"))
+	fmt.Println(path.Base(""))
+	// Output:
+	// b
+	// /
+	// .
+}
+
+func ExampleClean() {
+	paths := []string{
+		"a/c",
+		"a//c",
+		"a/c/.",
+		"a/c/b/..",
+		"/../a/c",
+		"/../a/b/../././/c",
+		"",
+	}
+
+	for _, p := range paths {
+		fmt.Printf("Clean(%q) = %q\n", p, path.Clean(p))
+	}
+
+	// Output:
+	// Clean("a/c") = "a/c"
+	// Clean("a//c") = "a/c"
+	// Clean("a/c/.") = "a/c"
+	// Clean("a/c/b/..") = "a/c"
+	// Clean("/../a/c") = "/a/c"
+	// Clean("/../a/b/../././/c") = "/a/c"
+	// Clean("") = "."
+}
+
+func ExampleDir() {
+	fmt.Println(path.Dir("/a/b/c"))
+	fmt.Println(path.Dir("a/b/c"))
+	fmt.Println(path.Dir("/a/"))
+	fmt.Println(path.Dir("a/"))
+	fmt.Println(path.Dir("/"))
+	fmt.Println(path.Dir(""))
+	// Output:
+	// /a/b
+	// a/b
+	// /a
+	// a
+	// /
+	// .
+}
+
+func ExampleExt() {
+	fmt.Println(path.Ext("/a/b/c/bar.css"))
+	fmt.Println(path.Ext("/"))
+	fmt.Println(path.Ext(""))
+	// Output:
+	// .css
+	//
+	//
+}
+
+func ExampleIsAbs() {
+	fmt.Println(path.IsAbs("/dev/null"))
+	// Output: true
+}
+
+func ExampleJoin() {
+	fmt.Println(path.Join("a", "b", "c"))
+	fmt.Println(path.Join("a", "b/c"))
+	fmt.Println(path.Join("a/b", "c"))
+
+	fmt.Println(path.Join("a/b", "../../../xyz"))
+
+	fmt.Println(path.Join("", ""))
+	fmt.Println(path.Join("a", ""))
+	fmt.Println(path.Join("", "a"))
+
+	// Output:
+	// a/b/c
+	// a/b/c
+	// a/b/c
+	// ../xyz
+	//
+	// a
+	// a
+}
+
+func ExampleMatch() {
+	fmt.Println(path.Match("abc", "abc"))
+	fmt.Println(path.Match("a*", "abc"))
+	fmt.Println(path.Match("a*/b", "a/c/b"))
+	// Output:
+	// true <nil>
+	// true <nil>
+	// false <nil>
+}
+
+func ExampleSplit() {
+	split := func(s string) {
+		dir, file := path.Split(s)
+		fmt.Printf("path.Split(%q) = dir: %q, file: %q\n", s, dir, file)
+	}
+	split("static/myfile.css")
+	split("myfile.css")
+	split("")
+	// Output:
+	// path.Split("static/myfile.css") = dir: "static/", file: "myfile.css"
+	// path.Split("myfile.css") = dir: "", file: "myfile.css"
+	// path.Split("") = dir: "", file: ""
+}
diff --git a/internal/backport/path/match.go b/internal/backport/path/match.go
new file mode 100644
index 0000000..2b1974f
--- /dev/null
+++ b/internal/backport/path/match.go
@@ -0,0 +1,231 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path
+
+import (
+	"errors"
+	"strings"
+	"unicode/utf8"
+)
+
+// ErrBadPattern indicates a pattern was malformed.
+var ErrBadPattern = errors.New("syntax error in pattern")
+
+// Match reports whether name matches the shell pattern.
+// The pattern syntax is:
+//
+//	pattern:
+//		{ term }
+//	term:
+//		'*'         matches any sequence of non-/ characters
+//		'?'         matches any single non-/ character
+//		'[' [ '^' ] { character-range } ']'
+//		            character class (must be non-empty)
+//		c           matches character c (c != '*', '?', '\\', '[')
+//		'\\' c      matches character c
+//
+//	character-range:
+//		c           matches character c (c != '\\', '-', ']')
+//		'\\' c      matches character c
+//		lo '-' hi   matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+func Match(pattern, name string) (matched bool, err error) {
+Pattern:
+	for len(pattern) > 0 {
+		var star bool
+		var chunk string
+		star, chunk, pattern = scanChunk(pattern)
+		if star && chunk == "" {
+			// Trailing * matches rest of string unless it has a /.
+			return !strings.Contains(name, "/"), nil
+		}
+		// Look for match at current position.
+		t, ok, err := matchChunk(chunk, name)
+		// if we're the last chunk, make sure we've exhausted the name
+		// otherwise we'll give a false result even if we could still match
+		// using the star
+		if ok && (len(t) == 0 || len(pattern) > 0) {
+			name = t
+			continue
+		}
+		if err != nil {
+			return false, err
+		}
+		if star {
+			// Look for match skipping i+1 bytes.
+			// Cannot skip /.
+			for i := 0; i < len(name) && name[i] != '/'; i++ {
+				t, ok, err := matchChunk(chunk, name[i+1:])
+				if ok {
+					// if we're the last chunk, make sure we exhausted the name
+					if len(pattern) == 0 && len(t) > 0 {
+						continue
+					}
+					name = t
+					continue Pattern
+				}
+				if err != nil {
+					return false, err
+				}
+			}
+		}
+		// Before returning false with no error,
+		// check that the remainder of the pattern is syntactically valid.
+		for len(pattern) > 0 {
+			_, chunk, pattern = scanChunk(pattern)
+			if _, _, err := matchChunk(chunk, ""); err != nil {
+				return false, err
+			}
+		}
+		return false, nil
+	}
+	return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+	for len(pattern) > 0 && pattern[0] == '*' {
+		pattern = pattern[1:]
+		star = true
+	}
+	inrange := false
+	var i int
+Scan:
+	for i = 0; i < len(pattern); i++ {
+		switch pattern[i] {
+		case '\\':
+			// error check handled in matchChunk: bad pattern.
+			if i+1 < len(pattern) {
+				i++
+			}
+		case '[':
+			inrange = true
+		case ']':
+			inrange = false
+		case '*':
+			if !inrange {
+				break Scan
+			}
+		}
+	}
+	return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+	// failed records whether the match has failed.
+	// After the match fails, the loop continues on processing chunk,
+	// checking that the pattern is well-formed but no longer reading s.
+	failed := false
+	for len(chunk) > 0 {
+		if !failed && len(s) == 0 {
+			failed = true
+		}
+		switch chunk[0] {
+		case '[':
+			// character class
+			var r rune
+			if !failed {
+				var n int
+				r, n = utf8.DecodeRuneInString(s)
+				s = s[n:]
+			}
+			chunk = chunk[1:]
+			// possibly negated
+			negated := false
+			if len(chunk) > 0 && chunk[0] == '^' {
+				negated = true
+				chunk = chunk[1:]
+			}
+			// parse all ranges
+			match := false
+			nrange := 0
+			for {
+				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+					chunk = chunk[1:]
+					break
+				}
+				var lo, hi rune
+				if lo, chunk, err = getEsc(chunk); err != nil {
+					return "", false, err
+				}
+				hi = lo
+				if chunk[0] == '-' {
+					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+						return "", false, err
+					}
+				}
+				if lo <= r && r <= hi {
+					match = true
+				}
+				nrange++
+			}
+			if match == negated {
+				failed = true
+			}
+
+		case '?':
+			if !failed {
+				if s[0] == '/' {
+					failed = true
+				}
+				_, n := utf8.DecodeRuneInString(s)
+				s = s[n:]
+			}
+			chunk = chunk[1:]
+
+		case '\\':
+			chunk = chunk[1:]
+			if len(chunk) == 0 {
+				return "", false, ErrBadPattern
+			}
+			fallthrough
+
+		default:
+			if !failed {
+				if chunk[0] != s[0] {
+					failed = true
+				}
+				s = s[1:]
+			}
+			chunk = chunk[1:]
+		}
+	}
+	if failed {
+		return "", false, nil
+	}
+	return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+		err = ErrBadPattern
+		return
+	}
+	if chunk[0] == '\\' {
+		chunk = chunk[1:]
+		if len(chunk) == 0 {
+			err = ErrBadPattern
+			return
+		}
+	}
+	r, n := utf8.DecodeRuneInString(chunk)
+	if r == utf8.RuneError && n == 1 {
+		err = ErrBadPattern
+	}
+	nchunk = chunk[n:]
+	if len(nchunk) == 0 {
+		err = ErrBadPattern
+	}
+	return
+}
diff --git a/internal/backport/path/match_test.go b/internal/backport/path/match_test.go
new file mode 100644
index 0000000..57b5877
--- /dev/null
+++ b/internal/backport/path/match_test.go
@@ -0,0 +1,85 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path_test
+
+import (
+	"testing"
+
+	. "golang.org/x/website/internal/backport/path"
+)
+
+type MatchTest struct {
+	pattern, s string
+	match      bool
+	err        error
+}
+
+var matchTests = []MatchTest{
+	{"abc", "abc", true, nil},
+	{"*", "abc", true, nil},
+	{"*c", "abc", true, nil},
+	{"a*", "a", true, nil},
+	{"a*", "abc", true, nil},
+	{"a*", "ab/c", false, nil},
+	{"a*/b", "abc/b", true, nil},
+	{"a*/b", "a/c/b", false, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
+	{"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
+	{"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
+	{"ab[c]", "abc", true, nil},
+	{"ab[b-d]", "abc", true, nil},
+	{"ab[e-g]", "abc", false, nil},
+	{"ab[^c]", "abc", false, nil},
+	{"ab[^b-d]", "abc", false, nil},
+	{"ab[^e-g]", "abc", true, nil},
+	{"a\\*b", "a*b", true, nil},
+	{"a\\*b", "ab", false, nil},
+	{"a?b", "a☺b", true, nil},
+	{"a[^a]b", "a☺b", true, nil},
+	{"a???b", "a☺b", false, nil},
+	{"a[^a][^a][^a]b", "a☺b", false, nil},
+	{"[a-ζ]*", "α", true, nil},
+	{"*[a-ζ]", "A", false, nil},
+	{"a?b", "a/b", false, nil},
+	{"a*b", "a/b", false, nil},
+	{"[\\]a]", "]", true, nil},
+	{"[\\-]", "-", true, nil},
+	{"[x\\-]", "x", true, nil},
+	{"[x\\-]", "-", true, nil},
+	{"[x\\-]", "z", false, nil},
+	{"[\\-x]", "x", true, nil},
+	{"[\\-x]", "-", true, nil},
+	{"[\\-x]", "a", false, nil},
+	{"[]a]", "]", false, ErrBadPattern},
+	{"[-]", "-", false, ErrBadPattern},
+	{"[x-]", "x", false, ErrBadPattern},
+	{"[x-]", "-", false, ErrBadPattern},
+	{"[x-]", "z", false, ErrBadPattern},
+	{"[-x]", "x", false, ErrBadPattern},
+	{"[-x]", "-", false, ErrBadPattern},
+	{"[-x]", "a", false, ErrBadPattern},
+	{"\\", "a", false, ErrBadPattern},
+	{"[a-b-c]", "a", false, ErrBadPattern},
+	{"[", "a", false, ErrBadPattern},
+	{"[^", "a", false, ErrBadPattern},
+	{"[^bc", "a", false, ErrBadPattern},
+	{"a[", "a", false, ErrBadPattern},
+	{"a[", "ab", false, ErrBadPattern},
+	{"a[", "x", false, ErrBadPattern},
+	{"a/b[", "x", false, ErrBadPattern},
+	{"*x", "xxx", true, nil},
+}
+
+func TestMatch(t *testing.T) {
+	for _, tt := range matchTests {
+		ok, err := Match(tt.pattern, tt.s)
+		if ok != tt.match || err != tt.err {
+			t.Errorf("Match(%#q, %#q) = %v, %v want %v, %v", tt.pattern, tt.s, ok, err, tt.match, tt.err)
+		}
+	}
+}
diff --git a/internal/backport/path/path.go b/internal/backport/path/path.go
new file mode 100644
index 0000000..f1f3499
--- /dev/null
+++ b/internal/backport/path/path.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package path implements utility routines for manipulating slash-separated
+// paths.
+//
+// The path package should only be used for paths separated by forward
+// slashes, such as the paths in URLs. This package does not deal with
+// Windows paths with drive letters or backslashes; to manipulate
+// operating system paths, use the path/filepath package.
+package path
+
+// A lazybuf is a lazily constructed path buffer.
+// It supports append, reading previously appended bytes,
+// and retrieving the final string. It does not allocate a buffer
+// to hold the output until that output diverges from s.
+type lazybuf struct {
+	s   string
+	buf []byte
+	w   int
+}
+
+func (b *lazybuf) index(i int) byte {
+	if b.buf != nil {
+		return b.buf[i]
+	}
+	return b.s[i]
+}
+
+func (b *lazybuf) append(c byte) {
+	if b.buf == nil {
+		if b.w < len(b.s) && b.s[b.w] == c {
+			b.w++
+			return
+		}
+		b.buf = make([]byte, len(b.s))
+		copy(b.buf, b.s[:b.w])
+	}
+	b.buf[b.w] = c
+	b.w++
+}
+
+func (b *lazybuf) string() string {
+	if b.buf == nil {
+		return b.s[:b.w]
+	}
+	return string(b.buf[:b.w])
+}
+
+// Clean returns the shortest path name equivalent to path
+// by purely lexical processing. It applies the following rules
+// iteratively until no further processing can be done:
+//
+//	1. Replace multiple slashes with a single slash.
+//	2. Eliminate each . path name element (the current directory).
+//	3. Eliminate each inner .. path name element (the parent directory)
+//	   along with the non-.. element that precedes it.
+//	4. Eliminate .. elements that begin a rooted path:
+//	   that is, replace "/.." by "/" at the beginning of a path.
+//
+// The returned path ends in a slash only if it is the root "/".
+//
+// If the result of this process is an empty string, Clean
+// returns the string ".".
+//
+// See also Rob Pike, ``Lexical File Names in Plan 9 or
+// Getting Dot-Dot Right,''
+// https://9p.io/sys/doc/lexnames.html
+func Clean(path string) string {
+	if path == "" {
+		return "."
+	}
+
+	rooted := path[0] == '/'
+	n := len(path)
+
+	// Invariants:
+	//	reading from path; r is index of next byte to process.
+	//	writing to buf; w is index of next byte to write.
+	//	dotdot is index in buf where .. must stop, either because
+	//		it is the leading slash or it is a leading ../../.. prefix.
+	out := lazybuf{s: path}
+	r, dotdot := 0, 0
+	if rooted {
+		out.append('/')
+		r, dotdot = 1, 1
+	}
+
+	for r < n {
+		switch {
+		case path[r] == '/':
+			// empty path element
+			r++
+		case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
+			// . element
+			r++
+		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
+			// .. element: remove to last /
+			r += 2
+			switch {
+			case out.w > dotdot:
+				// can backtrack
+				out.w--
+				for out.w > dotdot && out.index(out.w) != '/' {
+					out.w--
+				}
+			case !rooted:
+				// cannot backtrack, but not rooted, so append .. element.
+				if out.w > 0 {
+					out.append('/')
+				}
+				out.append('.')
+				out.append('.')
+				dotdot = out.w
+			}
+		default:
+			// real path element.
+			// add slash if needed
+			if rooted && out.w != 1 || !rooted && out.w != 0 {
+				out.append('/')
+			}
+			// copy element
+			for ; r < n && path[r] != '/'; r++ {
+				out.append(path[r])
+			}
+		}
+	}
+
+	// Turn empty string into "."
+	if out.w == 0 {
+		return "."
+	}
+
+	return out.string()
+}
+
+// lastSlash(s) is strings.LastIndex(s, "/") but we can't import strings.
+func lastSlash(s string) int {
+	i := len(s) - 1
+	for i >= 0 && s[i] != '/' {
+		i--
+	}
+	return i
+}
+
+// Split splits path immediately following the final slash,
+// separating it into a directory and file name component.
+// If there is no slash in path, Split returns an empty dir and
+// file set to path.
+// The returned values have the property that path = dir+file.
+func Split(path string) (dir, file string) {
+	i := lastSlash(path)
+	return path[:i+1], path[i+1:]
+}
+
+// Join joins any number of path elements into a single path,
+// separating them with slashes. Empty elements are ignored.
+// The result is Cleaned. However, if the argument list is
+// empty or all its elements are empty, Join returns
+// an empty string.
+func Join(elem ...string) string {
+	size := 0
+	for _, e := range elem {
+		size += len(e)
+	}
+	if size == 0 {
+		return ""
+	}
+	buf := make([]byte, 0, size+len(elem)-1)
+	for _, e := range elem {
+		if len(buf) > 0 || e != "" {
+			if len(buf) > 0 {
+				buf = append(buf, '/')
+			}
+			buf = append(buf, e...)
+		}
+	}
+	return Clean(string(buf))
+}
+
+// Ext returns the file name extension used by path.
+// The extension is the suffix beginning at the final dot
+// in the final slash-separated element of path;
+// it is empty if there is no dot.
+func Ext(path string) string {
+	for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- {
+		if path[i] == '.' {
+			return path[i:]
+		}
+	}
+	return ""
+}
+
+// Base returns the last element of path.
+// Trailing slashes are removed before extracting the last element.
+// If the path is empty, Base returns ".".
+// If the path consists entirely of slashes, Base returns "/".
+func Base(path string) string {
+	if path == "" {
+		return "."
+	}
+	// Strip trailing slashes.
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[0 : len(path)-1]
+	}
+	// Find the last element
+	if i := lastSlash(path); i >= 0 {
+		path = path[i+1:]
+	}
+	// If empty now, it had only slashes.
+	if path == "" {
+		return "/"
+	}
+	return path
+}
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) bool {
+	return len(path) > 0 && path[0] == '/'
+}
+
+// Dir returns all but the last element of path, typically the path's directory.
+// After dropping the final element using Split, the path is Cleaned and trailing
+// slashes are removed.
+// If the path is empty, Dir returns ".".
+// If the path consists entirely of slashes followed by non-slash bytes, Dir
+// returns a single slash. In any other case, the returned path does not end in a
+// slash.
+func Dir(path string) string {
+	dir, _ := Split(path)
+	return Clean(dir)
+}
diff --git a/internal/backport/path/path_test.go b/internal/backport/path/path_test.go
new file mode 100644
index 0000000..ba80b01
--- /dev/null
+++ b/internal/backport/path/path_test.go
@@ -0,0 +1,237 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path_test
+
+import (
+	"runtime"
+	"testing"
+
+	. "golang.org/x/website/internal/backport/path"
+)
+
+type PathTest struct {
+	path, result string
+}
+
+var cleantests = []PathTest{
+	// Already clean
+	{"", "."},
+	{"abc", "abc"},
+	{"abc/def", "abc/def"},
+	{"a/b/c", "a/b/c"},
+	{".", "."},
+	{"..", ".."},
+	{"../..", "../.."},
+	{"../../abc", "../../abc"},
+	{"/abc", "/abc"},
+	{"/", "/"},
+
+	// Remove trailing slash
+	{"abc/", "abc"},
+	{"abc/def/", "abc/def"},
+	{"a/b/c/", "a/b/c"},
+	{"./", "."},
+	{"../", ".."},
+	{"../../", "../.."},
+	{"/abc/", "/abc"},
+
+	// Remove doubled slash
+	{"abc//def//ghi", "abc/def/ghi"},
+	{"//abc", "/abc"},
+	{"///abc", "/abc"},
+	{"//abc//", "/abc"},
+	{"abc//", "abc"},
+
+	// Remove . elements
+	{"abc/./def", "abc/def"},
+	{"/./abc/def", "/abc/def"},
+	{"abc/.", "abc"},
+
+	// Remove .. elements
+	{"abc/def/ghi/../jkl", "abc/def/jkl"},
+	{"abc/def/../ghi/../jkl", "abc/jkl"},
+	{"abc/def/..", "abc"},
+	{"abc/def/../..", "."},
+	{"/abc/def/../..", "/"},
+	{"abc/def/../../..", ".."},
+	{"/abc/def/../../..", "/"},
+	{"abc/def/../../../ghi/jkl/../../../mno", "../../mno"},
+
+	// Combinations
+	{"abc/./../def", "def"},
+	{"abc//./../def", "def"},
+	{"abc/../../././../def", "../../def"},
+}
+
+func TestClean(t *testing.T) {
+	for _, test := range cleantests {
+		if s := Clean(test.path); s != test.result {
+			t.Errorf("Clean(%q) = %q, want %q", test.path, s, test.result)
+		}
+		if s := Clean(test.result); s != test.result {
+			t.Errorf("Clean(%q) = %q, want %q", test.result, s, test.result)
+		}
+	}
+}
+
+func TestCleanMallocs(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping malloc count in short mode")
+	}
+	if runtime.GOMAXPROCS(0) > 1 {
+		t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1")
+		return
+	}
+
+	for _, test := range cleantests {
+		allocs := testing.AllocsPerRun(100, func() { Clean(test.result) })
+		if allocs > 0 {
+			t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs)
+		}
+	}
+}
+
+type SplitTest struct {
+	path, dir, file string
+}
+
+var splittests = []SplitTest{
+	{"a/b", "a/", "b"},
+	{"a/b/", "a/b/", ""},
+	{"a/", "a/", ""},
+	{"a", "", "a"},
+	{"/", "/", ""},
+}
+
+func TestSplit(t *testing.T) {
+	for _, test := range splittests {
+		if d, f := Split(test.path); d != test.dir || f != test.file {
+			t.Errorf("Split(%q) = %q, %q, want %q, %q", test.path, d, f, test.dir, test.file)
+		}
+	}
+}
+
+type JoinTest struct {
+	elem []string
+	path string
+}
+
+var jointests = []JoinTest{
+	// zero parameters
+	{[]string{}, ""},
+
+	// one parameter
+	{[]string{""}, ""},
+	{[]string{"a"}, "a"},
+
+	// two parameters
+	{[]string{"a", "b"}, "a/b"},
+	{[]string{"a", ""}, "a"},
+	{[]string{"", "b"}, "b"},
+	{[]string{"/", "a"}, "/a"},
+	{[]string{"/", ""}, "/"},
+	{[]string{"a/", "b"}, "a/b"},
+	{[]string{"a/", ""}, "a"},
+	{[]string{"", ""}, ""},
+}
+
+func TestJoin(t *testing.T) {
+	for _, test := range jointests {
+		if p := Join(test.elem...); p != test.path {
+			t.Errorf("Join(%q) = %q, want %q", test.elem, p, test.path)
+		}
+	}
+}
+
+type ExtTest struct {
+	path, ext string
+}
+
+var exttests = []ExtTest{
+	{"path.go", ".go"},
+	{"path.pb.go", ".go"},
+	{"a.dir/b", ""},
+	{"a.dir/b.go", ".go"},
+	{"a.dir/", ""},
+}
+
+func TestExt(t *testing.T) {
+	for _, test := range exttests {
+		if x := Ext(test.path); x != test.ext {
+			t.Errorf("Ext(%q) = %q, want %q", test.path, x, test.ext)
+		}
+	}
+}
+
+var basetests = []PathTest{
+	// Already clean
+	{"", "."},
+	{".", "."},
+	{"/.", "."},
+	{"/", "/"},
+	{"////", "/"},
+	{"x/", "x"},
+	{"abc", "abc"},
+	{"abc/def", "def"},
+	{"a/b/.x", ".x"},
+	{"a/b/c.", "c."},
+	{"a/b/c.x", "c.x"},
+}
+
+func TestBase(t *testing.T) {
+	for _, test := range basetests {
+		if s := Base(test.path); s != test.result {
+			t.Errorf("Base(%q) = %q, want %q", test.path, s, test.result)
+		}
+	}
+}
+
+var dirtests = []PathTest{
+	{"", "."},
+	{".", "."},
+	{"/.", "/"},
+	{"/", "/"},
+	{"////", "/"},
+	{"/foo", "/"},
+	{"x/", "x"},
+	{"abc", "."},
+	{"abc/def", "abc"},
+	{"abc////def", "abc"},
+	{"a/b/.x", "a/b"},
+	{"a/b/c.", "a/b"},
+	{"a/b/c.x", "a/b"},
+}
+
+func TestDir(t *testing.T) {
+	for _, test := range dirtests {
+		if s := Dir(test.path); s != test.result {
+			t.Errorf("Dir(%q) = %q, want %q", test.path, s, test.result)
+		}
+	}
+}
+
+type IsAbsTest struct {
+	path  string
+	isAbs bool
+}
+
+var isAbsTests = []IsAbsTest{
+	{"", false},
+	{"/", true},
+	{"/usr/bin/gcc", true},
+	{"..", false},
+	{"/a/../bb", true},
+	{".", false},
+	{"./", false},
+	{"lala", false},
+}
+
+func TestIsAbs(t *testing.T) {
+	for _, test := range isAbsTests {
+		if r := IsAbs(test.path); r != test.isAbs {
+			t.Errorf("IsAbs(%q) = %v, want %v", test.path, r, test.isAbs)
+		}
+	}
+}
diff --git a/internal/backport/testing/fstest/mapfs.go b/internal/backport/testing/fstest/mapfs.go
new file mode 100644
index 0000000..361f8bf
--- /dev/null
+++ b/internal/backport/testing/fstest/mapfs.go
@@ -0,0 +1,240 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fstest
+
+import (
+	"io"
+	"sort"
+	"strings"
+	"time"
+
+	"golang.org/x/website/internal/backport/path"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// A MapFS is a simple in-memory file system for use in tests,
+// represented as a map from path names (arguments to Open)
+// to information about the files or directories they represent.
+//
+// The map need not include parent directories for files contained
+// in the map; those will be synthesized if needed.
+// But a directory can still be included by setting the MapFile.Mode's ModeDir bit;
+// this may be necessary for detailed control over the directory's FileInfo
+// or to create an empty directory.
+//
+// File system operations read directly from the map,
+// so that the file system can be changed by editing the map as needed.
+// An implication is that file system operations must not run concurrently
+// with changes to the map, which would be a race.
+// Another implication is that opening or reading a directory requires
+// iterating over the entire map, so a MapFS should typically be used with not more
+// than a few hundred entries or directory reads.
+type MapFS map[string]*MapFile
+
+// A MapFile describes a single file in a MapFS.
+type MapFile struct {
+	Data    []byte      // file content
+	Mode    fs.FileMode // FileInfo.Mode
+	ModTime time.Time   // FileInfo.ModTime
+	Sys     interface{} // FileInfo.Sys
+}
+
+var _ fs.FS = MapFS(nil)
+var _ fs.File = (*openMapFile)(nil)
+
+// Open opens the named file.
+func (fsys MapFS) Open(name string) (fs.File, error) {
+	if !fs.ValidPath(name) {
+		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+	}
+	file := fsys[name]
+	if file != nil && file.Mode&fs.ModeDir == 0 {
+		// Ordinary file
+		return &openMapFile{name, mapFileInfo{path.Base(name), file}, 0}, nil
+	}
+
+	// Directory, possibly synthesized.
+	// Note that file can be nil here: the map need not contain explicit parent directories for all its files.
+	// But file can also be non-nil, in case the user wants to set metadata for the directory explicitly.
+	// Either way, we need to construct the list of children of this directory.
+	var list []mapFileInfo
+	var elem string
+	var need = make(map[string]bool)
+	if name == "." {
+		elem = "."
+		for fname, f := range fsys {
+			i := strings.Index(fname, "/")
+			if i < 0 {
+				list = append(list, mapFileInfo{fname, f})
+			} else {
+				need[fname[:i]] = true
+			}
+		}
+	} else {
+		elem = name[strings.LastIndex(name, "/")+1:]
+		prefix := name + "/"
+		for fname, f := range fsys {
+			if strings.HasPrefix(fname, prefix) {
+				felem := fname[len(prefix):]
+				i := strings.Index(felem, "/")
+				if i < 0 {
+					list = append(list, mapFileInfo{felem, f})
+				} else {
+					need[fname[len(prefix):len(prefix)+i]] = true
+				}
+			}
+		}
+		// If the directory name is not in the map,
+		// and there are no children of the name in the map,
+		// then the directory is treated as not existing.
+		if file == nil && list == nil && len(need) == 0 {
+			return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+		}
+	}
+	for _, fi := range list {
+		delete(need, fi.name)
+	}
+	for name := range need {
+		list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir}})
+	}
+	sort.Slice(list, func(i, j int) bool {
+		return list[i].name < list[j].name
+	})
+
+	if file == nil {
+		file = &MapFile{Mode: fs.ModeDir}
+	}
+	return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
+}
+
+// fsOnly is a wrapper that hides all but the fs.FS methods,
+// to avoid an infinite recursion when implementing special
+// methods in terms of helpers that would use them.
+// (In general, implementing these methods using the package fs helpers
+// is redundant and unnecessary, but having the methods may make
+// MapFS exercise more code paths when used in tests.)
+type fsOnly struct{ fs.FS }
+
+func (fsys MapFS) ReadFile(name string) ([]byte, error) {
+	return fs.ReadFile(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) Stat(name string) (fs.FileInfo, error) {
+	return fs.Stat(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) ReadDir(name string) ([]fs.DirEntry, error) {
+	return fs.ReadDir(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) Glob(pattern string) ([]string, error) {
+	return fs.Glob(fsOnly{fsys}, pattern)
+}
+
+type noSub struct {
+	MapFS
+}
+
+func (noSub) Sub() {} // not the fs.SubFS signature
+
+func (fsys MapFS) Sub(dir string) (fs.FS, error) {
+	return fs.Sub(noSub{fsys}, dir)
+}
+
+// A mapFileInfo implements fs.FileInfo and fs.DirEntry for a given map file.
+type mapFileInfo struct {
+	name string
+	f    *MapFile
+}
+
+func (i *mapFileInfo) Name() string               { return i.name }
+func (i *mapFileInfo) Size() int64                { return int64(len(i.f.Data)) }
+func (i *mapFileInfo) Mode() fs.FileMode          { return i.f.Mode }
+func (i *mapFileInfo) Type() fs.FileMode          { return i.f.Mode & fs.ModeType }
+func (i *mapFileInfo) ModTime() time.Time         { return i.f.ModTime }
+func (i *mapFileInfo) IsDir() bool                { return i.f.Mode&fs.ModeDir != 0 }
+func (i *mapFileInfo) Sys() interface{}           { return i.f.Sys }
+func (i *mapFileInfo) Info() (fs.FileInfo, error) { return i, nil }
+
+// An openMapFile is a regular (non-directory) fs.File open for reading.
+type openMapFile struct {
+	path string
+	mapFileInfo
+	offset int64
+}
+
+func (f *openMapFile) Stat() (fs.FileInfo, error) { return &f.mapFileInfo, nil }
+
+func (f *openMapFile) Close() error { return nil }
+
+func (f *openMapFile) Read(b []byte) (int, error) {
+	if f.offset >= int64(len(f.f.Data)) {
+		return 0, io.EOF
+	}
+	if f.offset < 0 {
+		return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+	}
+	n := copy(b, f.f.Data[f.offset:])
+	f.offset += int64(n)
+	return n, nil
+}
+
+func (f *openMapFile) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	case 0:
+		// offset += 0
+	case 1:
+		offset += f.offset
+	case 2:
+		offset += int64(len(f.f.Data))
+	}
+	if offset < 0 || offset > int64(len(f.f.Data)) {
+		return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid}
+	}
+	f.offset = offset
+	return offset, nil
+}
+
+func (f *openMapFile) ReadAt(b []byte, offset int64) (int, error) {
+	if offset < 0 || offset > int64(len(f.f.Data)) {
+		return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+	}
+	n := copy(b, f.f.Data[offset:])
+	if n < len(b) {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+// A mapDir is a directory fs.File (so also an fs.ReadDirFile) open for reading.
+type mapDir struct {
+	path string
+	mapFileInfo
+	entry  []mapFileInfo
+	offset int
+}
+
+func (d *mapDir) Stat() (fs.FileInfo, error) { return &d.mapFileInfo, nil }
+func (d *mapDir) Close() error               { return nil }
+func (d *mapDir) Read(b []byte) (int, error) {
+	return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
+}
+
+func (d *mapDir) ReadDir(count int) ([]fs.DirEntry, error) {
+	n := len(d.entry) - d.offset
+	if n == 0 && count > 0 {
+		return nil, io.EOF
+	}
+	if count > 0 && n > count {
+		n = count
+	}
+	list := make([]fs.DirEntry, n)
+	for i := range list {
+		list[i] = &d.entry[d.offset+i]
+	}
+	d.offset += n
+	return list, nil
+}
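
As a rough usage sketch (not part of the upstream copy), a MapFS plugs straight into the backported fs helpers; the parent directory "fortune" below is synthesized because only the file path appears in the map:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/website/internal/backport/io/fs"
		"golang.org/x/website/internal/backport/testing/fstest"
	)

	func main() {
		fsys := fstest.MapFS{
			"hello.txt":         {Data: []byte("hello, world\n")},
			"fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")},
		}

		// Read a regular file through the fs helpers.
		data, err := fs.ReadFile(fsys, "hello.txt")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(string(data)) // hello, world

		// "fortune" has no entry of its own; ReadDir synthesizes it.
		entries, err := fs.ReadDir(fsys, "fortune")
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range entries {
			fmt.Println(e.Name(), e.IsDir()) // k true
		}
	}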
diff --git a/internal/backport/testing/fstest/mapfs_test.go b/internal/backport/testing/fstest/mapfs_test.go
new file mode 100644
index 0000000..2abedd6
--- /dev/null
+++ b/internal/backport/testing/fstest/mapfs_test.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fstest
+
+import (
+	"testing"
+)
+
+func TestMapFS(t *testing.T) {
+	m := MapFS{
+		"hello":             {Data: []byte("hello, world\n")},
+		"fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")},
+	}
+	if err := TestFS(m, "hello", "fortune/k/ken.txt"); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/internal/backport/testing/fstest/testfs.go b/internal/backport/testing/fstest/testfs.go
new file mode 100644
index 0000000..dbb94c0
--- /dev/null
+++ b/internal/backport/testing/fstest/testfs.go
@@ -0,0 +1,628 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fstest implements support for testing implementations and users of file systems.
+package fstest
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"reflect"
+	"sort"
+	"strings"
+
+	"golang.org/x/website/internal/backport/path"
+
+	"golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/testing/iotest"
+)
+
+// TestFS tests a file system implementation.
+// It walks the entire tree of files in fsys,
+// opening and checking that each file behaves correctly.
+// It also checks that the file system contains at least the expected files.
+// As a special case, if no expected files are listed, fsys must be empty.
+// Otherwise, fsys must contain at least the listed files; it can also contain others.
+// The contents of fsys must not change concurrently with TestFS.
+//
+// If TestFS finds any misbehaviors, it returns an error reporting all of them.
+// The error text spans multiple lines, one per detected misbehavior.
+//
+// Typical usage inside a test is:
+//
+//	if err := fstest.TestFS(myFS, "file/that/should/be/present"); err != nil {
+//		t.Fatal(err)
+//	}
+//
+func TestFS(fsys fs.FS, expected ...string) error {
+	if err := testFS(fsys, expected...); err != nil {
+		return err
+	}
+	for _, name := range expected {
+		if i := strings.Index(name, "/"); i >= 0 {
+			dir, dirSlash := name[:i], name[:i+1]
+			var subExpected []string
+			for _, name := range expected {
+				if strings.HasPrefix(name, dirSlash) {
+					subExpected = append(subExpected, name[len(dirSlash):])
+				}
+			}
+			sub, err := fs.Sub(fsys, dir)
+			if err != nil {
+				return err
+			}
+			if err := testFS(sub, subExpected...); err != nil {
+				return fmt.Errorf("testing fs.Sub(fsys, %s): %v", dir, err)
+			}
+			break // one sub-test is enough
+		}
+	}
+	return nil
+}
+
+func testFS(fsys fs.FS, expected ...string) error {
+	t := fsTester{fsys: fsys}
+	t.checkDir(".")
+	t.checkOpen(".")
+	found := make(map[string]bool)
+	for _, dir := range t.dirs {
+		found[dir] = true
+	}
+	for _, file := range t.files {
+		found[file] = true
+	}
+	delete(found, ".")
+	if len(expected) == 0 && len(found) > 0 {
+		var list []string
+		for k := range found {
+			if k != "." {
+				list = append(list, k)
+			}
+		}
+		sort.Strings(list)
+		if len(list) > 15 {
+			list = append(list[:10], "...")
+		}
+		t.errorf("expected empty file system but found files:\n%s", strings.Join(list, "\n"))
+	}
+	for _, name := range expected {
+		if !found[name] {
+			t.errorf("expected but not found: %s", name)
+		}
+	}
+	if len(t.errText) == 0 {
+		return nil
+	}
+	return errors.New("TestFS found errors:\n" + string(t.errText))
+}
+
+// An fsTester holds state for running the test.
+type fsTester struct {
+	fsys    fs.FS
+	errText []byte
+	dirs    []string
+	files   []string
+}
+
+// errorf adds an error line to errText.
+func (t *fsTester) errorf(format string, args ...interface{}) {
+	if len(t.errText) > 0 {
+		t.errText = append(t.errText, '\n')
+	}
+	t.errText = append(t.errText, fmt.Sprintf(format, args...)...)
+}
+
+func (t *fsTester) openDir(dir string) fs.ReadDirFile {
+	f, err := t.fsys.Open(dir)
+	if err != nil {
+		t.errorf("%s: Open: %v", dir, err)
+		return nil
+	}
+	d, ok := f.(fs.ReadDirFile)
+	if !ok {
+		f.Close()
+		t.errorf("%s: Open returned File type %T, not a fs.ReadDirFile", dir, f)
+		return nil
+	}
+	return d
+}
+
+// checkDir checks the directory dir, which is expected to exist
+// (it is either the root or was found in a directory listing with IsDir true).
+func (t *fsTester) checkDir(dir string) {
+	// Read entire directory.
+	t.dirs = append(t.dirs, dir)
+	d := t.openDir(dir)
+	if d == nil {
+		return
+	}
+	list, err := d.ReadDir(-1)
+	if err != nil {
+		d.Close()
+		t.errorf("%s: ReadDir(-1): %v", dir, err)
+		return
+	}
+
+	// Check all children.
+	var prefix string
+	if dir == "." {
+		prefix = ""
+	} else {
+		prefix = dir + "/"
+	}
+	for _, info := range list {
+		name := info.Name()
+		switch {
+		case name == ".", name == "..", name == "":
+			t.errorf("%s: ReadDir: child has invalid name: %#q", dir, name)
+			continue
+		case strings.Contains(name, "/"):
+			t.errorf("%s: ReadDir: child name contains slash: %#q", dir, name)
+			continue
+		case strings.Contains(name, `\`):
+			t.errorf("%s: ReadDir: child name contains backslash: %#q", dir, name)
+			continue
+		}
+		path := prefix + name
+		t.checkStat(path, info)
+		t.checkOpen(path)
+		if info.IsDir() {
+			t.checkDir(path)
+		} else {
+			t.checkFile(path)
+		}
+	}
+
+	// Check ReadDir(-1) at EOF.
+	list2, err := d.ReadDir(-1)
+	if len(list2) > 0 || err != nil {
+		d.Close()
+		t.errorf("%s: ReadDir(-1) at EOF = %d entries, %v, wanted 0 entries, nil", dir, len(list2), err)
+		return
+	}
+
+	// Check ReadDir(1) at EOF (different results).
+	list2, err = d.ReadDir(1)
+	if len(list2) > 0 || err != io.EOF {
+		d.Close()
+		t.errorf("%s: ReadDir(1) at EOF = %d entries, %v, wanted 0 entries, EOF", dir, len(list2), err)
+		return
+	}
+
+	// Check that close does not report an error.
+	if err := d.Close(); err != nil {
+		t.errorf("%s: Close: %v", dir, err)
+	}
+
+	// Check that closing twice doesn't crash.
+	// The return value doesn't matter.
+	d.Close()
+
+	// Reopen directory, read a second time, make sure contents match.
+	if d = t.openDir(dir); d == nil {
+		return
+	}
+	defer d.Close()
+	list2, err = d.ReadDir(-1)
+	if err != nil {
+		t.errorf("%s: second Open+ReadDir(-1): %v", dir, err)
+		return
+	}
+	t.checkDirList(dir, "first Open+ReadDir(-1) vs second Open+ReadDir(-1)", list, list2)
+
+	// Reopen directory, read a third time in pieces, make sure contents match.
+	if d = t.openDir(dir); d == nil {
+		return
+	}
+	defer d.Close()
+	list2 = nil
+	for {
+		n := 1
+		if len(list2) > 0 {
+			n = 2
+		}
+		frag, err := d.ReadDir(n)
+		if len(frag) > n {
+			t.errorf("%s: third Open: ReadDir(%d) after %d: %d entries (too many)", dir, n, len(list2), len(frag))
+			return
+		}
+		list2 = append(list2, frag...)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.errorf("%s: third Open: ReadDir(%d) after %d: %v", dir, n, len(list2), err)
+			return
+		}
+		if n == 0 {
+			t.errorf("%s: third Open: ReadDir(%d) after %d: 0 entries but nil error", dir, n, len(list2))
+			return
+		}
+	}
+	t.checkDirList(dir, "first Open+ReadDir(-1) vs third Open+ReadDir(1,2) loop", list, list2)
+
+	// If fsys has ReadDir, check that it matches and is sorted.
+	if fsys, ok := t.fsys.(fs.ReadDirFS); ok {
+		list2, err := fsys.ReadDir(dir)
+		if err != nil {
+			t.errorf("%s: fsys.ReadDir: %v", dir, err)
+			return
+		}
+		t.checkDirList(dir, "first Open+ReadDir(-1) vs fsys.ReadDir", list, list2)
+
+		for i := 0; i+1 < len(list2); i++ {
+			if list2[i].Name() >= list2[i+1].Name() {
+				t.errorf("%s: fsys.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
+			}
+		}
+	}
+
+	// Check fs.ReadDir as well.
+	list2, err = fs.ReadDir(t.fsys, dir)
+	if err != nil {
+		t.errorf("%s: fs.ReadDir: %v", dir, err)
+		return
+	}
+	t.checkDirList(dir, "first Open+ReadDir(-1) vs fs.ReadDir", list, list2)
+
+	for i := 0; i+1 < len(list2); i++ {
+		if list2[i].Name() >= list2[i+1].Name() {
+			t.errorf("%s: fs.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
+		}
+	}
+
+	t.checkGlob(dir, list)
+}
+
+// formatEntry formats an fs.DirEntry into a string for error messages and comparison.
+func formatEntry(entry fs.DirEntry) string {
+	return fmt.Sprintf("%s IsDir=%v Type=%v", entry.Name(), entry.IsDir(), entry.Type())
+}
+
+// formatInfoEntry formats an fs.FileInfo into a string like the result of formatEntry, for error messages and comparison.
+func formatInfoEntry(info fs.FileInfo) string {
+	return fmt.Sprintf("%s IsDir=%v Type=%v", info.Name(), info.IsDir(), info.Mode()&fs.ModeType)
+}
+
+// formatInfo formats an fs.FileInfo into a string for error messages and comparison.
+func formatInfo(info fs.FileInfo) string {
+	return fmt.Sprintf("%s IsDir=%v Mode=%v Size=%d ModTime=%v", info.Name(), info.IsDir(), info.Mode(), info.Size(), info.ModTime())
+}
+
+// checkGlob checks that various glob patterns work if the file system implements GlobFS.
+func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) {
+	if _, ok := t.fsys.(fs.GlobFS); !ok {
+		return
+	}
+
+	// Make a complex glob pattern prefix that only matches dir.
+	var glob string
+	if dir != "." {
+		elem := strings.Split(dir, "/")
+		for i, e := range elem {
+			var pattern []rune
+			for j, r := range e {
+				if r == '*' || r == '?' || r == '\\' || r == '[' || r == '-' {
+					pattern = append(pattern, '\\', r)
+					continue
+				}
+				switch (i + j) % 5 {
+				case 0:
+					pattern = append(pattern, r)
+				case 1:
+					pattern = append(pattern, '[', r, ']')
+				case 2:
+					pattern = append(pattern, '[', r, '-', r, ']')
+				case 3:
+					pattern = append(pattern, '[', '\\', r, ']')
+				case 4:
+					pattern = append(pattern, '[', '\\', r, '-', '\\', r, ']')
+				}
+			}
+			elem[i] = string(pattern)
+		}
+		glob = strings.Join(elem, "/") + "/"
+	}
+
+	// Test that malformed patterns are detected.
+	// The error is likely path.ErrBadPattern but need not be.
+	if _, err := t.fsys.(fs.GlobFS).Glob(glob + "nonexist/[]"); err == nil {
+		t.errorf("%s: Glob(%#q): bad pattern not detected", dir, glob+"nonexist/[]")
+	}
+
+	// Try to find a letter that appears in only some of the final names.
+	c := rune('a')
+	for ; c <= 'z'; c++ {
+		have, haveNot := false, false
+		for _, d := range list {
+			if strings.ContainsRune(d.Name(), c) {
+				have = true
+			} else {
+				haveNot = true
+			}
+		}
+		if have && haveNot {
+			break
+		}
+	}
+	if c > 'z' {
+		c = 'a'
+	}
+	glob += "*" + string(c) + "*"
+
+	var want []string
+	for _, d := range list {
+		if strings.ContainsRune(d.Name(), c) {
+			want = append(want, path.Join(dir, d.Name()))
+		}
+	}
+
+	names, err := t.fsys.(fs.GlobFS).Glob(glob)
+	if err != nil {
+		t.errorf("%s: Glob(%#q): %v", dir, glob, err)
+		return
+	}
+	if reflect.DeepEqual(want, names) {
+		return
+	}
+
+	if !sort.StringsAreSorted(names) {
+		t.errorf("%s: Glob(%#q): unsorted output:\n%s", dir, glob, strings.Join(names, "\n"))
+		sort.Strings(names)
+	}
+
+	var problems []string
+	for len(want) > 0 || len(names) > 0 {
+		switch {
+		case len(want) > 0 && len(names) > 0 && want[0] == names[0]:
+			want, names = want[1:], names[1:]
+		case len(want) > 0 && (len(names) == 0 || want[0] < names[0]):
+			problems = append(problems, "missing: "+want[0])
+			want = want[1:]
+		default:
+			problems = append(problems, "extra: "+names[0])
+			names = names[1:]
+		}
+	}
+	t.errorf("%s: Glob(%#q): wrong output:\n%s", dir, glob, strings.Join(problems, "\n"))
+}
+
+// checkStat checks that a direct stat of path matches entry,
+// which was found in the parent's directory listing.
+func (t *fsTester) checkStat(path string, entry fs.DirEntry) {
+	file, err := t.fsys.Open(path)
+	if err != nil {
+		t.errorf("%s: Open: %v", path, err)
+		return
+	}
+	info, err := file.Stat()
+	file.Close()
+	if err != nil {
+		t.errorf("%s: Stat: %v", path, err)
+		return
+	}
+	fentry := formatEntry(entry)
+	fientry := formatInfoEntry(info)
+	// Note: mismatch here is OK for symlink, because Open dereferences symlink.
+	if fentry != fientry && entry.Type()&fs.ModeSymlink == 0 {
+		t.errorf("%s: mismatch:\n\tentry = %s\n\tfile.Stat() = %s", path, fentry, fientry)
+	}
+
+	einfo, err := entry.Info()
+	if err != nil {
+		t.errorf("%s: entry.Info: %v", path, err)
+		return
+	}
+	finfo := formatInfo(info)
+	if entry.Type()&fs.ModeSymlink != 0 {
+		// For symlink, just check that entry.Info matches entry on common fields.
+		// Open dereferences symlink, so info itself may differ.
+		feentry := formatInfoEntry(einfo)
+		if fentry != feentry {
+			t.errorf("%s: mismatch\n\tentry = %s\n\tentry.Info() = %s\n", path, fentry, feentry)
+		}
+	} else {
+		feinfo := formatInfo(einfo)
+		if feinfo != finfo {
+			t.errorf("%s: mismatch:\n\tentry.Info() = %s\n\tfile.Stat() = %s\n", path, feinfo, finfo)
+		}
+	}
+
+	// Stat should be the same as Open+Stat, even for symlinks.
+	info2, err := fs.Stat(t.fsys, path)
+	if err != nil {
+		t.errorf("%s: fs.Stat: %v", path, err)
+		return
+	}
+	finfo2 := formatInfo(info2)
+	if finfo2 != finfo {
+		t.errorf("%s: fs.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
+	}
+
+	if fsys, ok := t.fsys.(fs.StatFS); ok {
+		info2, err := fsys.Stat(path)
+		if err != nil {
+			t.errorf("%s: fsys.Stat: %v", path, err)
+			return
+		}
+		finfo2 := formatInfo(info2)
+		if finfo2 != finfo {
+			t.errorf("%s: fsys.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
+		}
+	}
+}
+
+// checkDirList checks that two directory lists contain the same files and file info.
+// The order of the lists need not match.
+func (t *fsTester) checkDirList(dir, desc string, list1, list2 []fs.DirEntry) {
+	old := make(map[string]fs.DirEntry)
+	checkMode := func(entry fs.DirEntry) {
+		if entry.IsDir() != (entry.Type()&fs.ModeDir != 0) {
+			if entry.IsDir() {
+				t.errorf("%s: ReadDir returned %s with IsDir() = true, Type() & ModeDir = 0", dir, entry.Name())
+			} else {
+				t.errorf("%s: ReadDir returned %s with IsDir() = false, Type() & ModeDir = ModeDir", dir, entry.Name())
+			}
+		}
+	}
+
+	for _, entry1 := range list1 {
+		old[entry1.Name()] = entry1
+		checkMode(entry1)
+	}
+
+	var diffs []string
+	for _, entry2 := range list2 {
+		entry1 := old[entry2.Name()]
+		if entry1 == nil {
+			checkMode(entry2)
+			diffs = append(diffs, "+ "+formatEntry(entry2))
+			continue
+		}
+		if formatEntry(entry1) != formatEntry(entry2) {
+			diffs = append(diffs, "- "+formatEntry(entry1), "+ "+formatEntry(entry2))
+		}
+		delete(old, entry2.Name())
+	}
+	for _, entry1 := range old {
+		diffs = append(diffs, "- "+formatEntry(entry1))
+	}
+
+	if len(diffs) == 0 {
+		return
+	}
+
+	sort.Slice(diffs, func(i, j int) bool {
+		fi := strings.Fields(diffs[i])
+		fj := strings.Fields(diffs[j])
+		// sort by name (i < j) and then +/- (j < i, because + < -)
+		return fi[1]+" "+fj[0] < fj[1]+" "+fi[0]
+	})
+
+	t.errorf("%s: diff %s:\n\t%s", dir, desc, strings.Join(diffs, "\n\t"))
+}
+
+// checkFile checks that basic file reading works correctly.
+func (t *fsTester) checkFile(file string) {
+	t.files = append(t.files, file)
+
+	// Read entire file.
+	f, err := t.fsys.Open(file)
+	if err != nil {
+		t.errorf("%s: Open: %v", file, err)
+		return
+	}
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		f.Close()
+		t.errorf("%s: Open+ReadAll: %v", file, err)
+		return
+	}
+
+	if err := f.Close(); err != nil {
+		t.errorf("%s: Close: %v", file, err)
+	}
+
+	// Check that closing twice doesn't crash.
+	// The return value doesn't matter.
+	f.Close()
+
+	// Check that ReadFile works if present.
+	if fsys, ok := t.fsys.(fs.ReadFileFS); ok {
+		data2, err := fsys.ReadFile(file)
+		if err != nil {
+			t.errorf("%s: fsys.ReadFile: %v", file, err)
+			return
+		}
+		t.checkFileRead(file, "ReadAll vs fsys.ReadFile", data, data2)
+
+		// Modify the data and check it again. Modifying the
+		// returned byte slice should not affect the next call.
+		for i := range data2 {
+			data2[i]++
+		}
+		data2, err = fsys.ReadFile(file)
+		if err != nil {
+			t.errorf("%s: second call to fsys.ReadFile: %v", file, err)
+			return
+		}
+		t.checkFileRead(file, "Readall vs second fsys.ReadFile", data, data2)
+
+		t.checkBadPath(file, "ReadFile",
+			func(name string) error { _, err := fsys.ReadFile(name); return err })
+	}
+
+	// Check that fs.ReadFile works with t.fsys.
+	data2, err := fs.ReadFile(t.fsys, file)
+	if err != nil {
+		t.errorf("%s: fs.ReadFile: %v", file, err)
+		return
+	}
+	t.checkFileRead(file, "ReadAll vs fs.ReadFile", data, data2)
+
+	// Use iotest.TestReader to check small reads, Seek, ReadAt.
+	f, err = t.fsys.Open(file)
+	if err != nil {
+		t.errorf("%s: second Open: %v", file, err)
+		return
+	}
+	defer f.Close()
+	if err := iotest.TestReader(f, data); err != nil {
+		t.errorf("%s: failed TestReader:\n\t%s", file, strings.ReplaceAll(err.Error(), "\n", "\n\t"))
+	}
+}
+
+func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) {
+	if string(data1) != string(data2) {
+		t.errorf("%s: %s: different data returned\n\t%q\n\t%q", file, desc, data1, data2)
+		return
+	}
+}
+
+// checkOpen checks that various invalid forms of file's name cannot be opened using t.fsys.Open.
+func (t *fsTester) checkOpen(file string) {
+	t.checkBadPath(file, "Open", func(file string) error {
+		f, err := t.fsys.Open(file)
+		if err == nil {
+			f.Close()
+		}
+		return err
+	})
+}
+
+// checkBadPath checks that various invalid forms of file's name cannot be opened using open.
+func (t *fsTester) checkBadPath(file string, desc string, open func(string) error) {
+	bad := []string{
+		"/" + file,
+		file + "/.",
+	}
+	if file == "." {
+		bad = append(bad, "/")
+	}
+	if i := strings.Index(file, "/"); i >= 0 {
+		bad = append(bad,
+			file[:i]+"//"+file[i+1:],
+			file[:i]+"/./"+file[i+1:],
+			file[:i]+`\`+file[i+1:],
+			file[:i]+"/../"+file,
+		)
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		bad = append(bad,
+			file[:i]+"//"+file[i+1:],
+			file[:i]+"/./"+file[i+1:],
+			file[:i]+`\`+file[i+1:],
+			file+"/../"+file[i+1:],
+		)
+	}
+
+	for _, b := range bad {
+		if err := open(b); err == nil {
+			t.errorf("%s: %s(%s) succeeded, want error", file, desc, b)
+		}
+	}
+}
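
A hedged sketch of using TestFS from an ordinary test (the package and file names here are made up for illustration): it walks the file system, checks Open/ReadDir/Stat/Glob behavior, and fails when an expected file is missing.

	package mypkg_test

	import (
		"testing"

		"golang.org/x/website/internal/backport/testing/fstest"
	)

	func TestMyFS(t *testing.T) {
		fsys := fstest.MapFS{
			"a.txt":     {Data: []byte("A")},
			"dir/b.txt": {Data: []byte("B")},
		}

		// All listed files exist and behave, so this passes.
		if err := fstest.TestFS(fsys, "a.txt", "dir/b.txt"); err != nil {
			t.Fatal(err)
		}

		// Asking for a file the FS does not contain yields a multi-line
		// error mentioning "expected but not found".
		if err := fstest.TestFS(fsys, "missing.txt"); err == nil {
			t.Fatal("TestFS passed unexpectedly for a missing file")
		}
	}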
diff --git a/internal/backport/testing/iotest/example_test.go b/internal/backport/testing/iotest/example_test.go
new file mode 100644
index 0000000..ce3607d
--- /dev/null
+++ b/internal/backport/testing/iotest/example_test.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest_test
+
+import (
+	"errors"
+	"fmt"
+
+	"golang.org/x/website/internal/backport/testing/iotest"
+)
+
+func ExampleErrReader() {
+	// A reader that always returns a custom error.
+	r := iotest.ErrReader(errors.New("custom error"))
+	n, err := r.Read(nil)
+	fmt.Printf("n:   %d\nerr: %q\n", n, err)
+
+	// Output:
+	// n:   0
+	// err: "custom error"
+}
diff --git a/internal/backport/testing/iotest/logger.go b/internal/backport/testing/iotest/logger.go
new file mode 100644
index 0000000..99548dc
--- /dev/null
+++ b/internal/backport/testing/iotest/logger.go
@@ -0,0 +1,54 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+	"io"
+	"log"
+)
+
+type writeLogger struct {
+	prefix string
+	w      io.Writer
+}
+
+func (l *writeLogger) Write(p []byte) (n int, err error) {
+	n, err = l.w.Write(p)
+	if err != nil {
+		log.Printf("%s %x: %v", l.prefix, p[0:n], err)
+	} else {
+		log.Printf("%s %x", l.prefix, p[0:n])
+	}
+	return
+}
+
+// NewWriteLogger returns a writer that behaves like w except
+// that it logs (using log.Printf) each write to standard error,
+// printing the prefix and the hexadecimal data written.
+func NewWriteLogger(prefix string, w io.Writer) io.Writer {
+	return &writeLogger{prefix, w}
+}
+
+type readLogger struct {
+	prefix string
+	r      io.Reader
+}
+
+func (l *readLogger) Read(p []byte) (n int, err error) {
+	n, err = l.r.Read(p)
+	if err != nil {
+		log.Printf("%s %x: %v", l.prefix, p[0:n], err)
+	} else {
+		log.Printf("%s %x", l.prefix, p[0:n])
+	}
+	return
+}
+
+// NewReadLogger returns a reader that behaves like r except
+// that it logs (using log.Printf) each read to standard error,
+// printing the prefix and the hexadecimal data read.
+func NewReadLogger(prefix string, r io.Reader) io.Reader {
+	return &readLogger{prefix, r}
+}
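
As an illustrative sketch of the two wrappers (the log format follows from the code above: the prefix, then the data in hex; the variable names are just local choices):

	package main

	import (
		"bytes"
		"io"
		"io/ioutil"
		"log"
		"strings"

		"golang.org/x/website/internal/backport/testing/iotest"
	)

	func main() {
		log.SetFlags(0)

		// Each write lands in buf and is logged as: write: 68656c6c6f
		var buf bytes.Buffer
		w := iotest.NewWriteLogger("write:", &buf)
		io.WriteString(w, "hello")

		// Each read (including the final EOF read) is logged, e.g.: read: 776f726c64
		r := iotest.NewReadLogger("read:", strings.NewReader("world"))
		ioutil.ReadAll(r)
	}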
diff --git a/internal/backport/testing/iotest/logger_test.go b/internal/backport/testing/iotest/logger_test.go
new file mode 100644
index 0000000..fec4467
--- /dev/null
+++ b/internal/backport/testing/iotest/logger_test.go
@@ -0,0 +1,152 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"log"
+	"testing"
+)
+
+type errWriter struct {
+	err error
+}
+
+func (w errWriter) Write([]byte) (int, error) {
+	return 0, w.err
+}
+
+func TestWriteLogger(t *testing.T) {
+	olw := log.Writer()
+	olf := log.Flags()
+	olp := log.Prefix()
+
+	// Revert the original log settings before we exit.
+	defer func() {
+		log.SetFlags(olf)
+		log.SetPrefix(olp)
+		log.SetOutput(olw)
+	}()
+
+	lOut := new(bytes.Buffer)
+	log.SetPrefix("lw: ")
+	log.SetOutput(lOut)
+	log.SetFlags(0)
+
+	lw := new(bytes.Buffer)
+	wl := NewWriteLogger("write:", lw)
+	if _, err := wl.Write([]byte("Hello, World!")); err != nil {
+		t.Fatalf("Unexpectedly failed to write: %v", err)
+	}
+
+	if g, w := lw.String(), "Hello, World!"; g != w {
+		t.Errorf("WriteLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+	wantLogWithHex := fmt.Sprintf("lw: write: %x\n", "Hello, World!")
+	if g, w := lOut.String(), wantLogWithHex; g != w {
+		t.Errorf("WriteLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+}
+
+func TestWriteLogger_errorOnWrite(t *testing.T) {
+	olw := log.Writer()
+	olf := log.Flags()
+	olp := log.Prefix()
+
+	// Revert the original log settings before we exit.
+	defer func() {
+		log.SetFlags(olf)
+		log.SetPrefix(olp)
+		log.SetOutput(olw)
+	}()
+
+	lOut := new(bytes.Buffer)
+	log.SetPrefix("lw: ")
+	log.SetOutput(lOut)
+	log.SetFlags(0)
+
+	lw := errWriter{err: errors.New("Write Error!")}
+	wl := NewWriteLogger("write:", lw)
+	if _, err := wl.Write([]byte("Hello, World!")); err == nil {
+		t.Fatalf("Unexpectedly succeeded to write: %v", err)
+	}
+
+	wantLogWithHex := fmt.Sprintf("lw: write: %x: %v\n", "", "Write Error!")
+	if g, w := lOut.String(), wantLogWithHex; g != w {
+		t.Errorf("WriteLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+}
+
+func TestReadLogger(t *testing.T) {
+	olw := log.Writer()
+	olf := log.Flags()
+	olp := log.Prefix()
+
+	// Revert the original log settings before we exit.
+	defer func() {
+		log.SetFlags(olf)
+		log.SetPrefix(olp)
+		log.SetOutput(olw)
+	}()
+
+	lOut := new(bytes.Buffer)
+	log.SetPrefix("lr: ")
+	log.SetOutput(lOut)
+	log.SetFlags(0)
+
+	data := []byte("Hello, World!")
+	p := make([]byte, len(data))
+	lr := bytes.NewReader(data)
+	rl := NewReadLogger("read:", lr)
+
+	n, err := rl.Read(p)
+	if err != nil {
+		t.Fatalf("Unexpectedly failed to read: %v", err)
+	}
+
+	if g, w := p[:n], data; !bytes.Equal(g, w) {
+		t.Errorf("ReadLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+
+	wantLogWithHex := fmt.Sprintf("lr: read: %x\n", "Hello, World!")
+	if g, w := lOut.String(), wantLogWithHex; g != w {
+		t.Errorf("ReadLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+}
+
+func TestReadLogger_errorOnRead(t *testing.T) {
+	olw := log.Writer()
+	olf := log.Flags()
+	olp := log.Prefix()
+
+	// Revert the original log settings before we exit.
+	defer func() {
+		log.SetFlags(olf)
+		log.SetPrefix(olp)
+		log.SetOutput(olw)
+	}()
+
+	lOut := new(bytes.Buffer)
+	log.SetPrefix("lr: ")
+	log.SetOutput(lOut)
+	log.SetFlags(0)
+
+	data := []byte("Hello, World!")
+	p := make([]byte, len(data))
+
+	lr := ErrReader(errors.New("io failure"))
+	rl := NewReadLogger("read", lr)
+	n, err := rl.Read(p)
+	if err == nil {
+		t.Fatalf("Unexpectedly succeeded to read: %v", err)
+	}
+
+	wantLogWithHex := fmt.Sprintf("lr: read %x: io failure\n", p[:n])
+	if g, w := lOut.String(), wantLogWithHex; g != w {
+		t.Errorf("ReadLogger mismatch\n\tgot:  %q\n\twant: %q", g, w)
+	}
+}
diff --git a/internal/backport/testing/iotest/reader.go b/internal/backport/testing/iotest/reader.go
new file mode 100644
index 0000000..33b782d
--- /dev/null
+++ b/internal/backport/testing/iotest/reader.go
@@ -0,0 +1,269 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package iotest implements Readers and Writers useful mainly for testing.
+package iotest
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// OneByteReader returns a Reader that implements
+// each non-empty Read by reading one byte from r.
+func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} }
+
+type oneByteReader struct {
+	r io.Reader
+}
+
+func (r *oneByteReader) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+	return r.r.Read(p[0:1])
+}
+
+// HalfReader returns a Reader that implements Read
+// by reading half as many requested bytes from r.
+func HalfReader(r io.Reader) io.Reader { return &halfReader{r} }
+
+type halfReader struct {
+	r io.Reader
+}
+
+func (r *halfReader) Read(p []byte) (int, error) {
+	return r.r.Read(p[0 : (len(p)+1)/2])
+}
+
+// DataErrReader changes the way errors are handled by a Reader. Normally, a
+// Reader returns an error (typically EOF) from the first Read call after the
+// last piece of data is read. DataErrReader wraps a Reader and changes its
+// behavior so the final error is returned along with the final data, instead
+// of in the first call after the final data.
+func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} }
+
+type dataErrReader struct {
+	r      io.Reader
+	unread []byte
+	data   []byte
+}
+
+func (r *dataErrReader) Read(p []byte) (n int, err error) {
+	// loop because first call needs two reads:
+	// one to get data and a second to look for an error.
+	for {
+		if len(r.unread) == 0 {
+			n1, err1 := r.r.Read(r.data)
+			r.unread = r.data[0:n1]
+			err = err1
+		}
+		if n > 0 || err != nil {
+			break
+		}
+		n = copy(p, r.unread)
+		r.unread = r.unread[n:]
+	}
+	return
+}
+
+// ErrTimeout is a fake timeout error.
+var ErrTimeout = errors.New("timeout")
+
+// TimeoutReader returns ErrTimeout on the second read
+// with no data. Subsequent calls to read succeed.
+func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }
+
+type timeoutReader struct {
+	r     io.Reader
+	count int
+}
+
+func (r *timeoutReader) Read(p []byte) (int, error) {
+	r.count++
+	if r.count == 2 {
+		return 0, ErrTimeout
+	}
+	return r.r.Read(p)
+}
+
+// ErrReader returns an io.Reader that returns 0, err from all Read calls.
+func ErrReader(err error) io.Reader {
+	return &errReader{err: err}
+}
+
+type errReader struct {
+	err error
+}
+
+func (r *errReader) Read(p []byte) (int, error) {
+	return 0, r.err
+}
+
+type smallByteReader struct {
+	r   io.Reader
+	off int
+	n   int
+}
+
+func (r *smallByteReader) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+	r.n = r.n%3 + 1
+	n := r.n
+	if n > len(p) {
+		n = len(p)
+	}
+	n, err := r.r.Read(p[0:n])
+	if err != nil && err != io.EOF {
+		err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
+	}
+	r.off += n
+	return n, err
+}
+
+// TestReader tests that reading from r returns the expected file content.
+// It does reads of different sizes, until EOF.
+// If r implements io.ReaderAt or io.Seeker, TestReader also checks
+// that those operations behave as they should.
+//
+// If TestReader finds any misbehaviors, it returns an error reporting them.
+// The error text may span multiple lines.
+func TestReader(r io.Reader, content []byte) error {
+	if len(content) > 0 {
+		n, err := r.Read(nil)
+		if n != 0 || err != nil {
+			return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err)
+		}
+	}
+
+	data, err := ioutil.ReadAll(&smallByteReader{r: r})
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(data, content) {
+		return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content)
+	}
+	n, err := r.Read(make([]byte, 10))
+	if n != 0 || err != io.EOF {
+		return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err)
+	}
+
+	if r, ok := r.(io.ReadSeeker); ok {
+		// Seek(0, 1) should report the current file position (EOF).
+		if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil {
+			return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content))
+		}
+
+		// Seek backward partway through file, in two steps.
+		// If middle == 0, len(content) == 0, can't use the -1 and +1 seeks.
+		middle := len(content) - len(content)/3
+		if middle > 0 {
+			if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil {
+				return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1)
+			}
+			if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil {
+				return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1)
+			}
+			if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil {
+				return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle)
+			}
+		}
+
+		// Seek(0, 1) should report the current file position (middle).
+		if off, err := r.Seek(0, 1); off != int64(middle) || err != nil {
+			return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle)
+		}
+
+		// Reading forward should return the last part of the file.
+		data, err := ioutil.ReadAll(&smallByteReader{r: r})
+		if err != nil {
+			return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
+		}
+		if !bytes.Equal(data, content[middle:]) {
+			return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
+		}
+
+		// Seek relative to end of file, but start elsewhere.
+		if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
+			return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
+		}
+		if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil {
+			return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle)
+		}
+
+		// Reading forward should return the last part of the file (again).
+		data, err = ioutil.ReadAll(&smallByteReader{r: r})
+		if err != nil {
+			return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
+		}
+		if !bytes.Equal(data, content[middle:]) {
+			return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
+		}
+
+		// Absolute seek & read forward.
+		if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
+			return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
+		}
+		data, err = ioutil.ReadAll(r)
+		if err != nil {
+			return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err)
+		}
+		if !bytes.Equal(data, content[middle/2:]) {
+			return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:])
+		}
+	}
+
+	if r, ok := r.(io.ReaderAt); ok {
+		data := make([]byte, len(content), len(content)+1)
+		for i := range data {
+			data[i] = 0xfe
+		}
+		n, err := r.ReadAt(data, 0)
+		if n != len(data) || err != nil && err != io.EOF {
+			return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data))
+		}
+		if !bytes.Equal(data, content) {
+			return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
+		}
+
+		n, err = r.ReadAt(data[:1], int64(len(data)))
+		if n != 0 || err != io.EOF {
+			return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err)
+		}
+
+		for i := range data {
+			data[i] = 0xfe
+		}
+		n, err = r.ReadAt(data[:cap(data)], 0)
+		if n != len(data) || err != io.EOF {
+			return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data))
+		}
+		if !bytes.Equal(data, content) {
+			return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
+		}
+
+		for i := range data {
+			data[i] = 0xfe
+		}
+		for i := range data {
+			n, err = r.ReadAt(data[i:i+1], int64(i))
+			if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) {
+				want := "nil"
+				if i == len(data)-1 {
+					want = "nil or EOF"
+				}
+				return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want)
+			}
+			if data[i] != content[i] {
+				return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1])
+			}
+		}
+	}
+	return nil
+}
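
For a feel of what DataErrReader changes, a small sketch (behavior follows from the implementation above): a plain reader delivers the data first and io.EOF on a later call, while the wrapped reader delivers the final data and io.EOF together.

	package main

	import (
		"fmt"
		"io"
		"strings"

		"golang.org/x/website/internal/backport/testing/iotest"
	)

	func main() {
		buf := make([]byte, 8)

		// Plain reader: data now, EOF on the next call.
		r1 := strings.NewReader("gopher")
		n, err := r1.Read(buf)
		fmt.Println(n, err) // 6 <nil>
		n, err = r1.Read(buf)
		fmt.Println(n, err) // 0 EOF

		// DataErrReader: the final data arrives together with io.EOF.
		r2 := iotest.DataErrReader(strings.NewReader("gopher"))
		n, err = r2.Read(buf)
		fmt.Println(n, err == io.EOF) // 6 true
	}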
diff --git a/internal/backport/testing/iotest/reader_test.go b/internal/backport/testing/iotest/reader_test.go
new file mode 100644
index 0000000..f149e74
--- /dev/null
+++ b/internal/backport/testing/iotest/reader_test.go
@@ -0,0 +1,261 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestOneByteReader_nonEmptyReader(t *testing.T) {
+	msg := "Hello, World!"
+	buf := new(bytes.Buffer)
+	buf.WriteString(msg)
+
+	obr := OneByteReader(buf)
+	var b []byte
+	n, err := obr.Read(b)
+	if err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+
+	b = make([]byte, 3)
+	// Read from obr until EOF.
+	got := new(bytes.Buffer)
+	for i := 0; ; i++ {
+		n, err = obr.Read(b)
+		if err != nil {
+			break
+		}
+		if g, w := n, 1; g != w {
+			t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
+		}
+		got.Write(b[:n])
+	}
+	if g, w := err, io.EOF; g != w {
+		t.Errorf("Unexpected error after reading all bytes\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := got.String(), "Hello, World!"; g != w {
+		t.Errorf("Read mismatch\n\tGot:  %q\n\tWant: %q", g, w)
+	}
+}
+
+func TestOneByteReader_emptyReader(t *testing.T) {
+	r := new(bytes.Buffer)
+
+	obr := OneByteReader(r)
+	var b []byte
+	if n, err := obr.Read(b); err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+
+	b = make([]byte, 5)
+	n, err := obr.Read(b)
+	if g, w := err, io.EOF; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+}
+
+func TestHalfReader_nonEmptyReader(t *testing.T) {
+	msg := "Hello, World!"
+	buf := new(bytes.Buffer)
+	buf.WriteString(msg)
+	// empty read buffer
+	hr := HalfReader(buf)
+	var b []byte
+	n, err := hr.Read(b)
+	if err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+	// non empty read buffer
+	b = make([]byte, 2)
+	got := new(bytes.Buffer)
+	for i := 0; ; i++ {
+		n, err = hr.Read(b)
+		if err != nil {
+			break
+		}
+		if g, w := n, 1; g != w {
+			t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
+		}
+		got.Write(b[:n])
+	}
+	if g, w := err, io.EOF; g != w {
+		t.Errorf("Unexpected error after reading all bytes\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := got.String(), "Hello, World!"; g != w {
+		t.Errorf("Read mismatch\n\tGot:  %q\n\tWant: %q", g, w)
+	}
+}
+
+func TestHalfReader_emptyReader(t *testing.T) {
+	r := new(bytes.Buffer)
+
+	hr := HalfReader(r)
+	var b []byte
+	if n, err := hr.Read(b); err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+
+	b = make([]byte, 5)
+	n, err := hr.Read(b)
+	if g, w := err, io.EOF; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+}
+
+func TestTimeOutReader_nonEmptyReader(t *testing.T) {
+	msg := "Hello, World!"
+	buf := new(bytes.Buffer)
+	buf.WriteString(msg)
+	// empty read buffer
+	tor := TimeoutReader(buf)
+	var b []byte
+	n, err := tor.Read(b)
+	if err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+	// Second call should timeout
+	n, err = tor.Read(b)
+	if g, w := err, ErrTimeout; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+	// non empty read buffer
+	tor2 := TimeoutReader(buf)
+	b = make([]byte, 3)
+	if n, err := tor2.Read(b); err != nil || n == 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+	// Second call should timeout
+	n, err = tor2.Read(b)
+	if g, w := err, ErrTimeout; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+}
+
+func TestTimeOutReader_emptyReader(t *testing.T) {
+	r := new(bytes.Buffer)
+	// empty read buffer
+	tor := TimeoutReader(r)
+	var b []byte
+	if n, err := tor.Read(b); err != nil || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+	// Second call should timeout
+	n, err := tor.Read(b)
+	if g, w := err, ErrTimeout; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+	// non empty read buffer
+	tor2 := TimeoutReader(r)
+	b = make([]byte, 5)
+	if n, err := tor2.Read(b); err != io.EOF || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+	// Second call should timeout
+	n, err = tor2.Read(b)
+	if g, w := err, ErrTimeout; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+}
+
+func TestDataErrReader_nonEmptyReader(t *testing.T) {
+	msg := "Hello, World!"
+	buf := new(bytes.Buffer)
+	buf.WriteString(msg)
+
+	der := DataErrReader(buf)
+
+	b := make([]byte, 3)
+	got := new(bytes.Buffer)
+	var n int
+	var err error
+	for {
+		n, err = der.Read(b)
+		got.Write(b[:n])
+		if err != nil {
+			break
+		}
+	}
+	if err != io.EOF || n == 0 {
+		t.Errorf("Last Read returned n=%d err=%v", n, err)
+	}
+	if g, w := got.String(), "Hello, World!"; g != w {
+		t.Errorf("Read mismatch\n\tGot:  %q\n\tWant: %q", g, w)
+	}
+}
+
+func TestDataErrReader_emptyReader(t *testing.T) {
+	r := new(bytes.Buffer)
+
+	der := DataErrReader(r)
+	var b []byte
+	if n, err := der.Read(b); err != io.EOF || n != 0 {
+		t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+	}
+
+	b = make([]byte, 5)
+	n, err := der.Read(b)
+	if g, w := err, io.EOF; g != w {
+		t.Errorf("Error mismatch\n\tGot:  %v\n\tWant: %v", g, w)
+	}
+	if g, w := n, 0; g != w {
+		t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+	}
+}
+
+func TestErrReader(t *testing.T) {
+	cases := []struct {
+		name string
+		err  error
+	}{
+		{"nil error", nil},
+		{"non-nil error", errors.New("io failure")},
+		{"io.EOF", io.EOF},
+	}
+
+	for _, tt := range cases {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			n, err := ErrReader(tt.err).Read(nil)
+			if err != tt.err {
+				t.Fatalf("Error mismatch\nGot:  %v\nWant: %v", err, tt.err)
+			}
+			if n != 0 {
+				t.Fatalf("Byte count mismatch: got %d want 0", n)
+			}
+		})
+	}
+}
+
+func TestStringsReader(t *testing.T) {
+	const msg = "Now is the time for all good gophers."
+
+	r := strings.NewReader(msg)
+	if err := TestReader(r, []byte(msg)); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/internal/backport/testing/iotest/writer.go b/internal/backport/testing/iotest/writer.go
new file mode 100644
index 0000000..af61ab8
--- /dev/null
+++ b/internal/backport/testing/iotest/writer.go
@@ -0,0 +1,35 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import "io"
+
+// TruncateWriter returns a Writer that writes to w
+// but stops silently after n bytes.
+func TruncateWriter(w io.Writer, n int64) io.Writer {
+	return &truncateWriter{w, n}
+}
+
+type truncateWriter struct {
+	w io.Writer
+	n int64
+}
+
+func (t *truncateWriter) Write(p []byte) (n int, err error) {
+	if t.n <= 0 {
+		return len(p), nil
+	}
+	// real write
+	n = len(p)
+	if int64(n) > t.n {
+		n = int(t.n)
+	}
+	n, err = t.w.Write(p[0:n])
+	t.n -= int64(n)
+	if err == nil {
+		n = len(p)
+	}
+	return
+}
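
A quick sketch of the truncation behavior (note that the reported count is the full length of p once the limited write succeeds, per the code above):

	package main

	import (
		"bytes"
		"fmt"

		"golang.org/x/website/internal/backport/testing/iotest"
	)

	func main() {
		var buf bytes.Buffer
		w := iotest.TruncateWriter(&buf, 3)

		n, err := w.Write([]byte("abcde"))
		fmt.Println(n, err)       // 5 <nil>  (the full write is reported)
		fmt.Println(buf.String()) // abc      (but only 3 bytes reach buf)

		// Later writes are swallowed silently.
		w.Write([]byte("xyz"))
		fmt.Println(buf.String()) // still abc
	}

The table in writer_test.go below checks the same contract.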
diff --git a/internal/backport/testing/iotest/writer_test.go b/internal/backport/testing/iotest/writer_test.go
new file mode 100644
index 0000000..5aaa77c
--- /dev/null
+++ b/internal/backport/testing/iotest/writer_test.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+	"bytes"
+	"testing"
+)
+
+var truncateWriterTests = []struct {
+	in    string
+	want  string
+	trunc int64
+	n     int
+}{
+	{"hello", "", -1, 5},
+	{"world", "", 0, 5},
+	{"abcde", "abc", 3, 5},
+	{"edcba", "edcba", 7, 5},
+}
+
+func TestTruncateWriter(t *testing.T) {
+	for _, tt := range truncateWriterTests {
+		buf := new(bytes.Buffer)
+		tw := TruncateWriter(buf, tt.trunc)
+		n, err := tw.Write([]byte(tt.in))
+		if err != nil {
+			t.Errorf("Unexpected error %v for\n\t%+v", err, tt)
+		}
+		if g, w := buf.String(), tt.want; g != w {
+			t.Errorf("got %q, expected %q", g, w)
+		}
+		if g, w := n, tt.n; g != w {
+			t.Errorf("read %d bytes, but expected to have read %d bytes for\n\t%+v", g, w, tt)
+		}
+	}
+}
diff --git a/internal/backport/text/template/doc.go b/internal/backport/text/template/doc.go
new file mode 100644
index 0000000..5332002
--- /dev/null
+++ b/internal/backport/text/template/doc.go
@@ -0,0 +1,463 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output.
+
+To generate HTML output, see package html/template, which has the same interface
+as this package but automatically secures HTML output against certain attacks.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+Except for raw strings, actions may not span newlines, although comments can.
+
+Once parsed, a template may be executed safely in parallel, although if parallel
+executions share a Writer the output may be interleaved.
+
+Here is a trivial example that prints "17 items are made of wool".
+
+	type Inventory struct {
+		Material string
+		Count    uint
+	}
+	sweaters := Inventory{"wool", 17}
+	tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
+	if err != nil { panic(err) }
+	err = tmpl.Execute(os.Stdout, sweaters)
+	if err != nil { panic(err) }
+
+More intricate examples appear below.
+
+Text and spaces
+
+By default, all text between actions is copied verbatim when the template is
+executed. For example, the string " items are made of " in the example above
+appears on standard output when the program is run.
+
+However, to aid in formatting template source code, if an action's left
+delimiter (by default "{{") is followed immediately by a minus sign and white
+space, all trailing white space is trimmed from the immediately preceding text.
+Similarly, if the right delimiter ("}}") is preceded by white space and a minus
+sign, all leading white space is trimmed from the immediately following text.
+In these trim markers, the white space must be present:
+"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
+"{{-3}}" parses as an action containing the number -3.
+
+For instance, when executing the template whose source is
+
+	"{{23 -}} < {{- 45}}"
+
+the generated output would be
+
+	"23<45"
+
+For this trimming, the definition of white space characters is the same as in Go:
+space, horizontal tab, carriage return, and newline.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail in the corresponding sections that follow.
+
+*/
+//	{{/* a comment */}}
+//	{{- /* a comment with white space trimmed from preceding and following text */ -}}
+//		A comment; discarded. May contain newlines.
+//		Comments do not nest and must start and end at the
+//		delimiters, as shown here.
+/*
+
+	{{pipeline}}
+		The default textual representation (the same as would be
+		printed by fmt.Print) of the value of the pipeline is copied
+		to the output.
+
+	{{if pipeline}} T1 {{end}}
+		If the value of the pipeline is empty, no output is generated;
+		otherwise, T1 is executed. The empty values are false, 0, any
+		nil pointer or interface value, and any array, slice, map, or
+		string of length zero.
+		Dot is unaffected.
+
+	{{if pipeline}} T1 {{else}} T0 {{end}}
+		If the value of the pipeline is empty, T0 is executed;
+		otherwise, T1 is executed. Dot is unaffected.
+
+	{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
+		To simplify the appearance of if-else chains, the else action
+		of an if may include another if directly; the effect is exactly
+		the same as writing
+			{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
+
+	{{range pipeline}} T1 {{end}}
+		The value of the pipeline must be an array, slice, map, or channel.
+		If the value of the pipeline has length zero, nothing is output;
+		otherwise, dot is set to the successive elements of the array,
+		slice, or map and T1 is executed. If the value is a map and the
+		keys are of basic type with a defined order, the elements will be
+		visited in sorted key order.
+
+	{{range pipeline}} T1 {{else}} T0 {{end}}
+		The value of the pipeline must be an array, slice, map, or channel.
+		If the value of the pipeline has length zero, dot is unaffected and
+		T0 is executed; otherwise, dot is set to the successive elements
+		of the array, slice, or map and T1 is executed.
+
+	{{break}}
+		The innermost {{range pipeline}} loop is ended early, stopping the
+		current iteration and bypassing all remaining iterations.
+
+	{{continue}}
+		The current iteration of the innermost {{range pipeline}} loop is
+		stopped, and the loop starts the next iteration.
+
+	{{template "name"}}
+		The template with the specified name is executed with nil data.
+
+	{{template "name" pipeline}}
+		The template with the specified name is executed with dot set
+		to the value of the pipeline.
+
+	{{block "name" pipeline}} T1 {{end}}
+		A block is shorthand for defining a template
+			{{define "name"}} T1 {{end}}
+		and then executing it in place
+			{{template "name" pipeline}}
+		The typical use is to define a set of root templates that are
+		then customized by redefining the block templates within.
+
+	{{with pipeline}} T1 {{end}}
+		If the value of the pipeline is empty, no output is generated;
+		otherwise, dot is set to the value of the pipeline and T1 is
+		executed.
+
+	{{with pipeline}} T1 {{else}} T0 {{end}}
+		If the value of the pipeline is empty, dot is unaffected and T0
+		is executed; otherwise, dot is set to the value of the pipeline
+		and T1 is executed.
+
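+For illustration, assuming the data passed to Execute has an Items field
+holding a slice of structs with Name and Done fields, a template that
+combines several of these actions might read
+
+	{{range .Items}}{{if .Done}}[x]{{else}}[ ]{{end}} {{.Name}}
+	{{end}}
+
+which prints one line per element, marking those whose Done field is true.
+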
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+	- A boolean, string, character, integer, floating-point, imaginary
+	  or complex constant in Go syntax. These behave like Go's untyped
+	  constants. Note that, as in Go, whether a large integer constant
+	  overflows when assigned or passed to a function can depend on whether
+	  the host machine's ints are 32 or 64 bits.
+	- The keyword nil, representing an untyped Go nil.
+	- The character '.' (period):
+		.
+	  The result is the value of dot.
+	- A variable name, which is a (possibly empty) alphanumeric string
+	  preceded by a dollar sign, such as
+		$piOver2
+	  or
+		$
+	  The result is the value of the variable.
+	  Variables are described below.
+	- The name of a field of the data, which must be a struct, preceded
+	  by a period, such as
+		.Field
+	  The result is the value of the field. Field invocations may be
+	  chained:
+	    .Field1.Field2
+	  Fields can also be evaluated on variables, including chaining:
+	    $x.Field1.Field2
+	- The name of a key of the data, which must be a map, preceded
+	  by a period, such as
+		.Key
+	  The result is the map element value indexed by the key.
+	  Key invocations may be chained and combined with fields to any
+	  depth:
+	    .Field1.Key1.Field2.Key2
+	  Although the key must be an alphanumeric identifier, unlike with
+	  field names it need not start with an upper case letter.
+	  Keys can also be evaluated on variables, including chaining:
+	    $x.key1.key2
+	- The name of a niladic method of the data, preceded by a period,
+	  such as
+		.Method
+	  The result is the value of invoking the method with dot as the
+	  receiver, dot.Method(). Such a method must have one return value (of
+	  any type) or two return values, the second of which is an error.
+	  If it has two and the returned error is non-nil, execution terminates
+	  and an error is returned to the caller as the value of Execute.
+	  Method invocations may be chained and combined with fields and keys
+	  to any depth:
+	    .Field1.Key1.Method1.Field2.Key2.Method2
+	  Methods can also be evaluated on variables, including chaining:
+	    $x.Method1.Field
+	- The name of a niladic function, such as
+		fun
+	  The result is the value of invoking the function, fun(). The return
+	  types and values behave as in methods. Functions and function
+	  names are described below.
+	- A parenthesized instance of one of the above, for grouping. The result
+	  may be accessed by a field or map key invocation.
+		print (.F1 arg1) (.F2 arg2)
+		(.StructValuedMethod "arg").Field
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+If an evaluation yields a function value, such as a function-valued
+field of a struct, the function is not invoked automatically, but it
+can be used as a truth value for an if action and the like. To invoke
+it, use the call function, defined below.
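+
+For example, assuming dot has a function-valued field Format of type
+func(int) string, it can be invoked as
+
+	{{call .Format 7}}
+
+which writes the string returned by the call.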
+
+Pipelines
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+	Argument
+		The result is the value of evaluating the argument.
+	.Method [Argument...]
+		The method can be alone or the last element of a chain but,
+		unlike methods in the middle of a chain, it can take arguments.
+		The result is the value of calling the method with the
+		arguments:
+			dot.Method(Argument1, etc.)
+	functionName [Argument...]
+		The result is the value of calling the function associated
+		with the name:
+			function(Argument1, etc.)
+		Functions and function names are described below.
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
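+
+For example, given a hypothetical method on dot such as
+
+	func (p Page) Body() (string, error)
+
+the action {{.Body}} writes the returned string when the error is nil;
+a non-nil error stops execution and is returned by Execute.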
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+	$variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+Variables previously declared can also be assigned, using the syntax
+
+	$variable = pipeline
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+	range $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
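+
+For example, assuming dot is a map of prices keyed by name,
+
+	{{range $name, $price := .}}{{$name}}: {{$price}}
+	{{end}}
+
+sets $name to each key and $price to the corresponding element.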
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
+
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+	{{"\"output\""}}
+		A string constant.
+	{{`"output"`}}
+		A raw string constant.
+	{{printf "%q" "output"}}
+		A function call.
+	{{"output" | printf "%q"}}
+		A function call whose final argument comes from the previous
+		command.
+	{{printf "%q" (print "out" "put")}}
+		A parenthesized argument.
+	{{"put" | printf "%s%s" "out" | printf "%q"}}
+		A more elaborate call.
+	{{"output" | printf "%s" | printf "%q"}}
+		A longer chain.
+	{{with "output"}}{{printf "%q" .}}{{end}}
+		A with action using dot.
+	{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+		A with action that creates and uses a variable.
+	{{with $x := "output"}}{{printf "%q" $x}}{{end}}
+		A with action that uses the variable in another action.
+	{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+		The same, but pipelined.
+
+Functions
+
+During execution functions are found in two function maps: first in the
+template, then in the global function map. By default, no functions are defined
+in the template but the Funcs method can be used to add them.
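+
+For example, assuming a *Template value tmpl whose text has not yet been
+parsed,
+
+	tmpl = tmpl.Funcs(template.FuncMap{"upper": strings.ToUpper})
+
+adds an "upper" function that the template text can then call, for
+instance as {{upper .Name}} for a string field Name.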
+
+Predefined global functions are named as follows.
+
+	and
+		Returns the boolean AND of its arguments by returning the
+		first empty argument or the last argument, that is,
+		"and x y" behaves as "if x then y else x". Only those
+		arguments necessary to determine the answer are evaluated.
+	call
+		Returns the result of calling the first argument, which
+		must be a function, with the remaining arguments as parameters.
+		Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
+		Y is a func-valued field, map entry, or the like.
+		The first argument must be the result of an evaluation
+		that yields a value of function type (as distinct from
+		a predefined function such as print). The function must
+		return either one or two result values, the second of which
+		is of type error. If the arguments don't match the function
+		or the returned error value is non-nil, execution stops.
+	html
+		Returns the escaped HTML equivalent of the textual
+		representation of its arguments. This function is unavailable
+		in html/template, with a few exceptions.
+	index
+		Returns the result of indexing its first argument by the
+		following arguments. Thus "index x 1 2 3" is, in Go syntax,
+		x[1][2][3]. Each indexed item must be a map, slice, or array.
+	slice
+		slice returns the result of slicing its first argument by the
+		remaining arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2],
+		while "slice x" is x[:], "slice x 1" is x[1:], and "slice x 1 2 3"
+		is x[1:2:3]. The first argument must be a string, slice, or array.
+	js
+		Returns the escaped JavaScript equivalent of the textual
+		representation of its arguments.
+	len
+		Returns the integer length of its argument.
+	not
+		Returns the boolean negation of its single argument.
+	or
+		Returns the boolean OR of its arguments by returning the
+		first non-empty argument or the last argument, that is,
+		"or x y" behaves as "if x then x else y". Only those
+		arguments necessary to determine the answer are evaluated.
+	print
+		An alias for fmt.Sprint.
+	printf
+		An alias for fmt.Sprintf.
+	println
+		An alias for fmt.Sprintln.
+	urlquery
+		Returns the escaped value of the textual representation of
+		its arguments in a form suitable for embedding in a URL query.
+		This function is unavailable in html/template, with a few
+		exceptions.
+
+The boolean functions take any zero value to be false and a non-zero
+value to be true.
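+
+For example, assuming dot has a User field that may be a nil pointer,
+
+	{{if and .User .User.Admin}}administrator{{end}}
+
+is safe: since only the arguments needed to determine the answer are
+evaluated, .User.Admin is never evaluated when .User is nil.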
+
+There is also a set of binary comparison operators defined as
+functions:
+
+	eq
+		Returns the boolean truth of arg1 == arg2
+	ne
+		Returns the boolean truth of arg1 != arg2
+	lt
+		Returns the boolean truth of arg1 < arg2
+	le
+		Returns the boolean truth of arg1 <= arg2
+	gt
+		Returns the boolean truth of arg1 > arg2
+	ge
+		Returns the boolean truth of arg1 >= arg2
+
+For simpler multi-way equality tests, eq (only) accepts two or more
+arguments and compares the second and subsequent to the first,
+returning in effect
+
+	arg1==arg2 || arg1==arg3 || arg1==arg4 ...
+
+(Unlike with || in Go, however, eq is a function call and all the
+arguments will be evaluated.)
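+
+For example, assuming dot has a string field Status,
+
+	{{if eq .Status "new" "open" "reopened"}}active{{end}}
+
+prints "active" when .Status equals any of the three strings.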
+
+The comparison functions work on any values whose type Go defines as
+comparable. For basic types such as integers, the rules are relaxed:
+size and exact type are ignored, so any integer value, signed or unsigned,
+may be compared with any other integer value. (The arithmetic value is compared,
+not the bit pattern, so all negative integers are less than all unsigned integers.)
+However, as usual, one may not compare an int with a float32 and so on.
+
+Associated templates
+
+Each template is named by a string specified when it is created. Also, each
+template is associated with zero or more other templates that it may invoke by
+name; such associations are transitive and form a name space of templates.
+
+A template may use a template invocation to instantiate another associated
+template; see the explanation of the "template" action above. The name must be
+that of a template associated with the template that contains the invocation.
+
+Nested template definitions
+
+When parsing a template, another template may be defined and associated with the
+template being parsed. Template definitions must appear at the top level of the
+template, much like global variables in a Go program.
+
+The syntax of such definitions is to surround each template declaration with a
+"define" and "end" action.
+
+The define action names the template being created by providing a string
+constant. Here is a simple example:
+
+	`{{define "T1"}}ONE{{end}}
+	{{define "T2"}}TWO{{end}}
+	{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
+	{{template "T3"}}`
+
+This defines two templates, T1 and T2, and a third T3 that invokes the other two
+when it is executed. Finally it invokes T3. If executed this template will
+produce the text
+
+	ONE TWO
+
+By construction, a template may reside in only one association. If it's
+necessary to have a template addressable from multiple associations, the
+template definition must be parsed multiple times to create distinct *Template
+values, or must be copied with the Clone or AddParseTree method.
+
+Parse may be called multiple times to assemble the various associated templates;
+see the ParseFiles and ParseGlob functions and methods for simple ways to parse
+related templates stored in files.
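+
+For example, assuming template files named header.tmpl and footer.tmpl
+in the current directory,
+
+	tmpl := template.Must(template.ParseFiles("header.tmpl", "footer.tmpl"))
+
+parses both files into a single set of associated templates, each named
+after its file's base name.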
+
+A template may be executed directly or through ExecuteTemplate, which executes
+an associated template identified by name. To invoke our example above, we
+might write,
+
+	err := tmpl.Execute(os.Stdout, "no data needed")
+	if err != nil {
+		log.Fatalf("execution failed: %s", err)
+	}
+
+or to invoke a particular template explicitly by name,
+
+	err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
+	if err != nil {
+		log.Fatalf("execution failed: %s", err)
+	}
+
+*/
+package template
diff --git a/internal/backport/text/template/example_test.go b/internal/backport/text/template/example_test.go
new file mode 100644
index 0000000..b07611e
--- /dev/null
+++ b/internal/backport/text/template/example_test.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"log"
+	"os"
+	"strings"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+func ExampleTemplate() {
+	// Define a template.
+	const letter = `
+Dear {{.Name}},
+{{if .Attended}}
+It was a pleasure to see you at the wedding.
+{{- else}}
+It is a shame you couldn't make it to the wedding.
+{{- end}}
+{{with .Gift -}}
+Thank you for the lovely {{.}}.
+{{end}}
+Best wishes,
+Josie
+`
+
+	// Prepare some data to insert into the template.
+	type Recipient struct {
+		Name, Gift string
+		Attended   bool
+	}
+	var recipients = []Recipient{
+		{"Aunt Mildred", "bone china tea set", true},
+		{"Uncle John", "moleskin pants", false},
+		{"Cousin Rodney", "", false},
+	}
+
+	// Create a new template and parse the letter into it.
+	t := template.Must(template.New("letter").Parse(letter))
+
+	// Execute the template for each recipient.
+	for _, r := range recipients {
+		err := t.Execute(os.Stdout, r)
+		if err != nil {
+			log.Println("executing template:", err)
+		}
+	}
+
+	// Output:
+	// Dear Aunt Mildred,
+	//
+	// It was a pleasure to see you at the wedding.
+	// Thank you for the lovely bone china tea set.
+	//
+	// Best wishes,
+	// Josie
+	//
+	// Dear Uncle John,
+	//
+	// It is a shame you couldn't make it to the wedding.
+	// Thank you for the lovely moleskin pants.
+	//
+	// Best wishes,
+	// Josie
+	//
+	// Dear Cousin Rodney,
+	//
+	// It is a shame you couldn't make it to the wedding.
+	//
+	// Best wishes,
+	// Josie
+}
+
+// The following example is duplicated in html/template; keep them in sync.
+
+func ExampleTemplate_block() {
+	const (
+		master  = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
+		overlay = `{{define "list"}} {{join . ", "}}{{end}} `
+	)
+	var (
+		funcs     = template.FuncMap{"join": strings.Join}
+		guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
+	)
+	masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
+	if err != nil {
+		log.Fatal(err)
+	}
+	overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	// Output:
+	// Names:
+	// - Gamora
+	// - Groot
+	// - Nebula
+	// - Rocket
+	// - Star-Lord
+	// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
+}
diff --git a/internal/backport/text/template/examplefiles_test.go b/internal/backport/text/template/examplefiles_test.go
new file mode 100644
index 0000000..9b03198
--- /dev/null
+++ b/internal/backport/text/template/examplefiles_test.go
@@ -0,0 +1,183 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+// templateFile defines the contents of a template to be stored in a file, for testing.
+type templateFile struct {
+	name     string
+	contents string
+}
+
+func createTestDir(files []templateFile) string {
+	dir, err := ioutil.TempDir("", "template")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, file := range files {
+		f, err := os.Create(filepath.Join(dir, file.name))
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer f.Close()
+		_, err = io.WriteString(f, file.contents)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	return dir
+}
+
+// Here we demonstrate loading a set of templates from a directory.
+func ExampleTemplate_glob() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
+		// T1.tmpl defines a template, T1 that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// T0.tmpl is the first name matched, so it becomes the starting template,
+	// the value returned by ParseGlob.
+	tmpl := template.Must(template.ParseGlob(pattern))
+
+	err := tmpl.Execute(os.Stdout, nil)
+	if err != nil {
+		log.Fatalf("template execution: %s", err)
+	}
+	// Output:
+	// T0 invokes T1: (T1 invokes T2: (This is T2))
+}
+
+// This example demonstrates one way to share some templates
+// and use them in different contexts. In this variant we add multiple driver
+// templates by hand to an existing bundle of templates.
+func ExampleTemplate_helpers() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T1.tmpl defines a template, T1 that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the helpers.
+	templates := template.Must(template.ParseGlob(pattern))
+	// Add one driver template to the bunch; we do this with an explicit template definition.
+	_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver1: ", err)
+	}
+	// Add another driver template.
+	_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver2: ", err)
+	}
+	// We load all the templates before execution. This package does not require
+	// that behavior but html/template's escaping does, so it's a good habit.
+	err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
+	if err != nil {
+		log.Fatalf("driver1 execution: %s", err)
+	}
+	err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
+	if err != nil {
+		log.Fatalf("driver2 execution: %s", err)
+	}
+	// Output:
+	// Driver 1 calls T1: (T1 invokes T2: (This is T2))
+	// Driver 2 calls T2: (This is T2)
+}
+
+// This example demonstrates how to use one group of driver
+// templates with distinct sets of helper templates.
+func ExampleTemplate_share() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
+		// T1.tmpl defines a template, T1 that invokes T2. Note that T2 is not defined.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the drivers.
+	drivers := template.Must(template.ParseGlob(pattern))
+
+	// We must define an implementation of the T2 template. First we clone
+	// the drivers, then add a definition of T2 to the template name space.
+
+	// 1. Clone the helper set to create a new name space from which to run them.
+	first, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning helpers: ", err)
+	}
+	// 2. Define T2, version A, and parse it.
+	_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Now repeat the whole thing, using a different version of T2.
+	// 1. Clone the drivers.
+	second, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning drivers: ", err)
+	}
+	// 2. Define T2, version B, and parse it.
+	_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Execute the templates in the reverse order to verify the
+	// first is unaffected by the second.
+	err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
+	if err != nil {
+		log.Fatalf("second execution: %s", err)
+	}
+	err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
+	if err != nil {
+		log.Fatalf("first execution: %s", err)
+	}
+
+	// Output:
+	// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
+	// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
+}
diff --git a/internal/backport/text/template/examplefunc_test.go b/internal/backport/text/template/examplefunc_test.go
new file mode 100644
index 0000000..ed558ad
--- /dev/null
+++ b/internal/backport/text/template/examplefunc_test.go
@@ -0,0 +1,55 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"log"
+	"os"
+	"strings"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+// This example demonstrates a custom function to process template text.
+// It installs the strings.Title function and uses it to
+// Make Title Text Look Good In Our Template's Output.
+func ExampleTemplate_func() {
+	// First we create a FuncMap with which to register the function.
+	funcMap := template.FuncMap{
+		// The name "title" is what the function will be called in the template text.
+		"title": strings.Title,
+	}
+
+	// A simple template definition to test our function.
+	// We print the input text several ways:
+	// - the original
+	// - title-cased
+	// - title-cased and then printed with %q
+	// - printed with %q and then title-cased.
+	const templateText = `
+Input: {{printf "%q" .}}
+Output 0: {{title .}}
+Output 1: {{title . | printf "%q"}}
+Output 2: {{printf "%q" . | title}}
+`
+
+	// Create a template, add the function map, and parse the text.
+	tmpl, err := template.New("titleTest").Funcs(funcMap).Parse(templateText)
+	if err != nil {
+		log.Fatalf("parsing: %s", err)
+	}
+
+	// Run the template to verify the output.
+	err = tmpl.Execute(os.Stdout, "the go programming language")
+	if err != nil {
+		log.Fatalf("execution: %s", err)
+	}
+
+	// Output:
+	// Input: "the go programming language"
+	// Output 0: The Go Programming Language
+	// Output 1: "The Go Programming Language"
+	// Output 2: "The Go Programming Language"
+}
diff --git a/internal/backport/text/template/exec.go b/internal/backport/text/template/exec.go
new file mode 100644
index 0000000..4ff29bb
--- /dev/null
+++ b/internal/backport/text/template/exec.go
@@ -0,0 +1,1028 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"golang.org/x/website/internal/backport/fmtsort"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// maxExecDepth specifies the maximum stack depth of templates within
+// templates. This limit is only practically reached by accidentally
+// recursive template invocations. This limit allows us to return
+// an error instead of triggering a stack overflow.
+var maxExecDepth = initMaxExecDepth()
+
+func initMaxExecDepth() int {
+	if runtime.GOARCH == "wasm" {
+		return 1000
+	}
+	return 100000
+}
+
+// state represents the state of an execution. It's not part of the
+// template so that multiple executions of the same template
+// can execute in parallel.
+type state struct {
+	tmpl  *Template
+	wr    io.Writer
+	node  parse.Node // current node, for errors
+	vars  []variable // push-down stack of variable values.
+	depth int        // the height of the stack of executing templates.
+}
+
+// variable holds the dynamic value of a variable such as $, $x etc.
+type variable struct {
+	name  string
+	value reflect.Value
+}
+
+// push pushes a new variable on the stack.
+func (s *state) push(name string, value reflect.Value) {
+	s.vars = append(s.vars, variable{name, value})
+}
+
+// mark returns the length of the variable stack.
+func (s *state) mark() int {
+	return len(s.vars)
+}
+
+// pop pops the variable stack up to the mark.
+func (s *state) pop(mark int) {
+	s.vars = s.vars[0:mark]
+}
+
+// setVar overwrites the last declared variable with the given name.
+// Used by variable assignments.
+func (s *state) setVar(name string, value reflect.Value) {
+	for i := s.mark() - 1; i >= 0; i-- {
+		if s.vars[i].name == name {
+			s.vars[i].value = value
+			return
+		}
+	}
+	s.errorf("undefined variable: %s", name)
+}
+
+// setTopVar overwrites the top-nth variable on the stack. Used by range iterations.
+func (s *state) setTopVar(n int, value reflect.Value) {
+	s.vars[len(s.vars)-n].value = value
+}
+
+// varValue returns the value of the named variable.
+func (s *state) varValue(name string) reflect.Value {
+	for i := s.mark() - 1; i >= 0; i-- {
+		if s.vars[i].name == name {
+			return s.vars[i].value
+		}
+	}
+	s.errorf("undefined variable: %s", name)
+	return zero
+}
+
+var zero reflect.Value
+
+type missingValType struct{}
+
+var missingVal = reflect.ValueOf(missingValType{})
+
+// at marks the state to be on node n, for error reporting.
+func (s *state) at(node parse.Node) {
+	s.node = node
+}
+
+// doublePercent returns the string with %'s replaced by %%, if necessary,
+// so it can be used safely inside a Printf format string.
+func doublePercent(str string) string {
+	return strings.ReplaceAll(str, "%", "%%")
+}
+
+// TODO: It would be nice if ExecError was more broken down, but
+// the way ErrorContext embeds the template name makes the
+// processing too clumsy.
+
+// ExecError is the custom error type returned when Execute has an
+// error evaluating its template. (If a write error occurs, the actual
+// error is returned; it will not be of type ExecError.)
+type ExecError struct {
+	Name string // Name of template.
+	Err  error  // Pre-formatted error.
+}
+
+func (e ExecError) Error() string {
+	return e.Err.Error()
+}
+
+func (e ExecError) Unwrap() error {
+	return e.Err
+}
+
+// errorf records an ExecError and terminates processing.
+func (s *state) errorf(format string, args ...interface{}) {
+	name := doublePercent(s.tmpl.Name())
+	if s.node == nil {
+		format = fmt.Sprintf("template: %s: %s", name, format)
+	} else {
+		location, context := s.tmpl.ErrorContext(s.node)
+		format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
+	}
+	panic(ExecError{
+		Name: s.tmpl.Name(),
+		Err:  fmt.Errorf(format, args...),
+	})
+}
+
+// writeError is the wrapper type used internally when Execute has an
+// error writing to its output. We strip the wrapper in errRecover.
+// Note that this is not an implementation of error, so it cannot escape
+// from the package as an error value.
+type writeError struct {
+	Err error // Original error.
+}
+
+func (s *state) writeError(err error) {
+	panic(writeError{
+		Err: err,
+	})
+}
+
+// errRecover is the handler that turns panics into returns from the top
+// level of Parse.
+func errRecover(errp *error) {
+	e := recover()
+	if e != nil {
+		switch err := e.(type) {
+		case runtime.Error:
+			panic(e)
+		case writeError:
+			*errp = err.Err // Strip the wrapper.
+		case ExecError:
+			*errp = err // Keep the wrapper.
+		default:
+			panic(e)
+		}
+	}
+}
+
+// ExecuteTemplate applies the template associated with t that has the given name
+// to the specified data object and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
+	tmpl := t.Lookup(name)
+	if tmpl == nil {
+		return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
+	}
+	return tmpl.Execute(wr, data)
+}
+
+// Execute applies a parsed template to the specified data object,
+// and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+//
+// If data is a reflect.Value, the template applies to the concrete
+// value that the reflect.Value holds, as in fmt.Print.
+func (t *Template) Execute(wr io.Writer, data interface{}) error {
+	return t.execute(wr, data)
+}
+
+func (t *Template) execute(wr io.Writer, data interface{}) (err error) {
+	defer errRecover(&err)
+	value, ok := data.(reflect.Value)
+	if !ok {
+		value = reflect.ValueOf(data)
+	}
+	state := &state{
+		tmpl: t,
+		wr:   wr,
+		vars: []variable{{"$", value}},
+	}
+	if t.Tree == nil || t.Root == nil {
+		state.errorf("%q is an incomplete or empty template", t.Name())
+	}
+	state.walk(value, t.Root)
+	return
+}
+
+// DefinedTemplates returns a string listing the defined templates,
+// prefixed by the string "; defined templates are: ". If there are none,
+// it returns the empty string. For generating an error message here
+// and in html/template.
+func (t *Template) DefinedTemplates() string {
+	if t.common == nil {
+		return ""
+	}
+	var b strings.Builder
+	t.muTmpl.RLock()
+	defer t.muTmpl.RUnlock()
+	for name, tmpl := range t.tmpl {
+		if tmpl.Tree == nil || tmpl.Root == nil {
+			continue
+		}
+		if b.Len() == 0 {
+			b.WriteString("; defined templates are: ")
+		} else {
+			b.WriteString(", ")
+		}
+		fmt.Fprintf(&b, "%q", name)
+	}
+	return b.String()
+}
+
+// Sentinel errors for use with panic to signal early exits from range loops.
+var (
+	walkBreak    = errors.New("break")
+	walkContinue = errors.New("continue")
+)
+
+// Walk functions step through the major pieces of the template structure,
+// generating output as they go.
+func (s *state) walk(dot reflect.Value, node parse.Node) {
+	s.at(node)
+	switch node := node.(type) {
+	case *parse.ActionNode:
+		// Do not pop variables so they persist until next end.
+		// Also, if the action declares variables, don't print the result.
+		val := s.evalPipeline(dot, node.Pipe)
+		if len(node.Pipe.Decl) == 0 {
+			s.printValue(node, val)
+		}
+	case *parse.BreakNode:
+		panic(walkBreak)
+	case *parse.CommentNode:
+	case *parse.ContinueNode:
+		panic(walkContinue)
+	case *parse.IfNode:
+		s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+	case *parse.ListNode:
+		for _, node := range node.Nodes {
+			s.walk(dot, node)
+		}
+	case *parse.RangeNode:
+		s.walkRange(dot, node)
+	case *parse.TemplateNode:
+		s.walkTemplate(dot, node)
+	case *parse.TextNode:
+		if _, err := s.wr.Write(node.Text); err != nil {
+			s.writeError(err)
+		}
+	case *parse.WithNode:
+		s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
+	default:
+		s.errorf("unknown node: %s", node)
+	}
+}
+
+// walkIfOrWith walks an 'if' or 'with' node. The two control structures
+// are identical in behavior except that 'with' sets dot.
+func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
+	defer s.pop(s.mark())
+	val := s.evalPipeline(dot, pipe)
+	truth, ok := isTrue(indirectInterface(val))
+	if !ok {
+		s.errorf("if/with can't use %v", val)
+	}
+	if truth {
+		if typ == parse.NodeWith {
+			s.walk(val, list)
+		} else {
+			s.walk(dot, list)
+		}
+	} else if elseList != nil {
+		s.walk(dot, elseList)
+	}
+}
+
+// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value. This is the definition of
+// truth used by if and other such actions.
+func IsTrue(val interface{}) (truth, ok bool) {
+	return isTrue(reflect.ValueOf(val))
+}
+
+func isTrue(val reflect.Value) (truth, ok bool) {
+	if !val.IsValid() {
+		// Something like var x interface{}, never set. It's a form of nil.
+		return false, true
+	}
+	switch val.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		truth = val.Len() > 0
+	case reflect.Bool:
+		truth = val.Bool()
+	case reflect.Complex64, reflect.Complex128:
+		truth = val.Complex() != 0
+	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+		truth = !val.IsNil()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		truth = val.Int() != 0
+	case reflect.Float32, reflect.Float64:
+		truth = val.Float() != 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		truth = val.Uint() != 0
+	case reflect.Struct:
+		truth = true // Struct values are always true.
+	default:
+		return
+	}
+	return truth, true
+}
+
+func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+	s.at(r)
+	defer func() {
+		if r := recover(); r != nil && r != walkBreak {
+			panic(r)
+		}
+	}()
+	defer s.pop(s.mark())
+	val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+	// mark top of stack before any variables in the body are pushed.
+	mark := s.mark()
+	oneIteration := func(index, elem reflect.Value) {
+		// Set top var (lexically the second if there are two) to the element.
+		if len(r.Pipe.Decl) > 0 {
+			s.setTopVar(1, elem)
+		}
+		// Set next var (lexically the first if there are two) to the index.
+		if len(r.Pipe.Decl) > 1 {
+			s.setTopVar(2, index)
+		}
+		defer s.pop(mark)
+		defer func() {
+			// Consume panic(walkContinue)
+			if r := recover(); r != nil && r != walkContinue {
+				panic(r)
+			}
+		}()
+		s.walk(elem, r.List)
+	}
+	switch val.Kind() {
+	case reflect.Array, reflect.Slice:
+		if val.Len() == 0 {
+			break
+		}
+		for i := 0; i < val.Len(); i++ {
+			oneIteration(reflect.ValueOf(i), val.Index(i))
+		}
+		return
+	case reflect.Map:
+		if val.Len() == 0 {
+			break
+		}
+		om := fmtsort.Sort(val)
+		for i, key := range om.Key {
+			oneIteration(key, om.Value[i])
+		}
+		return
+	case reflect.Chan:
+		if val.IsNil() {
+			break
+		}
+		if val.Type().ChanDir() == reflect.SendDir {
+			s.errorf("range over send-only channel %v", val)
+			break
+		}
+		i := 0
+		for ; ; i++ {
+			elem, ok := val.Recv()
+			if !ok {
+				break
+			}
+			oneIteration(reflect.ValueOf(i), elem)
+		}
+		if i == 0 {
+			break
+		}
+		return
+	case reflect.Invalid:
+		break // An invalid value is likely a nil map, etc. and acts like an empty map.
+	default:
+		s.errorf("range can't iterate over %v", val)
+	}
+	if r.ElseList != nil {
+		s.walk(dot, r.ElseList)
+	}
+}
+
+func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
+	s.at(t)
+	tmpl := s.tmpl.Lookup(t.Name)
+	if tmpl == nil {
+		s.errorf("template %q not defined", t.Name)
+	}
+	if s.depth == maxExecDepth {
+		s.errorf("exceeded maximum template depth (%v)", maxExecDepth)
+	}
+	// Variables declared by the pipeline persist.
+	dot = s.evalPipeline(dot, t.Pipe)
+	newState := *s
+	newState.depth++
+	newState.tmpl = tmpl
+	// No dynamic scoping: template invocations inherit no variables.
+	newState.vars = []variable{{"$", dot}}
+	newState.walk(dot, tmpl.Root)
+}
+
+// Eval functions evaluate pipelines, commands, and their elements and extract
+// values from the data structure by examining fields, calling methods, and so on.
+// The printing of those values happens only through walk functions.
+
+// evalPipeline returns the value acquired by evaluating a pipeline. If the
+// pipeline has a variable declaration, the variable will be pushed on the
+// stack. Callers should therefore pop the stack after they are finished
+// executing commands depending on the pipeline value.
+func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
+	if pipe == nil {
+		return
+	}
+	s.at(pipe)
+	value = missingVal
+	for _, cmd := range pipe.Cmds {
+		value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
+		// If the object has type interface{}, dig down one level to the thing inside.
+		if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
+			value = reflect.ValueOf(value.Interface()) // lovely!
+		}
+	}
+	for _, variable := range pipe.Decl {
+		if pipe.IsAssign {
+			s.setVar(variable.Ident[0], value)
+		} else {
+			s.push(variable.Ident[0], value)
+		}
+	}
+	return value
+}
+
+func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
+	if len(args) > 1 || final != missingVal {
+		s.errorf("can't give argument to non-function %s", args[0])
+	}
+}
+
+func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
+	firstWord := cmd.Args[0]
+	switch n := firstWord.(type) {
+	case *parse.FieldNode:
+		return s.evalFieldNode(dot, n, cmd.Args, final)
+	case *parse.ChainNode:
+		return s.evalChainNode(dot, n, cmd.Args, final)
+	case *parse.IdentifierNode:
+		// Must be a function.
+		return s.evalFunction(dot, n, cmd, cmd.Args, final)
+	case *parse.PipeNode:
+		// Parenthesized pipeline. The arguments are all inside the pipeline; final must be absent.
+		s.notAFunction(cmd.Args, final)
+		return s.evalPipeline(dot, n)
+	case *parse.VariableNode:
+		return s.evalVariableNode(dot, n, cmd.Args, final)
+	}
+	s.at(firstWord)
+	s.notAFunction(cmd.Args, final)
+	switch word := firstWord.(type) {
+	case *parse.BoolNode:
+		return reflect.ValueOf(word.True)
+	case *parse.DotNode:
+		return dot
+	case *parse.NilNode:
+		s.errorf("nil is not a command")
+	case *parse.NumberNode:
+		return s.idealConstant(word)
+	case *parse.StringNode:
+		return reflect.ValueOf(word.Text)
+	}
+	s.errorf("can't evaluate command %q", firstWord)
+	panic("not reached")
+}
+
+// idealConstant is called to return the value of a number in a context where
+// we don't know the type. In that case, the syntax of the number tells us
+// its type, and we use Go rules to resolve. Note there is no such thing as
+// a uint ideal constant in this situation - the value must be of int type.
+func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
+	// These are ideal constants but we don't know the type
+	// and we have no context.  (If it was a method argument,
+	// we'd know what we need.) The syntax guides us to some extent.
+	s.at(constant)
+	switch {
+	case constant.IsComplex:
+		return reflect.ValueOf(constant.Complex128) // incontrovertible.
+
+	case constant.IsFloat &&
+		!isHexInt(constant.Text) && !isRuneInt(constant.Text) &&
+		strings.ContainsAny(constant.Text, ".eEpP"):
+		return reflect.ValueOf(constant.Float64)
+
+	case constant.IsInt:
+		n := int(constant.Int64)
+		if int64(n) != constant.Int64 {
+			s.errorf("%s overflows int", constant.Text)
+		}
+		return reflect.ValueOf(n)
+
+	case constant.IsUint:
+		s.errorf("%s overflows int", constant.Text)
+	}
+	return zero
+}
+
+func isRuneInt(s string) bool {
+	return len(s) > 0 && s[0] == '\''
+}
+
+func isHexInt(s string) bool {
+	return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') && !strings.ContainsAny(s, "pP")
+}
+
+func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
+	s.at(field)
+	return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
+}
+
+func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
+	s.at(chain)
+	if len(chain.Field) == 0 {
+		s.errorf("internal error: no fields in evalChainNode")
+	}
+	if chain.Node.Type() == parse.NodeNil {
+		s.errorf("indirection through explicit nil in %s", chain)
+	}
+	// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
+	pipe := s.evalArg(dot, nil, chain.Node)
+	return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
+}
+
+func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
+	// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
+	s.at(variable)
+	value := s.varValue(variable.Ident[0])
+	if len(variable.Ident) == 1 {
+		s.notAFunction(args, final)
+		return value
+	}
+	return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
+}
+
+// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
+// dot is the environment in which to evaluate arguments, while
+// receiver is the value being walked along the chain.
+func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
+	n := len(ident)
+	for i := 0; i < n-1; i++ {
+		receiver = s.evalField(dot, ident[i], node, nil, missingVal, receiver)
+	}
+	// Now if it's a method, it gets the arguments.
+	return s.evalField(dot, ident[n-1], node, args, final, receiver)
+}
+
+func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
+	s.at(node)
+	name := node.Ident
+	function, isBuiltin, ok := findFunction(name, s.tmpl)
+	if !ok {
+		s.errorf("%q is not a defined function", name)
+	}
+	return s.evalCall(dot, function, isBuiltin, cmd, name, args, final)
+}
+
+// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
+// The 'final' argument represents the return value from the preceding
+// value of the pipeline, if any.
+func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
+	if !receiver.IsValid() {
+		if s.tmpl.option.missingKey == mapError { // Treat invalid value as missing map key.
+			s.errorf("nil data; no entry for key %q", fieldName)
+		}
+		return zero
+	}
+	typ := receiver.Type()
+	receiver, isNil := indirect(receiver)
+	if receiver.Kind() == reflect.Interface && isNil {
+		// Calling a method on a nil interface can't work. The
+		// MethodByName method call below would panic.
+		s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
+		return zero
+	}
+
+	// Unless it's an interface, we need to get to a value of type *T to guarantee
+	// we see all methods of T and *T.
+	ptr := receiver
+	if ptr.Kind() != reflect.Interface && ptr.Kind() != reflect.Ptr && ptr.CanAddr() {
+		ptr = ptr.Addr()
+	}
+	if method := ptr.MethodByName(fieldName); method.IsValid() {
+		return s.evalCall(dot, method, false, node, fieldName, args, final)
+	}
+	hasArgs := len(args) > 1 || final != missingVal
+	// It's not a method; must be a field of a struct or an element of a map.
+	switch receiver.Kind() {
+	case reflect.Struct:
+		tField, ok := receiver.Type().FieldByName(fieldName)
+		if ok {
+			field := receiver.FieldByIndex(tField.Index)
+			if tField.PkgPath != "" {
+				s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
+			}
+			// If it's a function, we must call it.
+			if hasArgs {
+				s.errorf("%s has arguments but cannot be invoked as function", fieldName)
+			}
+			return field
+		}
+	case reflect.Map:
+		// If it's a map, attempt to use the field name as a key.
+		nameVal := reflect.ValueOf(fieldName)
+		if nameVal.Type().AssignableTo(receiver.Type().Key()) {
+			if hasArgs {
+				s.errorf("%s is not a method but has arguments", fieldName)
+			}
+			result := receiver.MapIndex(nameVal)
+			if !result.IsValid() {
+				switch s.tmpl.option.missingKey {
+				case mapInvalid:
+					// Just use the invalid value.
+				case mapZeroValue:
+					result = reflect.Zero(receiver.Type().Elem())
+				case mapError:
+					s.errorf("map has no entry for key %q", fieldName)
+				}
+			}
+			return result
+		}
+	case reflect.Ptr:
+		etyp := receiver.Type().Elem()
+		if etyp.Kind() == reflect.Struct {
+			if _, ok := etyp.FieldByName(fieldName); !ok {
+				// If there's no such field, say "can't evaluate"
+				// instead of "nil pointer evaluating".
+				break
+			}
+		}
+		if isNil {
+			s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
+		}
+	}
+	s.errorf("can't evaluate field %s in type %s", fieldName, typ)
+	panic("not reached")
+}
+
+var (
+	errorType        = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType  = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+	reflectValueType = reflect.TypeOf((*reflect.Value)(nil)).Elem()
+)
+
+// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
+// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell) arg[0]
+// as the function itself.
+func (s *state) evalCall(dot, fun reflect.Value, isBuiltin bool, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
+	if args != nil {
+		args = args[1:] // Zeroth arg is function name/node; not passed to function.
+	}
+	typ := fun.Type()
+	numIn := len(args)
+	if final != missingVal {
+		numIn++
+	}
+	numFixed := len(args)
+	if typ.IsVariadic() {
+		numFixed = typ.NumIn() - 1 // last arg is the variadic one.
+		if numIn < numFixed {
+			s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
+		}
+	} else if numIn != typ.NumIn() {
+		s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), numIn)
+	}
+	if !goodFunc(typ) {
+		// TODO: This could still be a confusing error; maybe goodFunc should provide info.
+		s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
+	}
+
+	// Special case for builtin and/or, which short-circuit.
+	if isBuiltin && (name == "and" || name == "or") {
+		argType := typ.In(0)
+		var v reflect.Value
+		for _, arg := range args {
+			v = s.evalArg(dot, argType, arg).Interface().(reflect.Value)
+			if truth(v) == (name == "or") {
+				break
+			}
+		}
+		return v
+	}
+
+	// Build the arg list.
+	argv := make([]reflect.Value, numIn)
+	// Args must be evaluated. Fixed args first.
+	i := 0
+	for ; i < numFixed && i < len(args); i++ {
+		argv[i] = s.evalArg(dot, typ.In(i), args[i])
+	}
+	// Now the ... args.
+	if typ.IsVariadic() {
+		argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
+		for ; i < len(args); i++ {
+			argv[i] = s.evalArg(dot, argType, args[i])
+		}
+	}
+	// Add final value if necessary.
+	if final != missingVal {
+		t := typ.In(typ.NumIn() - 1)
+		if typ.IsVariadic() {
+			if numIn-1 < numFixed {
+				// The added final argument corresponds to a fixed parameter of the function.
+				// Validate against the type of the actual parameter.
+				t = typ.In(numIn - 1)
+			} else {
+				// The added final argument corresponds to the variadic part.
+				// Validate against the type of the elements of the variadic slice.
+				t = t.Elem()
+			}
+		}
+		argv[i] = s.validateType(final, t)
+	}
+	v, err := safeCall(fun, argv)
+	// If we have an error that is not nil, stop execution and return that
+	// error to the caller.
+	if err != nil {
+		s.at(node)
+		s.errorf("error calling %s: %w", name, err)
+	}
+	if v.Type() == reflectValueType {
+		v = v.Interface().(reflect.Value)
+	}
+	return v
+}
+
+// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
+func canBeNil(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return true
+	case reflect.Struct:
+		return typ == reflectValueType
+	}
+	return false
+}
+
+// validateType guarantees that the value is valid and assignable to the type.
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
+	if !value.IsValid() {
+		if typ == nil {
+			// An untyped nil interface{}. Accept as a proper nil value.
+			return reflect.ValueOf(nil)
+		}
+		if canBeNil(typ) {
+			// Like above, but use the zero value of the non-nil type.
+			return reflect.Zero(typ)
+		}
+		s.errorf("invalid value; expected %s", typ)
+	}
+	if typ == reflectValueType && value.Type() != typ {
+		return reflect.ValueOf(value)
+	}
+	if typ != nil && !value.Type().AssignableTo(typ) {
+		if value.Kind() == reflect.Interface && !value.IsNil() {
+			value = value.Elem()
+			if value.Type().AssignableTo(typ) {
+				return value
+			}
+			// fallthrough
+		}
+		// Does one dereference or indirection work? We could do more, as we
+		// do with method receivers, but that gets messy and method receivers
+		// are much more constrained, so it makes more sense there than here.
+		// Besides, one is almost always all you need.
+		switch {
+		case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
+			value = value.Elem()
+			if !value.IsValid() {
+				s.errorf("dereference of nil pointer of type %s", typ)
+			}
+		case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
+			value = value.Addr()
+		default:
+			s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
+		}
+	}
+	return value
+}
+
+func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	switch arg := n.(type) {
+	case *parse.DotNode:
+		return s.validateType(dot, typ)
+	case *parse.NilNode:
+		if canBeNil(typ) {
+			return reflect.Zero(typ)
+		}
+		s.errorf("cannot assign nil to %s", typ)
+	case *parse.FieldNode:
+		return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, missingVal), typ)
+	case *parse.VariableNode:
+		return s.validateType(s.evalVariableNode(dot, arg, nil, missingVal), typ)
+	case *parse.PipeNode:
+		return s.validateType(s.evalPipeline(dot, arg), typ)
+	case *parse.IdentifierNode:
+		return s.validateType(s.evalFunction(dot, arg, arg, nil, missingVal), typ)
+	case *parse.ChainNode:
+		return s.validateType(s.evalChainNode(dot, arg, nil, missingVal), typ)
+	}
+	switch typ.Kind() {
+	case reflect.Bool:
+		return s.evalBool(typ, n)
+	case reflect.Complex64, reflect.Complex128:
+		return s.evalComplex(typ, n)
+	case reflect.Float32, reflect.Float64:
+		return s.evalFloat(typ, n)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return s.evalInteger(typ, n)
+	case reflect.Interface:
+		if typ.NumMethod() == 0 {
+			return s.evalEmptyInterface(dot, n)
+		}
+	case reflect.Struct:
+		if typ == reflectValueType {
+			return reflect.ValueOf(s.evalEmptyInterface(dot, n))
+		}
+	case reflect.String:
+		return s.evalString(typ, n)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return s.evalUnsignedInteger(typ, n)
+	}
+	s.errorf("can't handle %s for arg of type %s", n, typ)
+	panic("not reached")
+}
+
+func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	if n, ok := n.(*parse.BoolNode); ok {
+		value := reflect.New(typ).Elem()
+		value.SetBool(n.True)
+		return value
+	}
+	s.errorf("expected bool; found %s", n)
+	panic("not reached")
+}
+
+func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	if n, ok := n.(*parse.StringNode); ok {
+		value := reflect.New(typ).Elem()
+		value.SetString(n.Text)
+		return value
+	}
+	s.errorf("expected string; found %s", n)
+	panic("not reached")
+}
+
+func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
+		value := reflect.New(typ).Elem()
+		value.SetInt(n.Int64)
+		return value
+	}
+	s.errorf("expected integer; found %s", n)
+	panic("not reached")
+}
+
+func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
+		value := reflect.New(typ).Elem()
+		value.SetUint(n.Uint64)
+		return value
+	}
+	s.errorf("expected unsigned integer; found %s", n)
+	panic("not reached")
+}
+
+func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
+	s.at(n)
+	if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
+		value := reflect.New(typ).Elem()
+		value.SetFloat(n.Float64)
+		return value
+	}
+	s.errorf("expected float; found %s", n)
+	panic("not reached")
+}
+
+func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
+	if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
+		value := reflect.New(typ).Elem()
+		value.SetComplex(n.Complex128)
+		return value
+	}
+	s.errorf("expected complex; found %s", n)
+	panic("not reached")
+}
+
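+// evalEmptyInterface evaluates n for assignment to an empty interface (or
+// reflect.Value) argument, returning the node's natural value.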
+func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
+	s.at(n)
+	switch n := n.(type) {
+	case *parse.BoolNode:
+		return reflect.ValueOf(n.True)
+	case *parse.DotNode:
+		return dot
+	case *parse.FieldNode:
+		return s.evalFieldNode(dot, n, nil, missingVal)
+	case *parse.IdentifierNode:
+		return s.evalFunction(dot, n, n, nil, missingVal)
+	case *parse.NilNode:
+		// NilNode is handled in evalArg, the only place that calls here.
+		s.errorf("evalEmptyInterface: nil (can't happen)")
+	case *parse.NumberNode:
+		return s.idealConstant(n)
+	case *parse.StringNode:
+		return reflect.ValueOf(n.Text)
+	case *parse.VariableNode:
+		return s.evalVariableNode(dot, n, nil, missingVal)
+	case *parse.PipeNode:
+		return s.evalPipeline(dot, n)
+	}
+	s.errorf("can't handle assignment of %s to empty interface argument", n)
+	panic("not reached")
+}
+
+// indirect returns the item at the end of indirection, and a bool to indicate
+// if it's nil. If the returned bool is true, the returned value's kind will be
+// either a pointer or interface.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+		if v.IsNil() {
+			return v, true
+		}
+	}
+	return v, false
+}
+
+// indirectInterface returns the concrete value in an interface value,
+// or else the zero reflect.Value.
+// That is, if v represents the interface value x, the result is the same as reflect.ValueOf(x):
+// the fact that x was an interface value is forgotten.
+func indirectInterface(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Interface {
+		return v
+	}
+	if v.IsNil() {
+		return reflect.Value{}
+	}
+	return v.Elem()
+}
+
+// printValue writes the textual representation of the value to the output of
+// the template.
+func (s *state) printValue(n parse.Node, v reflect.Value) {
+	s.at(n)
+	iface, ok := printableValue(v)
+	if !ok {
+		s.errorf("can't print %s of type %s", n, v.Type())
+	}
+	_, err := fmt.Fprint(s.wr, iface)
+	if err != nil {
+		s.writeError(err)
+	}
+}
+
+// printableValue returns the possibly indirected interface value inside v that
+// is best for a call to a formatted printer.
+func printableValue(v reflect.Value) (interface{}, bool) {
+	if v.Kind() == reflect.Ptr {
+		v, _ = indirect(v) // fmt.Fprint handles nil.
+	}
+	if !v.IsValid() {
+		return "<no value>", true
+	}
+
+	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+			v = v.Addr()
+		} else {
+			switch v.Kind() {
+			case reflect.Chan, reflect.Func:
+				return nil, false
+			}
+		}
+	}
+	return v.Interface(), true
+}
diff --git a/internal/backport/text/template/exec_test.go b/internal/backport/text/template/exec_test.go
new file mode 100644
index 0000000..dbf2631
--- /dev/null
+++ b/internal/backport/text/template/exec_test.go
@@ -0,0 +1,1782 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+	// Basics
+	True        bool
+	I           int
+	U16         uint16
+	X, S        string
+	FloatZero   float64
+	ComplexZero complex128
+	// Nested structs.
+	U *U
+	// Struct with String method.
+	V0     V
+	V1, V2 *V
+	// Struct with Error method.
+	W0     W
+	W1, W2 *W
+	// Slices
+	SI      []int
+	SICap   []int
+	SIEmpty []int
+	SB      []bool
+	// Arrays
+	AI [3]int
+	// Maps
+	MSI      map[string]int
+	MSIone   map[string]int // one element, for deterministic output
+	MSIEmpty map[string]int
+	MXI      map[interface{}]int
+	MII      map[int]int
+	MI32S    map[int32]string
+	MI64S    map[int64]string
+	MUI32S   map[uint32]string
+	MUI64S   map[uint64]string
+	MI8S     map[int8]string
+	MUI8S    map[uint8]string
+	SMSI     []map[string]int
+	// Empty interfaces; used to see if we can dig inside one.
+	Empty0 interface{} // nil
+	Empty1 interface{}
+	Empty2 interface{}
+	Empty3 interface{}
+	Empty4 interface{}
+	// Non-empty interfaces.
+	NonEmptyInterface         I
+	NonEmptyInterfacePtS      *I
+	NonEmptyInterfaceNil      I
+	NonEmptyInterfaceTypedNil I
+	// Stringer.
+	Str fmt.Stringer
+	Err error
+	// Pointers
+	PI  *int
+	PS  *string
+	PSI *[]int
+	NIL *int
+	// Function (not method)
+	BinaryFunc      func(string, string) string
+	VariadicFunc    func(...string) string
+	VariadicFuncInt func(int, ...string) string
+	NilOKFunc       func(*int) bool
+	ErrFunc         func() (string, error)
+	PanicFunc       func() string
+	// Template to test evaluation of templates.
+	Tmpl *Template
+	// Unexported field; cannot be accessed by template.
+	unexported int
+}
+
+type S []string
+
+func (S) Method0() string {
+	return "M0"
+}
+
+type U struct {
+	V string
+}
+
+type V struct {
+	j int
+}
+
+func (v *V) String() string {
+	if v == nil {
+		return "nilV"
+	}
+	return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+	k int
+}
+
+func (w *W) Error() string {
+	if w == nil {
+		return "nilW"
+	}
+	return fmt.Sprintf("[%d]", w.k)
+}
+
+var siVal = I(S{"a", "b"})
+
+var tVal = &T{
+	True:   true,
+	I:      17,
+	U16:    16,
+	X:      "x",
+	S:      "xyz",
+	U:      &U{"v"},
+	V0:     V{6666},
+	V1:     &V{7777}, // leave V2 as nil
+	W0:     W{888},
+	W1:     &W{999}, // leave W2 as nil
+	SI:     []int{3, 4, 5},
+	SICap:  make([]int, 5, 10),
+	AI:     [3]int{3, 4, 5},
+	SB:     []bool{true, false},
+	MSI:    map[string]int{"one": 1, "two": 2, "three": 3},
+	MSIone: map[string]int{"one": 1},
+	MXI:    map[interface{}]int{"one": 1},
+	MII:    map[int]int{1: 1},
+	MI32S:  map[int32]string{1: "one", 2: "two"},
+	MI64S:  map[int64]string{2: "i642", 3: "i643"},
+	MUI32S: map[uint32]string{2: "u322", 3: "u323"},
+	MUI64S: map[uint64]string{2: "ui642", 3: "ui643"},
+	MI8S:   map[int8]string{2: "i82", 3: "i83"},
+	MUI8S:  map[uint8]string{2: "u82", 3: "u83"},
+	SMSI: []map[string]int{
+		{"one": 1, "two": 2},
+		{"eleven": 11, "twelve": 12},
+	},
+	Empty1:                    3,
+	Empty2:                    "empty2",
+	Empty3:                    []int{7, 8},
+	Empty4:                    &U{"UinEmpty"},
+	NonEmptyInterface:         &T{X: "x"},
+	NonEmptyInterfacePtS:      &siVal,
+	NonEmptyInterfaceTypedNil: (*T)(nil),
+	Str:                       bytes.NewBuffer([]byte("foozle")),
+	Err:                       errors.New("erroozle"),
+	PI:                        newInt(23),
+	PS:                        newString("a string"),
+	PSI:                       newIntSlice(21, 22, 23),
+	BinaryFunc:                func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
+	VariadicFunc:              func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
+	VariadicFuncInt:           func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
+	NilOKFunc:                 func(s *int) bool { return s == nil },
+	ErrFunc:                   func() (string, error) { return "bla", nil },
+	PanicFunc:                 func() string { panic("test panic") },
+	Tmpl:                      Must(New("x").Parse("test template")), // "x" is the value of .X
+}
+
+var tSliceOfNil = []*T{nil}
+
+// A non-empty interface.
+type I interface {
+	Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+	return &n
+}
+
+func newString(s string) *string {
+	return &s
+}
+
+func newIntSlice(n ...int) *[]int {
+	p := new([]int)
+	*p = make([]int, len(n))
+	copy(*p, n)
+	return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+	return "M0"
+}
+
+func (t *T) Method1(a int) int {
+	return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+	return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) Method3(v interface{}) string {
+	return fmt.Sprintf("Method3: %v", v)
+}
+
+func (t *T) Copy() *T {
+	n := new(T)
+	*n = *t
+	return n
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+	v := make([]int, len(b))
+	for i, x := range b {
+		v[i] = x + a
+	}
+	return v
+}
+
+var myError = errors.New("my error")
+
+// MyError returns a value and an error according to its argument.
+func (t *T) MyError(error bool) (bool, error) {
+	if error {
+		return true, myError
+	}
+	return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+	return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+	if b {
+		return "true"
+	}
+	return ""
+}
+
+func typeOf(arg interface{}) string {
+	return fmt.Sprintf("%T", arg)
+}
+
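+// execTest describes a single execution test: a template input, the expected
+// output, the data to execute against, and whether execution should succeed.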
+type execTest struct {
+	name   string
+	input  string
+	output string
+	data   interface{}
+	ok     bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
+var (
+	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+	// Trivial cases.
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"nil action", "{{nil}}", "", nil, false},
+
+	// Ideal constants.
+	{"ideal int", "{{typeOf 3}}", "int", 0, true},
+	{"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+	{"ideal nil without type", "{{nil}}", "", 0, false},
+
+	// Fields of structs.
+	{".X", "-{{.X}}-", "-x-", tVal, true},
+	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+	{".unexported", "{{.unexported}}", "", tVal, false},
+
+	// Fields on maps.
+	{"map .one", "{{.MSI.one}}", "1", tVal, true},
+	{"map .two", "{{.MSI.two}}", "2", tVal, true},
+	{"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
+	{"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+	{"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+	{"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+	// Dots of all kinds to test basic evaluation.
+	{"dot int", "<{{.}}>", "<13>", 13, true},
+	{"dot uint", "<{{.}}>", "<14>", uint(14), true},
+	{"dot float", "<{{.}}>", "<15.1>", 15.1, true},
+	{"dot bool", "<{{.}}>", "<true>", true, true},
+	{"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
+	{"dot string", "<{{.}}>", "<hello>", "hello", true},
+	{"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
+	{"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
+	{"dot struct", "<{{.}}>", "<{7 seven}>", struct {
+		a int
+		b string
+	}{7, "seven"}, true},
+
+	// Variables.
+	{"$ int", "{{$}}", "123", 123, true},
+	{"$.I", "{{$.I}}", "17", tVal, true},
+	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
+	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+	{"simple assignment", "{{$x := 2}}{{$x = 3}}{{$x}}", "3", tVal, true},
+	{"nested assignment",
+		"{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{$x}}",
+		"3", tVal, true},
+	{"nested assignment changes the last declaration",
+		"{{$x := 1}}{{if true}}{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{end}}{{$x}}",
+		"1", tVal, true},
+
+	// Type with String method.
+	{"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
+	{"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
+	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+	// Type with Error method.
+	{"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
+	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+	// Pointers.
+	{"*int", "{{.PI}}", "23", tVal, true},
+	{"*string", "{{.PS}}", "a string", tVal, true},
+	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+	{"NIL", "{{.NIL}}", "<nil>", tVal, true},
+
+	// Empty interfaces holding values.
+	{"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
+	{"empty with int", "{{.Empty1}}", "3", tVal, true},
+	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+	// Edge cases with <no value> with an interface value
+	{"field on interface", "{{.foo}}", "<no value>", nil, true},
+	{"field on parenthesized interface", "{{(.).foo}}", "<no value>", nil, true},
+
+	// Issue 31810: Parenthesized first element of pipeline with arguments.
+	// See also TestIssue31810.
+	{"unparenthesized non-function", "{{1 2}}", "", nil, false},
+	{"parenthesized non-function", "{{(1) 2}}", "", nil, false},
+	{"parenthesized non-function with no args", "{{(1)}}", "1", nil, true}, // This is fine.
+
+	// Method calls.
+	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: <nil>-", tVal, true},
+	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: <nil>-", tVal, true},
+	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{"method on chained var",
+		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method",
+		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method on variable",
+		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+		"true", tVal, true},
+	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
+	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
+	{"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
+	{"method on typed nil interface value", "{{.NonEmptyInterfaceTypedNil.Method0}}", "M0", tVal, true},
+
+	// Function call builtin.
+	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
+	{".VariadicFunc0", "{{call .VariadicFunc}}", "<>", tVal, true},
+	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "<he+llo>", tVal, true},
+	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=<he+llo>", tVal, true},
+	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
+	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
+	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
+	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
+	{"call nil", "{{call nil}}", "", tVal, false},
+
+	// Erroneous function calls (check args).
+	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
+	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
+	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
+	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
+	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
+	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
+	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
+	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
+
+	// Pipelines.
+	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-<he+<llo>>-", tVal, true},
+
+	// Nil values aren't missing arguments.
+	{"nil pipeline", "{{ .Empty0 | call .NilOKFunc }}", "true", tVal, true},
+	{"nil call arg", "{{ call .NilOKFunc .Empty0 }}", "true", tVal, true},
+	{"bad nil pipeline", "{{ .Empty0 | .VariadicFunc }}", "", tVal, false},
+
+	// Parenthesized expressions
+	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
+
+	// Parenthesized expressions with field accesses
+	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
+	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
+	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
+	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
+
+	// If.
+	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
+	{"if on typed nil interface value", "{{if .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
+	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
+
+	// Print etc.
+	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+	{"print nil", `{{print nil}}`, "<nil>", tVal, true},
+	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+	{"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
+	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+	// HTML.
+	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html", `{{html .PS}}`, "a string", tVal, true},
+	{"html typed nil", `{{html .NIL}}`, "&lt;nil&gt;", tVal, true},
+	{"html untyped nil", `{{html .Empty0}}`, "&lt;no value&gt;", tVal, true},
+
+	// JavaScript.
+	{"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
+
+	// URL query.
+	{"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+	// Booleans
+	{"not", "{{not true}} {{not false}}", "false true", nil, true},
+	{"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+	{"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+	{"or short-circuit", "{{or 0 1 (die)}}", "1", nil, true},
+	{"and short-circuit", "{{and 1 0 (die)}}", "0", nil, true},
+	{"or short-circuit2", "{{or 0 0 (die)}}", "", nil, false},
+	{"and short-circuit2", "{{and 1 1 (die)}}", "", nil, false},
+	{"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+	{"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+	// Indexing.
+	{"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+	{"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+	{"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+	{"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+	{"slice[nil]", "{{index .SI nil}}", "", tVal, false},
+	{"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+	{"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+	{"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
+	{"map[nil]", "{{index .MSI nil}}", "", tVal, false},
+	{"map[``]", "{{index .MSI ``}}", "0", tVal, true},
+	{"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+	{"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+	{"nil[1]", "{{index nil 1}}", "", tVal, false},
+	{"map MI64S", "{{index .MI64S 2}}", "i642", tVal, true},
+	{"map MI32S", "{{index .MI32S 2}}", "two", tVal, true},
+	{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
+	{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
+	{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
+	{"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
+
+	// Slicing.
+	{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
+	{"slice[1:]", "{{slice .SI 1}}", "[4 5]", tVal, true},
+	{"slice[1:2]", "{{slice .SI 1 2}}", "[4]", tVal, true},
+	{"slice[-1:]", "{{slice .SI -1}}", "", tVal, false},
+	{"slice[1:-2]", "{{slice .SI 1 -2}}", "", tVal, false},
+	{"slice[1:2:-1]", "{{slice .SI 1 2 -1}}", "", tVal, false},
+	{"slice[2:1]", "{{slice .SI 2 1}}", "", tVal, false},
+	{"slice[2:2:1]", "{{slice .SI 2 2 1}}", "", tVal, false},
+	{"out of range", "{{slice .SI 4 5}}", "", tVal, false},
+	{"out of range", "{{slice .SI 2 2 5}}", "", tVal, false},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10}}", "[0 0 0 0]", tVal, true},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10 10}}", "[0 0 0 0]", tVal, true},
+	{"indexes > cap(s)", "{{slice .SICap 10 11}}", "", tVal, false},
+	{"indexes > cap(s)", "{{slice .SICap 6 10 11}}", "", tVal, false},
+	{"array[:]", "{{slice .AI}}", "[3 4 5]", tVal, true},
+	{"array[1:]", "{{slice .AI 1}}", "[4 5]", tVal, true},
+	{"array[1:2]", "{{slice .AI 1 2}}", "[4]", tVal, true},
+	{"string[:]", "{{slice .S}}", "xyz", tVal, true},
+	{"string[0:1]", "{{slice .S 0 1}}", "x", tVal, true},
+	{"string[1:]", "{{slice .S 1}}", "yz", tVal, true},
+	{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
+	{"out of range", "{{slice .S 1 5}}", "", tVal, false},
+	{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
+	{"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
+
+	// Len.
+	{"slice", "{{len .SI}}", "3", tVal, true},
+	{"map", "{{len .MSI }}", "3", tVal, true},
+	{"len of int", "{{len 3}}", "", tVal, false},
+	{"len of nothing", "{{len .Empty0}}", "", tVal, false},
+	{"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
+
+	// With.
+	{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+	{"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+	{"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+	{"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
+	{"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+	{"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+	{"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+	{"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+	{"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+	{"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+	{"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+	{"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+
+	// Range.
+	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
+	{"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
+	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
+	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
+	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
+	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+	// Cute examples.
+	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+	// Error handling.
+	{"error method, error", "{{.MyError true}}", "", tVal, false},
+	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
+
+	// Numbers
+	{"decimal", "{{print 1234}}", "1234", tVal, true},
+	{"decimal _", "{{print 12_34}}", "1234", tVal, true},
+	{"binary", "{{print 0b101}}", "5", tVal, true},
+	{"binary _", "{{print 0b_1_0_1}}", "5", tVal, true},
+	{"BINARY", "{{print 0B101}}", "5", tVal, true},
+	{"octal0", "{{print 0377}}", "255", tVal, true},
+	{"octal", "{{print 0o377}}", "255", tVal, true},
+	{"octal _", "{{print 0o_3_7_7}}", "255", tVal, true},
+	{"OCTAL", "{{print 0O377}}", "255", tVal, true},
+	{"hex", "{{print 0x123}}", "291", tVal, true},
+	{"hex _", "{{print 0x1_23}}", "291", tVal, true},
+	{"HEX", "{{print 0X123ABC}}", "1194684", tVal, true},
+	{"float", "{{print 123.4}}", "123.4", tVal, true},
+	{"float _", "{{print 0_0_1_2_3.4}}", "123.4", tVal, true},
+	{"hex float", "{{print +0x1.ep+2}}", "7.5", tVal, true},
+	{"hex float _", "{{print +0x_1.e_0p+0_2}}", "7.5", tVal, true},
+	{"HEX float", "{{print +0X1.EP+2}}", "7.5", tVal, true},
+	{"print multi", "{{print 1_2_3_4 7.5_00_00_00}}", "1234 7.5", tVal, true},
+	{"print multi2", "{{print 1234 0x0_1.e_0p+02}}", "1234 7.5", tVal, true},
+
+	// Fixed bugs.
+	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+	// Do not loop endlessly in indirect for non-empty interfaces.
+	// The bug appears with *interface only; looped forever.
+	{"bug1", "{{.Method0}}", "M0", &iVal, true},
+	// Was taking address of interface field, so method set was empty.
+	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+	// Struct values were not legal in with - mere oversight.
+	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+	// Nil interface values in if.
+	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+	// Stringer.
+	{"bug5", "{{.Str}}", "foozle", tVal, true},
+	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
+	// Args need to be indirected and dereferenced sometimes.
+	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+	// Legal parse but illegal execution: non-function should have no arguments.
+	{"bug7a", "{{3 2}}", "", tVal, false},
+	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
+	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
+	// Pipelined arg was not being type-checked.
+	{"bug8a", "{{3|oneArg}}", "", tVal, false},
+	{"bug8b", "{{4|dddArg 3}}", "", tVal, false},
+	// A bug was introduced that broke map lookups for lower-case names.
+	{"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
+	// Field chain starting with function did not work.
+	{"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
+	// Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
+	{"bug11", "{{valueString .PS}}", "", T{}, false},
+	// 0xef gave constant type float64. Issue 8622.
+	{"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
+	{"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
+	{"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
+	{"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
+	// Chained nodes did not work as arguments. Issue 8473.
+	{"bug13", "{{print (.Copy).I}}", "17", tVal, true},
+	// Didn't protect against nil or literal values in field chains.
+	{"bug14a", "{{(nil).True}}", "", tVal, false},
+	{"bug14b", "{{$x := nil}}{{$x.anything}}", "", tVal, false},
+	{"bug14c", `{{$x := (1.0)}}{{$y := ("hello")}}{{$x.anything}}{{$y.true}}`, "", tVal, false},
+	// Didn't call validateType on function results. Issue 10800.
+	{"bug15", "{{valueString returnInt}}", "", tVal, false},
+	// Variadic function corner cases. Issue 10946.
+	{"bug16a", "{{true|printf}}", "", tVal, false},
+	{"bug16b", "{{1|printf}}", "", tVal, false},
+	{"bug16c", "{{1.1|printf}}", "", tVal, false},
+	{"bug16d", "{{'x'|printf}}", "", tVal, false},
+	{"bug16e", "{{0i|printf}}", "", tVal, false},
+	{"bug16f", "{{true|twoArgs \"xxx\"}}", "", tVal, false},
+	{"bug16g", "{{\"aaa\" |twoArgs \"bbb\"}}", "twoArgs=bbbaaa", tVal, true},
+	{"bug16h", "{{1|oneArg}}", "", tVal, false},
+	{"bug16i", "{{\"aaa\"|oneArg}}", "oneArg=aaa", tVal, true},
+	{"bug16j", "{{1+2i|printf \"%v\"}}", "(1+2i)", tVal, true},
+	{"bug16k", "{{\"aaa\"|printf }}", "aaa", tVal, true},
+	{"bug17a", "{{.NonEmptyInterface.X}}", "x", tVal, true},
+	{"bug17b", "-{{.NonEmptyInterface.Method1 1234}}-", "-1234-", tVal, true},
+	{"bug17c", "{{len .NonEmptyInterfacePtS}}", "2", tVal, true},
+	{"bug17d", "{{index .NonEmptyInterfacePtS 0}}", "a", tVal, true},
+	{"bug17e", "{{range .NonEmptyInterfacePtS}}-{{.}}-{{end}}", "-a--b-", tVal, true},
+
+	// More variadic function corner cases. Some runes would get evaluated
+	// as constant floats instead of ints. Issue 34483.
+	{"bug18a", "{{eq . '.'}}", "true", '.', true},
+	{"bug18b", "{{eq . 'e'}}", "true", 'e', true},
+	{"bug18c", "{{eq . 'P'}}", "true", 'P', true},
+}
+
+func zeroArgs() string {
+	return "zeroArgs"
+}
+
+func oneArg(a string) string {
+	return "oneArg=" + a
+}
+
+func twoArgs(a, b string) string {
+	return "twoArgs=" + a + b
+}
+
+func dddArg(a int, b ...string) string {
+	return fmt.Sprintln(a, b)
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
+func count(n int) chan string {
+	if n == 0 {
+		return nil
+	}
+	c := make(chan string)
+	go func() {
+		for i := 0; i < n; i++ {
+			c <- "abcdefghijklmnop"[i : i+1]
+		}
+		close(c)
+	}()
+	return c
+}
+
+// vfunc takes a V and a *V, in that order.
+func vfunc(V, *V) string {
+	return "vfunc"
+}
+
+// valueString takes a string, not a pointer.
+func valueString(v string) string {
+	return "value is ignored"
+}
+
+// returnInt returns an int
+func returnInt() int {
+	return 7
+}
+
+func add(args ...int) int {
+	sum := 0
+	for _, x := range args {
+		sum += x
+	}
+	return sum
+}
+
+func echo(arg interface{}) interface{} {
+	return arg
+}
+
+func makemap(arg ...string) map[string]string {
+	if len(arg)%2 != 0 {
+		panic("bad makemap")
+	}
+	m := make(map[string]string)
+	for i := 0; i < len(arg); i += 2 {
+		m[arg[i]] = arg[i+1]
+	}
+	return m
+}
+
+func stringer(s fmt.Stringer) string {
+	return s.String()
+}
+
+func mapOfThree() interface{} {
+	return map[string]int{"three": 3}
+}
+
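+// testExecute parses and executes each test case, using template as the
+// starting template if non-nil, and checks the output and error expectations.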
+func testExecute(execTests []execTest, template *Template, t *testing.T) {
+	b := new(bytes.Buffer)
+	funcs := FuncMap{
+		"add":         add,
+		"count":       count,
+		"dddArg":      dddArg,
+		"die":         func() bool { panic("die") },
+		"echo":        echo,
+		"makemap":     makemap,
+		"mapOfThree":  mapOfThree,
+		"oneArg":      oneArg,
+		"returnInt":   returnInt,
+		"stringer":    stringer,
+		"twoArgs":     twoArgs,
+		"typeOf":      typeOf,
+		"valueString": valueString,
+		"vfunc":       vfunc,
+		"zeroArgs":    zeroArgs,
+	}
+	for _, test := range execTests {
+		var tmpl *Template
+		var err error
+		if template == nil {
+			tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
+		} else {
+			tmpl, err = template.New(test.name).Funcs(funcs).Parse(test.input)
+		}
+		if err != nil {
+			t.Errorf("%s: parse error: %s", test.name, err)
+			continue
+		}
+		b.Reset()
+		err = tmpl.Execute(b, test.data)
+		switch {
+		case !test.ok && err == nil:
+			t.Errorf("%s: expected error; got none", test.name)
+			continue
+		case test.ok && err != nil:
+			t.Errorf("%s: unexpected execute error: %s", test.name, err)
+			continue
+		case !test.ok && err != nil:
+			// expected error, got one
+			if *debug {
+				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+			}
+		}
+		result := b.String()
+		if result != test.output {
+			t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+		}
+	}
+}
+
+func TestExecute(t *testing.T) {
+	testExecute(execTests, nil, t)
+}
+
+var delimPairs = []string{
+	"", "", // default
+	"{{", "}}", // same as default
+	"<<", ">>", // distinct
+	"|", "|", // same
+	"(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+	const hello = "Hello, world"
+	var value = struct{ Str string }{hello}
+	for i := 0; i < len(delimPairs); i += 2 {
+		text := ".Str"
+		left := delimPairs[i+0]
+		trueLeft := left
+		right := delimPairs[i+1]
+		trueRight := right
+		if left == "" { // default case
+			trueLeft = "{{"
+		}
+		if right == "" { // default case
+			trueRight = "}}"
+		}
+		text = trueLeft + text + trueRight
+		// Now add a comment
+		text += trueLeft + "/*comment*/" + trueRight
+		// Now add an action containing a string.
+		text += trueLeft + `"` + trueLeft + `"` + trueRight
+		// At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+		tmpl, err := New("delims").Delims(left, right).Parse(text)
+		if err != nil {
+			t.Fatalf("delim %q text %q parse err %s", left, text, err)
+		}
+		var b = new(bytes.Buffer)
+		err = tmpl.Execute(b, value)
+		if err != nil {
+			t.Fatalf("delim %q exec err %s", left, err)
+		}
+		if b.String() != hello+trueLeft {
+			t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+		}
+	}
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+	b := new(bytes.Buffer)
+	tmpl := New("error")
+	_, err := tmpl.Parse("{{.MyError true}}")
+	if err != nil {
+		t.Fatalf("parse error: %s", err)
+	}
+	err = tmpl.Execute(b, tVal)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	} else if !strings.Contains(err.Error(), myError.Error()) {
+		if *debug {
+			fmt.Printf("test execute error: %s\n", err)
+		}
+		t.Errorf("expected myError; got %s", err)
+	}
+}
+
+const execErrorText = `line 1
+line 2
+line 3
+{{template "one" .}}
+{{define "one"}}{{template "two" .}}{{end}}
+{{define "two"}}{{template "three" .}}{{end}}
+{{define "three"}}{{index "hi" $}}{{end}}`
+
+// Check that an error from a nested template contains all the relevant information.
+func TestExecError(t *testing.T) {
+	tmpl, err := New("top").Parse(execErrorText)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
+	got := err.Error()
+	if got != want {
+		t.Errorf("expected\n%q\ngot\n%q", want, got)
+	}
+}
+
+type CustomError struct{}
+
+func (*CustomError) Error() string { return "heyo !" }
+
+// Check that a custom error can be returned.
+func TestExecError_CustomError(t *testing.T) {
+	failingFunc := func() (string, error) {
+		return "", &CustomError{}
+	}
+	tmpl := Must(New("top").Funcs(FuncMap{
+		"err": failingFunc,
+	}).Parse("{{ err }}"))
+
+	var b bytes.Buffer
+	err := tmpl.Execute(&b, nil)
+
+	var e *CustomError
+	if !errors.As(err, &e) {
+		t.Fatalf("expected custom error; got %s", err)
+	}
+}
+
+func TestJSEscaping(t *testing.T) {
+	testCases := []struct {
+		in, exp string
+	}{
+		{`a`, `a`},
+		{`'foo`, `\'foo`},
+		{`Go "jump" \`, `Go \"jump\" \\`},
+		{`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+		{"unprintable \uFDFF", `unprintable \uFDFF`},
+		{`<html>`, `\u003Chtml\u003E`},
+		{`no = in attributes`, `no \u003D in attributes`},
+		{`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
+	}
+	for _, tc := range testCases {
+		s := JSEscapeString(tc.in)
+		if s != tc.exp {
+			t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+		}
+	}
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+	Val         int
+	Left, Right *Tree
+}
+
+// Use different delimiters to test Template.Delims.
+// Also test the trimming of leading and trailing spaces.
+const treeTemplate = `
+	(- define "tree" -)
+	[
+		(- .Val -)
+		(- with .Left -)
+			(template "tree" . -)
+		(- end -)
+		(- with .Right -)
+			(- template "tree" . -)
+		(- end -)
+	]
+	(- end -)
+`
+
+func TestTree(t *testing.T) {
+	var tree = &Tree{
+		1,
+		&Tree{
+			2, &Tree{
+				3,
+				&Tree{
+					4, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				5,
+				&Tree{
+					6, nil, nil,
+				},
+				nil,
+			},
+		},
+		&Tree{
+			7,
+			&Tree{
+				8,
+				&Tree{
+					9, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				10,
+				&Tree{
+					11, nil, nil,
+				},
+				nil,
+			},
+		},
+	}
+	tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+	// First by looking up the template.
+	err = tmpl.Lookup("tree").Execute(&b, tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result := b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+	// Then direct to execution.
+	b.Reset()
+	err = tmpl.ExecuteTemplate(&b, "tree", tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result = b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+}
+
+func TestExecuteOnNewTemplate(t *testing.T) {
+	// This is issue 3872.
+	New("Name").Templates()
+	// This is issue 11379.
+	new(Template).Templates()
+	new(Template).Parse("")
+	new(Template).New("abc").Parse("")
+	new(Template).Execute(nil, nil)                // returns an error (but does not crash)
+	new(Template).ExecuteTemplate(nil, "XXX", nil) // returns an error (but does not crash)
+}
+
+const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
+
+func TestMessageForExecuteEmpty(t *testing.T) {
+	// Test a truly empty template.
+	tmpl := New("empty")
+	var b bytes.Buffer
+	err := tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected initial error")
+	}
+	got := err.Error()
+	want := `template: empty: "empty" is an incomplete or empty template`
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+	// Add a non-empty template to check that the error is helpful.
+	tests, err := New("").Parse(testTemplates)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl.AddParseTree("secondary", tests.Tree)
+	err = tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected second error")
+	}
+	got = err.Error()
+	want = `template: empty: "empty" is an incomplete or empty template`
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+	// Make sure we can execute the secondary.
+	err = tmpl.ExecuteTemplate(&b, "secondary", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFinalForPrintf(t *testing.T) {
+	tmpl, err := New("").Parse(`{{"x" | printf}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
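+// cmpTest describes a comparison expression, its expected truth value as a
+// string, and whether evaluation should succeed.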
+type cmpTest struct {
+	expr  string
+	truth string
+	ok    bool
+}
+
+var cmpTests = []cmpTest{
+	{"eq true true", "true", true},
+	{"eq true false", "false", true},
+	{"eq 1+2i 1+2i", "true", true},
+	{"eq 1+2i 1+3i", "false", true},
+	{"eq 1.5 1.5", "true", true},
+	{"eq 1.5 2.5", "false", true},
+	{"eq 1 1", "true", true},
+	{"eq 1 2", "false", true},
+	{"eq `xy` `xy`", "true", true},
+	{"eq `xy` `xyz`", "false", true},
+	{"eq .Uthree .Uthree", "true", true},
+	{"eq .Uthree .Ufour", "false", true},
+	{"eq 3 4 5 6 3", "true", true},
+	{"eq 3 4 5 6 7", "false", true},
+	{"ne true true", "false", true},
+	{"ne true false", "true", true},
+	{"ne 1+2i 1+2i", "false", true},
+	{"ne 1+2i 1+3i", "true", true},
+	{"ne 1.5 1.5", "false", true},
+	{"ne 1.5 2.5", "true", true},
+	{"ne 1 1", "false", true},
+	{"ne 1 2", "true", true},
+	{"ne `xy` `xy`", "false", true},
+	{"ne `xy` `xyz`", "true", true},
+	{"ne .Uthree .Uthree", "false", true},
+	{"ne .Uthree .Ufour", "true", true},
+	{"lt 1.5 1.5", "false", true},
+	{"lt 1.5 2.5", "true", true},
+	{"lt 1 1", "false", true},
+	{"lt 1 2", "true", true},
+	{"lt `xy` `xy`", "false", true},
+	{"lt `xy` `xyz`", "true", true},
+	{"lt .Uthree .Uthree", "false", true},
+	{"lt .Uthree .Ufour", "true", true},
+	{"le 1.5 1.5", "true", true},
+	{"le 1.5 2.5", "true", true},
+	{"le 2.5 1.5", "false", true},
+	{"le 1 1", "true", true},
+	{"le 1 2", "true", true},
+	{"le 2 1", "false", true},
+	{"le `xy` `xy`", "true", true},
+	{"le `xy` `xyz`", "true", true},
+	{"le `xyz` `xy`", "false", true},
+	{"le .Uthree .Uthree", "true", true},
+	{"le .Uthree .Ufour", "true", true},
+	{"le .Ufour .Uthree", "false", true},
+	{"gt 1.5 1.5", "false", true},
+	{"gt 1.5 2.5", "false", true},
+	{"gt 1 1", "false", true},
+	{"gt 2 1", "true", true},
+	{"gt 1 2", "false", true},
+	{"gt `xy` `xy`", "false", true},
+	{"gt `xy` `xyz`", "false", true},
+	{"gt .Uthree .Uthree", "false", true},
+	{"gt .Uthree .Ufour", "false", true},
+	{"gt .Ufour .Uthree", "true", true},
+	{"ge 1.5 1.5", "true", true},
+	{"ge 1.5 2.5", "false", true},
+	{"ge 2.5 1.5", "true", true},
+	{"ge 1 1", "true", true},
+	{"ge 1 2", "false", true},
+	{"ge 2 1", "true", true},
+	{"ge `xy` `xy`", "true", true},
+	{"ge `xy` `xyz`", "false", true},
+	{"ge `xyz` `xy`", "true", true},
+	{"ge .Uthree .Uthree", "true", true},
+	{"ge .Uthree .Ufour", "false", true},
+	{"ge .Ufour .Uthree", "true", true},
+	// Mixing signed and unsigned integers.
+	{"eq .Uthree .Three", "true", true},
+	{"eq .Three .Uthree", "true", true},
+	{"le .Uthree .Three", "true", true},
+	{"le .Three .Uthree", "true", true},
+	{"ge .Uthree .Three", "true", true},
+	{"ge .Three .Uthree", "true", true},
+	{"lt .Uthree .Three", "false", true},
+	{"lt .Three .Uthree", "false", true},
+	{"gt .Uthree .Three", "false", true},
+	{"gt .Three .Uthree", "false", true},
+	{"eq .Ufour .Three", "false", true},
+	{"lt .Ufour .Three", "false", true},
+	{"gt .Ufour .Three", "true", true},
+	{"eq .NegOne .Uthree", "false", true},
+	{"eq .Uthree .NegOne", "false", true},
+	{"ne .NegOne .Uthree", "true", true},
+	{"ne .Uthree .NegOne", "true", true},
+	{"lt .NegOne .Uthree", "true", true},
+	{"lt .Uthree .NegOne", "false", true},
+	{"le .NegOne .Uthree", "true", true},
+	{"le .Uthree .NegOne", "false", true},
+	{"gt .NegOne .Uthree", "false", true},
+	{"gt .Uthree .NegOne", "true", true},
+	{"ge .NegOne .Uthree", "false", true},
+	{"ge .Uthree .NegOne", "true", true},
+	{"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule.
+	{"eq (index `x` 0) 'y'", "false", true},
+	{"eq .V1 .V2", "true", true},
+	{"eq .Ptr .Ptr", "true", true},
+	{"eq .Ptr .NilPtr", "false", true},
+	{"eq .NilPtr .NilPtr", "true", true},
+	{"eq .Iface1 .Iface1", "true", true},
+	{"eq .Iface1 .NilIface", "false", true},
+	{"eq .NilIface .NilIface", "true", true},
+	{"eq .NilIface .Iface1", "false", true},
+	{"eq .NilIface 0", "false", true},
+	{"eq 0 .NilIface", "false", true},
+	// Errors
+	{"eq `xy` 1", "", false},       // Different types.
+	{"eq 2 2.0", "", false},        // Different types.
+	{"lt true true", "", false},    // Unordered types.
+	{"lt 1+0i 1+0i", "", false},    // Unordered types.
+	{"eq .Ptr 1", "", false},       // Incompatible types.
+	{"eq .Ptr .NegOne", "", false}, // Incompatible types.
+	{"eq .Map .Map", "", false},    // Uncomparable types.
+	{"eq .Map .V1", "", false},     // Uncomparable types.
+}
+
+func TestComparison(t *testing.T) {
+	b := new(bytes.Buffer)
+	var cmpStruct = struct {
+		Uthree, Ufour    uint
+		NegOne, Three    int
+		Ptr, NilPtr      *int
+		Map              map[int]int
+		V1, V2           V
+		Iface1, NilIface fmt.Stringer
+	}{
+		Uthree: 3,
+		Ufour:  4,
+		NegOne: -1,
+		Three:  3,
+		Ptr:    new(int),
+		Iface1: b,
+	}
+	for _, test := range cmpTests {
+		text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
+		tmpl, err := New("empty").Parse(text)
+		if err != nil {
+			t.Fatalf("%q: %s", test.expr, err)
+		}
+		b.Reset()
+		err = tmpl.Execute(b, &cmpStruct)
+		if test.ok && err != nil {
+			t.Errorf("%s errored incorrectly: %s", test.expr, err)
+			continue
+		}
+		if !test.ok && err == nil {
+			t.Errorf("%s did not error", test.expr)
+			continue
+		}
+		if b.String() != test.truth {
+			t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
+		}
+	}
+}
+
+func TestMissingMapKey(t *testing.T) {
+	data := map[string]int{
+		"x": 99,
+	}
+	tmpl, err := New("t1").Parse("{{.x}} {{.y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	// By default, just get "<no value>"
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := "99 <no value>"
+	got := b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Same if we set the option explicitly to the default.
+	tmpl.Option("missingkey=default")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("default:", err)
+	}
+	want = "99 <no value>"
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Next we ask for a zero value
+	tmpl.Option("missingkey=zero")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("zero:", err)
+	}
+	want = "99 0"
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Now we ask for an error.
+	tmpl.Option("missingkey=error")
+	err = tmpl.Execute(&b, data)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	}
+	// same Option, but now a nil interface: ask for an error
+	err = tmpl.Execute(&b, nil)
+	t.Log(err)
+	if err == nil {
+		t.Errorf("expected error for nil-interface; got none")
+	}
+}
+
+// Test that the error message for a multiline unterminated string
+// refers to the line number of the opening quote.
+func TestUnterminatedStringError(t *testing.T) {
+	_, err := New("X").Parse("hello\n\n{{`unterminated\n\n\n\n}}\n some more\n\n")
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	str := err.Error()
+	if !strings.Contains(str, "X:3: unterminated raw quoted string") {
+		t.Fatalf("unexpected error: %s", str)
+	}
+}
+
+const alwaysErrorText = "always be failing"
+
+var alwaysError = errors.New(alwaysErrorText)
+
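+// ErrorWriter is an io.Writer whose Write always fails, used to exercise
+// error handling on output.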
+type ErrorWriter int
+
+func (e ErrorWriter) Write(p []byte) (int, error) {
+	return 0, alwaysError
+}
+
+func TestExecuteGivesExecError(t *testing.T) {
+	// First, a non-execution error shouldn't be an ExecError.
+	tmpl, err := New("X").Parse("hello")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ErrorWriter(0), 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	if err.Error() != alwaysErrorText {
+		t.Errorf("expected %q error; got %q", alwaysErrorText, err)
+	}
+	// This one should be an ExecError.
+	tmpl, err = New("X").Parse("hello, {{.X.Y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ioutil.Discard, 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	_, ok := err.(ExecError)
+	if !ok {
+		t.Fatalf("expected an ExecError; got %v", err)
+	}
+	}
+	expect := "field X in type int"
+	if !strings.Contains(err.Error(), expect) {
+		t.Errorf("expected %q; got %q", expect, err)
+	}
+}
+
+func funcNameTestFunc() int {
+	return 0
+}
+
+func TestGoodFuncNames(t *testing.T) {
+	names := []string{
+		"_",
+		"a",
+		"a1",
+		"a1",
+		"Ӵ",
+	}
+	for _, name := range names {
+		tmpl := New("X").Funcs(
+			FuncMap{
+				name: funcNameTestFunc,
+			},
+		)
+		if tmpl == nil {
+			t.Fatalf("nil result for %q", name)
+		}
+	}
+}
+
+func TestBadFuncNames(t *testing.T) {
+	names := []string{
+		"",
+		"2",
+		"a-b",
+	}
+	for _, name := range names {
+		testBadFuncName(name, t)
+	}
+}
+
+func testBadFuncName(name string, t *testing.T) {
+	t.Helper()
+	defer func() {
+		recover()
+	}()
+	New("X").Funcs(
+		FuncMap{
+			name: funcNameTestFunc,
+		},
+	)
+	// If we get here, the name did not cause a panic, which is how Funcs
+	// reports an error.
+	t.Errorf("%q succeeded incorrectly as function name", name)
+}
+
+func TestBlock(t *testing.T) {
+	const (
+		input   = `a({{block "inner" .}}bar({{.}})baz{{end}})b`
+		want    = `a(bar(hello)baz)b`
+		overlay = `{{define "inner"}}foo({{.}})bar{{end}}`
+		want2   = `a(foo(goodbye)bar)b`
+	)
+	tmpl, err := New("outer").Parse(input)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl2, err := Must(tmpl.Clone()).Parse(overlay)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, "hello"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+
+	buf.Reset()
+	if err := tmpl2.Execute(&buf, "goodbye"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want2 {
+		t.Errorf("got %q, want %q", got, want2)
+	}
+}
+
+func TestEvalFieldErrors(t *testing.T) {
+	tests := []struct {
+		name, src string
+		value     interface{}
+		want      string
+	}{
+		{
+			// Check that calling an invalid field on nil pointer
+			// prints a field error instead of a distracting nil
+			// pointer error. https://golang.org/issue/15125
+			"MissingFieldOnNil",
+			"{{.MissingField}}",
+			(*T)(nil),
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"MissingFieldOnNonNil",
+			"{{.MissingField}}",
+			&T{},
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"ExistingFieldOnNil",
+			"{{.X}}",
+			(*T)(nil),
+			"nil pointer evaluating *template.T.X",
+		},
+		{
+			"MissingKeyOnNilMap",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnNilMapPtr",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnMapPtrToNil",
+			"{{.MissingKey}}",
+			&map[string]string{},
+			"<nil>",
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tmpl := Must(New("tmpl").Parse(tc.src))
+			err := tmpl.Execute(ioutil.Discard, tc.value)
+			got := "<nil>"
+			if err != nil {
+				got = err.Error()
+			}
+			if !strings.HasSuffix(got, tc.want) {
+				t.Fatalf("got error %q, want %q", got, tc.want)
+			}
+		})
+	}
+}
+
+func TestMaxExecDepth(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in -short mode")
+	}
+	tmpl := Must(New("tmpl").Parse(`{{template "tmpl" .}}`))
+	err := tmpl.Execute(ioutil.Discard, nil)
+	got := "<nil>"
+	if err != nil {
+		got = err.Error()
+	}
+	const want = "exceeded maximum template depth"
+	if !strings.Contains(got, want) {
+		t.Errorf("got error %q; want %q", got, want)
+	}
+}
+
+func TestAddrOfIndex(t *testing.T) {
+	// golang.org/issue/14916.
+	// Before index worked on reflect.Values, the .String could not be
+	// found on the (incorrectly unaddressable) V value,
+	// in contrast to range, which worked fine.
+	// Also testing that passing a reflect.Value to tmpl.Execute works.
+	texts := []string{
+		`{{range .}}{{.String}}{{end}}`,
+		`{{with index . 0}}{{.String}}{{end}}`,
+	}
+	for _, text := range texts {
+		tmpl := Must(New("tmpl").Parse(text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, reflect.ValueOf([]V{{1}}))
+		if err != nil {
+			t.Fatalf("%s: Execute: %v", text, err)
+		}
+		if buf.String() != "<1>" {
+			t.Fatalf("%s: template output = %q, want %q", text, &buf, "<1>")
+		}
+	}
+}
+
+func TestInterfaceValues(t *testing.T) {
+	// golang.org/issue/17714.
+	// Before index worked on reflect.Values, interface values
+	// were always implicitly promoted to the underlying value,
+	// except that nil interfaces were promoted to the zero reflect.Value.
+	// Eliminating a round trip to interface{} and back to reflect.Value
+	// eliminated this promotion, breaking these cases.
+	tests := []struct {
+		text string
+		out  string
+	}{
+		{`{{index .Nil 1}}`, "ERROR: index of untyped nil"},
+		{`{{index .Slice 2}}`, "2"},
+		{`{{index .Slice .Two}}`, "2"},
+		{`{{call .Nil 1}}`, "ERROR: call of nil"},
+		{`{{call .PlusOne 1}}`, "2"},
+		{`{{call .PlusOne .One}}`, "2"},
+		{`{{and (index .Slice 0) true}}`, "0"},
+		{`{{and .Zero true}}`, "0"},
+		{`{{and (index .Slice 1) false}}`, "false"},
+		{`{{and .One false}}`, "false"},
+		{`{{or (index .Slice 0) false}}`, "false"},
+		{`{{or .Zero false}}`, "false"},
+		{`{{or (index .Slice 1) true}}`, "1"},
+		{`{{or .One true}}`, "1"},
+		{`{{not (index .Slice 0)}}`, "true"},
+		{`{{not .Zero}}`, "true"},
+		{`{{not (index .Slice 1)}}`, "false"},
+		{`{{not .One}}`, "false"},
+		{`{{eq (index .Slice 0) .Zero}}`, "true"},
+		{`{{eq (index .Slice 1) .One}}`, "true"},
+		{`{{ne (index .Slice 0) .Zero}}`, "false"},
+		{`{{ne (index .Slice 1) .One}}`, "false"},
+		{`{{ge (index .Slice 0) .One}}`, "false"},
+		{`{{ge (index .Slice 1) .Zero}}`, "true"},
+		{`{{gt (index .Slice 0) .One}}`, "false"},
+		{`{{gt (index .Slice 1) .Zero}}`, "true"},
+		{`{{le (index .Slice 0) .One}}`, "true"},
+		{`{{le (index .Slice 1) .Zero}}`, "false"},
+		{`{{lt (index .Slice 0) .One}}`, "true"},
+		{`{{lt (index .Slice 1) .Zero}}`, "false"},
+	}
+
+	for _, tt := range tests {
+		tmpl := Must(New("tmpl").Parse(tt.text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, map[string]interface{}{
+			"PlusOne": func(n int) int {
+				return n + 1
+			},
+			"Slice": []int{0, 1, 2, 3},
+			"One":   1,
+			"Two":   2,
+			"Nil":   nil,
+			"Zero":  0,
+		})
+		if strings.HasPrefix(tt.out, "ERROR:") {
+			e := strings.TrimSpace(strings.TrimPrefix(tt.out, "ERROR:"))
+			if err == nil || !strings.Contains(err.Error(), e) {
+				t.Errorf("%s: Execute: %v, want error %q", tt.text, err, e)
+			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("%s: Execute: %v", tt.text, err)
+			continue
+		}
+		if buf.String() != tt.out {
+			t.Errorf("%s: template output = %q, want %q", tt.text, &buf, tt.out)
+		}
+	}
+}
+
+// Check that panics during calls are recovered and returned as errors.
+func TestExecutePanicDuringCall(t *testing.T) {
+	funcs := map[string]interface{}{
+		"doPanic": func() string {
+			panic("custom panic string")
+		},
+	}
+	tests := []struct {
+		name    string
+		input   string
+		data    interface{}
+		wantErr string
+	}{
+		{
+			"direct func call panics",
+			"{{doPanic}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"indirect func call panics",
+			"{{call doPanic}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"direct method call panics",
+			"{{.GetU}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"indirect method call panics",
+			"{{call .GetU}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"func field call panics",
+			"{{call .PanicFunc}}", tVal,
+			`template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
+		},
+		{
+			"method call on nil interface",
+			"{{.NonEmptyInterfaceNil.Method0}}", tVal,
+			`template: t:1:23: executing "t" at <.NonEmptyInterfaceNil.Method0>: nil pointer evaluating template.I.Method0`,
+		},
+	}
+	for _, tc := range tests {
+		b := new(bytes.Buffer)
+		tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
+		if err != nil {
+			t.Fatalf("parse error: %s", err)
+		}
+		err = tmpl.Execute(b, tc.data)
+		if err == nil {
+			t.Errorf("%s: expected error; got none", tc.name)
+		} else if !strings.Contains(err.Error(), tc.wantErr) {
+			if *debug {
+				fmt.Printf("%s: test execute error: %s\n", tc.name, err)
+			}
+			t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
+		}
+	}
+}
+
+// Issue 31810. Check that a parenthesized first argument behaves properly.
+func TestIssue31810(t *testing.T) {
+	// A simple value with no arguments is fine.
+	var b bytes.Buffer
+	const text = "{{ (.)  }}"
+	tmpl, err := New("").Parse(text)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tmpl.Execute(&b, "result")
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", text, b.String(), "result")
+	}
+
+	// Even a plain function fails - need to use call.
+	f := func() string { return "result" }
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err == nil {
+		t.Error("expected error with no call, got none")
+	}
+
+	// Works if the function is explicitly called.
+	const textCall = "{{ (call .)  }}"
+	tmpl, err = New("").Parse(textCall)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", textCall, b.String(), "result")
+	}
+}
+
+// Issue 43065: range over a send-only channel.
+func TestIssue43065(t *testing.T) {
+	var b bytes.Buffer
+	tmp := Must(New("").Parse(`{{range .}}{{end}}`))
+	ch := make(chan<- int)
+	err := tmp.Execute(&b, ch)
+	if err == nil {
+		t.Error("expected err got nil")
+	} else if !strings.Contains(err.Error(), "range over send-only channel") {
+		t.Errorf("%s", err)
+	}
+}
+
+// Issue 39807: data race in html/template & text/template
+func TestIssue39807(t *testing.T) {
+	var wg sync.WaitGroup
+
+	tplFoo, err := New("foo").Parse(`{{ template "bar" . }}`)
+	if err != nil {
+		t.Error(err)
+	}
+
+	tplBar, err := New("bar").Parse("bar")
+	if err != nil {
+		t.Error(err)
+	}
+
+	gofuncs := 10
+	numTemplates := 10
+
+	for i := 1; i <= gofuncs; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < numTemplates; j++ {
+				_, err := tplFoo.AddParseTree(tplBar.Name(), tplBar.Tree)
+				if err != nil {
+					t.Error(err)
+				}
+				err = tplFoo.Execute(ioutil.Discard, nil)
+				if err != nil {
+					t.Error(err)
+				}
+			}
+		}()
+	}
+
+	wg.Wait()
+}
diff --git a/internal/backport/text/template/funcs.go b/internal/backport/text/template/funcs.go
new file mode 100644
index 0000000..11e2e90
--- /dev/null
+++ b/internal/backport/text/template/funcs.go
@@ -0,0 +1,753 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+//
+// Errors returned by Execute wrap the underlying error; call errors.As to
+// uncover them.
+//
+// When template execution invokes a function with an argument list, that list
+// must be assignable to the function's parameter types. Functions meant to
+// apply to arguments of arbitrary type can use parameters of type interface{} or
+// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
+// type can return interface{} or reflect.Value.
+type FuncMap map[string]interface{}
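+
+// Illustrative sketch (not part of the upstream file): a FuncMap may mix
+// single-result functions with (value, error) functions. The names "upper"
+// and "atoi" below are arbitrary examples, not builtins, and assume the
+// strings and strconv packages are imported:
+//
+//	var fm = FuncMap{
+//		"upper": strings.ToUpper, // one result
+//		"atoi":  strconv.Atoi,    // (int, error); a non-nil error aborts Execute
+//	}
+//	t := Must(New("t").Funcs(fm).Parse(`{{upper "go"}} {{atoi "42"}}`))
+//	// Executing t writes "GO 42".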
+
+// builtins returns the FuncMap.
+// It is not a global variable so the linker can dead code eliminate
+// more when this isn't called. See golang.org/issue/36021.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtins() FuncMap {
+	return FuncMap{
+		"and":      and,
+		"call":     call,
+		"html":     HTMLEscaper,
+		"index":    index,
+		"slice":    slice,
+		"js":       JSEscaper,
+		"len":      length,
+		"not":      not,
+		"or":       or,
+		"print":    fmt.Sprint,
+		"printf":   fmt.Sprintf,
+		"println":  fmt.Sprintln,
+		"urlquery": URLQueryEscaper,
+
+		// Comparisons
+		"eq": eq, // ==
+		"ge": ge, // >=
+		"gt": gt, // >
+		"le": le, // <=
+		"lt": lt, // <
+		"ne": ne, // !=
+	}
+}
+
+var builtinFuncsOnce struct {
+	sync.Once
+	v map[string]reflect.Value
+}
+
+// builtinFuncs lazily computes and caches the builtin function map, using builtinFuncsOnce.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtinFuncs() map[string]reflect.Value {
+	builtinFuncsOnce.Do(func() {
+		builtinFuncsOnce.v = createValueFuncs(builtins())
+	})
+	return builtinFuncsOnce.v
+}
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+	m := make(map[string]reflect.Value)
+	addValueFuncs(m, funcMap)
+	return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+	for name, fn := range in {
+		if !goodName(name) {
+			panic(fmt.Errorf("function name %q is not a valid identifier", name))
+		}
+		v := reflect.ValueOf(fn)
+		if v.Kind() != reflect.Func {
+			panic("value for " + name + " not a function")
+		}
+		if !goodFunc(v.Type()) {
+			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+		}
+		out[name] = v
+	}
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+	for name, fn := range in {
+		out[name] = fn
+	}
+}
+
+// goodFunc reports whether the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+	// We allow functions with 1 result or 2 results where the second is an error.
+	switch {
+	case typ.NumOut() == 1:
+		return true
+	case typ.NumOut() == 2 && typ.Out(1) == errorType:
+		return true
+	}
+	return false
+}
+
+// goodName reports whether the function name is a valid identifier.
+func goodName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for i, r := range name {
+		switch {
+		case r == '_':
+		case i == 0 && !unicode.IsLetter(r):
+			return false
+		case !unicode.IsLetter(r) && !unicode.IsDigit(r):
+			return false
+		}
+	}
+	return true
+}
+
+// findFunction looks for a function in the template's function map and then in the global map.
+func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
+	if tmpl != nil && tmpl.common != nil {
+		tmpl.muFuncs.RLock()
+		defer tmpl.muFuncs.RUnlock()
+		if fn := tmpl.execFuncs[name]; fn.IsValid() {
+			return fn, false, true
+		}
+	}
+	if fn := builtinFuncs()[name]; fn.IsValid() {
+		return fn, true, true
+	}
+	return reflect.Value{}, false, false
+}
+
+// prepareArg checks if value can be used as an argument of type argType, and
+// converts an invalid value to appropriate zero if possible.
+func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
+	if !value.IsValid() {
+		if !canBeNil(argType) {
+			return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
+		}
+		value = reflect.Zero(argType)
+	}
+	if value.Type().AssignableTo(argType) {
+		return value, nil
+	}
+	if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
+		value = value.Convert(argType)
+		return value, nil
+	}
+	return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
+}
+
+func intLike(typ reflect.Kind) bool {
+	switch typ {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return true
+	}
+	return false
+}
+
+// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
+func indexArg(index reflect.Value, cap int) (int, error) {
+	var x int64
+	switch index.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x = index.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x = int64(index.Uint())
+	case reflect.Invalid:
+		return 0, fmt.Errorf("cannot index slice/array with nil")
+	default:
+		return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+	}
+	if x < 0 || int(x) < 0 || int(x) > cap {
+		return 0, fmt.Errorf("index out of range: %d", x)
+	}
+	return int(x), nil
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+	item = indirectInterface(item)
+	if !item.IsValid() {
+		return reflect.Value{}, fmt.Errorf("index of untyped nil")
+	}
+	for _, index := range indexes {
+		index = indirectInterface(index)
+		var isNil bool
+		if item, isNil = indirect(item); isNil {
+			return reflect.Value{}, fmt.Errorf("index of nil pointer")
+		}
+		switch item.Kind() {
+		case reflect.Array, reflect.Slice, reflect.String:
+			x, err := indexArg(index, item.Len())
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			item = item.Index(x)
+		case reflect.Map:
+			index, err := prepareArg(index, item.Type().Key())
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			if x := item.MapIndex(index); x.IsValid() {
+				item = x
+			} else {
+				item = reflect.Zero(item.Type().Elem())
+			}
+		case reflect.Invalid:
+			// the loop holds invariant: item.IsValid()
+			panic("unreachable")
+		default:
+			return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
+		}
+	}
+	return item, nil
+}
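+
+// Usage sketch (illustrative only): given dot = map[string][]int{"a": {10, 20}},
+//
+//	{{index . "a" 1}}
+//
+// renders 20, i.e. dot["a"][1] in Go syntax.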
+
+// Slicing.
+
+// slice returns the result of slicing its first argument by the remaining
+// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
+// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
+// argument must be a string, slice, or array.
+func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+	item = indirectInterface(item)
+	if !item.IsValid() {
+		return reflect.Value{}, fmt.Errorf("slice of untyped nil")
+	}
+	if len(indexes) > 3 {
+		return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
+	}
+	var cap int
+	switch item.Kind() {
+	case reflect.String:
+		if len(indexes) == 3 {
+			return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
+		}
+		cap = item.Len()
+	case reflect.Array, reflect.Slice:
+		cap = item.Cap()
+	default:
+		return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
+	}
+	// set default values for cases item[:], item[i:].
+	idx := [3]int{0, item.Len()}
+	for i, index := range indexes {
+		x, err := indexArg(index, cap)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		idx[i] = x
+	}
+	// given item[i:j], make sure i <= j.
+	if idx[0] > idx[1] {
+		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
+	}
+	if len(indexes) < 3 {
+		return item.Slice(idx[0], idx[1]), nil
+	}
+	// given item[i:j:k], make sure i <= j <= k.
+	if idx[1] > idx[2] {
+		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
+	}
+	return item.Slice3(idx[0], idx[1], idx[2]), nil
+}
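+
+// Usage sketch (illustrative only): given dot = []int{1, 2, 3, 4},
+//
+//	{{slice . 1 3}}
+//
+// renders [2 3], the Go expression dot[1:3].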
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item reflect.Value) (int, error) {
+	item, isNil := indirect(item)
+	if isNil {
+		return 0, fmt.Errorf("len of nil pointer")
+	}
+	switch item.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return item.Len(), nil
+	}
+	return 0, fmt.Errorf("len of type %s", item.Type())
+}
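+
+// Usage sketch (illustrative only): {{len .}} applied to the string "héllo"
+// reports 6, because length counts bytes for strings, like Go's built-in len.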
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
+	fn = indirectInterface(fn)
+	if !fn.IsValid() {
+		return reflect.Value{}, fmt.Errorf("call of nil")
+	}
+	typ := fn.Type()
+	if typ.Kind() != reflect.Func {
+		return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
+	}
+	if !goodFunc(typ) {
+		return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
+	}
+	numIn := typ.NumIn()
+	var dddType reflect.Type
+	if typ.IsVariadic() {
+		if len(args) < numIn-1 {
+			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+		}
+		dddType = typ.In(numIn - 1).Elem()
+	} else {
+		if len(args) != numIn {
+			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+		}
+	}
+	argv := make([]reflect.Value, len(args))
+	for i, arg := range args {
+		arg = indirectInterface(arg)
+		// Compute the expected type. Clumsy because of variadics.
+		argType := dddType
+		if !typ.IsVariadic() || i < numIn-1 {
+			argType = typ.In(i)
+		}
+
+		var err error
+		if argv[i], err = prepareArg(arg, argType); err != nil {
+			return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
+		}
+	}
+	return safeCall(fn, argv)
+}
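+
+// Usage sketch (illustrative only): if dot is strings.ToUpper (a func(string) string),
+//
+//	{{call . "go"}}
+//
+// renders GO; each argument must be assignable to the corresponding parameter,
+// as enforced by prepareArg above.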
+
+// safeCall runs fun.Call(args), and returns the resulting value and error, if
+// any. If the call panics, the panic value is returned as an error.
+func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else {
+				err = fmt.Errorf("%v", r)
+			}
+		}
+	}()
+	ret := fun.Call(args)
+	if len(ret) == 2 && !ret[1].IsNil() {
+		return ret[0], ret[1].Interface().(error)
+	}
+	return ret[0], nil
+}
+
+// Boolean logic.
+
+func truth(arg reflect.Value) bool {
+	t, _ := isTrue(indirectInterface(arg))
+	return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+	panic("unreachable") // implemented as a special case in evalCall
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+	panic("unreachable") // implemented as a special case in evalCall
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg reflect.Value) bool {
+	return !truth(arg)
+}
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+	errBadComparisonType = errors.New("invalid type for comparison")
+	errBadComparison     = errors.New("incompatible types for comparison")
+	errNoComparison      = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+	invalidKind kind = iota
+	boolKind
+	complexKind
+	intKind
+	floatKind
+	stringKind
+	uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+	switch v.Kind() {
+	case reflect.Bool:
+		return boolKind, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intKind, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintKind, nil
+	case reflect.Float32, reflect.Float64:
+		return floatKind, nil
+	case reflect.Complex64, reflect.Complex128:
+		return complexKind, nil
+	case reflect.String:
+		return stringKind, nil
+	}
+	return invalidKind, errBadComparisonType
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
+	arg1 = indirectInterface(arg1)
+	if arg1 != zero {
+		if t1 := arg1.Type(); !t1.Comparable() {
+			return false, fmt.Errorf("uncomparable type %s: %v", t1, arg1)
+		}
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	k1, _ := basicKind(arg1)
+	for _, arg := range arg2 {
+		arg = indirectInterface(arg)
+		k2, _ := basicKind(arg)
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
+			default:
+				if arg1 != zero && arg != zero {
+					return false, errBadComparison
+				}
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = arg1.Bool() == arg.Bool()
+			case complexKind:
+				truth = arg1.Complex() == arg.Complex()
+			case floatKind:
+				truth = arg1.Float() == arg.Float()
+			case intKind:
+				truth = arg1.Int() == arg.Int()
+			case stringKind:
+				truth = arg1.String() == arg.String()
+			case uintKind:
+				truth = arg1.Uint() == arg.Uint()
+			default:
+				if arg == zero || arg1 == zero {
+					truth = arg1 == arg
+				} else {
+					if t2 := arg.Type(); !t2.Comparable() {
+						return false, fmt.Errorf("uncomparable type %s: %v", t2, arg)
+					}
+					truth = arg1.Interface() == arg.Interface()
+				}
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
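+
+// Usage sketch (illustrative only): eq follows Go's == for basic kinds and also
+// compares signed with unsigned integers by value, so {{eq 1 .U}} is true when
+// .U is uint(1), even though the static types differ.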
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 reflect.Value) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 reflect.Value) (bool, error) {
+	arg1 = indirectInterface(arg1)
+	k1, err := basicKind(arg1)
+	if err != nil {
+		return false, err
+	}
+	arg2 = indirectInterface(arg2)
+	k2, err := basicKind(arg2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = arg1.Float() < arg2.Float()
+		case intKind:
+			truth = arg1.Int() < arg2.Int()
+		case stringKind:
+			truth = arg1.String() < arg2.String()
+		case uintKind:
+			truth = arg1.Uint() < arg2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 reflect.Value) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 reflect.Value) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 reflect.Value) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+	htmlNull = []byte("\uFFFD")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '\000':
+			html = htmlNull
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if !strings.ContainsAny(s, "'\"&<>\000") {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
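+
+// Usage sketch (illustrative only):
+//
+//	HTMLEscapeString(`<a href="x">'&'</a>`)
+//
+// returns `&lt;a href=&#34;x&#34;&gt;&#39;&amp;&#39;&lt;/a&gt;`; the builtin
+// "html" template function applies the same escaping.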
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\u003C`)
+	jsGt        = []byte(`\u003E`)
+	jsAmp       = []byte(`\u0026`)
+	jsEq        = []byte(`\u003D`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			case '&':
+				w.Write(jsAmp)
+			case '=':
+				w.Write(jsEq)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
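+
+// Usage sketch (illustrative only):
+//
+//	JSEscapeString(`alert("hi")`)
+//
+// returns `alert(\"hi\")`; angle brackets, ampersands, and equals signs become
+// \u escapes (for example \u003C), so the result is safe inside quoted
+// JavaScript strings.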
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>', '&', '=':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
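+
+// Usage sketch (illustrative only):
+//
+//	URLQueryEscaper("a b&c")
+//
+// returns "a+b%26c", matching url.QueryEscape.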
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
diff --git a/internal/backport/text/template/funcs.go.save b/internal/backport/text/template/funcs.go.save
new file mode 100644
index 0000000..c3f6d47
--- /dev/null
+++ b/internal/backport/text/template/funcs.go.save
@@ -0,0 +1,771 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+//
+// Errors returned by Execute wrap the underlying error; call errors.As to
+// uncover them.
+//
+// When template execution invokes a function with an argument list, that list
+// must be assignable to the function's parameter types. Functions meant to
+// apply to arguments of arbitrary type can use parameters of type interface{} or
+// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
+// type can return interface{} or reflect.Value.
+type FuncMap map[string]interface{}
+
+// builtins returns the FuncMap.
+// It is not a global variable so the linker can dead code eliminate
+// more when this isn't called. See golang.org/issue/36021.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtins() FuncMap {
+	return FuncMap{
+		"and":      and,
+		"call":     call,
+		"html":     HTMLEscaper,
+		"index":    index,
+		"slice":    slice,
+		"js":       JSEscaper,
+		"len":      length,
+		"not":      not,
+		"or":       or,
+		"print":    fmt.Sprint,
+		"printf":   fmt.Sprintf,
+		"println":  fmt.Sprintln,
+		"urlquery": URLQueryEscaper,
+
+		// Comparisons
+		"eq": eq, // ==
+		"ge": ge, // >=
+		"gt": gt, // >
+		"le": le, // <=
+		"lt": lt, // <
+		"ne": ne, // !=
+	}
+}
+
+var builtinFuncsOnce struct {
+	sync.Once
+	v map[string]reflect.Value
+}
+
+// builtinFuncs lazily computes and caches the builtin function map, using builtinFuncsOnce.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtinFuncs() map[string]reflect.Value {
+	builtinFuncsOnce.Do(func() {
+		builtinFuncsOnce.v = createValueFuncs(builtins())
+	})
+	return builtinFuncsOnce.v
+}
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+	m := make(map[string]reflect.Value)
+	addValueFuncs(m, funcMap)
+	return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+	for name, fn := range in {
+		if !goodName(name) {
+			panic(fmt.Errorf("function name %q is not a valid identifier", name))
+		}
+		v := reflect.ValueOf(fn)
+		if v.Kind() != reflect.Func {
+			panic("value for " + name + " not a function")
+		}
+		if !goodFunc(v.Type()) {
+			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+		}
+		out[name] = v
+	}
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+	for name, fn := range in {
+		out[name] = fn
+	}
+}
+
+// goodFunc reports whether the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+	// We allow functions with 1 result or 2 results where the second is an error.
+	switch {
+	case typ.NumOut() == 1:
+		return true
+	case typ.NumOut() == 2 && typ.Out(1) == errorType:
+		return true
+	}
+	return false
+}
+
+// goodName reports whether the function name is a valid identifier.
+func goodName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for i, r := range name {
+		switch {
+		case r == '_':
+		case i == 0 && !unicode.IsLetter(r):
+			return false
+		case !unicode.IsLetter(r) && !unicode.IsDigit(r):
+			return false
+		}
+	}
+	return true
+}
+
+// findFunction looks for a function in the template's function map and then in the global map.
+func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
+	if tmpl != nil && tmpl.common != nil {
+		tmpl.muFuncs.RLock()
+		defer tmpl.muFuncs.RUnlock()
+		if fn := tmpl.execFuncs[name]; fn.IsValid() {
+			return fn, true
+		}
+	}
+	if fn := builtinFuncs()[name]; fn.IsValid() {
+		return fn, true
+	}
+	return reflect.Value{}, false
+}
+
+// prepareArg checks if value can be used as an argument of type argType, and
+// converts an invalid value to appropriate zero if possible.
+func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
+	if !value.IsValid() {
+		if !canBeNil(argType) {
+			return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
+		}
+		value = reflect.Zero(argType)
+	}
+	if value.Type().AssignableTo(argType) {
+		return value, nil
+	}
+	if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
+		value = value.Convert(argType)
+		return value, nil
+	}
+	return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
+}
+
+func intLike(typ reflect.Kind) bool {
+	switch typ {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return true
+	}
+	return false
+}
+
+// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
+func indexArg(index reflect.Value, cap int) (int, error) {
+	var x int64
+	switch index.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x = index.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x = int64(index.Uint())
+	case reflect.Invalid:
+		return 0, fmt.Errorf("cannot index slice/array with nil")
+	default:
+		return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+	}
+	if x < 0 || int(x) < 0 || int(x) > cap {
+		return 0, fmt.Errorf("index out of range: %d", x)
+	}
+	return int(x), nil
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+	item = indirectInterface(item)
+	if !item.IsValid() {
+		return reflect.Value{}, fmt.Errorf("index of untyped nil")
+	}
+	for _, index := range indexes {
+		index = indirectInterface(index)
+		var isNil bool
+		if item, isNil = indirect(item); isNil {
+			return reflect.Value{}, fmt.Errorf("index of nil pointer")
+		}
+		switch item.Kind() {
+		case reflect.Array, reflect.Slice, reflect.String:
+			x, err := indexArg(index, item.Len())
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			item = item.Index(x)
+		case reflect.Map:
+			index, err := prepareArg(index, item.Type().Key())
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			if x := item.MapIndex(index); x.IsValid() {
+				item = x
+			} else {
+				item = reflect.Zero(item.Type().Elem())
+			}
+		case reflect.Invalid:
+			// the loop holds invariant: item.IsValid()
+			panic("unreachable")
+		default:
+			return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
+		}
+	}
+	return item, nil
+}
+
+// Slicing.
+
+// slice returns the result of slicing its first argument by the remaining
+// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
+// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
+// argument must be a string, slice, or array.
+func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+	item = indirectInterface(item)
+	if !item.IsValid() {
+		return reflect.Value{}, fmt.Errorf("slice of untyped nil")
+	}
+	if len(indexes) > 3 {
+		return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
+	}
+	var cap int
+	switch item.Kind() {
+	case reflect.String:
+		if len(indexes) == 3 {
+			return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
+		}
+		cap = item.Len()
+	case reflect.Array, reflect.Slice:
+		cap = item.Cap()
+	default:
+		return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
+	}
+	// set default values for cases item[:], item[i:].
+	idx := [3]int{0, item.Len()}
+	for i, index := range indexes {
+		x, err := indexArg(index, cap)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		idx[i] = x
+	}
+	// given item[i:j], make sure i <= j.
+	if idx[0] > idx[1] {
+		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
+	}
+	if len(indexes) < 3 {
+		return item.Slice(idx[0], idx[1]), nil
+	}
+	// given item[i:j:k], make sure i <= j <= k.
+	if idx[1] > idx[2] {
+		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
+	}
+	return item.Slice3(idx[0], idx[1], idx[2]), nil
+}
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item reflect.Value) (int, error) {
+	item, isNil := indirect(item)
+	if isNil {
+		return 0, fmt.Errorf("len of nil pointer")
+	}
+	switch item.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return item.Len(), nil
+	}
+	return 0, fmt.Errorf("len of type %s", item.Type())
+}
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
+	fn = indirectInterface(fn)
+	if !fn.IsValid() {
+		return reflect.Value{}, fmt.Errorf("call of nil")
+	}
+	typ := fn.Type()
+	if typ.Kind() != reflect.Func {
+		return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
+	}
+	if !goodFunc(typ) {
+		return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
+	}
+	numIn := typ.NumIn()
+	var dddType reflect.Type
+	if typ.IsVariadic() {
+		if len(args) < numIn-1 {
+			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+		}
+		dddType = typ.In(numIn - 1).Elem()
+	} else {
+		if len(args) != numIn {
+			return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+		}
+	}
+	argv := make([]reflect.Value, len(args))
+	for i, arg := range args {
+		arg = indirectInterface(arg)
+		// Compute the expected type. Clumsy because of variadics.
+		argType := dddType
+		if !typ.IsVariadic() || i < numIn-1 {
+			argType = typ.In(i)
+		}
+
+		var err error
+		if argv[i], err = prepareArg(arg, argType); err != nil {
+			return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
+		}
+	}
+	return safeCall(fn, argv)
+}
+
+// safeCall runs fun.Call(args), and returns the resulting value and error, if
+// any. If the call panics, the panic value is returned as an error.
+func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else {
+				err = fmt.Errorf("%v", r)
+			}
+		}
+	}()
+	ret := fun.Call(args)
+	if len(ret) == 2 && !ret[1].IsNil() {
+		return ret[0], ret[1].Interface().(error)
+	}
+	return ret[0], nil
+}
+
+// Boolean logic.
+
+func truth(arg reflect.Value) bool {
+	t, _ := isTrue(indirectInterface(arg))
+	return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+	if !truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if !truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+	if truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg reflect.Value) bool {
+	return !truth(arg)
+}
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+	errBadComparisonType = errors.New("invalid type for comparison")
+	errBadComparison     = errors.New("incompatible types for comparison")
+	errNoComparison      = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+	invalidKind kind = iota
+	boolKind
+	complexKind
+	intKind
+	floatKind
+	stringKind
+	uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+	switch v.Kind() {
+	case reflect.Bool:
+		return boolKind, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intKind, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintKind, nil
+	case reflect.Float32, reflect.Float64:
+		return floatKind, nil
+	case reflect.Complex64, reflect.Complex128:
+		return complexKind, nil
+	case reflect.String:
+		return stringKind, nil
+	}
+	return invalidKind, errBadComparisonType
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
+	arg1 = indirectInterface(arg1)
+	if arg1 != zero {
+		if t1 := arg1.Type(); !t1.Comparable() {
+			return false, fmt.Errorf("uncomparable type %s: %v", t1, arg1)
+		}
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	k1, _ := basicKind(arg1)
+	for _, arg := range arg2 {
+		arg = indirectInterface(arg)
+		k2, _ := basicKind(arg)
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
+			default:
+				if arg1 != zero && arg != zero {
+					return false, errBadComparison
+				}
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = arg1.Bool() == arg.Bool()
+			case complexKind:
+				truth = arg1.Complex() == arg.Complex()
+			case floatKind:
+				truth = arg1.Float() == arg.Float()
+			case intKind:
+				truth = arg1.Int() == arg.Int()
+			case stringKind:
+				truth = arg1.String() == arg.String()
+			case uintKind:
+				truth = arg1.Uint() == arg.Uint()
+			default:
+				if arg == zero {
+					truth = arg1 == arg
+				} else {
+					if t2 := arg.Type(); !t2.Comparable() {
+						return false, fmt.Errorf("uncomparable type %s: %v", t2, arg)
+					}
+					truth = arg1.Interface() == arg.Interface()
+				}
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 reflect.Value) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 reflect.Value) (bool, error) {
+	arg1 = indirectInterface(arg1)
+	k1, err := basicKind(arg1)
+	if err != nil {
+		return false, err
+	}
+	arg2 = indirectInterface(arg2)
+	k2, err := basicKind(arg2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = arg1.Float() < arg2.Float()
+		case intKind:
+			truth = arg1.Int() < arg2.Int()
+		case stringKind:
+			truth = arg1.String() < arg2.String()
+		case uintKind:
+			truth = arg1.Uint() < arg2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 reflect.Value) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 reflect.Value) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 reflect.Value) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+	htmlNull = []byte("\uFFFD")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '\000':
+			html = htmlNull
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if !strings.ContainsAny(s, "'\"&<>\000") {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\u003C`)
+	jsGt        = []byte(`\u003E`)
+	jsAmp       = []byte(`\u0026`)
+	jsEq        = []byte(`\u003D`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			case '&':
+				w.Write(jsAmp)
+			case '=':
+				w.Write(jsEq)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>', '&', '=':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
diff --git a/internal/backport/text/template/helper.go b/internal/backport/text/template/helper.go
new file mode 100644
index 0000000..d8e84ab
--- /dev/null
+++ b/internal/backport/text/template/helper.go
@@ -0,0 +1,179 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates easier.
+
+package template
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"golang.org/x/website/internal/backport/path"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// Functions and methods to parse templates.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//	var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the base name and
+// parsed contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
+// named "foo", while "a/foo" is unavailable.
+func ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(nil, readFileOS, filenames...)
+}
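+
+// Usage sketch (illustrative only; "a.tmpl" and "b.tmpl" are hypothetical files,
+// and os.Stdout assumes an imported os package):
+//
+//	t, err := ParseFiles("a.tmpl", "b.tmpl")
+//	if err != nil {
+//		// handle the error
+//	}
+//	err = t.ExecuteTemplate(os.Stdout, "b.tmpl", nil)
+//
+// Each file is registered under its base name, so a later file shadows an
+// earlier template of the same name.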
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+// Since the templates created by ParseFiles are named by the base
+// names of the argument files, t should usually have the name of one
+// of the (base) names of the files. If it does not, depending on t's
+// contents before calling ParseFiles, t.Execute may fail. In that
+// case use t.ExecuteTemplate to execute a valid template.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+	t.init()
+	return parseFiles(t, readFileOS, filenames...)
+}
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
+	if len(filenames) == 0 {
+		// Not really a problem, but be consistent.
+		return nil, fmt.Errorf("template: no files named in call to ParseFiles")
+	}
+	for _, filename := range filenames {
+		name, b, err := readFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		s := string(b)
+		// First template becomes return value if not already defined,
+		// and we use that one for subsequent New calls to associate
+		// all the templates together. Also, if this file has the same name
+		// as t, this file becomes the contents of t, so
+		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
+		// works. Otherwise we create a new template associated with t.
+		var tmpl *Template
+		if t == nil {
+			t = New(name)
+		}
+		if name == t.Name() {
+			tmpl = t
+		} else {
+			tmpl = t.New(name)
+		}
+		_, err = tmpl.Parse(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from
+// the files identified by the pattern. The files are matched according to the
+// semantics of filepath.Match, and the pattern must match at least one file.
+// The returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(nil, pattern)
+}
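+
+// Usage sketch (illustrative only; the "templates" directory is hypothetical):
+//
+//	t := Must(ParseGlob("templates/*.tmpl"))
+//
+// Must panics unless the pattern matches at least one parseable file.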
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The files are matched
+// according to the semantics of filepath.Match, and the pattern must match at
+// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
+// list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+	t.init()
+	return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+	filenames, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, err
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+	}
+	return parseFiles(t, readFileOS, filenames...)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
+	return parseFS(nil, fsys, patterns)
+}
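+
+// Usage sketch (illustrative only), using the backported osfs package the way
+// the tests in multi_test.go do; the "templates" directory is hypothetical:
+//
+//	fsys := osfs.DirFS("templates")
+//	t := Must(ParseFS(fsys, "*.tmpl"))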
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
+	t.init()
+	return parseFS(t, fsys, patterns)
+}
+
+func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
+	var filenames []string
+	for _, pattern := range patterns {
+		list, err := fs.Glob(fsys, pattern)
+		if err != nil {
+			return nil, err
+		}
+		if len(list) == 0 {
+			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+		}
+		filenames = append(filenames, list...)
+	}
+	return parseFiles(t, readFileFS(fsys), filenames...)
+}
+
+func readFileOS(file string) (name string, b []byte, err error) {
+	name = filepath.Base(file)
+	b, err = ioutil.ReadFile(file)
+	return
+}
+
+func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
+	return func(file string) (name string, b []byte, err error) {
+		name = path.Base(file)
+		b, err = fs.ReadFile(fsys, file)
+		return
+	}
+}
diff --git a/internal/backport/text/template/multi_test.go b/internal/backport/text/template/multi_test.go
new file mode 100644
index 0000000..457cac4
--- /dev/null
+++ b/internal/backport/text/template/multi_test.go
@@ -0,0 +1,455 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+// Tests for multiple-template parsing and execution.
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"golang.org/x/website/internal/backport/osfs"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+const (
+	noError  = true
+	hasError = false
+)
+
+type multiParseTest struct {
+	name    string
+	input   string
+	ok      bool
+	names   []string
+	results []string
+}
+
+var multiParseTests = []multiParseTest{
+	{"empty", "", noError,
+		nil,
+		nil},
+	{"one", `{{define "foo"}} FOO {{end}}`, noError,
+		[]string{"foo"},
+		[]string{" FOO "}},
+	{"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
+		[]string{"foo", "bar"},
+		[]string{" FOO ", " BAR "}},
+	// errors
+	{"missing end", `{{define "foo"}} FOO `, hasError,
+		nil,
+		nil},
+	{"malformed name", `{{define "foo}} FOO `, hasError,
+		nil,
+		nil},
+}
+
+func TestMultiParse(t *testing.T) {
+	for _, test := range multiParseTests {
+		template, err := New("root").Parse(test.input)
+		switch {
+		case err == nil && !test.ok:
+			t.Errorf("%q: expected error; got none", test.name)
+			continue
+		case err != nil && test.ok:
+			t.Errorf("%q: unexpected error: %v", test.name, err)
+			continue
+		case err != nil && !test.ok:
+			// expected error, got one
+			if *debug {
+				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+			}
+			continue
+		}
+		if template == nil {
+			continue
+		}
+		if len(template.tmpl) != len(test.names)+1 { // +1 for root
+			t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl))
+			continue
+		}
+		for i, name := range test.names {
+			tmpl, ok := template.tmpl[name]
+			if !ok {
+				t.Errorf("%s: can't find template %q", test.name, name)
+				continue
+			}
+			result := tmpl.Root.String()
+			if result != test.results[i] {
+				t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
+			}
+		}
+	}
+}
+
+var multiExecTests = []execTest{
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+	{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+	{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+	{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+	{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+	{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+	{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+	// User-defined function: test argument evaluator.
+	{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+	{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const multiText1 = `
+	{{define "x"}}TEXT{{end}}
+	{{define "dotV"}}{{.V}}{{end}}
+`
+
+const multiText2 = `
+	{{define "dot"}}{{.}}{{end}}
+	{{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestMultiExecute(t *testing.T) {
+	// Declare a couple of templates first.
+	template, err := New("root").Parse(multiText1)
+	if err != nil {
+		t.Fatalf("parse error for 1: %s", err)
+	}
+	_, err = template.Parse(multiText2)
+	if err != nil {
+		t.Fatalf("parse error for 2: %s", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFiles(t *testing.T) {
+	_, err := ParseFiles("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	template := New("root")
+	_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseGlob(t *testing.T) {
+	_, err := ParseGlob("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	_, err = New("error").ParseGlob("[x")
+	if err == nil {
+		t.Error("expected error for bad pattern; got none")
+	}
+	template := New("root")
+	_, err = template.ParseGlob("testdata/file*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFS(t *testing.T) {
+	fs := osfs.DirFS("testdata")
+
+	{
+		_, err := ParseFS(fs, "DOES NOT EXIST")
+		if err == nil {
+			t.Error("expected error for non-existent file; got none")
+		}
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file*.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+}
+
+// In these tests, actual content (not just template definitions) comes from the parsed files.
+
+var templateFileExecTests = []execTest{
+	{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
+}
+
+func TestParseFilesWithData(t *testing.T) {
+	template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+func TestParseGlobWithData(t *testing.T) {
+	template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+const (
+	cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
+	cloneText2 = `{{define "b"}}b{{end}}`
+	cloneText3 = `{{define "c"}}root{{end}}`
+	cloneText4 = `{{define "c"}}clone{{end}}`
+)
+
+func TestClone(t *testing.T) {
+	// Create some templates and clone the root.
+	root, err := New("root").Parse(cloneText1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = root.Parse(cloneText2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	clone := Must(root.Clone())
+	// Add variants to both.
+	_, err = root.Parse(cloneText3)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = clone.Parse(cloneText4)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Verify that the clone is self-consistent.
+	for k, v := range clone.tmpl {
+		if k == clone.name && v.tmpl[k] != clone {
+			t.Error("clone does not contain root")
+		}
+		if v != v.tmpl[v.name] {
+			t.Errorf("clone does not contain self for %q", k)
+		}
+	}
+	// Execute root.
+	var b bytes.Buffer
+	err = root.ExecuteTemplate(&b, "a", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if b.String() != "broot" {
+		t.Errorf("expected %q got %q", "broot", b.String())
+	}
+	// Execute copy.
+	b.Reset()
+	err = clone.ExecuteTemplate(&b, "a", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if b.String() != "bclone" {
+		t.Errorf("expected %q got %q", "bclone", b.String())
+	}
+}
+
+func TestAddParseTree(t *testing.T) {
+	// Create some templates.
+	root, err := New("root").Parse(cloneText1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = root.Parse(cloneText2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add a new parse tree.
+	tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins())
+	if err != nil {
+		t.Fatal(err)
+	}
+	added, err := root.AddParseTree("c", tree["c"])
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Execute.
+	var b bytes.Buffer
+	err = added.ExecuteTemplate(&b, "a", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if b.String() != "broot" {
+		t.Errorf("expected %q got %q", "broot", b.String())
+	}
+}
+
+// Issue 7032
+func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
+	master := "{{define \"master\"}}{{end}}"
+	tmpl := New("master")
+	tree, err := parse.Parse("master", master, "", "", nil)
+	if err != nil {
+		t.Fatalf("unexpected parse err: %v", err)
+	}
+	masterTree := tree["master"]
+	tmpl.AddParseTree("master", masterTree) // used to panic
+}
+
+func TestRedefinition(t *testing.T) {
+	var tmpl *Template
+	var err error
+	if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parse 1: %v", err)
+	}
+	if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+	if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+}
+
+// Issue 10879
+func TestEmptyTemplateCloneCrash(t *testing.T) {
+	t1 := New("base")
+	t1.Clone() // used to panic
+}
+
+// Issue 10910, 10926
+func TestTemplateLookUp(t *testing.T) {
+	t1 := New("foo")
+	if t1.Lookup("foo") != nil {
+		t.Error("Lookup returned non-nil value for undefined template foo")
+	}
+	t1.New("bar")
+	if t1.Lookup("bar") != nil {
+		t.Error("Lookup returned non-nil value for undefined template bar")
+	}
+	t1.Parse(`{{define "foo"}}test{{end}}`)
+	if t1.Lookup("foo") == nil {
+		t.Error("Lookup returned nil value for defined template")
+	}
+}
+
+func TestNew(t *testing.T) {
+	// template with same name already exists
+	t1, _ := New("test").Parse(`{{define "test"}}foo{{end}}`)
+	t2 := t1.New("test")
+
+	if t1.common != t2.common {
+		t.Errorf("t1 & t2 didn't share common struct; got %v != %v", t1.common, t2.common)
+	}
+	if t1.Tree == nil {
+		t.Error("defined template got nil Tree")
+	}
+	if t2.Tree != nil {
+		t.Error("undefined template got non-nil Tree")
+	}
+
+	containsT1 := false
+	for _, tmpl := range t1.Templates() {
+		if tmpl == t2 {
+			t.Error("Templates included undefined template")
+		}
+		if tmpl == t1 {
+			containsT1 = true
+		}
+	}
+	if !containsT1 {
+		t.Error("Templates didn't include defined template")
+	}
+}
+
+func TestParse(t *testing.T) {
+	// In multiple calls to Parse with the same receiver template, only one call
+	// can contain text other than space, comments, and template definitions
+	t1 := New("test")
+	if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+}
+
+func TestEmptyTemplate(t *testing.T) {
+	cases := []struct {
+		defn []string
+		in   string
+		want string
+	}{
+		{[]string{"x", "y"}, "", "y"},
+		{[]string{""}, "once", ""},
+		{[]string{"", ""}, "twice", ""},
+		{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
+		{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
+		{[]string{"{{.}}", ""}, "twice", ""},
+	}
+
+	for i, c := range cases {
+		root := New("root")
+
+		var (
+			m   *Template
+			err error
+		)
+		for _, d := range c.defn {
+			m, err = root.New(c.in).Parse(d)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		buf := &bytes.Buffer{}
+		if err := m.Execute(buf, c.in); err != nil {
+			t.Error(i, err)
+			continue
+		}
+		if buf.String() != c.want {
+			t.Errorf("expected string %q: got %q", c.want, buf.String())
+		}
+	}
+}
+
+// Issue 19294 was a regression in 1.8 caused by the handling of empty
+// templates added in that release, which got different answers depending
+// on the order templates appeared in the internal map.
+func TestIssue19294(t *testing.T) {
+	// The empty block in "xhtml" should be replaced during execution
+	// by the contents of "stylesheet", but if the internal map associating
+	// names with templates is built in the wrong order, the empty block
+	// looks non-empty and this doesn't happen.
+	var inlined = map[string]string{
+		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
+		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
+	}
+	all := []string{"stylesheet", "xhtml"}
+	for i := 0; i < 100; i++ {
+		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, name := range all {
+			_, err := res.New(name).Parse(inlined[name])
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		var buf bytes.Buffer
+		res.Execute(&buf, 0)
+		if buf.String() != "stylesheet" {
+			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
+		}
+	}
+}
diff --git a/internal/backport/text/template/option.go b/internal/backport/text/template/option.go
new file mode 100644
index 0000000..addce2d
--- /dev/null
+++ b/internal/backport/text/template/option.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code to handle template options.
+
+package template
+
+import "strings"
+
+// missingKeyAction defines how to respond to indexing a map with a key that is not present.
+type missingKeyAction int
+
+const (
+	mapInvalid   missingKeyAction = iota // Return an invalid reflect.Value.
+	mapZeroValue                         // Return the zero value for the map element.
+	mapError                             // Error out
+)
+
+type option struct {
+	missingKey missingKeyAction
+}
+
+// Option sets options for the template. Options are described by
+// strings, either a simple string or "key=value". There can be at
+// most one equals sign in an option string. If the option string
+// is unrecognized or otherwise invalid, Option panics.
+//
+// Known options:
+//
+// missingkey: Control the behavior during execution if a map is
+// indexed with a key that is not present in the map.
+//	"missingkey=default" or "missingkey=invalid"
+//		The default behavior: Do nothing and continue execution.
+//		If printed, the result of the index operation is the string
+//		"<no value>".
+//	"missingkey=zero"
+//		The operation returns the zero value for the map type's element.
+//	"missingkey=error"
+//		Execution stops immediately with an error.
+//
+func (t *Template) Option(opt ...string) *Template {
+	t.init()
+	for _, s := range opt {
+		t.setOption(s)
+	}
+	return t
+}
+
+func (t *Template) setOption(opt string) {
+	if opt == "" {
+		panic("empty option string")
+	}
+	elems := strings.Split(opt, "=")
+	switch len(elems) {
+	case 2:
+		// key=value
+		switch elems[0] {
+		case "missingkey":
+			switch elems[1] {
+			case "invalid", "default":
+				t.option.missingKey = mapInvalid
+				return
+			case "zero":
+				t.option.missingKey = mapZeroValue
+				return
+			case "error":
+				t.option.missingKey = mapError
+				return
+			}
+		}
+	}
+	panic("unrecognized option: " + opt)
+}
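+
+// The sketch below is illustrative only and not part of the upstream file: it
+// shows how the "missingkey" option changes execution. The template text and
+// data are assumptions for demonstration.
+//
+//	var buf bytes.Buffer
+//	t := template.Must(template.New("m").Parse(`{{.Missing}}`))
+//
+//	// Default behavior: the missing key renders as "<no value>".
+//	_ = t.Execute(&buf, map[string]string{})
+//
+//	// With missingkey=error, the same execution stops with an error such as
+//	// `map has no entry for key "Missing"`.
+//	t.Option("missingkey=error")
+//	err := t.Execute(&buf, map[string]string{})
+//	fmt.Println(err != nil) // true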
diff --git a/internal/backport/text/template/parse/lex.go b/internal/backport/text/template/parse/lex.go
new file mode 100644
index 0000000..95e3377
--- /dev/null
+++ b/internal/backport/text/template/parse/lex.go
@@ -0,0 +1,682 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ  itemType // The type of this item.
+	pos  Pos      // The starting position, in bytes, of this item in the input string.
+	val  string   // The value of this item.
+	line int      // The line number at the start of this item.
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case i.typ > itemKeyword:
+		return fmt.Sprintf("<%s>", i.val)
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+	itemError        itemType = iota // error occurred; value is text of error
+	itemBool                         // boolean constant
+	itemChar                         // printable ASCII character; grab bag for comma etc.
+	itemCharConstant                 // character constant
+	itemComment                      // comment text
+	itemComplex                      // complex constant (1+2i); imaginary is just a number
+	itemAssign                       // equals ('=') introducing an assignment
+	itemDeclare                      // colon-equals (':=') introducing a declaration
+	itemEOF
+	itemField      // alphanumeric identifier starting with '.'
+	itemIdentifier // alphanumeric identifier not starting with '.'
+	itemLeftDelim  // left action delimiter
+	itemLeftParen  // '(' inside action
+	itemNumber     // simple number, including imaginary
+	itemPipe       // pipe symbol
+	itemRawString  // raw quoted string (includes quotes)
+	itemRightDelim // right action delimiter
+	itemRightParen // ')' inside action
+	itemSpace      // run of spaces separating arguments
+	itemString     // quoted string (includes quotes)
+	itemText       // plain text
+	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
+	// Keywords appear after all the rest.
+	itemKeyword  // used only to delimit the keywords
+	itemBlock    // block keyword
+	itemBreak    // break keyword
+	itemContinue // continue keyword
+	itemDot      // the cursor, spelled '.'
+	itemDefine   // define keyword
+	itemElse     // else keyword
+	itemEnd      // end keyword
+	itemIf       // if keyword
+	itemNil      // the untyped nil constant, easiest to treat as a keyword
+	itemRange    // range keyword
+	itemTemplate // template keyword
+	itemWith     // with keyword
+)
+
+var key = map[string]itemType{
+	".":        itemDot,
+	"block":    itemBlock,
+	"break":    itemBreak,
+	"continue": itemContinue,
+	"define":   itemDefine,
+	"else":     itemElse,
+	"end":      itemEnd,
+	"if":       itemIf,
+	"range":    itemRange,
+	"nil":      itemNil,
+	"template": itemTemplate,
+	"with":     itemWith,
+}
+
+const eof = -1
+
+// Trimming spaces.
+// If the action begins "{{- " rather than "{{", then all space/tab/newlines
+// preceding the action are trimmed; conversely if it ends " -}}" the
+// leading spaces are trimmed. This is done entirely in the lexer; the
+// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
+// to be present to avoid ambiguity with things like "{{-3}}". It reads
+// better with the space present anyway. For simplicity, only ASCII
+// does the job.
+const (
+	spaceChars    = " \t\r\n"  // These are the space characters defined by Go itself.
+	trimMarker    = '-'        // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
+	trimMarkerLen = Pos(1 + 1) // marker plus space before or after
+)
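+
+// For example, mirroring the "trimming spaces" case in lex_test.go, the input
+//
+//	"hello- {{- 3 -}} -world"
+//
+// is lexed as the text "hello-", an action containing the number 3, and the
+// text "-world": the spaces next to the trim markers never reach the parser.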
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	name        string    // the name of the input; used only for error reports
+	input       string    // the string being scanned
+	leftDelim   string    // start of action
+	rightDelim  string    // end of action
+	emitComment bool      // emit itemComment tokens.
+	pos         Pos       // current position in the input
+	start       Pos       // start position of this item
+	width       Pos       // width of last rune read from input
+	items       chan item // channel of scanned items
+	parenDepth  int       // nesting depth of ( ) exprs
+	line        int       // 1+number of newlines seen
+	startLine   int       // start line of this item
+	breakOK     bool      // break keyword allowed
+	continueOK  bool      // continue keyword allowed
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+	if int(l.pos) >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = Pos(w)
+	l.pos += l.width
+	if r == '\n' {
+		l.line++
+	}
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+	l.pos -= l.width
+	// Correct newline count.
+	if l.width == 1 && l.input[l.pos] == '\n' {
+		l.line--
+	}
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+	l.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}
+	l.start = l.pos
+	l.startLine = l.line
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+	l.line += strings.Count(l.input[l.start:l.pos], "\n")
+	l.start = l.pos
+	l.startLine = l.line
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+	if strings.ContainsRune(valid, l.next()) {
+		return true
+	}
+	l.backup()
+	return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+	for strings.ContainsRune(valid, l.next()) {
+	}
+	l.backup()
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
+	return nil
+}
+
+// nextItem returns the next item from the input.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) nextItem() item {
+	return <-l.items
+}
+
+// drain drains the output so the lexing goroutine will exit.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) drain() {
+	for range l.items {
+	}
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string, emitComment bool) *lexer {
+	if left == "" {
+		left = leftDelim
+	}
+	if right == "" {
+		right = rightDelim
+	}
+	l := &lexer{
+		name:        name,
+		input:       input,
+		leftDelim:   left,
+		rightDelim:  right,
+		emitComment: emitComment,
+		items:       make(chan item),
+		line:        1,
+		startLine:   1,
+	}
+	go l.run()
+	return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+	for state := lexText; state != nil; {
+		state = state(l)
+	}
+	close(l.items)
+}
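+
+// A minimal consumer of the lexer, for illustration only (the real consumer
+// is the parser in this package; the template text is an assumption):
+//
+//	l := lex("example", "hello {{.}}", "", "", false)
+//	for {
+//		it := l.nextItem()
+//		fmt.Println(it)
+//		if it.typ == itemEOF || it.typ == itemError {
+//			break
+//		}
+//	}
+//
+// Lexing runs in its own goroutine; items flow to the caller over l.items,
+// and an abandoned parse must call drain so the goroutine can exit.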
+
+// state functions
+
+const (
+	leftDelim    = "{{"
+	rightDelim   = "}}"
+	leftComment  = "/*"
+	rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+	l.width = 0
+	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
+		ldn := Pos(len(l.leftDelim))
+		l.pos += Pos(x)
+		trimLength := Pos(0)
+		if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
+			trimLength = rightTrimLength(l.input[l.start:l.pos])
+		}
+		l.pos -= trimLength
+		if l.pos > l.start {
+			l.line += strings.Count(l.input[l.start:l.pos], "\n")
+			l.emit(itemText)
+		}
+		l.pos += trimLength
+		l.ignore()
+		return lexLeftDelim
+	}
+	l.pos = Pos(len(l.input))
+	// Correctly reached EOF.
+	if l.pos > l.start {
+		l.line += strings.Count(l.input[l.start:l.pos], "\n")
+		l.emit(itemText)
+	}
+	l.emit(itemEOF)
+	return nil
+}
+
+// rightTrimLength returns the length of the spaces at the end of the string.
+func rightTrimLength(s string) Pos {
+	return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
+}
+
+// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
+func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
+	if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
+		return true, true
+	}
+	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
+		return true, false
+	}
+	return false, false
+}
+
+// leftTrimLength returns the length of the spaces at the beginning of the string.
+func leftTrimLength(s string) Pos {
+	return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
+func lexLeftDelim(l *lexer) stateFn {
+	l.pos += Pos(len(l.leftDelim))
+	trimSpace := hasLeftTrimMarker(l.input[l.pos:])
+	afterMarker := Pos(0)
+	if trimSpace {
+		afterMarker = trimMarkerLen
+	}
+	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
+		l.pos += afterMarker
+		l.ignore()
+		return lexComment
+	}
+	l.emit(itemLeftDelim)
+	l.pos += afterMarker
+	l.ignore()
+	l.parenDepth = 0
+	return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+	l.pos += Pos(len(leftComment))
+	i := strings.Index(l.input[l.pos:], rightComment)
+	if i < 0 {
+		return l.errorf("unclosed comment")
+	}
+	l.pos += Pos(i + len(rightComment))
+	delim, trimSpace := l.atRightDelim()
+	if !delim {
+		return l.errorf("comment ends before closing delimiter")
+	}
+	if l.emitComment {
+		l.emit(itemComment)
+	}
+	if trimSpace {
+		l.pos += trimMarkerLen
+	}
+	l.pos += Pos(len(l.rightDelim))
+	if trimSpace {
+		l.pos += leftTrimLength(l.input[l.pos:])
+	}
+	l.ignore()
+	return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
+func lexRightDelim(l *lexer) stateFn {
+	trimSpace := hasRightTrimMarker(l.input[l.pos:])
+	if trimSpace {
+		l.pos += trimMarkerLen
+		l.ignore()
+	}
+	l.pos += Pos(len(l.rightDelim))
+	l.emit(itemRightDelim)
+	if trimSpace {
+		l.pos += leftTrimLength(l.input[l.pos:])
+		l.ignore()
+	}
+	return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+	// Either number, quoted string, or identifier.
+	// Spaces separate arguments; runs of spaces turn into itemSpace.
+	// Pipe symbols separate and are emitted.
+	delim, _ := l.atRightDelim()
+	if delim {
+		if l.parenDepth == 0 {
+			return lexRightDelim
+		}
+		return l.errorf("unclosed left paren")
+	}
+	switch r := l.next(); {
+	case r == eof:
+		return l.errorf("unclosed action")
+	case isSpace(r):
+		l.backup() // Put space back in case we have " -}}".
+		return lexSpace
+	case r == '=':
+		l.emit(itemAssign)
+	case r == ':':
+		if l.next() != '=' {
+			return l.errorf("expected :=")
+		}
+		l.emit(itemDeclare)
+	case r == '|':
+		l.emit(itemPipe)
+	case r == '"':
+		return lexQuote
+	case r == '`':
+		return lexRawQuote
+	case r == '$':
+		return lexVariable
+	case r == '\'':
+		return lexChar
+	case r == '.':
+		// special look-ahead for ".field" so we don't break l.backup().
+		if l.pos < Pos(len(l.input)) {
+			r := l.input[l.pos]
+			if r < '0' || '9' < r {
+				return lexField
+			}
+		}
+		fallthrough // '.' can start a number.
+	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+		l.backup()
+		return lexNumber
+	case isAlphaNumeric(r):
+		l.backup()
+		return lexIdentifier
+	case r == '(':
+		l.emit(itemLeftParen)
+		l.parenDepth++
+	case r == ')':
+		l.emit(itemRightParen)
+		l.parenDepth--
+		if l.parenDepth < 0 {
+			return l.errorf("unexpected right paren %#U", r)
+		}
+	case r <= unicode.MaxASCII && unicode.IsPrint(r):
+		l.emit(itemChar)
+	default:
+		return l.errorf("unrecognized character in action: %#U", r)
+	}
+	return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// We have not consumed the first space, which is known to be present.
+// Take care if there is a trim-marked right delimiter, which starts with a space.
+func lexSpace(l *lexer) stateFn {
+	var r rune
+	var numSpaces int
+	for {
+		r = l.peek()
+		if !isSpace(r) {
+			break
+		}
+		l.next()
+		numSpaces++
+	}
+	// Be careful about a trim-marked closing delimiter, which has a minus
+	// after a space. We know there is a space, so check for the '-' that might follow.
+	if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
+		l.backup() // Before the space.
+		if numSpaces == 1 {
+			return lexRightDelim // On the delim, so go right to that.
+		}
+	}
+	l.emit(itemSpace)
+	return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case isAlphaNumeric(r):
+			// absorb.
+		default:
+			l.backup()
+			word := l.input[l.start:l.pos]
+			if !l.atTerminator() {
+				return l.errorf("bad character %#U", r)
+			}
+			switch {
+			case key[word] > itemKeyword:
+				item := key[word]
+				if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
+					l.emit(itemIdentifier)
+				} else {
+					l.emit(item)
+				}
+			case word[0] == '.':
+				l.emit(itemField)
+			case word == "true", word == "false":
+				l.emit(itemBool)
+			default:
+				l.emit(itemIdentifier)
+			}
+			break Loop
+		}
+	}
+	return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+	return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "$".
+		l.emit(itemVariable)
+		return lexInsideAction
+	}
+	return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+		if typ == itemVariable {
+			l.emit(itemVariable)
+		} else {
+			l.emit(itemDot)
+		}
+		return lexInsideAction
+	}
+	var r rune
+	for {
+		r = l.next()
+		if !isAlphaNumeric(r) {
+			l.backup()
+			break
+		}
+	}
+	if !l.atTerminator() {
+		return l.errorf("bad character %#U", r)
+	}
+	l.emit(typ)
+	return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+	r := l.peek()
+	if isSpace(r) {
+		return true
+	}
+	switch r {
+	case eof, '.', ',', '|', ':', ')', '(':
+		return true
+	}
+	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
+	// succeed but should fail) but only in extremely rare cases caused by willfully
+	// bad choice of delimiter.
+	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
+		return true
+	}
+	return false
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+	for {
+		switch l.next() {
+		case '\\':
+			if r := l.next(); r != eof && r != '\n' {
+				break
+			}
+			fallthrough
+		case eof, '\n':
+			return l.errorf("unterminated character constant")
+		case '\'':
+			break Loop
+		}
+	}
+	l.emit(itemCharConstant)
+	return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+	if !l.scanNumber() {
+		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+	}
+	if sign := l.peek(); sign == '+' || sign == '-' {
+		// Complex: 1+2i. No spaces, must end in 'i'.
+		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+		}
+		l.emit(itemComplex)
+	} else {
+		l.emit(itemNumber)
+	}
+	return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+	// Optional leading sign.
+	l.accept("+-")
+	// Is it hex?
+	digits := "0123456789_"
+	if l.accept("0") {
+		// Note: Leading 0 does not mean octal in floats.
+		if l.accept("xX") {
+			digits = "0123456789abcdefABCDEF_"
+		} else if l.accept("oO") {
+			digits = "01234567_"
+		} else if l.accept("bB") {
+			digits = "01_"
+		}
+	}
+	l.acceptRun(digits)
+	if l.accept(".") {
+		l.acceptRun(digits)
+	}
+	if len(digits) == 10+1 && l.accept("eE") {
+		l.accept("+-")
+		l.acceptRun("0123456789_")
+	}
+	if len(digits) == 16+6+1 && l.accept("pP") {
+		l.accept("+-")
+		l.acceptRun("0123456789_")
+	}
+	// Is it imaginary?
+	l.accept("i")
+	// Next thing mustn't be alphanumeric.
+	if isAlphaNumeric(l.peek()) {
+		l.next()
+		return false
+	}
+	return true
+}
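+
+// For reference, the "numbers" case in lex_test.go exercises the forms this
+// scanner accepts, including prefixed integers, floats, digit-separated
+// literals, and imaginaries: 0x14, 1e3, +1.2e-4, 1_2, 0x1.e_fp4, 4.2i, and
+// the complex constant 1+2i (the last is emitted as itemComplex by lexNumber
+// above).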
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+	for {
+		switch l.next() {
+		case '\\':
+			if r := l.next(); r != eof && r != '\n' {
+				break
+			}
+			fallthrough
+		case eof, '\n':
+			return l.errorf("unterminated quoted string")
+		case '"':
+			break Loop
+		}
+	}
+	l.emit(itemString)
+	return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+	for {
+		switch l.next() {
+		case eof:
+			return l.errorf("unterminated raw quoted string")
+		case '`':
+			break Loop
+		}
+	}
+	l.emit(itemRawString)
+	return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t' || r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
+
+func hasLeftTrimMarker(s string) bool {
+	return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
+}
+
+func hasRightTrimMarker(s string) bool {
+	return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
+}
diff --git a/internal/backport/text/template/parse/lex_test.go b/internal/backport/text/template/parse/lex_test.go
new file mode 100644
index 0000000..df6aabf
--- /dev/null
+++ b/internal/backport/text/template/parse/lex_test.go
@@ -0,0 +1,559 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+	"fmt"
+	"testing"
+)
+
+// Make the types prettyprint.
+var itemName = map[itemType]string{
+	itemError:        "error",
+	itemBool:         "bool",
+	itemChar:         "char",
+	itemCharConstant: "charconst",
+	itemComment:      "comment",
+	itemComplex:      "complex",
+	itemDeclare:      ":=",
+	itemEOF:          "EOF",
+	itemField:        "field",
+	itemIdentifier:   "identifier",
+	itemLeftDelim:    "left delim",
+	itemLeftParen:    "(",
+	itemNumber:       "number",
+	itemPipe:         "pipe",
+	itemRawString:    "raw string",
+	itemRightDelim:   "right delim",
+	itemRightParen:   ")",
+	itemSpace:        "space",
+	itemString:       "string",
+	itemVariable:     "variable",
+
+	// keywords
+	itemDot:      ".",
+	itemBlock:    "block",
+	itemBreak:    "break",
+	itemContinue: "continue",
+	itemDefine:   "define",
+	itemElse:     "else",
+	itemIf:       "if",
+	itemEnd:      "end",
+	itemNil:      "nil",
+	itemRange:    "range",
+	itemTemplate: "template",
+	itemWith:     "with",
+}
+
+func (i itemType) String() string {
+	s := itemName[i]
+	if s == "" {
+		return fmt.Sprintf("item%d", int(i))
+	}
+	return s
+}
+
+type lexTest struct {
+	name  string
+	input string
+	items []item
+}
+
+func mkItem(typ itemType, text string) item {
+	return item{
+		typ: typ,
+		val: text,
+	}
+}
+
+var (
+	tDot        = mkItem(itemDot, ".")
+	tBlock      = mkItem(itemBlock, "block")
+	tEOF        = mkItem(itemEOF, "")
+	tFor        = mkItem(itemIdentifier, "for")
+	tLeft       = mkItem(itemLeftDelim, "{{")
+	tLpar       = mkItem(itemLeftParen, "(")
+	tPipe       = mkItem(itemPipe, "|")
+	tQuote      = mkItem(itemString, `"abc \n\t\" "`)
+	tRange      = mkItem(itemRange, "range")
+	tRight      = mkItem(itemRightDelim, "}}")
+	tRpar       = mkItem(itemRightParen, ")")
+	tSpace      = mkItem(itemSpace, " ")
+	raw         = "`" + `abc\n\t\" ` + "`"
+	rawNL       = "`now is{{\n}}the time`" // Contains newline inside raw quote.
+	tRawQuote   = mkItem(itemRawString, raw)
+	tRawQuoteNL = mkItem(itemRawString, rawNL)
+)
+
+var lexTests = []lexTest{
+	{"empty", "", []item{tEOF}},
+	{"spaces", " \t\n", []item{mkItem(itemText, " \t\n"), tEOF}},
+	{"text", `now is the time`, []item{mkItem(itemText, "now is the time"), tEOF}},
+	{"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+		mkItem(itemText, "hello-"),
+		mkItem(itemComment, "/* this is a comment */"),
+		mkItem(itemText, "-world"),
+		tEOF,
+	}},
+	{"punctuation", "{{,@% }}", []item{
+		tLeft,
+		mkItem(itemChar, ","),
+		mkItem(itemChar, "@"),
+		mkItem(itemChar, "%"),
+		tSpace,
+		tRight,
+		tEOF,
+	}},
+	{"parens", "{{((3))}}", []item{
+		tLeft,
+		tLpar,
+		tLpar,
+		mkItem(itemNumber, "3"),
+		tRpar,
+		tRpar,
+		tRight,
+		tEOF,
+	}},
+	{"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
+	{"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}},
+	{"block", `{{block "foo" .}}`, []item{
+		tLeft, tBlock, tSpace, mkItem(itemString, `"foo"`), tSpace, tDot, tRight, tEOF,
+	}},
+	{"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
+	{"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
+	{"raw quote with newline", "{{" + rawNL + "}}", []item{tLeft, tRawQuoteNL, tRight, tEOF}},
+	{"numbers", "{{1 02 0x14 0X14 -7.2i 1e3 1E3 +1.2e-4 4.2i 1+2i 1_2 0x1.e_fp4 0X1.E_FP4}}", []item{
+		tLeft,
+		mkItem(itemNumber, "1"),
+		tSpace,
+		mkItem(itemNumber, "02"),
+		tSpace,
+		mkItem(itemNumber, "0x14"),
+		tSpace,
+		mkItem(itemNumber, "0X14"),
+		tSpace,
+		mkItem(itemNumber, "-7.2i"),
+		tSpace,
+		mkItem(itemNumber, "1e3"),
+		tSpace,
+		mkItem(itemNumber, "1E3"),
+		tSpace,
+		mkItem(itemNumber, "+1.2e-4"),
+		tSpace,
+		mkItem(itemNumber, "4.2i"),
+		tSpace,
+		mkItem(itemComplex, "1+2i"),
+		tSpace,
+		mkItem(itemNumber, "1_2"),
+		tSpace,
+		mkItem(itemNumber, "0x1.e_fp4"),
+		tSpace,
+		mkItem(itemNumber, "0X1.E_FP4"),
+		tRight,
+		tEOF,
+	}},
+	{"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
+		tLeft,
+		mkItem(itemCharConstant, `'a'`),
+		tSpace,
+		mkItem(itemCharConstant, `'\n'`),
+		tSpace,
+		mkItem(itemCharConstant, `'\''`),
+		tSpace,
+		mkItem(itemCharConstant, `'\\'`),
+		tSpace,
+		mkItem(itemCharConstant, `'\u00FF'`),
+		tSpace,
+		mkItem(itemCharConstant, `'\xFF'`),
+		tSpace,
+		mkItem(itemCharConstant, `'本'`),
+		tRight,
+		tEOF,
+	}},
+	{"bools", "{{true false}}", []item{
+		tLeft,
+		mkItem(itemBool, "true"),
+		tSpace,
+		mkItem(itemBool, "false"),
+		tRight,
+		tEOF,
+	}},
+	{"dot", "{{.}}", []item{
+		tLeft,
+		tDot,
+		tRight,
+		tEOF,
+	}},
+	{"nil", "{{nil}}", []item{
+		tLeft,
+		mkItem(itemNil, "nil"),
+		tRight,
+		tEOF,
+	}},
+	{"dots", "{{.x . .2 .x.y.z}}", []item{
+		tLeft,
+		mkItem(itemField, ".x"),
+		tSpace,
+		tDot,
+		tSpace,
+		mkItem(itemNumber, ".2"),
+		tSpace,
+		mkItem(itemField, ".x"),
+		mkItem(itemField, ".y"),
+		mkItem(itemField, ".z"),
+		tRight,
+		tEOF,
+	}},
+	{"keywords", "{{range if else end with}}", []item{
+		tLeft,
+		mkItem(itemRange, "range"),
+		tSpace,
+		mkItem(itemIf, "if"),
+		tSpace,
+		mkItem(itemElse, "else"),
+		tSpace,
+		mkItem(itemEnd, "end"),
+		tSpace,
+		mkItem(itemWith, "with"),
+		tRight,
+		tEOF,
+	}},
+	{"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
+		tLeft,
+		mkItem(itemVariable, "$c"),
+		tSpace,
+		mkItem(itemDeclare, ":="),
+		tSpace,
+		mkItem(itemIdentifier, "printf"),
+		tSpace,
+		mkItem(itemVariable, "$"),
+		tSpace,
+		mkItem(itemVariable, "$hello"),
+		tSpace,
+		mkItem(itemVariable, "$23"),
+		tSpace,
+		mkItem(itemVariable, "$"),
+		tSpace,
+		mkItem(itemVariable, "$var"),
+		mkItem(itemField, ".Field"),
+		tSpace,
+		mkItem(itemField, ".Method"),
+		tRight,
+		tEOF,
+	}},
+	{"variable invocation", "{{$x 23}}", []item{
+		tLeft,
+		mkItem(itemVariable, "$x"),
+		tSpace,
+		mkItem(itemNumber, "23"),
+		tRight,
+		tEOF,
+	}},
+	{"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
+		mkItem(itemText, "intro "),
+		tLeft,
+		mkItem(itemIdentifier, "echo"),
+		tSpace,
+		mkItem(itemIdentifier, "hi"),
+		tSpace,
+		mkItem(itemNumber, "1.2"),
+		tSpace,
+		tPipe,
+		mkItem(itemIdentifier, "noargs"),
+		tPipe,
+		mkItem(itemIdentifier, "args"),
+		tSpace,
+		mkItem(itemNumber, "1"),
+		tSpace,
+		mkItem(itemString, `"hi"`),
+		tRight,
+		mkItem(itemText, " outro"),
+		tEOF,
+	}},
+	{"declaration", "{{$v := 3}}", []item{
+		tLeft,
+		mkItem(itemVariable, "$v"),
+		tSpace,
+		mkItem(itemDeclare, ":="),
+		tSpace,
+		mkItem(itemNumber, "3"),
+		tRight,
+		tEOF,
+	}},
+	{"2 declarations", "{{$v , $w := 3}}", []item{
+		tLeft,
+		mkItem(itemVariable, "$v"),
+		tSpace,
+		mkItem(itemChar, ","),
+		tSpace,
+		mkItem(itemVariable, "$w"),
+		tSpace,
+		mkItem(itemDeclare, ":="),
+		tSpace,
+		mkItem(itemNumber, "3"),
+		tRight,
+		tEOF,
+	}},
+	{"field of parenthesized expression", "{{(.X).Y}}", []item{
+		tLeft,
+		tLpar,
+		mkItem(itemField, ".X"),
+		tRpar,
+		mkItem(itemField, ".Y"),
+		tRight,
+		tEOF,
+	}},
+	{"trimming spaces before and after", "hello- {{- 3 -}} -world", []item{
+		mkItem(itemText, "hello-"),
+		tLeft,
+		mkItem(itemNumber, "3"),
+		tRight,
+		mkItem(itemText, "-world"),
+		tEOF,
+	}},
+	{"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{
+		mkItem(itemText, "hello-"),
+		mkItem(itemComment, "/* hello */"),
+		mkItem(itemText, "-world"),
+		tEOF,
+	}},
+	// errors
+	{"badchar", "#{{\x01}}", []item{
+		mkItem(itemText, "#"),
+		tLeft,
+		mkItem(itemError, "unrecognized character in action: U+0001"),
+	}},
+	{"unclosed action", "{{", []item{
+		tLeft,
+		mkItem(itemError, "unclosed action"),
+	}},
+	{"EOF in action", "{{range", []item{
+		tLeft,
+		tRange,
+		mkItem(itemError, "unclosed action"),
+	}},
+	{"unclosed quote", "{{\"\n\"}}", []item{
+		tLeft,
+		mkItem(itemError, "unterminated quoted string"),
+	}},
+	{"unclosed raw quote", "{{`xx}}", []item{
+		tLeft,
+		mkItem(itemError, "unterminated raw quoted string"),
+	}},
+	{"unclosed char constant", "{{'\n}}", []item{
+		tLeft,
+		mkItem(itemError, "unterminated character constant"),
+	}},
+	{"bad number", "{{3k}}", []item{
+		tLeft,
+		mkItem(itemError, `bad number syntax: "3k"`),
+	}},
+	{"unclosed paren", "{{(3}}", []item{
+		tLeft,
+		tLpar,
+		mkItem(itemNumber, "3"),
+		mkItem(itemError, `unclosed left paren`),
+	}},
+	{"extra right paren", "{{3)}}", []item{
+		tLeft,
+		mkItem(itemNumber, "3"),
+		tRpar,
+		mkItem(itemError, `unexpected right paren U+0029 ')'`),
+	}},
+
+	// Fixed bugs
+	// Many elements in an action blew the lookahead until
+	// we made lexInsideAction not loop.
+	{"long pipeline deadlock", "{{|||||}}", []item{
+		tLeft,
+		tPipe,
+		tPipe,
+		tPipe,
+		tPipe,
+		tPipe,
+		tRight,
+		tEOF,
+	}},
+	{"text with bad comment", "hello-{{/*/}}-world", []item{
+		mkItem(itemText, "hello-"),
+		mkItem(itemError, `unclosed comment`),
+	}},
+	{"text with comment close separated from delim", "hello-{{/* */ }}-world", []item{
+		mkItem(itemText, "hello-"),
+		mkItem(itemError, `comment ends before closing delimiter`),
+	}},
+	// This one is an error that we can't catch because it breaks templates with
+	// minimized JavaScript. Should have fixed it before Go 1.1.
+	{"unmatched right delimiter", "hello-{.}}-world", []item{
+		mkItem(itemText, "hello-{.}}-world"),
+		tEOF,
+	}},
+}
+
+// collect gathers the emitted items into a slice.
+func collect(t *lexTest, left, right string) (items []item) {
+	l := lex(t.name, t.input, left, right, true)
+	for {
+		item := l.nextItem()
+		items = append(items, item)
+		if item.typ == itemEOF || item.typ == itemError {
+			break
+		}
+	}
+	return
+}
+
+func equal(i1, i2 []item, checkPos bool) bool {
+	if len(i1) != len(i2) {
+		return false
+	}
+	for k := range i1 {
+		if i1[k].typ != i2[k].typ {
+			return false
+		}
+		if i1[k].val != i2[k].val {
+			return false
+		}
+		if checkPos && i1[k].pos != i2[k].pos {
+			return false
+		}
+		if checkPos && i1[k].line != i2[k].line {
+			return false
+		}
+	}
+	return true
+}
+
+func TestLex(t *testing.T) {
+	for _, test := range lexTests {
+		items := collect(&test, "", "")
+		if !equal(items, test.items, false) {
+			t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items)
+		}
+	}
+}
+
+// Some easy cases from above, but with delimiters $$ and @@
+var lexDelimTests = []lexTest{
+	{"punctuation", "$$,@%{{}}@@", []item{
+		tLeftDelim,
+		mkItem(itemChar, ","),
+		mkItem(itemChar, "@"),
+		mkItem(itemChar, "%"),
+		mkItem(itemChar, "{"),
+		mkItem(itemChar, "{"),
+		mkItem(itemChar, "}"),
+		mkItem(itemChar, "}"),
+		tRightDelim,
+		tEOF,
+	}},
+	{"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
+	{"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
+	{"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
+	{"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
+}
+
+var (
+	tLeftDelim  = mkItem(itemLeftDelim, "$$")
+	tRightDelim = mkItem(itemRightDelim, "@@")
+)
+
+func TestDelims(t *testing.T) {
+	for _, test := range lexDelimTests {
+		items := collect(&test, "$$", "@@")
+		if !equal(items, test.items, false) {
+			t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+		}
+	}
+}
+
+var lexPosTests = []lexTest{
+	{"empty", "", []item{{itemEOF, 0, "", 1}}},
+	{"punctuation", "{{,@%#}}", []item{
+		{itemLeftDelim, 0, "{{", 1},
+		{itemChar, 2, ",", 1},
+		{itemChar, 3, "@", 1},
+		{itemChar, 4, "%", 1},
+		{itemChar, 5, "#", 1},
+		{itemRightDelim, 6, "}}", 1},
+		{itemEOF, 8, "", 1},
+	}},
+	{"sample", "0123{{hello}}xyz", []item{
+		{itemText, 0, "0123", 1},
+		{itemLeftDelim, 4, "{{", 1},
+		{itemIdentifier, 6, "hello", 1},
+		{itemRightDelim, 11, "}}", 1},
+		{itemText, 13, "xyz", 1},
+		{itemEOF, 16, "", 1},
+	}},
+	{"trimafter", "{{x -}}\n{{y}}", []item{
+		{itemLeftDelim, 0, "{{", 1},
+		{itemIdentifier, 2, "x", 1},
+		{itemRightDelim, 5, "}}", 1},
+		{itemLeftDelim, 8, "{{", 2},
+		{itemIdentifier, 10, "y", 2},
+		{itemRightDelim, 11, "}}", 2},
+		{itemEOF, 13, "", 2},
+	}},
+	{"trimbefore", "{{x}}\n{{- y}}", []item{
+		{itemLeftDelim, 0, "{{", 1},
+		{itemIdentifier, 2, "x", 1},
+		{itemRightDelim, 3, "}}", 1},
+		{itemLeftDelim, 6, "{{", 2},
+		{itemIdentifier, 10, "y", 2},
+		{itemRightDelim, 11, "}}", 2},
+		{itemEOF, 13, "", 2},
+	}},
+}
+
+// The other tests don't check position, to make the test cases easier to construct.
+// This one does.
+func TestPos(t *testing.T) {
+	for _, test := range lexPosTests {
+		items := collect(&test, "", "")
+		if !equal(items, test.items, true) {
+			t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+			if len(items) == len(test.items) {
+				// Detailed print; avoid item.String() to expose the position value.
+				for i := range items {
+					if !equal(items[i:i+1], test.items[i:i+1], true) {
+						i1 := items[i]
+						i2 := test.items[i]
+						t.Errorf("\t#%d: got {%v %d %q %d} expected {%v %d %q %d}",
+							i, i1.typ, i1.pos, i1.val, i1.line, i2.typ, i2.pos, i2.val, i2.line)
+					}
+				}
+			}
+		}
+	}
+}
+
+// Test that an error shuts down the lexing goroutine.
+func TestShutdown(t *testing.T) {
+	// We need to duplicate template.Parse here to hold on to the lexer.
+	const text = "erroneous{{define}}{{else}}1234"
+	lexer := lex("foo", text, "{{", "}}", false)
+	_, err := New("root").parseLexer(lexer)
+	if err == nil {
+		t.Fatalf("expected error")
+	}
+	// The error should have drained the input. Therefore, the lexer should be shut down.
+	token, ok := <-lexer.items
+	if ok {
+		t.Fatalf("input was not drained; got %v", token)
+	}
+}
+
+// parseLexer is a local version of parse that lets us pass in the lexer instead of building it.
+// We expect an error, so the tree set and funcs list are explicitly nil.
+func (t *Tree) parseLexer(lex *lexer) (tree *Tree, err error) {
+	defer t.recover(&err)
+	t.ParseName = t.Name
+	t.startParse(nil, lex, map[string]*Tree{})
+	t.parse()
+	t.add()
+	t.stopParse()
+	return t, nil
+}
diff --git a/internal/backport/text/template/parse/node.go b/internal/backport/text/template/parse/node.go
new file mode 100644
index 0000000..a2da634
--- /dev/null
+++ b/internal/backport/text/template/parse/node.go
@@ -0,0 +1,1008 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse nodes.
+
+package parse
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+var textFormat = "%s" // Changed to "%q" in tests for better error messages.
+
+// A Node is an element in the parse tree. The interface is trivial.
+// The interface contains an unexported method so that only
+// types local to this package can satisfy it.
+type Node interface {
+	Type() NodeType
+	String() string
+	// Copy does a deep copy of the Node and all its components.
+	// To avoid type assertions, some XxxNodes also have specialized
+	// CopyXxx methods that return *XxxNode.
+	Copy() Node
+	Position() Pos // byte position of start of node in full original input string
+	// tree returns the containing *Tree.
+	// It is unexported so all implementations of Node are in this package.
+	tree() *Tree
+	// writeTo writes the String output to the builder.
+	writeTo(*strings.Builder)
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Pos represents a byte position in the original input text from which
+// this template was parsed.
+type Pos int
+
+func (p Pos) Position() Pos {
+	return p
+}
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+	return t
+}
+
+const (
+	NodeText       NodeType = iota // Plain text.
+	NodeAction                     // A non-control action such as a field evaluation.
+	NodeBool                       // A boolean constant.
+	NodeBreak                      // A break action.
+	NodeChain                      // A sequence of field accesses.
+	NodeCommand                    // An element of a pipeline.
+	NodeContinue                   // A continue action.
+	NodeDot                        // The cursor, dot.
+	nodeElse                       // An else action. Not added to tree.
+	nodeEnd                        // An end action. Not added to tree.
+	NodeField                      // A field or method name.
+	NodeIdentifier                 // An identifier; always a function name.
+	NodeIf                         // An if action.
+	NodeList                       // A list of Nodes.
+	NodeNil                        // An untyped nil constant.
+	NodeNumber                     // A numerical constant.
+	NodePipe                       // A pipeline of commands.
+	NodeRange                      // A range action.
+	NodeString                     // A string constant.
+	NodeTemplate                   // A template invocation action.
+	NodeVariable                   // A $ variable.
+	NodeWith                       // A with action.
+	NodeComment                    // A comment.
+)
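+
+// The sketch below is illustrative only and not part of the upstream file: it
+// parses a small template body with this package and prints each top-level
+// node of the resulting tree. The template text is an assumption for
+// demonstration.
+//
+//	trees, err := parse.Parse("page", "Hi {{.Name}}", "", "", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, n := range trees["page"].Root.Nodes {
+//		fmt.Printf("%T: %s\n", n, n)
+//	}
+//
+// For this input the loop visits a *parse.TextNode ("Hi ") followed by a
+// *parse.ActionNode ("{{.Name}}").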
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Nodes []Node // The element nodes in lexical order.
+}
+
+func (t *Tree) newList(pos Pos) *ListNode {
+	return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
+}
+
+func (l *ListNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) tree() *Tree {
+	return l.tr
+}
+
+func (l *ListNode) String() string {
+	var sb strings.Builder
+	l.writeTo(&sb)
+	return sb.String()
+}
+
+func (l *ListNode) writeTo(sb *strings.Builder) {
+	for _, n := range l.Nodes {
+		n.writeTo(sb)
+	}
+}
+
+func (l *ListNode) CopyList() *ListNode {
+	if l == nil {
+		return l
+	}
+	n := l.tr.newList(l.Pos)
+	for _, elem := range l.Nodes {
+		n.append(elem.Copy())
+	}
+	return n
+}
+
+func (l *ListNode) Copy() Node {
+	return l.CopyList()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Text []byte // The text; may span newlines.
+}
+
+func (t *Tree) newText(pos Pos, text string) *TextNode {
+	return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
+}
+
+func (t *TextNode) String() string {
+	return fmt.Sprintf(textFormat, t.Text)
+}
+
+func (t *TextNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(t.String())
+}
+
+func (t *TextNode) tree() *Tree {
+	return t.tr
+}
+
+func (t *TextNode) Copy() Node {
+	return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
+}
+
+// CommentNode holds a comment.
+type CommentNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Text string // Comment text.
+}
+
+func (t *Tree) newComment(pos Pos, text string) *CommentNode {
+	return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text}
+}
+
+func (c *CommentNode) String() string {
+	var sb strings.Builder
+	c.writeTo(&sb)
+	return sb.String()
+}
+
+func (c *CommentNode) writeTo(sb *strings.Builder) {
+	sb.WriteString("{{")
+	sb.WriteString(c.Text)
+	sb.WriteString("}}")
+}
+
+func (c *CommentNode) tree() *Tree {
+	return c.tr
+}
+
+func (c *CommentNode) Copy() Node {
+	return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text}
+}
+
+// PipeNode holds a pipeline with optional declaration
+type PipeNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	Line     int             // The line number in the input. Deprecated: Kept for compatibility.
+	IsAssign bool            // The variables are being assigned, not declared.
+	Decl     []*VariableNode // Variables in lexical order.
+	Cmds     []*CommandNode  // The commands in lexical order.
+}
+
+func (t *Tree) newPipeline(pos Pos, line int, vars []*VariableNode) *PipeNode {
+	return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: vars}
+}
+
+func (p *PipeNode) append(command *CommandNode) {
+	p.Cmds = append(p.Cmds, command)
+}
+
+func (p *PipeNode) String() string {
+	var sb strings.Builder
+	p.writeTo(&sb)
+	return sb.String()
+}
+
+func (p *PipeNode) writeTo(sb *strings.Builder) {
+	if len(p.Decl) > 0 {
+		for i, v := range p.Decl {
+			if i > 0 {
+				sb.WriteString(", ")
+			}
+			v.writeTo(sb)
+		}
+		sb.WriteString(" := ")
+	}
+	for i, c := range p.Cmds {
+		if i > 0 {
+			sb.WriteString(" | ")
+		}
+		c.writeTo(sb)
+	}
+}
+
+func (p *PipeNode) tree() *Tree {
+	return p.tr
+}
+
+func (p *PipeNode) CopyPipe() *PipeNode {
+	if p == nil {
+		return p
+	}
+	vars := make([]*VariableNode, len(p.Decl))
+	for i, d := range p.Decl {
+		vars[i] = d.Copy().(*VariableNode)
+	}
+	n := p.tr.newPipeline(p.Pos, p.Line, vars)
+	n.IsAssign = p.IsAssign
+	for _, c := range p.Cmds {
+		n.append(c.Copy().(*CommandNode))
+	}
+	return n
+}
+
+func (p *PipeNode) Copy() Node {
+	return p.CopyPipe()
+}
+
+// ActionNode holds an action (something bounded by delimiters).
+// Control actions have their own nodes; ActionNode represents simple
+// ones such as field evaluations and parenthesized pipelines.
+type ActionNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Line int       // The line number in the input. Deprecated: Kept for compatibility.
+	Pipe *PipeNode // The pipeline in the action.
+}
+
+func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
+	return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
+}
+
+func (a *ActionNode) String() string {
+	var sb strings.Builder
+	a.writeTo(&sb)
+	return sb.String()
+}
+
+func (a *ActionNode) writeTo(sb *strings.Builder) {
+	sb.WriteString("{{")
+	a.Pipe.writeTo(sb)
+	sb.WriteString("}}")
+}
+
+func (a *ActionNode) tree() *Tree {
+	return a.tr
+}
+
+func (a *ActionNode) Copy() Node {
+	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
+}
+
+// CommandNode holds a command (a pipeline inside an evaluating action).
+type CommandNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Args []Node // Arguments in lexical order: Identifier, field, or constant.
+}
+
+func (t *Tree) newCommand(pos Pos) *CommandNode {
+	return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
+}
+
+func (c *CommandNode) append(arg Node) {
+	c.Args = append(c.Args, arg)
+}
+
+func (c *CommandNode) String() string {
+	var sb strings.Builder
+	c.writeTo(&sb)
+	return sb.String()
+}
+
+func (c *CommandNode) writeTo(sb *strings.Builder) {
+	for i, arg := range c.Args {
+		if i > 0 {
+			sb.WriteByte(' ')
+		}
+		if arg, ok := arg.(*PipeNode); ok {
+			sb.WriteByte('(')
+			arg.writeTo(sb)
+			sb.WriteByte(')')
+			continue
+		}
+		arg.writeTo(sb)
+	}
+}
+
+func (c *CommandNode) tree() *Tree {
+	return c.tr
+}
+
+func (c *CommandNode) Copy() Node {
+	if c == nil {
+		return c
+	}
+	n := c.tr.newCommand(c.Pos)
+	for _, c := range c.Args {
+		n.append(c.Copy())
+	}
+	return n
+}
+
+// IdentifierNode holds an identifier.
+type IdentifierNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Ident string // The identifier's name.
+}
+
+// NewIdentifier returns a new IdentifierNode with the given identifier name.
+func NewIdentifier(ident string) *IdentifierNode {
+	return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
+}
+
+// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
+	i.Pos = pos
+	return i
+}
+
+// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
+	i.tr = t
+	return i
+}
+
+func (i *IdentifierNode) String() string {
+	return i.Ident
+}
+
+func (i *IdentifierNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(i.String())
+}
+
+func (i *IdentifierNode) tree() *Tree {
+	return i.tr
+}
+
+func (i *IdentifierNode) Copy() Node {
+	return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
+}
+
+// VariableNode holds a list of variable names, possibly with chained field
+// accesses. The dollar sign is part of the (first) name.
+type VariableNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Ident []string // Variable name and fields in lexical order.
+}
+
+func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
+	return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
+}
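+
+// For example (sketch), newVariable with ident "$x.Field" yields
+// Ident == []string{"$x", "Field"}; the dollar sign stays on the first name.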
+
+func (v *VariableNode) String() string {
+	var sb strings.Builder
+	v.writeTo(&sb)
+	return sb.String()
+}
+
+func (v *VariableNode) writeTo(sb *strings.Builder) {
+	for i, id := range v.Ident {
+		if i > 0 {
+			sb.WriteByte('.')
+		}
+		sb.WriteString(id)
+	}
+}
+
+func (v *VariableNode) tree() *Tree {
+	return v.tr
+}
+
+func (v *VariableNode) Copy() Node {
+	return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
+}
+
+// DotNode holds the special identifier '.'.
+type DotNode struct {
+	NodeType
+	Pos
+	tr *Tree
+}
+
+func (t *Tree) newDot(pos Pos) *DotNode {
+	return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
+}
+
+func (d *DotNode) Type() NodeType {
+	// Override method on embedded NodeType for API compatibility.
+	// TODO: Not really a problem; could change API without effect but
+	// api tool complains.
+	return NodeDot
+}
+
+func (d *DotNode) String() string {
+	return "."
+}
+
+func (d *DotNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(d.String())
+}
+
+func (d *DotNode) tree() *Tree {
+	return d.tr
+}
+
+func (d *DotNode) Copy() Node {
+	return d.tr.newDot(d.Pos)
+}
+
+// NilNode holds the special identifier 'nil' representing an untyped nil constant.
+type NilNode struct {
+	NodeType
+	Pos
+	tr *Tree
+}
+
+func (t *Tree) newNil(pos Pos) *NilNode {
+	return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
+}
+
+func (n *NilNode) Type() NodeType {
+	// Override method on embedded NodeType for API compatibility.
+	// TODO: Not really a problem; could change API without effect but
+	// api tool complains.
+	return NodeNil
+}
+
+func (n *NilNode) String() string {
+	return "nil"
+}
+
+func (n *NilNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(n.String())
+}
+
+func (n *NilNode) tree() *Tree {
+	return n.tr
+}
+
+func (n *NilNode) Copy() Node {
+	return n.tr.newNil(n.Pos)
+}
+
+// FieldNode holds a field (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The period is dropped from each ident.
+type FieldNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Ident []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newField(pos Pos, ident string) *FieldNode {
+	return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
+}
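+
+// For example (sketch), newField with ident ".X.Y" yields
+// Ident == []string{"X", "Y"}; writeTo puts the periods back when printing.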
+
+func (f *FieldNode) String() string {
+	var sb strings.Builder
+	f.writeTo(&sb)
+	return sb.String()
+}
+
+func (f *FieldNode) writeTo(sb *strings.Builder) {
+	for _, id := range f.Ident {
+		sb.WriteByte('.')
+		sb.WriteString(id)
+	}
+}
+
+func (f *FieldNode) tree() *Tree {
+	return f.tr
+}
+
+func (f *FieldNode) Copy() Node {
+	return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
+}
+
+// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The periods are dropped from each ident.
+type ChainNode struct {
+	NodeType
+	Pos
+	tr    *Tree
+	Node  Node
+	Field []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
+	return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
+}
+
+// Add adds the named field (which should start with a period) to the end of the chain.
+func (c *ChainNode) Add(field string) {
+	if len(field) == 0 || field[0] != '.' {
+		panic("no dot in field")
+	}
+	field = field[1:] // Remove leading dot.
+	if field == "" {
+		panic("empty field")
+	}
+	c.Field = append(c.Field, field)
+}
+
+func (c *ChainNode) String() string {
+	var sb strings.Builder
+	c.writeTo(&sb)
+	return sb.String()
+}
+
+func (c *ChainNode) writeTo(sb *strings.Builder) {
+	if _, ok := c.Node.(*PipeNode); ok {
+		sb.WriteByte('(')
+		c.Node.writeTo(sb)
+		sb.WriteByte(')')
+	} else {
+		c.Node.writeTo(sb)
+	}
+	for _, field := range c.Field {
+		sb.WriteByte('.')
+		sb.WriteString(field)
+	}
+}
+
+func (c *ChainNode) tree() *Tree {
+	return c.tr
+}
+
+func (c *ChainNode) Copy() Node {
+	return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
+}
+
+// BoolNode holds a boolean constant.
+type BoolNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	True bool // The value of the boolean constant.
+}
+
+func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
+	return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
+}
+
+func (b *BoolNode) String() string {
+	if b.True {
+		return "true"
+	}
+	return "false"
+}
+
+func (b *BoolNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(b.String())
+}
+
+func (b *BoolNode) tree() *Tree {
+	return b.tr
+}
+
+func (b *BoolNode) Copy() Node {
+	return b.tr.newBool(b.Pos, b.True)
+}
+
+// NumberNode holds a number: signed or unsigned integer, float, or complex.
+// The value is parsed and stored under all the types that can represent the value.
+// This simulates in a small amount of code the behavior of Go's ideal constants.
+type NumberNode struct {
+	NodeType
+	Pos
+	tr         *Tree
+	IsInt      bool       // Number has an integral value.
+	IsUint     bool       // Number has an unsigned integral value.
+	IsFloat    bool       // Number has a floating-point value.
+	IsComplex  bool       // Number is complex.
+	Int64      int64      // The signed integer value.
+	Uint64     uint64     // The unsigned integer value.
+	Float64    float64    // The floating-point value.
+	Complex128 complex128 // The complex value.
+	Text       string     // The original textual representation from the input.
+}
+
+func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
+	n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
+	switch typ {
+	case itemCharConstant:
+		rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
+		if err != nil {
+			return nil, err
+		}
+		if tail != "'" {
+			return nil, fmt.Errorf("malformed character constant: %s", text)
+		}
+		n.Int64 = int64(rune)
+		n.IsInt = true
+		n.Uint64 = uint64(rune)
+		n.IsUint = true
+		n.Float64 = float64(rune) // odd but those are the rules.
+		n.IsFloat = true
+		return n, nil
+	case itemComplex:
+		// fmt.Sscan can parse the pair, so let it do the work.
+		if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
+			return nil, err
+		}
+		n.IsComplex = true
+		n.simplifyComplex()
+		return n, nil
+	}
+	// Imaginary constants can only be complex unless they are zero.
+	if len(text) > 0 && text[len(text)-1] == 'i' {
+		f, err := strconv.ParseFloat(text[:len(text)-1], 64)
+		if err == nil {
+			n.IsComplex = true
+			n.Complex128 = complex(0, f)
+			n.simplifyComplex()
+			return n, nil
+		}
+	}
+	// Do integer test first so we get 0x123 etc.
+	u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
+	if err == nil {
+		n.IsUint = true
+		n.Uint64 = u
+	}
+	i, err := strconv.ParseInt(text, 0, 64)
+	if err == nil {
+		n.IsInt = true
+		n.Int64 = i
+		if i == 0 {
+			n.IsUint = true // in case of -0.
+			n.Uint64 = u
+		}
+	}
+	// If an integer extraction succeeded, promote the float.
+	if n.IsInt {
+		n.IsFloat = true
+		n.Float64 = float64(n.Int64)
+	} else if n.IsUint {
+		n.IsFloat = true
+		n.Float64 = float64(n.Uint64)
+	} else {
+		f, err := strconv.ParseFloat(text, 64)
+		if err == nil {
+			// If we parsed it as a float but it looks like an integer,
+			// it's a huge number too large to fit in an int. Reject it.
+			if !strings.ContainsAny(text, ".eEpP") {
+				return nil, fmt.Errorf("integer overflow: %q", text)
+			}
+			n.IsFloat = true
+			n.Float64 = f
+			// If a floating-point extraction succeeded, extract the int if needed.
+			if !n.IsInt && float64(int64(f)) == f {
+				n.IsInt = true
+				n.Int64 = int64(f)
+			}
+			if !n.IsUint && float64(uint64(f)) == f {
+				n.IsUint = true
+				n.Uint64 = uint64(f)
+			}
+		}
+	}
+	if !n.IsInt && !n.IsUint && !n.IsFloat {
+		return nil, fmt.Errorf("illegal number syntax: %q", text)
+	}
+	return n, nil
+}
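+
+// As a rough illustration (the parser tests cover the full matrix), newNumber
+// sets these flags for a few sample literals:
+//
+//	"73"    IsInt, IsUint, IsFloat
+//	"1e19"  IsUint, IsFloat (too large for int64)
+//	"-1.2"  IsFloat
+//	"0i"    IsInt, IsUint, IsFloat, IsComplex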
+
+// simplifyComplex pulls out any other types that are represented by the complex number.
+// These all require that the imaginary part be zero.
+func (n *NumberNode) simplifyComplex() {
+	n.IsFloat = imag(n.Complex128) == 0
+	if n.IsFloat {
+		n.Float64 = real(n.Complex128)
+		n.IsInt = float64(int64(n.Float64)) == n.Float64
+		if n.IsInt {
+			n.Int64 = int64(n.Float64)
+		}
+		n.IsUint = float64(uint64(n.Float64)) == n.Float64
+		if n.IsUint {
+			n.Uint64 = uint64(n.Float64)
+		}
+	}
+}
+
+func (n *NumberNode) String() string {
+	return n.Text
+}
+
+func (n *NumberNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(n.String())
+}
+
+func (n *NumberNode) tree() *Tree {
+	return n.tr
+}
+
+func (n *NumberNode) Copy() Node {
+	nn := new(NumberNode)
+	*nn = *n // Easy, fast, correct.
+	return nn
+}
+
+// StringNode holds a string constant. The value has been "unquoted".
+type StringNode struct {
+	NodeType
+	Pos
+	tr     *Tree
+	Quoted string // The original text of the string, with quotes.
+	Text   string // The string, after quote processing.
+}
+
+func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
+	return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
+}
+
+func (s *StringNode) String() string {
+	return s.Quoted
+}
+
+func (s *StringNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(s.String())
+}
+
+func (s *StringNode) tree() *Tree {
+	return s.tr
+}
+
+func (s *StringNode) Copy() Node {
+	return s.tr.newString(s.Pos, s.Quoted, s.Text)
+}
+
+// endNode represents an {{end}} action.
+// It does not appear in the final parse tree.
+type endNode struct {
+	NodeType
+	Pos
+	tr *Tree
+}
+
+func (t *Tree) newEnd(pos Pos) *endNode {
+	return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
+}
+
+func (e *endNode) String() string {
+	return "{{end}}"
+}
+
+func (e *endNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(e.String())
+}
+
+func (e *endNode) tree() *Tree {
+	return e.tr
+}
+
+func (e *endNode) Copy() Node {
+	return e.tr.newEnd(e.Pos)
+}
+
+// elseNode represents an {{else}} action. Does not appear in the final tree.
+type elseNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Line int // The line number in the input. Deprecated: Kept for compatibility.
+}
+
+func (t *Tree) newElse(pos Pos, line int) *elseNode {
+	return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
+}
+
+func (e *elseNode) Type() NodeType {
+	return nodeElse
+}
+
+func (e *elseNode) String() string {
+	return "{{else}}"
+}
+
+func (e *elseNode) writeTo(sb *strings.Builder) {
+	sb.WriteString(e.String())
+}
+
+func (e *elseNode) tree() *Tree {
+	return e.tr
+}
+
+func (e *elseNode) Copy() Node {
+	return e.tr.newElse(e.Pos, e.Line)
+}
+
+// BranchNode is the common representation of if, range, and with.
+type BranchNode struct {
+	NodeType
+	Pos
+	tr       *Tree
+	Line     int       // The line number in the input. Deprecated: Kept for compatibility.
+	Pipe     *PipeNode // The pipeline to be evaluated.
+	List     *ListNode // What to execute if the value is non-empty.
+	ElseList *ListNode // What to execute if the value is empty (nil if absent).
+}
+
+func (b *BranchNode) String() string {
+	var sb strings.Builder
+	b.writeTo(&sb)
+	return sb.String()
+}
+
+func (b *BranchNode) writeTo(sb *strings.Builder) {
+	name := ""
+	switch b.NodeType {
+	case NodeIf:
+		name = "if"
+	case NodeRange:
+		name = "range"
+	case NodeWith:
+		name = "with"
+	default:
+		panic("unknown branch type")
+	}
+	sb.WriteString("{{")
+	sb.WriteString(name)
+	sb.WriteByte(' ')
+	b.Pipe.writeTo(sb)
+	sb.WriteString("}}")
+	b.List.writeTo(sb)
+	if b.ElseList != nil {
+		sb.WriteString("{{else}}")
+		b.ElseList.writeTo(sb)
+	}
+	sb.WriteString("{{end}}")
+}
+
+func (b *BranchNode) tree() *Tree {
+	return b.tr
+}
+
+func (b *BranchNode) Copy() Node {
+	switch b.NodeType {
+	case NodeIf:
+		return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+	case NodeRange:
+		return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+	case NodeWith:
+		return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+	default:
+		panic("unknown branch type")
+	}
+}
+
+// IfNode represents an {{if}} action and its commands.
+type IfNode struct {
+	BranchNode
+}
+
+func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
+	return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (i *IfNode) Copy() Node {
+	return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+}
+
+// BreakNode represents a {{break}} action.
+type BreakNode struct {
+	tr *Tree
+	NodeType
+	Pos
+	Line int
+}
+
+func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
+	return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
+}
+
+func (b *BreakNode) Copy() Node                  { return b.tr.newBreak(b.Pos, b.Line) }
+func (b *BreakNode) String() string              { return "{{break}}" }
+func (b *BreakNode) tree() *Tree                 { return b.tr }
+func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }
+
+// ContinueNode represents a {{continue}} action.
+type ContinueNode struct {
+	tr *Tree
+	NodeType
+	Pos
+	Line int
+}
+
+func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
+	return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
+}
+
+func (c *ContinueNode) Copy() Node                  { return c.tr.newContinue(c.Pos, c.Line) }
+func (c *ContinueNode) String() string              { return "{{continue}}" }
+func (c *ContinueNode) tree() *Tree                 { return c.tr }
+func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }
+
+// RangeNode represents a {{range}} action and its commands.
+type RangeNode struct {
+	BranchNode
+}
+
+func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
+	return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (r *RangeNode) Copy() Node {
+	return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
+}
+
+// WithNode represents a {{with}} action and its commands.
+type WithNode struct {
+	BranchNode
+}
+
+func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
+	return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (w *WithNode) Copy() Node {
+	return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
+}
+
+// TemplateNode represents a {{template}} action.
+type TemplateNode struct {
+	NodeType
+	Pos
+	tr   *Tree
+	Line int       // The line number in the input. Deprecated: Kept for compatibility.
+	Name string    // The name of the template (unquoted).
+	Pipe *PipeNode // The command to evaluate as dot for the template.
+}
+
+func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
+	return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
+}
+
+func (t *TemplateNode) String() string {
+	var sb strings.Builder
+	t.writeTo(&sb)
+	return sb.String()
+}
+
+func (t *TemplateNode) writeTo(sb *strings.Builder) {
+	sb.WriteString("{{template ")
+	sb.WriteString(strconv.Quote(t.Name))
+	if t.Pipe != nil {
+		sb.WriteByte(' ')
+		t.Pipe.writeTo(sb)
+	}
+	sb.WriteString("}}")
+}
+
+func (t *TemplateNode) tree() *Tree {
+	return t.tr
+}
+
+func (t *TemplateNode) Copy() Node {
+	return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
+}
diff --git a/internal/backport/text/template/parse/parse.go b/internal/backport/text/template/parse/parse.go
new file mode 100644
index 0000000..d92bed5
--- /dev/null
+++ b/internal/backport/text/template/parse/parse.go
@@ -0,0 +1,796 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates as defined by text/template
+// and html/template. Clients should use those packages to construct templates
+// rather than this one, which provides shared internal data structures not
+// intended for general use.
+package parse
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Tree is the representation of a single parsed template.
+type Tree struct {
+	Name      string    // name of the template represented by the tree.
+	ParseName string    // name of the top-level template during parsing, for error messages.
+	Root      *ListNode // top-level root of the tree.
+	Mode      Mode      // parsing mode.
+	text      string    // text parsed to create the template (or its parent)
+	// Parsing only; cleared after parse.
+	funcs      []map[string]interface{}
+	lex        *lexer
+	token      [3]item // three-token lookahead for parser.
+	peekCount  int
+	vars       []string // variables defined at the moment.
+	treeSet    map[string]*Tree
+	actionLine int // line of left delim starting action
+	rangeDepth int
+	mode       Mode
+}
+
+// A mode value is a set of flags (or 0). Modes control parser behavior.
+type Mode uint
+
+const (
+	ParseComments Mode = 1 << iota // parse comments and add them to AST
+	SkipFuncCheck                  // do not check that functions are defined
+)
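+
+// Illustrative usage (a sketch mirroring this package's tests): set Mode on a
+// Tree before calling its Parse method, e.g.
+//
+//	tr := New("page")
+//	tr.Mode = ParseComments | SkipFuncCheck
+//	tree, err := tr.Parse(text, "", "", make(map[string]*Tree))
+//
+// where text holds the template source. With ParseComments set, {{/* ... */}}
+// actions are kept as CommentNodes rather than discarded.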
+
+// Copy returns a copy of the Tree. Any parsing state is discarded.
+func (t *Tree) Copy() *Tree {
+	if t == nil {
+		return nil
+	}
+	return &Tree{
+		Name:      t.Name,
+		ParseName: t.ParseName,
+		Root:      t.Root.CopyList(),
+		text:      t.text,
+	}
+}
+
+// Parse returns a map from template name to parse.Tree, created by parsing the
+// templates described in the argument string. The top-level template will be
+// given the specified name. If an error is encountered, parsing stops and an
+// empty map is returned with the error.
+func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (map[string]*Tree, error) {
+	treeSet := make(map[string]*Tree)
+	t := New(name)
+	t.text = text
+	_, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
+	return treeSet, err
+}
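+
+// For example (sketch):
+//
+//	trees, err := Parse("page", "Hello, {{.Name}}!", "", "")
+//
+// parses with the default "{{" and "}}" delimiters and, on success, returns a
+// map containing the parse tree for "page".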
+
+// next returns the next token.
+func (t *Tree) next() item {
+	if t.peekCount > 0 {
+		t.peekCount--
+	} else {
+		t.token[0] = t.lex.nextItem()
+	}
+	return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+	t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens.
+// The zeroth token is already there.
+func (t *Tree) backup2(t1 item) {
+	t.token[1] = t1
+	t.peekCount = 2
+}
+
+// backup3 backs the input stream up three tokens.
+// The zeroth token is already there.
+func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
+	t.token[1] = t1
+	t.token[2] = t2
+	t.peekCount = 3
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+	if t.peekCount > 0 {
+		return t.token[t.peekCount-1]
+	}
+	t.peekCount = 1
+	t.token[0] = t.lex.nextItem()
+	return t.token[0]
+}
+
+// nextNonSpace returns the next non-space token.
+func (t *Tree) nextNonSpace() (token item) {
+	for {
+		token = t.next()
+		if token.typ != itemSpace {
+			break
+		}
+	}
+	return token
+}
+
+// peekNonSpace returns but does not consume the next non-space token.
+func (t *Tree) peekNonSpace() item {
+	token := t.nextNonSpace()
+	t.backup()
+	return token
+}
+
+// Parsing.
+
+// New allocates a new parse tree with the given name.
+func New(name string, funcs ...map[string]interface{}) *Tree {
+	return &Tree{
+		Name:  name,
+		funcs: funcs,
+	}
+}
+
+// ErrorContext returns a textual representation of the location of the node in the input text.
+// The receiver is only used when the node does not have a pointer to the tree inside,
+// which can occur in old code.
+func (t *Tree) ErrorContext(n Node) (location, context string) {
+	pos := int(n.Position())
+	tree := n.tree()
+	if tree == nil {
+		tree = t
+	}
+	text := tree.text[:pos]
+	byteNum := strings.LastIndex(text, "\n")
+	if byteNum == -1 {
+		byteNum = pos // On first line.
+	} else {
+		byteNum++ // After the newline.
+		byteNum = pos - byteNum
+	}
+	lineNum := 1 + strings.Count(text, "\n")
+	context = n.String()
+	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
+}
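+
+// For instance (sketch), a node that begins 14 bytes into line 3 of a template
+// whose ParseName is "page" yields the location string "page:3:14".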
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+	t.Root = nil
+	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
+	panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+	t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+	token := t.nextNonSpace()
+	if token.typ != expected {
+		t.unexpected(token, context)
+	}
+	return token
+}
+
+// expectOneOf consumes the next token and guarantees it has one of the required types.
+func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+	token := t.nextNonSpace()
+	if token.typ != expected1 && token.typ != expected2 {
+		t.unexpected(token, context)
+	}
+	return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+	if token.typ == itemError {
+		extra := ""
+		if t.actionLine != 0 && t.actionLine != token.line {
+			extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
+			if strings.HasSuffix(token.val, " action") {
+				extra = extra[len(" in action"):] // avoid "action in action"
+			}
+		}
+		t.errorf("%s%s", token, extra)
+	}
+	t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		if t != nil {
+			t.lex.drain()
+			t.stopParse()
+		}
+		*errp = e.(error)
+	}
+}
+
+// startParse initializes the parser, using the lexer.
+func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer, treeSet map[string]*Tree) {
+	t.Root = nil
+	t.lex = lex
+	t.vars = []string{"$"}
+	t.funcs = funcs
+	t.treeSet = treeSet
+	lex.breakOK = !t.hasFunction("break")
+	lex.continueOK = !t.hasFunction("continue")
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+	t.lex = nil
+	t.vars = nil
+	t.funcs = nil
+	t.treeSet = nil
+}
+
+// Parse parses the template definition string to construct a representation of
+// the template for execution. If either action delimiter string is empty, the
+// default ("{{" or "}}") is used. Embedded template definitions are added to
+// the treeSet map.
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
+	defer t.recover(&err)
+	t.ParseName = t.Name
+	emitComment := t.Mode&ParseComments != 0
+	t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
+	t.text = text
+	t.parse()
+	t.add()
+	t.stopParse()
+	return t, nil
+}
+
+// add adds tree to t.treeSet.
+func (t *Tree) add() {
+	tree := t.treeSet[t.Name]
+	if tree == nil || IsEmptyTree(tree.Root) {
+		t.treeSet[t.Name] = t
+		return
+	}
+	if !IsEmptyTree(t.Root) {
+		t.errorf("template: multiple definition of template %q", t.Name)
+	}
+}
+
+// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
+func IsEmptyTree(n Node) bool {
+	switch n := n.(type) {
+	case nil:
+		return true
+	case *ActionNode:
+	case *CommentNode:
+		return true
+	case *IfNode:
+	case *ListNode:
+		for _, node := range n.Nodes {
+			if !IsEmptyTree(node) {
+				return false
+			}
+		}
+		return true
+	case *RangeNode:
+	case *TemplateNode:
+	case *TextNode:
+		return len(bytes.TrimSpace(n.Text)) == 0
+	case *WithNode:
+	default:
+		panic("unknown node: " + n.String())
+	}
+	return false
+}
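+
+// A sketch of what IsEmptyTree reports for the Root of a template parsed from
+// each input (mirroring the cases in parse_test.go):
+//
+//	""                                  empty
+//	" \t\n"                             empty
+//	"{{/* comment */}}"                 empty
+//	`{{define "x"}}something{{end}}`    empty (the body lives in its own tree)
+//	"hello"                             not empty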
+
+// parse is the top-level parser for a template, essentially the same
+// as itemList except it also parses {{define}} actions.
+// It runs to EOF.
+func (t *Tree) parse() {
+	t.Root = t.newList(t.peek().pos)
+	for t.peek().typ != itemEOF {
+		if t.peek().typ == itemLeftDelim {
+			delim := t.next()
+			if t.nextNonSpace().typ == itemDefine {
+				newT := New("definition") // name will be updated once we know it.
+				newT.text = t.text
+				newT.Mode = t.Mode
+				newT.ParseName = t.ParseName
+				newT.startParse(t.funcs, t.lex, t.treeSet)
+				newT.parseDefinition()
+				continue
+			}
+			t.backup2(delim)
+		}
+		switch n := t.textOrAction(); n.Type() {
+		case nodeEnd, nodeElse:
+			t.errorf("unexpected %s", n)
+		default:
+			t.Root.append(n)
+		}
+	}
+}
+
+// parseDefinition parses a {{define}} ...  {{end}} template definition and
+// installs the definition in t.treeSet. The "define" keyword has already
+// been scanned.
+func (t *Tree) parseDefinition() {
+	const context = "define clause"
+	name := t.expectOneOf(itemString, itemRawString, context)
+	var err error
+	t.Name, err = strconv.Unquote(name.val)
+	if err != nil {
+		t.error(err)
+	}
+	t.expect(itemRightDelim, context)
+	var end Node
+	t.Root, end = t.itemList()
+	if end.Type() != nodeEnd {
+		t.errorf("unexpected %s in %s", end, context)
+	}
+	t.add()
+	t.stopParse()
+}
+
+// itemList:
+//	textOrAction*
+// Terminates at {{end}} or {{else}}, returned separately.
+func (t *Tree) itemList() (list *ListNode, next Node) {
+	list = t.newList(t.peekNonSpace().pos)
+	for t.peekNonSpace().typ != itemEOF {
+		n := t.textOrAction()
+		switch n.Type() {
+		case nodeEnd, nodeElse:
+			return list, n
+		}
+		list.append(n)
+	}
+	t.errorf("unexpected EOF")
+	return
+}
+
+// textOrAction:
+//	text | comment | action
+func (t *Tree) textOrAction() Node {
+	switch token := t.nextNonSpace(); token.typ {
+	case itemText:
+		return t.newText(token.pos, token.val)
+	case itemLeftDelim:
+		t.actionLine = token.line
+		defer t.clearActionLine()
+		return t.action()
+	case itemComment:
+		return t.newComment(token.pos, token.val)
+	default:
+		t.unexpected(token, "input")
+	}
+	return nil
+}
+
+func (t *Tree) clearActionLine() {
+	t.actionLine = 0
+}
+
+// Action:
+//	control
+//	command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+	switch token := t.nextNonSpace(); token.typ {
+	case itemBlock:
+		return t.blockControl()
+	case itemBreak:
+		return t.breakControl(token.pos, token.line)
+	case itemContinue:
+		return t.continueControl(token.pos, token.line)
+	case itemElse:
+		return t.elseControl()
+	case itemEnd:
+		return t.endControl()
+	case itemIf:
+		return t.ifControl()
+	case itemRange:
+		return t.rangeControl()
+	case itemTemplate:
+		return t.templateControl()
+	case itemWith:
+		return t.withControl()
+	}
+	t.backup()
+	token := t.peek()
+	// Do not pop variables; they persist until "end".
+	return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+}
+
+// Break:
+//	{{break}}
+// Break keyword is past.
+func (t *Tree) breakControl(pos Pos, line int) Node {
+	if token := t.next(); token.typ != itemRightDelim {
+		t.unexpected(token, "in {{break}}")
+	}
+	if t.rangeDepth == 0 {
+		t.errorf("{{break}} outside {{range}}")
+	}
+	return t.newBreak(pos, line)
+}
+
+// Continue:
+//	{{continue}}
+// Continue keyword is past.
+func (t *Tree) continueControl(pos Pos, line int) Node {
+	if token := t.next(); token.typ != itemRightDelim {
+		t.unexpected(token, "in {{continue}}")
+	}
+	if t.rangeDepth == 0 {
+		t.errorf("{{continue}} outside {{range}}")
+	}
+	return t.newContinue(pos, line)
+}
+
+// Pipeline:
+//	declarations? command ('|' command)*
+func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+	token := t.peekNonSpace()
+	pipe = t.newPipeline(token.pos, token.line, nil)
+	// Are there declarations or assignments?
+decls:
+	if v := t.peekNonSpace(); v.typ == itemVariable {
+		t.next()
+		// Since space is a token, we need 3-token look-ahead here in the worst case:
+		// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
+		// argument variable rather than a declaration. So remember the token
+		// adjacent to the variable so we can push it back if necessary.
+		tokenAfterVariable := t.peek()
+		next := t.peekNonSpace()
+		switch {
+		case next.typ == itemAssign, next.typ == itemDeclare:
+			pipe.IsAssign = next.typ == itemAssign
+			t.nextNonSpace()
+			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
+			t.vars = append(t.vars, v.val)
+		case next.typ == itemChar && next.val == ",":
+			t.nextNonSpace()
+			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
+			t.vars = append(t.vars, v.val)
+			if context == "range" && len(pipe.Decl) < 2 {
+				switch t.peekNonSpace().typ {
+				case itemVariable, itemRightDelim, itemRightParen:
+					// second initialized variable in a range pipeline
+					goto decls
+				default:
+					t.errorf("range can only initialize variables")
+				}
+			}
+			t.errorf("too many declarations in %s", context)
+		case tokenAfterVariable.typ == itemSpace:
+			t.backup3(v, tokenAfterVariable)
+		default:
+			t.backup2(v)
+		}
+	}
+	for {
+		switch token := t.nextNonSpace(); token.typ {
+		case end:
+			// At this point, the pipeline is complete
+			t.checkPipeline(pipe, context)
+			return
+		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+			t.backup()
+			pipe.append(t.command())
+		default:
+			t.unexpected(token, context)
+		}
+	}
+}
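+
+// Illustrative actions whose pipelines this function parses (sketch):
+//
+//	{{$x := .X | .Y}}             one declared variable, two commands
+//	{{range $i, $v := .Items}}    two variables, allowed only in range
+//	{{printf "%d" 23}}            a single command with arguments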
+
+func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+	// Reject empty pipelines
+	if len(pipe.Cmds) == 0 {
+		t.errorf("missing value for %s", context)
+	}
+	// Only the first command of a pipeline can start with a non-executable operand.
+	for i, c := range pipe.Cmds[1:] {
+		switch c.Args[0].Type() {
+		case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
+			// With A|B|C, pipeline stage 2 is B
+			t.errorf("non executable command in pipeline stage %d", i+2)
+		}
+	}
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+	defer t.popVars(len(t.vars))
+	pipe = t.pipeline(context, itemRightDelim)
+	if context == "range" {
+		t.rangeDepth++
+	}
+	var next Node
+	list, next = t.itemList()
+	if context == "range" {
+		t.rangeDepth--
+	}
+	switch next.Type() {
+	case nodeEnd: // done
+	case nodeElse:
+		if allowElseIf {
+			// Special case for "else if". If the "else" is followed immediately by an "if",
+			// the elseControl will have left the "if" token pending. Treat
+			//	{{if a}}_{{else if b}}_{{end}}
+			// as
+			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+			// is assumed. This technique works even for long if-else-if chains.
+			// TODO: Should we allow else-if in with and range?
+			if t.peek().typ == itemIf {
+				t.next() // Consume the "if" token.
+				elseList = t.newList(next.Position())
+				elseList.append(t.ifControl())
+				// Do not consume the next item - only one {{end}} required.
+				break
+			}
+		}
+		elseList, next = t.itemList()
+		if next.Type() != nodeEnd {
+			t.errorf("expected end; found %s", next)
+		}
+	}
+	return pipe.Position(), pipe.Line, pipe, list, elseList
+}
+
+// If:
+//	{{if pipeline}} itemList {{end}}
+//	{{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+	return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+//	{{range pipeline}} itemList {{end}}
+//	{{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+	r := t.newRange(t.parseControl(false, "range"))
+	return r
+}
+
+// With:
+//	{{with pipeline}} itemList {{end}}
+//	{{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+	return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+//	{{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+	return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+//	{{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+	// Special case for "else if".
+	peek := t.peekNonSpace()
+	if peek.typ == itemIf {
+		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
+		return t.newElse(peek.pos, peek.line)
+	}
+	token := t.expect(itemRightDelim, "else")
+	return t.newElse(token.pos, token.line)
+}
+
+// Block:
+//	{{block stringValue pipeline}}
+// Block keyword is past.
+// The name must be something that can evaluate to a string.
+// The pipeline is mandatory.
+func (t *Tree) blockControl() Node {
+	const context = "block clause"
+
+	token := t.nextNonSpace()
+	name := t.parseTemplateName(token, context)
+	pipe := t.pipeline(context, itemRightDelim)
+
+	block := New(name) // The template name is already known here.
+	block.text = t.text
+	block.Mode = t.Mode
+	block.ParseName = t.ParseName
+	block.startParse(t.funcs, t.lex, t.treeSet)
+	var end Node
+	block.Root, end = block.itemList()
+	if end.Type() != nodeEnd {
+		t.errorf("unexpected %s in %s", end, context)
+	}
+	block.add()
+	block.stopParse()
+
+	return t.newTemplate(token.pos, token.line, name, pipe)
+}
+
+// Template:
+//	{{template stringValue pipeline}}
+// Template keyword is past. The name must be something that can evaluate
+// to a string.
+func (t *Tree) templateControl() Node {
+	const context = "template clause"
+	token := t.nextNonSpace()
+	name := t.parseTemplateName(token, context)
+	var pipe *PipeNode
+	if t.nextNonSpace().typ != itemRightDelim {
+		t.backup()
+		// Do not pop variables; they persist until "end".
+		pipe = t.pipeline(context, itemRightDelim)
+	}
+	return t.newTemplate(token.pos, token.line, name, pipe)
+}
+
+func (t *Tree) parseTemplateName(token item, context string) (name string) {
+	switch token.typ {
+	case itemString, itemRawString:
+		s, err := strconv.Unquote(token.val)
+		if err != nil {
+			t.error(err)
+		}
+		name = s
+	default:
+		t.unexpected(token, context)
+	}
+	return
+}
+
+// command:
+//	operand (space operand)*
+// Space-separated arguments up to a pipeline character or right delimiter.
+// We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+	cmd := t.newCommand(t.peekNonSpace().pos)
+	for {
+		t.peekNonSpace() // skip leading spaces.
+		operand := t.operand()
+		if operand != nil {
+			cmd.append(operand)
+		}
+		switch token := t.next(); token.typ {
+		case itemSpace:
+			continue
+		case itemRightDelim, itemRightParen:
+			t.backup()
+		case itemPipe:
+			// nothing here; break loop below
+		default:
+			t.unexpected(token, "operand")
+		}
+		break
+	}
+	if len(cmd.Args) == 0 {
+		t.errorf("empty command")
+	}
+	return cmd
+}
+
+// operand:
+//	term .Field*
+// An operand is a space-separated component of a command,
+// a term possibly followed by field accesses.
+// A nil return means the next item is not an operand.
+func (t *Tree) operand() Node {
+	node := t.term()
+	if node == nil {
+		return nil
+	}
+	if t.peek().typ == itemField {
+		chain := t.newChain(t.peek().pos, node)
+		for t.peek().typ == itemField {
+			chain.Add(t.next().val)
+		}
+		// Compatibility with original API: If the term is of type NodeField
+		// or NodeVariable, just put more fields on the original.
+		// Otherwise, keep the Chain node.
+		// Obvious parsing errors involving literal values are detected here.
+		// More complex error cases will have to be handled at execution time.
+		switch node.Type() {
+		case NodeField:
+			node = t.newField(chain.Position(), chain.String())
+		case NodeVariable:
+			node = t.newVariable(chain.Position(), chain.String())
+		case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
+			t.errorf("unexpected . after term %q", node.String())
+		default:
+			node = chain
+		}
+	}
+	return node
+}
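+
+// For example (sketch), in {{(.Y .Z).Field}} the parenthesized pipeline is the
+// term and .Field arrives as a field token, so the result stays a ChainNode,
+// while in {{.X.Y.Z}} the chain is folded back into a single FieldNode.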
+
+// term:
+//	literal (number, string, nil, boolean)
+//	function (identifier)
+//	.
+//	.Field
+//	$
+//	'(' pipeline ')'
+// A term is a simple "expression".
+// A nil return means the next item is not a term.
+func (t *Tree) term() Node {
+	switch token := t.nextNonSpace(); token.typ {
+	case itemIdentifier:
+		checkFunc := t.Mode&SkipFuncCheck == 0
+		if checkFunc && !t.hasFunction(token.val) {
+			t.errorf("function %q not defined", token.val)
+		}
+		return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
+	case itemDot:
+		return t.newDot(token.pos)
+	case itemNil:
+		return t.newNil(token.pos)
+	case itemVariable:
+		return t.useVar(token.pos, token.val)
+	case itemField:
+		return t.newField(token.pos, token.val)
+	case itemBool:
+		return t.newBool(token.pos, token.val == "true")
+	case itemCharConstant, itemComplex, itemNumber:
+		number, err := t.newNumber(token.pos, token.val, token.typ)
+		if err != nil {
+			t.error(err)
+		}
+		return number
+	case itemLeftParen:
+		return t.pipeline("parenthesized pipeline", itemRightParen)
+	case itemString, itemRawString:
+		s, err := strconv.Unquote(token.val)
+		if err != nil {
+			t.error(err)
+		}
+		return t.newString(token.pos, token.val, s)
+	}
+	t.backup()
+	return nil
+}
+
+// hasFunction reports whether a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+	for _, funcMap := range t.funcs {
+		if funcMap == nil {
+			continue
+		}
+		if funcMap[name] != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// popVars trims the variable list to the specified length.
+func (t *Tree) popVars(n int) {
+	t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(pos Pos, name string) Node {
+	v := t.newVariable(pos, name)
+	for _, varName := range t.vars {
+		if varName == v.Ident[0] {
+			return v
+		}
+	}
+	t.errorf("undefined variable %q", v.Ident[0])
+	return nil
+}
diff --git a/internal/backport/text/template/parse/parse_test.go b/internal/backport/text/template/parse/parse_test.go
new file mode 100644
index 0000000..c3679a0
--- /dev/null
+++ b/internal/backport/text/template/parse/parse_test.go
@@ -0,0 +1,676 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+	"flag"
+	"fmt"
+	"strings"
+	"testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the main tests")
+
+type numberTest struct {
+	text      string
+	isInt     bool
+	isUint    bool
+	isFloat   bool
+	isComplex bool
+	int64
+	uint64
+	float64
+	complex128
+}
+
+var numberTests = []numberTest{
+	// basics
+	{"0", true, true, true, false, 0, 0, 0, 0},
+	{"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
+	{"73", true, true, true, false, 73, 73, 73, 0},
+	{"7_3", true, true, true, false, 73, 73, 73, 0},
+	{"0b10_010_01", true, true, true, false, 73, 73, 73, 0},
+	{"0B10_010_01", true, true, true, false, 73, 73, 73, 0},
+	{"073", true, true, true, false, 073, 073, 073, 0},
+	{"0o73", true, true, true, false, 073, 073, 073, 0},
+	{"0O73", true, true, true, false, 073, 073, 073, 0},
+	{"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+	{"0X73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+	{"0x7_3", true, true, true, false, 0x73, 0x73, 0x73, 0},
+	{"-73", true, false, true, false, -73, 0, -73, 0},
+	{"+73", true, false, true, false, 73, 0, 73, 0},
+	{"100", true, true, true, false, 100, 100, 100, 0},
+	{"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
+	{"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
+	{"-1.2", false, false, true, false, 0, 0, -1.2, 0},
+	{"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
+	{"1e1_9", false, true, true, false, 0, 1e19, 1e19, 0},
+	{"1E19", false, true, true, false, 0, 1e19, 1e19, 0},
+	{"-1e19", false, false, true, false, 0, 0, -1e19, 0},
+	{"0x_1p4", true, true, true, false, 16, 16, 16, 0},
+	{"0X_1P4", true, true, true, false, 16, 16, 16, 0},
+	{"0x_1p-4", false, false, true, false, 0, 0, 1 / 16., 0},
+	{"4i", false, false, false, true, 0, 0, 0, 4i},
+	{"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
+	{"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
+	// complex with 0 imaginary are float (and maybe integer)
+	{"0i", true, true, true, true, 0, 0, 0, 0},
+	{"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
+	{"-12+0i", true, false, true, true, -12, 0, -12, -12},
+	{"13+0i", true, true, true, true, 13, 13, 13, 13},
+	// funny bases
+	{"0123", true, true, true, false, 0123, 0123, 0123, 0},
+	{"-0x0", true, true, true, false, 0, 0, 0, 0},
+	{"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
+	// character constants
+	{`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
+	{`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
+	{`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
+	{`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
+	{`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
+	{`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+	{`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+	{`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+	// some broken syntax
+	{text: "+-2"},
+	{text: "0x123."},
+	{text: "1e."},
+	{text: "0xi."},
+	{text: "1+2."},
+	{text: "'x"},
+	{text: "'xx'"},
+	{text: "'433937734937734969526500969526500'"}, // Integer too large - issue 10634.
+	// Issue 8622 - 0xe parsed as floating point. Very embarrassing.
+	{"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0},
+}
+
+func TestNumberParse(t *testing.T) {
+	for _, test := range numberTests {
+		// If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
+		// because imaginary comes out as a number.
+		var c complex128
+		typ := itemNumber
+		var tree *Tree
+		if test.text[0] == '\'' {
+			typ = itemCharConstant
+		} else {
+			_, err := fmt.Sscan(test.text, &c)
+			if err == nil {
+				typ = itemComplex
+			}
+		}
+		n, err := tree.newNumber(0, test.text, typ)
+		ok := test.isInt || test.isUint || test.isFloat || test.isComplex
+		if ok && err != nil {
+			t.Errorf("unexpected error for %q: %s", test.text, err)
+			continue
+		}
+		if !ok && err == nil {
+			t.Errorf("expected error for %q", test.text)
+			continue
+		}
+		if !ok {
+			if *debug {
+				fmt.Printf("%s\n\t%s\n", test.text, err)
+			}
+			continue
+		}
+		if n.IsComplex != test.isComplex {
+			t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
+		}
+		if test.isInt {
+			if !n.IsInt {
+				t.Errorf("expected integer for %q", test.text)
+			}
+			if n.Int64 != test.int64 {
+				t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
+			}
+		} else if n.IsInt {
+			t.Errorf("did not expect integer for %q", test.text)
+		}
+		if test.isUint {
+			if !n.IsUint {
+				t.Errorf("expected unsigned integer for %q", test.text)
+			}
+			if n.Uint64 != test.uint64 {
+				t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
+			}
+		} else if n.IsUint {
+			t.Errorf("did not expect unsigned integer for %q", test.text)
+		}
+		if test.isFloat {
+			if !n.IsFloat {
+				t.Errorf("expected float for %q", test.text)
+			}
+			if n.Float64 != test.float64 {
+				t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
+			}
+		} else if n.IsFloat {
+			t.Errorf("did not expect float for %q", test.text)
+		}
+		if test.isComplex {
+			if !n.IsComplex {
+				t.Errorf("expected complex for %q", test.text)
+			}
+			if n.Complex128 != test.complex128 {
+				t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
+			}
+		} else if n.IsComplex {
+			t.Errorf("did not expect complex for %q", test.text)
+		}
+	}
+}
+
+type parseTest struct {
+	name   string
+	input  string
+	ok     bool
+	result string // what the user would see in an error message.
+}
+
+const (
+	noError  = true
+	hasError = false
+)
+
+var parseTests = []parseTest{
+	{"empty", "", noError,
+		``},
+	{"comment", "{{/*\n\n\n*/}}", noError,
+		``},
+	{"spaces", " \t\n", noError,
+		`" \t\n"`},
+	{"text", "some text", noError,
+		`"some text"`},
+	{"emptyAction", "{{}}", hasError,
+		`{{}}`},
+	{"field", "{{.X}}", noError,
+		`{{.X}}`},
+	{"simple command", "{{printf}}", noError,
+		`{{printf}}`},
+	{"$ invocation", "{{$}}", noError,
+		"{{$}}"},
+	{"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
+		"{{with $x := 3}}{{$x 23}}{{end}}"},
+	{"variable with fields", "{{$.I}}", noError,
+		"{{$.I}}"},
+	{"multi-word command", "{{printf `%d` 23}}", noError,
+		"{{printf `%d` 23}}"},
+	{"pipeline", "{{.X|.Y}}", noError,
+		`{{.X | .Y}}`},
+	{"pipeline with decl", "{{$x := .X|.Y}}", noError,
+		`{{$x := .X | .Y}}`},
+	{"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError,
+		`{{.X (.Y .Z) (.A | .B .C) (.E)}}`},
+	{"field applied to parentheses", "{{(.Y .Z).Field}}", noError,
+		`{{(.Y .Z).Field}}`},
+	{"simple if", "{{if .X}}hello{{end}}", noError,
+		`{{if .X}}"hello"{{end}}`},
+	{"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
+		`{{if .X}}"true"{{else}}"false"{{end}}`},
+	{"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError,
+		`{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`},
+	{"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError,
+		`"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`},
+	{"simple range", "{{range .X}}hello{{end}}", noError,
+		`{{range .X}}"hello"{{end}}`},
+	{"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
+		`{{range .X.Y.Z}}"hello"{{end}}`},
+	{"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
+		`{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
+	{"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
+		`{{range .X}}"true"{{else}}"false"{{end}}`},
+	{"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
+		`{{range .X | .M}}"true"{{else}}"false"{{end}}`},
+	{"range []int", "{{range .SI}}{{.}}{{end}}", noError,
+		`{{range .SI}}{{.}}{{end}}`},
+	{"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError,
+		`{{range $x := .SI}}{{.}}{{end}}`},
+	{"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+		`{{range $x, $y := .SI}}{{.}}{{end}}`},
+	{"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
+		`{{range .SI}}{{.}}{{break}}{{end}}`},
+	{"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
+		`{{range .SI}}{{.}}{{continue}}{{end}}`},
+	{"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+		`{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+	{"template", "{{template `x`}}", noError,
+		`{{template "x"}}`},
+	{"template with arg", "{{template `x` .Y}}", noError,
+		`{{template "x" .Y}}`},
+	{"with", "{{with .X}}hello{{end}}", noError,
+		`{{with .X}}"hello"{{end}}`},
+	{"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
+		`{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
+	// Trimming spaces.
+	{"trim left", "x \r\n\t{{- 3}}", noError, `"x"{{3}}`},
+	{"trim right", "{{3 -}}\n\n\ty", noError, `{{3}}"y"`},
+	{"trim left and right", "x \r\n\t{{- 3 -}}\n\n\ty", noError, `"x"{{3}}"y"`},
+	{"trim with extra spaces", "x\n{{-  3   -}}\ny", noError, `"x"{{3}}"y"`},
+	{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"`},
+	{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `"y"`},
+	{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x""y"`},
+	{"block definition", `{{block "foo" .}}hello{{end}}`, noError,
+		`{{template "foo" .}}`},
+
+	{"newline in assignment", "{{ $x \n := \n 1 \n }}", noError, "{{$x := 1}}"},
+	{"newline in empty action", "{{\n}}", hasError, "{{\n}}"},
+	{"newline in pipeline", "{{\n\"x\"\n|\nprintf\n}}", noError, `{{"x" | printf}}`},
+	{"newline in comment", "{{/*\nhello\n*/}}", noError, ""},
+	{"newline in comment", "{{-\n/*\nhello\n*/\n-}}", noError, ""},
+
+	// Errors.
+	{"unclosed action", "hello{{range", hasError, ""},
+	{"unmatched end", "{{end}}", hasError, ""},
+	{"unmatched else", "{{else}}", hasError, ""},
+	{"unmatched else after if", "{{if .X}}hello{{end}}{{else}}", hasError, ""},
+	{"multiple else", "{{if .X}}1{{else}}2{{else}}3{{end}}", hasError, ""},
+	{"missing end", "hello{{range .x}}", hasError, ""},
+	{"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
+	{"undefined function", "hello{{undefined}}", hasError, ""},
+	{"undefined variable", "{{$x}}", hasError, ""},
+	{"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
+	{"variable undefined in template", "{{template $v}}", hasError, ""},
+	{"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
+	{"template with field ref", "{{template .X}}", hasError, ""},
+	{"template with var", "{{template $v}}", hasError, ""},
+	{"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
+	{"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
+	{"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
+	{"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""},
+	{"adjacent args", "{{printf 3`x`}}", hasError, ""},
+	{"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+	{"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
+	{"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
+	{"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
+	{"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
+	{"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
+	// Other kinds of assignments and operators aren't available yet.
+	{"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+	{"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
+	{"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""},
+	{"bug0d", "{{$x % 3}}{{$x}}", hasError, ""},
+	// Check the parse fails for := rather than comma.
+	{"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""},
+	// Another bug: variable read must ignore following punctuation.
+	{"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""},                     // ! is just illegal here.
+	{"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""},                     // $x+2 should not parse as ($x) (+2).
+	{"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space.
+	// dot following a literal value
+	{"dot after integer", "{{1.E}}", hasError, ""},
+	{"dot after float", "{{0.1.E}}", hasError, ""},
+	{"dot after boolean", "{{true.E}}", hasError, ""},
+	{"dot after char", "{{'a'.any}}", hasError, ""},
+	{"dot after string", `{{"hello".guys}}`, hasError, ""},
+	{"dot after dot", "{{..E}}", hasError, ""},
+	{"dot after nil", "{{nil.E}}", hasError, ""},
+	// Wrong pipeline
+	{"wrong pipeline dot", "{{12|.}}", hasError, ""},
+	{"wrong pipeline number", "{{.|12|printf}}", hasError, ""},
+	{"wrong pipeline string", "{{.|printf|\"error\"}}", hasError, ""},
+	{"wrong pipeline char", "{{12|printf|'e'}}", hasError, ""},
+	{"wrong pipeline boolean", "{{.|true}}", hasError, ""},
+	{"wrong pipeline nil", "{{'c'|nil}}", hasError, ""},
+	{"empty pipeline", `{{printf "%d" ( ) }}`, hasError, ""},
+	// Missing pipeline in block
+	{"block definition", `{{block "foo"}}hello{{end}}`, hasError, ""},
+}
+
+var builtins = map[string]interface{}{
+	"printf":   fmt.Sprintf,
+	"contains": strings.Contains,
+}
+
+func testParse(doCopy bool, t *testing.T) {
+	textFormat = "%q"
+	defer func() { textFormat = "%s" }()
+	for _, test := range parseTests {
+		tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins)
+		switch {
+		case err == nil && !test.ok:
+			t.Errorf("%q: expected error; got none", test.name)
+			continue
+		case err != nil && test.ok:
+			t.Errorf("%q: unexpected error: %v", test.name, err)
+			continue
+		case err != nil && !test.ok:
+			// expected error, got one
+			if *debug {
+				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+			}
+			continue
+		}
+		var result string
+		if doCopy {
+			result = tmpl.Root.Copy().String()
+		} else {
+			result = tmpl.Root.String()
+		}
+		if result != test.result {
+			t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+		}
+	}
+}
+
+func TestParse(t *testing.T) {
+	testParse(false, t)
+}
+
+// Same as TestParse, but we copy the node first.
+func TestParseCopy(t *testing.T) {
+	testParse(true, t)
+}
+
+func TestParseWithComments(t *testing.T) {
+	textFormat = "%q"
+	defer func() { textFormat = "%s" }()
+	tests := [...]parseTest{
+		{"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"},
+		{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`},
+		{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`},
+		{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			tr := New(test.name)
+			tr.Mode = ParseComments
+			tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree))
+			if err != nil {
+				t.Errorf("%q: unexpected error: %v", test.name, err)
+			}
+			if result := tmpl.Root.String(); result != test.result {
+				t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+			}
+		})
+	}
+}
+
+func TestSkipFuncCheck(t *testing.T) {
+	oldTextFormat := textFormat
+	textFormat = "%q"
+	defer func() { textFormat = oldTextFormat }()
+	tr := New("skip func check")
+	tr.Mode = SkipFuncCheck
+	tmpl, err := tr.Parse("{{fn 1 2}}", "", "", make(map[string]*Tree))
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	expected := "{{fn 1 2}}"
+	if result := tmpl.Root.String(); result != expected {
+		t.Errorf("got\n\t%v\nexpected\n\t%v", result, expected)
+	}
+}
+
+type isEmptyTest struct {
+	name  string
+	input string
+	empty bool
+}
+
+var isEmptyTests = []isEmptyTest{
+	{"empty", ``, true},
+	{"nonempty", `hello`, false},
+	{"spaces only", " \t\n \t\n", true},
+	{"comment only", "{{/* comment */}}", true},
+	{"definition", `{{define "x"}}something{{end}}`, true},
+	{"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
+	{"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
+	{"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false},
+}
+
+func TestIsEmpty(t *testing.T) {
+	if !IsEmptyTree(nil) {
+		t.Errorf("nil tree is not empty")
+	}
+	for _, test := range isEmptyTests {
+		tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil)
+		if err != nil {
+			t.Errorf("%q: unexpected error: %v", test.name, err)
+			continue
+		}
+		if empty := IsEmptyTree(tree.Root); empty != test.empty {
+			t.Errorf("%q: expected %t got %t", test.name, test.empty, empty)
+		}
+	}
+}
+
+func TestErrorContextWithTreeCopy(t *testing.T) {
+	tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil)
+	if err != nil {
+		t.Fatalf("unexpected tree parse failure: %v", err)
+	}
+	treeCopy := tree.Copy()
+	wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0])
+	gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0])
+	if wantLocation != gotLocation {
+		t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation)
+	}
+	if wantContext != gotContext {
+		t.Errorf("wrong error location want %q got %q", wantContext, gotContext)
+	}
+}
+
+// All failures, and the result is a string that must appear in the error message.
+var errorTests = []parseTest{
+	// Check line numbers are accurate.
+	{"unclosed1",
+		"line1\n{{",
+		hasError, `unclosed1:2: unclosed action`},
+	{"unclosed2",
+		"line1\n{{define `x`}}line2\n{{",
+		hasError, `unclosed2:3: unclosed action`},
+	{"unclosed3",
+		"line1\n{{\"x\"\n\"y\"\n",
+		hasError, `unclosed3:4: unclosed action started at unclosed3:2`},
+	{"unclosed4",
+		"{{\n\n\n\n\n",
+		hasError, `unclosed4:6: unclosed action started at unclosed4:1`},
+	{"var1",
+		"line1\n{{\nx\n}}",
+		hasError, `var1:3: function "x" not defined`},
+	// Specific errors.
+	{"function",
+		"{{foo}}",
+		hasError, `function "foo" not defined`},
+	{"comment1",
+		"{{/*}}",
+		hasError, `comment1:1: unclosed comment`},
+	{"comment2",
+		"{{/*\nhello\n}}",
+		hasError, `comment2:1: unclosed comment`},
+	{"lparen",
+		"{{.X (1 2 3}}",
+		hasError, `unclosed left paren`},
+	{"rparen",
+		"{{.X 1 2 3 ) }}",
+		hasError, `unexpected ")" in command`},
+	{"rparen2",
+		"{{(.X 1 2 3",
+		hasError, `unclosed action`},
+	{"space",
+		"{{`x`3}}",
+		hasError, `in operand`},
+	{"idchar",
+		"{{a#}}",
+		hasError, `'#'`},
+	{"charconst",
+		"{{'a}}",
+		hasError, `unterminated character constant`},
+	{"stringconst",
+		`{{"a}}`,
+		hasError, `unterminated quoted string`},
+	{"rawstringconst",
+		"{{`a}}",
+		hasError, `unterminated raw quoted string`},
+	{"number",
+		"{{0xi}}",
+		hasError, `number syntax`},
+	{"multidefine",
+		"{{define `a`}}a{{end}}{{define `a`}}b{{end}}",
+		hasError, `multiple definition of template`},
+	{"eof",
+		"{{range .X}}",
+		hasError, `unexpected EOF`},
+	{"variable",
+		// Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration.
+		"{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}",
+		hasError, `unexpected ":="`},
+	{"multidecl",
+		"{{$a,$b,$c := 23}}",
+		hasError, `too many declarations`},
+	{"undefvar",
+		"{{$a}}",
+		hasError, `undefined variable`},
+	{"wrongdot",
+		"{{true.any}}",
+		hasError, `unexpected . after term`},
+	{"wrongpipeline",
+		"{{12|false}}",
+		hasError, `non executable command in pipeline`},
+	{"emptypipeline",
+		`{{ ( ) }}`,
+		hasError, `missing value for parenthesized pipeline`},
+	{"multilinerawstring",
+		"{{ $v := `\n` }} {{",
+		hasError, `multilinerawstring:2: unclosed action`},
+	{"rangeundefvar",
+		"{{range $k}}{{end}}",
+		hasError, `undefined variable`},
+	{"rangeundefvars",
+		"{{range $k, $v}}{{end}}",
+		hasError, `undefined variable`},
+	{"rangemissingvalue1",
+		"{{range $k,}}{{end}}",
+		hasError, `missing value for range`},
+	{"rangemissingvalue2",
+		"{{range $k, $v := }}{{end}}",
+		hasError, `missing value for range`},
+	{"rangenotvariable1",
+		"{{range $k, .}}{{end}}",
+		hasError, `range can only initialize variables`},
+	{"rangenotvariable2",
+		"{{range $k, 123 := .}}{{end}}",
+		hasError, `range can only initialize variables`},
+}
+
+func TestErrors(t *testing.T) {
+	for _, test := range errorTests {
+		t.Run(test.name, func(t *testing.T) {
+			_, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree))
+			if err == nil {
+				t.Fatalf("expected error %q, got nil", test.result)
+			}
+			if !strings.Contains(err.Error(), test.result) {
+				t.Fatalf("error %q does not contain %q", err, test.result)
+			}
+		})
+	}
+}
+
+func TestBlock(t *testing.T) {
+	const (
+		input = `a{{block "inner" .}}bar{{.}}baz{{end}}b`
+		outer = `a{{template "inner" .}}b`
+		inner = `bar{{.}}baz`
+	)
+	treeSet := make(map[string]*Tree)
+	tmpl, err := New("outer").Parse(input, "", "", treeSet, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if g, w := tmpl.Root.String(), outer; g != w {
+		t.Errorf("outer template = %q, want %q", g, w)
+	}
+	inTmpl := treeSet["inner"]
+	if inTmpl == nil {
+		t.Fatal("block did not define template")
+	}
+	if g, w := inTmpl.Root.String(), inner; g != w {
+		t.Errorf("inner template = %q, want %q", g, w)
+	}
+}
+
+func TestLineNum(t *testing.T) {
+	const count = 100
+	text := strings.Repeat("{{printf 1234}}\n", count)
+	tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Check the line numbers. Each line is an action followed by a text node
+	// (the trailing newline), so there are two nodes per line.
+	nodes := tree.Root.Nodes
+	for i := 0; i < len(nodes); i += 2 {
+		line := 1 + i/2
+		// Action first.
+		action := nodes[i].(*ActionNode)
+		if action.Line != line {
+			t.Fatalf("line %d: action is line %d", line, action.Line)
+		}
+		pipe := action.Pipe
+		if pipe.Line != line {
+			t.Fatalf("line %d: pipe is line %d", line, pipe.Line)
+		}
+	}
+}
+
+func BenchmarkParseLarge(b *testing.B) {
+	text := strings.Repeat("{{1234}}\n", 10000)
+	for i := 0; i < b.N; i++ {
+		_, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+var sinkv, sinkl string
+
+func BenchmarkVariableString(b *testing.B) {
+	v := &VariableNode{
+		Ident: []string{"$", "A", "BB", "CCC", "THIS_IS_THE_VARIABLE_BEING_PROCESSED"},
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		sinkv = v.String()
+	}
+	if sinkv == "" {
+		b.Fatal("Benchmark was not run")
+	}
+}
+
+func BenchmarkListString(b *testing.B) {
+	text := `
+{{(printf .Field1.Field2.Field3).Value}}
+{{$x := (printf .Field1.Field2.Field3).Value}}
+{{$y := (printf $x.Field1.Field2.Field3).Value}}
+{{$z := $y.Field1.Field2.Field3}}
+{{if contains $y $z}}
+	{{printf "%q" $y}}
+{{else}}
+	{{printf "%q" $x}}
+{{end}}
+{{with $z.Field1 | contains "boring"}}
+	{{printf "%q" . | printf "%s"}}
+{{else}}
+	{{printf "%d %d %d" 11 11 11}}
+	{{printf "%d %d %s" 22 22 $x.Field1.Field2.Field3 | printf "%s"}}
+	{{printf "%v" (contains $z.Field1.Field2 $y)}}
+{{end}}
+`
+	tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		sinkl = tree.Root.String()
+	}
+	if sinkl == "" {
+		b.Fatal("Benchmark was not run")
+	}
+}
diff --git a/internal/backport/text/template/template.go b/internal/backport/text/template/template.go
new file mode 100644
index 0000000..978b986
--- /dev/null
+++ b/internal/backport/text/template/template.go
@@ -0,0 +1,239 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"reflect"
+	"sync"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// common holds the information shared by related templates.
+type common struct {
+	tmpl   map[string]*Template // Map from name to defined templates.
+	muTmpl sync.RWMutex         // protects tmpl
+	option option
+	// We use two maps, one for parsing and one for execution.
+	// This separation makes the API cleaner since it doesn't
+	// expose reflection to the client.
+	muFuncs    sync.RWMutex // protects parseFuncs and execFuncs
+	parseFuncs FuncMap
+	execFuncs  map[string]reflect.Value
+}
+
+// Template is the representation of a parsed template. The *parse.Tree
+// field is exported only for use by html/template and should be treated
+// as unexported by all other clients.
+type Template struct {
+	name string
+	*parse.Tree
+	*common
+	leftDelim  string
+	rightDelim string
+}
+
+// New allocates a new, undefined template with the given name.
+func New(name string) *Template {
+	t := &Template{
+		name: name,
+	}
+	t.init()
+	return t
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+	return t.name
+}
+
+// New allocates a new, undefined template associated with the given one and with the same
+// delimiters. The association, which is transitive, allows one template to
+// invoke another with a {{template}} action.
+//
+// Because associated templates share underlying data, template construction
+// cannot be done safely in parallel. Once the templates are constructed, they
+// can be executed in parallel.
+func (t *Template) New(name string) *Template {
+	t.init()
+	nt := &Template{
+		name:       name,
+		common:     t.common,
+		leftDelim:  t.leftDelim,
+		rightDelim: t.rightDelim,
+	}
+	return nt
+}
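+
+// exampleAssociatedSketch is an illustrative sketch only (not part of the
+// upstream file): it shows the association described above, where templates
+// created with t.New share a name space and can invoke one another with a
+// {{template}} action.
+func exampleAssociatedSketch() (*Template, error) {
+	outer := New("outer")
+	// "inner" is associated with "outer", so the reference below resolves.
+	if _, err := outer.New("inner").Parse(`inner text`); err != nil {
+		return nil, err
+	}
+	return outer.Parse(`before {{template "inner"}} after`)
+}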
+
+// init guarantees that t has a valid common structure.
+func (t *Template) init() {
+	if t.common == nil {
+		c := new(common)
+		c.tmpl = make(map[string]*Template)
+		c.parseFuncs = make(FuncMap)
+		c.execFuncs = make(map[string]reflect.Value)
+		t.common = c
+	}
+}
+
+// Clone returns a duplicate of the template, including all associated
+// templates. The actual representation is not copied, but the name space of
+// associated templates is, so further calls to Parse in the copy will add
+// templates to the copy but not to the original. Clone can be used to prepare
+// common templates and use them with variant definitions for other templates
+// by adding the variants after the clone is made.
+func (t *Template) Clone() (*Template, error) {
+	nt := t.copy(nil)
+	nt.init()
+	if t.common == nil {
+		return nt, nil
+	}
+	t.muTmpl.RLock()
+	defer t.muTmpl.RUnlock()
+	for k, v := range t.tmpl {
+		if k == t.name {
+			nt.tmpl[t.name] = nt
+			continue
+		}
+		// The associated templates share nt's common structure.
+		tmpl := v.copy(nt.common)
+		nt.tmpl[k] = tmpl
+	}
+	t.muFuncs.RLock()
+	defer t.muFuncs.RUnlock()
+	for k, v := range t.parseFuncs {
+		nt.parseFuncs[k] = v
+	}
+	for k, v := range t.execFuncs {
+		nt.execFuncs[k] = v
+	}
+	return nt, nil
+}
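+
+// exampleCloneSketch is an illustrative sketch only (not part of the upstream
+// file): it follows the pattern described above, preparing shared templates
+// once and then adding variant definitions to a clone without affecting the
+// original name space.
+func exampleCloneSketch() (*Template, error) {
+	shared := New("layout")
+	if _, err := shared.Parse(`{{define "header"}}common header{{end}}`); err != nil {
+		return nil, err
+	}
+	variant, err := shared.Clone()
+	if err != nil {
+		return nil, err
+	}
+	// Definitions added here are visible only in the clone.
+	return variant.Parse(`{{define "body"}}variant body{{end}}`)
+}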
+
+// copy returns a shallow copy of t, with common set to the argument.
+func (t *Template) copy(c *common) *Template {
+	return &Template{
+		name:       t.name,
+		Tree:       t.Tree,
+		common:     c,
+		leftDelim:  t.leftDelim,
+		rightDelim: t.rightDelim,
+	}
+}
+
+// AddParseTree associates the argument parse tree with the template t, giving
+// it the specified name. If the template has not been defined, this tree becomes
+// its definition. If it has been defined and already has that name, the existing
+// definition is replaced; otherwise a new template is created, defined, and returned.
+func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
+	t.muTmpl.Lock()
+	defer t.muTmpl.Unlock()
+	t.init()
+	nt := t
+	if name != t.name {
+		nt = t.New(name)
+	}
+	// Even if nt == t, we need to install it in the common.tmpl map.
+	if t.associate(nt, tree) || nt.Tree == nil {
+		nt.Tree = tree
+	}
+	return nt, nil
+}
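+
+// exampleAddParseTreeSketch is an illustrative sketch only (not part of the
+// upstream file): it parses text with the parse package directly and installs
+// the resulting tree under a chosen name using AddParseTree.
+func exampleAddParseTreeSketch() (*Template, error) {
+	t := New("root")
+	trees, err := parse.Parse("page", `page body`, "", "", make(FuncMap), builtins())
+	if err != nil {
+		return nil, err
+	}
+	return t.AddParseTree("page", trees["page"])
+}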
+
+// Templates returns a slice of defined templates associated with t.
+func (t *Template) Templates() []*Template {
+	if t.common == nil {
+		return nil
+	}
+	// Return a slice so we don't expose the map.
+	t.muTmpl.RLock()
+	defer t.muTmpl.RUnlock()
+	m := make([]*Template, 0, len(t.tmpl))
+	for _, v := range t.tmpl {
+		m = append(m, v)
+	}
+	return m
+}
+
+// Delims sets the action delimiters to the specified strings, to be used in
+// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
+// definitions will inherit the settings. An empty delimiter stands for the
+// corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+	t.init()
+	t.leftDelim = left
+	t.rightDelim = right
+	return t
+}
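+
+// exampleDelimsSketch is an illustrative sketch only (not part of the upstream
+// file): it switches the action delimiters before parsing, which is useful
+// when the output format already gives "{{" and "}}" a meaning.
+func exampleDelimsSketch() (*Template, error) {
+	return New("custom").Delims("[[", "]]").Parse(`[[if .Ready]]ready[[end]] {{literal text}}`)
+}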
+
+// Funcs adds the elements of the argument map to the template's function map.
+// It must be called before the template is parsed.
+// It panics if a value in the map is not a function with appropriate return
+// type or if the name cannot be used syntactically as a function in a template.
+// It is legal to overwrite elements of the map. The return value is the template,
+// so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+	t.init()
+	t.muFuncs.Lock()
+	defer t.muFuncs.Unlock()
+	addValueFuncs(t.execFuncs, funcMap)
+	addFuncs(t.parseFuncs, funcMap)
+	return t
+}
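+
+// exampleFuncsSketch is an illustrative sketch only (not part of the upstream
+// file): the function must be registered with Funcs before Parse so that the
+// parser can resolve its name.
+func exampleFuncsSketch() (*Template, error) {
+	shout := func(s string) string { return s + "!" }
+	return New("greeting").Funcs(FuncMap{"shout": shout}).Parse(`{{shout "hello"}}`)
+}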
+
+// Lookup returns the template with the given name that is associated with t.
+// It returns nil if there is no such template or the template has no definition.
+func (t *Template) Lookup(name string) *Template {
+	if t.common == nil {
+		return nil
+	}
+	t.muTmpl.RLock()
+	defer t.muTmpl.RUnlock()
+	return t.tmpl[name]
+}
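+
+// exampleLookupSketch is an illustrative sketch only (not part of the upstream
+// file): Lookup retrieves an associated template by name and returns nil when
+// no such definition exists.
+func exampleLookupSketch() *Template {
+	t := New("root")
+	if _, err := t.Parse(`{{define "greeting"}}hi{{end}}`); err != nil {
+		return nil
+	}
+	return t.Lookup("greeting") // t.Lookup("missing") would return nil
+}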
+
+// Parse parses text as a template body for t.
+// Named template definitions ({{define ...}} or {{block ...}} statements) in text
+// define additional templates associated with t and are removed from the
+// definition of t itself.
+//
+// Templates can be redefined in successive calls to Parse.
+// A template definition with a body containing only white space and comments
+// is considered empty and will not replace an existing template's body.
+// This allows using Parse to add new named template definitions without
+// overwriting the main template body.
+func (t *Template) Parse(text string) (*Template, error) {
+	t.init()
+	t.muFuncs.RLock()
+	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins())
+	t.muFuncs.RUnlock()
+	if err != nil {
+		return nil, err
+	}
+	// Add the newly parsed trees, including the one for t, into our common structure.
+	for name, tree := range trees {
+		if _, err := t.AddParseTree(name, tree); err != nil {
+			return nil, err
+		}
+	}
+	return t, nil
+}
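+
+// exampleParseSketch is an illustrative sketch only (not part of the upstream
+// file) of the behavior documented above: {{define}} bodies become associated
+// templates, and a later call whose surrounding body is empty adds definitions
+// without replacing the existing body of t.
+func exampleParseSketch() (*Template, error) {
+	t := New("main")
+	if _, err := t.Parse(`main body {{define "aside"}}aside body{{end}}`); err != nil {
+		return nil, err
+	}
+	// Adds "footer"; the empty remaining text leaves "main" unchanged.
+	return t.Parse(`{{define "footer"}}footer{{end}}`)
+}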
+
+// associate installs the new template into the group of templates associated
+// with t. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) bool {
+	if new.common != t.common {
+		panic("internal error: associate not common")
+	}
+	if old := t.tmpl[new.name]; old != nil && parse.IsEmptyTree(tree.Root) && old.Tree != nil {
+		// If a template by that name exists,
+		// don't replace it with an empty template.
+		return false
+	}
+	t.tmpl[new.name] = new
+	return true
+}
diff --git a/internal/backport/text/template/testdata/file1.tmpl b/internal/backport/text/template/testdata/file1.tmpl
new file mode 100644
index 0000000..febf9d9
--- /dev/null
+++ b/internal/backport/text/template/testdata/file1.tmpl
@@ -0,0 +1,2 @@
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
diff --git a/internal/backport/text/template/testdata/file2.tmpl b/internal/backport/text/template/testdata/file2.tmpl
new file mode 100644
index 0000000..39bf6fb
--- /dev/null
+++ b/internal/backport/text/template/testdata/file2.tmpl
@@ -0,0 +1,2 @@
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/internal/backport/text/template/testdata/tmpl1.tmpl b/internal/backport/text/template/testdata/tmpl1.tmpl
new file mode 100644
index 0000000..b72b3a3
--- /dev/null
+++ b/internal/backport/text/template/testdata/tmpl1.tmpl
@@ -0,0 +1,3 @@
+template1
+{{define "x"}}x{{end}}
+{{template "y"}}
diff --git a/internal/backport/text/template/testdata/tmpl2.tmpl b/internal/backport/text/template/testdata/tmpl2.tmpl
new file mode 100644
index 0000000..16beba6
--- /dev/null
+++ b/internal/backport/text/template/testdata/tmpl2.tmpl
@@ -0,0 +1,3 @@
+template2
+{{define "y"}}y{{end}}
+{{template "x"}}