internal/backport: add backports of current Go dev template, fs packages

This will let us use these APIs on App Engine before Go 1.16 is available there.
The only thing we can't backport from Go 1.16 is embed, so that still has to
be disabled with a build tag, but we don't really need embed on App Engine.

It will also let us move golang.org off App Engine Flex onto regular App Engine,
which will greatly simplify deployment and make automatic deployments possible.

When App Engine adds Go 1.16 support, most of this can go away.
(The template packages will need to stay, as go.dev will need some
Go 1.17 bug fixes from them.)
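
To illustrate the kind of build-tag split mentioned above, here is a rough
sketch (the file, package, and directory names are hypothetical, not the ones
used in this CL): the embed-dependent code lives in a file that only builds on
Go 1.16+, with a disk-reading fallback for older toolchains.

	// static_embed.go (hypothetical)
	// +build go1.16

	package web

	import "embed"

	//go:embed static
	var staticFS embed.FS

	func readStatic(name string) ([]byte, error) {
		return staticFS.ReadFile("static/" + name)
	}

	// static_noembed.go (hypothetical)
	// +build !go1.16

	package web

	import "io/ioutil"

	// Pre-1.16 fallback: read from disk instead of the embedded FS.
	func readStatic(name string) ([]byte, error) {
		return ioutil.ReadFile("static/" + name)
	}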

Change-Id: I16ee64862a43f591a31fae04caf4caa0fdb5af62
Reviewed-on: https://go-review.googlesource.com/c/website/+/323890
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
diff --git a/internal/backport/README.md b/internal/backport/README.md
new file mode 100644
index 0000000..92c80ad
--- /dev/null
+++ b/internal/backport/README.md
@@ -0,0 +1,10 @@
+This directory contains backports of upcoming packages from the
+standard library, taken from Go versions newer than the one supplied
+by the latest Google App Engine runtime.
+
+The procedure for adding a backport is fairly manual: copy the files
+into a new directory, then do a global search-and-replace to point
+their import paths at the backport.
+
+As new versions of Go land on Google App Engine, this directory should
+be pruned as much as possible.
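
A rough sketch of that procedure, for the archive/zip copy added below
(illustrative only, not part of this patch; assumes a recent Go source
tree and GNU sed):

	mkdir -p internal/backport/archive
	cp -r $(go env GOROOT)/src/archive/zip internal/backport/archive/zip
	grep -rl '"io/fs"' internal/backport/archive/zip |
		xargs sed -i 's|"io/fs"|"golang.org/x/website/internal/backport/io/fs"|g'
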
diff --git a/internal/backport/archive/zip/example_test.go b/internal/backport/archive/zip/example_test.go
new file mode 100644
index 0000000..1eed304
--- /dev/null
+++ b/internal/backport/archive/zip/example_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip_test
+
+import (
+	"archive/zip"
+	"bytes"
+	"compress/flate"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+func ExampleWriter() {
+	// Create a buffer to write our archive to.
+	buf := new(bytes.Buffer)
+
+	// Create a new zip archive.
+	w := zip.NewWriter(buf)
+
+	// Add some files to the archive.
+	var files = []struct {
+		Name, Body string
+	}{
+		{"readme.txt", "This archive contains some text files."},
+		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+		{"todo.txt", "Get animal handling licence.\nWrite more examples."},
+	}
+	for _, file := range files {
+		f, err := w.Create(file.Name)
+		if err != nil {
+			log.Fatal(err)
+		}
+		_, err = f.Write([]byte(file.Body))
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	// Make sure to check the error on Close.
+	err := w.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func ExampleReader() {
+	// Open a zip archive for reading.
+	r, err := zip.OpenReader("testdata/readme.zip")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer r.Close()
+
+	// Iterate through the files in the archive,
+	// printing some of their contents.
+	for _, f := range r.File {
+		fmt.Printf("Contents of %s:\n", f.Name)
+		rc, err := f.Open()
+		if err != nil {
+			log.Fatal(err)
+		}
+		_, err = io.CopyN(os.Stdout, rc, 68)
+		if err != nil {
+			log.Fatal(err)
+		}
+		rc.Close()
+		fmt.Println()
+	}
+	// Output:
+	// Contents of README:
+	// This is the source code repository for the Go programming language.
+}
+
+func ExampleWriter_RegisterCompressor() {
+	// Override the default Deflate compressor with a higher compression level.
+
+	// Create a buffer to write our archive to.
+	buf := new(bytes.Buffer)
+
+	// Create a new zip archive.
+	w := zip.NewWriter(buf)
+
+	// Register a custom Deflate compressor.
+	w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
+		return flate.NewWriter(out, flate.BestCompression)
+	})
+
+	// Proceed to add files to w.
+}
diff --git a/internal/backport/archive/zip/reader.go b/internal/backport/archive/zip/reader.go
new file mode 100644
index 0000000..e99ed1c
--- /dev/null
+++ b/internal/backport/archive/zip/reader.go
@@ -0,0 +1,872 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"hash"
+	"hash/crc32"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+var (
+	ErrFormat    = errors.New("zip: not a valid zip file")
+	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
+	ErrChecksum  = errors.New("zip: checksum error")
+)
+
+// A Reader serves content from a ZIP archive.
+type Reader struct {
+	r             io.ReaderAt
+	File          []*File
+	Comment       string
+	decompressors map[uint16]Decompressor
+
+	// fileList is a list of files sorted by ename,
+	// for use by the Open method.
+	fileListOnce sync.Once
+	fileList     []fileListEntry
+}
+
+// A ReadCloser is a Reader that must be closed when no longer needed.
+type ReadCloser struct {
+	f *os.File
+	Reader
+}
+
+// A File is a single file in a ZIP archive.
+// The file information is in the embedded FileHeader.
+// The file content can be accessed by calling Open.
+type File struct {
+	FileHeader
+	zip          *Reader
+	zipr         io.ReaderAt
+	headerOffset int64
+	zip64        bool  // zip64 extended information extra field presence
+	descErr      error // error reading the data descriptor during init
+}
+
+// OpenReader will open the Zip file specified by name and return a ReadCloser.
+func OpenReader(name string) (*ReadCloser, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	fi, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	r := new(ReadCloser)
+	if err := r.init(f, fi.Size()); err != nil {
+		f.Close()
+		return nil, err
+	}
+	r.f = f
+	return r, nil
+}
+
+// NewReader returns a new Reader reading from r, which is assumed to
+// have the given size in bytes.
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
+	if size < 0 {
+		return nil, errors.New("zip: size cannot be negative")
+	}
+	zr := new(Reader)
+	if err := zr.init(r, size); err != nil {
+		return nil, err
+	}
+	return zr, nil
+}
+
+func (z *Reader) init(r io.ReaderAt, size int64) error {
+	end, err := readDirectoryEnd(r, size)
+	if err != nil {
+		return err
+	}
+	z.r = r
+	z.File = make([]*File, 0, end.directoryRecords)
+	z.Comment = end.comment
+	rs := io.NewSectionReader(r, 0, size)
+	if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
+		return err
+	}
+	buf := bufio.NewReader(rs)
+
+	// The count of files inside a zip is truncated to fit in a uint16.
+	// Gloss over this by reading headers until we encounter
+	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
+	// the file count modulo 65536 is incorrect.
+	for {
+		f := &File{zip: z, zipr: r}
+		err = readDirectoryHeader(f, buf)
+		if err == ErrFormat || err == io.ErrUnexpectedEOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		f.readDataDescriptor()
+		z.File = append(z.File, f)
+	}
+	if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
+		// Return the readDirectoryHeader error if we read
+		// the wrong number of directory entries.
+		return err
+	}
+	return nil
+}
+
+// RegisterDecompressor registers or overrides a custom decompressor for a
+// specific method ID. If a decompressor for a given method is not found,
+// Reader will default to looking up the decompressor at the package level.
+func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
+	if z.decompressors == nil {
+		z.decompressors = make(map[uint16]Decompressor)
+	}
+	z.decompressors[method] = dcomp
+}
+
+func (z *Reader) decompressor(method uint16) Decompressor {
+	dcomp := z.decompressors[method]
+	if dcomp == nil {
+		dcomp = decompressor(method)
+	}
+	return dcomp
+}
+
+// Close closes the Zip file, rendering it unusable for I/O.
+func (rc *ReadCloser) Close() error {
+	return rc.f.Close()
+}
+
+// DataOffset returns the offset of the file's possibly-compressed
+// data, relative to the beginning of the zip file.
+//
+// Most callers should instead use Open, which transparently
+// decompresses data and verifies checksums.
+func (f *File) DataOffset() (offset int64, err error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return
+	}
+	return f.headerOffset + bodyOffset, nil
+}
+
+// Open returns a ReadCloser that provides access to the File's contents.
+// Multiple files may be read concurrently.
+func (f *File) Open() (io.ReadCloser, error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return nil, err
+	}
+	size := int64(f.CompressedSize64)
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
+	dcomp := f.zip.decompressor(f.Method)
+	if dcomp == nil {
+		return nil, ErrAlgorithm
+	}
+	var rc io.ReadCloser = dcomp(r)
+	rc = &checksumReader{
+		rc:   rc,
+		hash: crc32.NewIEEE(),
+		f:    f,
+	}
+	return rc, nil
+}
+
+// OpenRaw returns a Reader that provides access to the File's contents without
+// decompression.
+func (f *File) OpenRaw() (io.Reader, error) {
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		return nil, err
+	}
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64))
+	return r, nil
+}
+
+func (f *File) readDataDescriptor() {
+	if !f.hasDataDescriptor() {
+		return
+	}
+
+	bodyOffset, err := f.findBodyOffset()
+	if err != nil {
+		f.descErr = err
+		return
+	}
+
+	// In section 4.3.9.2 of the spec: "However ZIP64 format MAY be used
+	// regardless of the size of a file.  When extracting, if the zip64
+	// extended information extra field is present for the file the
+	// compressed and uncompressed sizes will be 8 byte values."
+	//
+	// Historically, this package has used the compressed and uncompressed
+	// sizes from the central directory to determine if the package is
+	// zip64.
+	//
+	// For this case we allow either the extra field or sizes to determine
+	// the data descriptor length.
+	zip64 := f.zip64 || f.isZip64()
+	n := int64(dataDescriptorLen)
+	if zip64 {
+		n = dataDescriptor64Len
+	}
+	size := int64(f.CompressedSize64)
+	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, n)
+	dd, err := readDataDescriptor(r, zip64)
+	if err != nil {
+		f.descErr = err
+		return
+	}
+	f.CRC32 = dd.crc32
+}
+
+type checksumReader struct {
+	rc    io.ReadCloser
+	hash  hash.Hash32
+	nread uint64 // number of bytes read so far
+	f     *File
+	err   error // sticky error
+}
+
+func (r *checksumReader) Stat() (fs.FileInfo, error) {
+	return headerFileInfo{&r.f.FileHeader}, nil
+}
+
+func (r *checksumReader) Read(b []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	n, err = r.rc.Read(b)
+	r.hash.Write(b[:n])
+	r.nread += uint64(n)
+	if err == nil {
+		return
+	}
+	if err == io.EOF {
+		if r.nread != r.f.UncompressedSize64 {
+			return 0, io.ErrUnexpectedEOF
+		}
+		if r.f.hasDataDescriptor() {
+			if r.f.descErr != nil {
+				if r.f.descErr == io.EOF {
+					err = io.ErrUnexpectedEOF
+				} else {
+					err = r.f.descErr
+				}
+			} else if r.hash.Sum32() != r.f.CRC32 {
+				err = ErrChecksum
+			}
+		} else {
+			// If there's not a data descriptor, we still compare
+			// the CRC32 of what we've read against the file header
+			// or TOC's CRC32, if it seems like it was set.
+			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
+				err = ErrChecksum
+			}
+		}
+	}
+	r.err = err
+	return
+}
+
+func (r *checksumReader) Close() error { return r.rc.Close() }
+
+// findBodyOffset does the minimum work to verify the file has a header
+// and returns the file body offset.
+func (f *File) findBodyOffset() (int64, error) {
+	var buf [fileHeaderLen]byte
+	if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
+		return 0, err
+	}
+	b := readBuf(buf[:])
+	if sig := b.uint32(); sig != fileHeaderSignature {
+		return 0, ErrFormat
+	}
+	b = b[22:] // skip over most of the header
+	filenameLen := int(b.uint16())
+	extraLen := int(b.uint16())
+	return int64(fileHeaderLen + filenameLen + extraLen), nil
+}
+
+// readDirectoryHeader attempts to read a directory header from r.
+// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
+// and ErrFormat if it doesn't find a valid header signature.
+func readDirectoryHeader(f *File, r io.Reader) error {
+	var buf [directoryHeaderLen]byte
+	if _, err := io.ReadFull(r, buf[:]); err != nil {
+		return err
+	}
+	b := readBuf(buf[:])
+	if sig := b.uint32(); sig != directoryHeaderSignature {
+		return ErrFormat
+	}
+	f.CreatorVersion = b.uint16()
+	f.ReaderVersion = b.uint16()
+	f.Flags = b.uint16()
+	f.Method = b.uint16()
+	f.ModifiedTime = b.uint16()
+	f.ModifiedDate = b.uint16()
+	f.CRC32 = b.uint32()
+	f.CompressedSize = b.uint32()
+	f.UncompressedSize = b.uint32()
+	f.CompressedSize64 = uint64(f.CompressedSize)
+	f.UncompressedSize64 = uint64(f.UncompressedSize)
+	filenameLen := int(b.uint16())
+	extraLen := int(b.uint16())
+	commentLen := int(b.uint16())
+	b = b[4:] // skipped start disk number and internal attributes (2x uint16)
+	f.ExternalAttrs = b.uint32()
+	f.headerOffset = int64(b.uint32())
+	d := make([]byte, filenameLen+extraLen+commentLen)
+	if _, err := io.ReadFull(r, d); err != nil {
+		return err
+	}
+	f.Name = string(d[:filenameLen])
+	f.Extra = d[filenameLen : filenameLen+extraLen]
+	f.Comment = string(d[filenameLen+extraLen:])
+
+	// Determine the character encoding.
+	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
+	switch {
+	case !utf8Valid1 || !utf8Valid2:
+		// Name and Comment definitely not UTF-8.
+		f.NonUTF8 = true
+	case !utf8Require1 && !utf8Require2:
+		// Name and Comment use only single-byte runes that overlap with UTF-8.
+		f.NonUTF8 = false
+	default:
+		// Might be UTF-8, might be some other encoding; preserve existing flag.
+		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
+		// Since it is impossible to always distinguish valid UTF-8 from some
+		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
+		f.NonUTF8 = f.Flags&0x800 == 0
+	}
+
+	needUSize := f.UncompressedSize == ^uint32(0)
+	needCSize := f.CompressedSize == ^uint32(0)
+	needHeaderOffset := f.headerOffset == int64(^uint32(0))
+
+	// Best effort to find what we need.
+	// Other zip authors might not even follow the basic format,
+	// and we'll just ignore the Extra content in that case.
+	var modified time.Time
+parseExtras:
+	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
+		fieldTag := extra.uint16()
+		fieldSize := int(extra.uint16())
+		if len(extra) < fieldSize {
+			break
+		}
+		fieldBuf := extra.sub(fieldSize)
+
+		switch fieldTag {
+		case zip64ExtraID:
+			f.zip64 = true
+
+			// update directory values from the zip64 extra block.
+			// They should only be consulted if the sizes read earlier
+			// are maxed out.
+			// See golang.org/issue/13367.
+			if needUSize {
+				needUSize = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.UncompressedSize64 = fieldBuf.uint64()
+			}
+			if needCSize {
+				needCSize = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.CompressedSize64 = fieldBuf.uint64()
+			}
+			if needHeaderOffset {
+				needHeaderOffset = false
+				if len(fieldBuf) < 8 {
+					return ErrFormat
+				}
+				f.headerOffset = int64(fieldBuf.uint64())
+			}
+		case ntfsExtraID:
+			if len(fieldBuf) < 4 {
+				continue parseExtras
+			}
+			fieldBuf.uint32()        // reserved (ignored)
+			for len(fieldBuf) >= 4 { // need at least tag and size
+				attrTag := fieldBuf.uint16()
+				attrSize := int(fieldBuf.uint16())
+				if len(fieldBuf) < attrSize {
+					continue parseExtras
+				}
+				attrBuf := fieldBuf.sub(attrSize)
+				if attrTag != 1 || attrSize != 24 {
+					continue // Ignore irrelevant attributes
+				}
+
+				const ticksPerSecond = 1e7    // Windows timestamp resolution
+				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
+				secs := int64(ts / ticksPerSecond)
+				nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
+				modified = time.Unix(epoch.Unix()+secs, nsecs)
+			}
+		case unixExtraID, infoZipUnixExtraID:
+			if len(fieldBuf) < 8 {
+				continue parseExtras
+			}
+			fieldBuf.uint32()              // AcTime (ignored)
+			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+			modified = time.Unix(ts, 0)
+		case extTimeExtraID:
+			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
+				continue parseExtras
+			}
+			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+			modified = time.Unix(ts, 0)
+		}
+	}
+
+	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
+	f.Modified = msdosModified
+	if !modified.IsZero() {
+		f.Modified = modified.UTC()
+
+		// If legacy MS-DOS timestamps are set, we can use the delta between
+		// the legacy and extended versions to estimate timezone offset.
+		//
+		// A non-UTC timezone is always used (even if offset is zero).
+		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
+		// determining whether extended timestamps are present.
+		// This is necessary for users that need to do additional time
+		// calculations when dealing with legacy ZIP formats.
+		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
+			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
+		}
+	}
+
+	// Assume that uncompressed size 2³²-1 could plausibly happen in
+	// an old zip32 file that was sharding inputs into the largest chunks
+	// possible (or is just malicious; search the web for 42.zip).
+	// If needUSize is true still, it means we didn't see a zip64 extension.
+	// As long as the compressed size is not also 2³²-1 (implausible)
+	// and the header is not also 2³²-1 (equally implausible),
+	// accept the uncompressed size 2³²-1 as valid.
+	// If nothing else, this keeps archive/zip working with 42.zip.
+	_ = needUSize
+
+	if needCSize || needHeaderOffset {
+		return ErrFormat
+	}
+
+	return nil
+}
+
+func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) {
+	// Create enough space for the largest possible size
+	var buf [dataDescriptor64Len]byte
+
+	// The spec says: "Although not originally assigned a
+	// signature, the value 0x08074b50 has commonly been adopted
+	// as a signature value for the data descriptor record.
+	// Implementers should be aware that ZIP files may be
+	// encountered with or without this signature marking data
+	// descriptors and should account for either case when reading
+	// ZIP files to ensure compatibility."
+	//
+	// First read just those 4 bytes to see if the signature exists.
+	if _, err := io.ReadFull(r, buf[:4]); err != nil {
+		return nil, err
+	}
+	off := 0
+	maybeSig := readBuf(buf[:4])
+	if maybeSig.uint32() != dataDescriptorSignature {
+		// No data descriptor signature. Keep these four
+		// bytes.
+		off += 4
+	}
+
+	end := dataDescriptorLen - 4
+	if zip64 {
+		end = dataDescriptor64Len - 4
+	}
+	if _, err := io.ReadFull(r, buf[off:end]); err != nil {
+		return nil, err
+	}
+	b := readBuf(buf[:end])
+
+	out := &dataDescriptor{
+		crc32: b.uint32(),
+	}
+
+	if zip64 {
+		out.compressedSize = b.uint64()
+		out.uncompressedSize = b.uint64()
+	} else {
+		out.compressedSize = uint64(b.uint32())
+		out.uncompressedSize = uint64(b.uint32())
+	}
+	return out, nil
+}
+
+func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
+	// look for directoryEndSignature in the last 1k, then in the last 65k
+	var buf []byte
+	var directoryEndOffset int64
+	for i, bLen := range []int64{1024, 65 * 1024} {
+		if bLen > size {
+			bLen = size
+		}
+		buf = make([]byte, int(bLen))
+		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
+			return nil, err
+		}
+		if p := findSignatureInBlock(buf); p >= 0 {
+			buf = buf[p:]
+			directoryEndOffset = size - bLen + int64(p)
+			break
+		}
+		if i == 1 || bLen == size {
+			return nil, ErrFormat
+		}
+	}
+
+	// read header into struct
+	b := readBuf(buf[4:]) // skip signature
+	d := &directoryEnd{
+		diskNbr:            uint32(b.uint16()),
+		dirDiskNbr:         uint32(b.uint16()),
+		dirRecordsThisDisk: uint64(b.uint16()),
+		directoryRecords:   uint64(b.uint16()),
+		directorySize:      uint64(b.uint32()),
+		directoryOffset:    uint64(b.uint32()),
+		commentLen:         b.uint16(),
+	}
+	l := int(d.commentLen)
+	if l > len(b) {
+		return nil, errors.New("zip: invalid comment length")
+	}
+	d.comment = string(b[:l])
+
+	// These values mean that the file can be a zip64 file
+	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
+		p, err := findDirectory64End(r, directoryEndOffset)
+		if err == nil && p >= 0 {
+			err = readDirectory64End(r, p, d)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Make sure directoryOffset points to somewhere in our file.
+	if o := int64(d.directoryOffset); o < 0 || o >= size {
+		return nil, ErrFormat
+	}
+	return d, nil
+}
+
+// findDirectory64End tries to read the zip64 locator just before the
+// directory end and returns the offset of the zip64 directory end if
+// found.
+func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
+	locOffset := directoryEndOffset - directory64LocLen
+	if locOffset < 0 {
+		return -1, nil // no need to look for a header outside the file
+	}
+	buf := make([]byte, directory64LocLen)
+	if _, err := r.ReadAt(buf, locOffset); err != nil {
+		return -1, err
+	}
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64LocSignature {
+		return -1, nil
+	}
+	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
+		return -1, nil // the file is not a valid zip64-file
+	}
+	p := b.uint64()      // relative offset of the zip64 end of central directory record
+	if b.uint32() != 1 { // total number of disks
+		return -1, nil // the file is not a valid zip64-file
+	}
+	return int64(p), nil
+}
+
+// readDirectory64End reads the zip64 directory end and updates the
+// directory end with the zip64 directory end values.
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
+	buf := make([]byte, directory64EndLen)
+	if _, err := r.ReadAt(buf, offset); err != nil {
+		return err
+	}
+
+	b := readBuf(buf)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		return ErrFormat
+	}
+
+	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
+	d.diskNbr = b.uint32()            // number of this disk
+	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
+	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
+	d.directoryRecords = b.uint64()   // total number of entries in the central directory
+	d.directorySize = b.uint64()      // size of the central directory
+	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number
+
+	return nil
+}
+
+func findSignatureInBlock(b []byte) int {
+	for i := len(b) - directoryEndLen; i >= 0; i-- {
+		// defined from directoryEndSignature in struct.go
+		if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
+			// n is length of comment
+			n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
+			if n+directoryEndLen+i <= len(b) {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+type readBuf []byte
+
+func (b *readBuf) uint8() uint8 {
+	v := (*b)[0]
+	*b = (*b)[1:]
+	return v
+}
+
+func (b *readBuf) uint16() uint16 {
+	v := binary.LittleEndian.Uint16(*b)
+	*b = (*b)[2:]
+	return v
+}
+
+func (b *readBuf) uint32() uint32 {
+	v := binary.LittleEndian.Uint32(*b)
+	*b = (*b)[4:]
+	return v
+}
+
+func (b *readBuf) uint64() uint64 {
+	v := binary.LittleEndian.Uint64(*b)
+	*b = (*b)[8:]
+	return v
+}
+
+func (b *readBuf) sub(n int) readBuf {
+	b2 := (*b)[:n]
+	*b = (*b)[n:]
+	return b2
+}
+
+// A fileListEntry is a File and its ename.
+// If file == nil, the fileListEntry describes a directory without metadata.
+type fileListEntry struct {
+	name  string
+	file  *File
+	isDir bool
+}
+
+type fileInfoDirEntry interface {
+	fs.FileInfo
+	fs.DirEntry
+}
+
+func (e *fileListEntry) stat() fileInfoDirEntry {
+	if !e.isDir {
+		return headerFileInfo{&e.file.FileHeader}
+	}
+	return e
+}
+
+// Only used for directories.
+func (f *fileListEntry) Name() string      { _, elem, _ := split(f.name); return elem }
+func (f *fileListEntry) Size() int64       { return 0 }
+func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
+func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
+func (f *fileListEntry) IsDir() bool       { return true }
+func (f *fileListEntry) Sys() interface{}  { return nil }
+
+func (f *fileListEntry) ModTime() time.Time {
+	if f.file == nil {
+		return time.Time{}
+	}
+	return f.file.FileHeader.Modified.UTC()
+}
+
+func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
+
+// toValidName coerces name to be a valid name for fs.FS.Open.
+func toValidName(name string) string {
+	name = strings.ReplaceAll(name, `\`, `/`)
+	p := path.Clean(name)
+	if strings.HasPrefix(p, "/") {
+		p = p[len("/"):]
+	}
+	for strings.HasPrefix(p, "../") {
+		p = p[len("../"):]
+	}
+	return p
+}
+
+func (r *Reader) initFileList() {
+	r.fileListOnce.Do(func() {
+		dirs := make(map[string]bool)
+		knownDirs := make(map[string]bool)
+		for _, file := range r.File {
+			isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
+			name := toValidName(file.Name)
+			for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
+				dirs[dir] = true
+			}
+			entry := fileListEntry{
+				name:  name,
+				file:  file,
+				isDir: isDir,
+			}
+			r.fileList = append(r.fileList, entry)
+			if isDir {
+				knownDirs[name] = true
+			}
+		}
+		for dir := range dirs {
+			if !knownDirs[dir] {
+				entry := fileListEntry{
+					name:  dir,
+					file:  nil,
+					isDir: true,
+				}
+				r.fileList = append(r.fileList, entry)
+			}
+		}
+
+		sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
+	})
+}
+
+func fileEntryLess(x, y string) bool {
+	xdir, xelem, _ := split(x)
+	ydir, yelem, _ := split(y)
+	return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+// Open opens the named file in the ZIP archive,
+// using the semantics of fs.FS.Open:
+// paths are always slash separated, with no
+// leading / or ../ elements.
+func (r *Reader) Open(name string) (fs.File, error) {
+	r.initFileList()
+
+	e := r.openLookup(name)
+	if e == nil || !fs.ValidPath(name) {
+		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+	}
+	if e.isDir {
+		return &openDir{e, r.openReadDir(name), 0}, nil
+	}
+	rc, err := e.file.Open()
+	if err != nil {
+		return nil, err
+	}
+	return rc.(fs.File), nil
+}
+
+func split(name string) (dir, elem string, isDir bool) {
+	if name[len(name)-1] == '/' {
+		isDir = true
+		name = name[:len(name)-1]
+	}
+	i := len(name) - 1
+	for i >= 0 && name[i] != '/' {
+		i--
+	}
+	if i < 0 {
+		return ".", name, isDir
+	}
+	return name[:i], name[i+1:], isDir
+}
+
+var dotFile = &fileListEntry{name: "./", isDir: true}
+
+func (r *Reader) openLookup(name string) *fileListEntry {
+	if name == "." {
+		return dotFile
+	}
+
+	dir, elem, _ := split(name)
+	files := r.fileList
+	i := sort.Search(len(files), func(i int) bool {
+		idir, ielem, _ := split(files[i].name)
+		return idir > dir || idir == dir && ielem >= elem
+	})
+	if i < len(files) {
+		fname := files[i].name
+		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
+			return &files[i]
+		}
+	}
+	return nil
+}
+
+func (r *Reader) openReadDir(dir string) []fileListEntry {
+	files := r.fileList
+	i := sort.Search(len(files), func(i int) bool {
+		idir, _, _ := split(files[i].name)
+		return idir >= dir
+	})
+	j := sort.Search(len(files), func(j int) bool {
+		jdir, _, _ := split(files[j].name)
+		return jdir > dir
+	})
+	return files[i:j]
+}
+
+type openDir struct {
+	e      *fileListEntry
+	files  []fileListEntry
+	offset int
+}
+
+func (d *openDir) Close() error               { return nil }
+func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat(), nil }
+
+func (d *openDir) Read([]byte) (int, error) {
+	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
+}
+
+func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
+	n := len(d.files) - d.offset
+	if count > 0 && n > count {
+		n = count
+	}
+	if n == 0 {
+		if count <= 0 {
+			return nil, nil
+		}
+		return nil, io.EOF
+	}
+	list := make([]fs.DirEntry, n)
+	for i := range list {
+		list[i] = d.files[d.offset+i].stat()
+	}
+	d.offset += n
+	return list, nil
+}
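
As a quick usage note (not part of the patch): the Open method above makes
*Reader usable as an fs.FS, so the generic helpers in the backported io/fs
package apply to zip contents directly. A minimal sketch, assuming the
backported io/fs mirrors the standard fs.WalkDir and that example.zip exists:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/website/internal/backport/archive/zip"
		"golang.org/x/website/internal/backport/io/fs"
	)

	func main() {
		r, err := zip.OpenReader("example.zip")
		if err != nil {
			log.Fatal(err)
		}
		defer r.Close()

		// *ReadCloser embeds Reader, so it satisfies fs.FS via Open.
		err = fs.WalkDir(r, ".", func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			fmt.Println(path)
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
	}
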
diff --git a/internal/backport/archive/zip/reader_test.go b/internal/backport/archive/zip/reader_test.go
new file mode 100644
index 0000000..a3b43fa
--- /dev/null
+++ b/internal/backport/archive/zip/reader_test.go
@@ -0,0 +1,1329 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/obscuretestdata"
+	"golang.org/x/website/internal/backport/testing/fstest"
+)
+
+type ZipTest struct {
+	Name     string
+	Source   func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/<Name> file
+	Comment  string
+	File     []ZipTestFile
+	Obscured bool  // needed for Apple notarization (golang.org/issue/34986)
+	Error    error // the error that Opening this file should return
+}
+
+type ZipTestFile struct {
+	Name     string
+	Mode     fs.FileMode
+	NonUTF8  bool
+	ModTime  time.Time
+	Modified time.Time
+
+	// Information describing expected zip file content.
+	// First, reading the entire content should produce the error ContentErr.
+	// Second, if ContentErr==nil, the content should match Content.
+	// If content is large, an alternative to setting Content is to set File,
+	// which names a file in the testdata/ directory containing the
+	// uncompressed expected content.
+	// If content is very large, an alternative to setting Content or File
+	// is to set Size, which will then be checked against the header-reported size
+	// but will bypass the decompressing of the actual data.
+	// This last option is used for testing very large (multi-GB) compressed files.
+	ContentErr error
+	Content    []byte
+	File       string
+	Size       uint64
+}
+
+var tests = []ZipTest{
+	{
+		Name:    "test.zip",
+		Comment: "This is a zipfile comment.",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte("This is a test text file.\n"),
+				Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "gophercolor16x16.png",
+				File:     "gophercolor16x16.png",
+				Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name:    "test-trailing-junk.zip",
+		Comment: "This is a zipfile comment.",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte("This is a test text file.\n"),
+				Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "gophercolor16x16.png",
+				File:     "gophercolor16x16.png",
+				Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name:   "r.zip",
+		Source: returnRecursiveZip,
+		File: []ZipTestFile{
+			{
+				Name:     "r/r.zip",
+				Content:  rZipBytes(),
+				Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "symlink.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "symlink",
+				Content:  []byte("../target"),
+				Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
+				Mode:     0777 | fs.ModeSymlink,
+			},
+		},
+	},
+	{
+		Name: "readme.zip",
+	},
+	{
+		Name:  "readme.notzip",
+		Error: ErrFormat,
+	},
+	{
+		Name: "dd.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "filename",
+				Content:  []byte("This is a test textfile.\n"),
+				Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		// created in windows XP file manager.
+		Name: "winxp.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "hello",
+				Content:  []byte("world \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/bar",
+				Content:  []byte("foo \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/empty/",
+				Content:  []byte{},
+				Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
+				Mode:     fs.ModeDir | 0777,
+			},
+			{
+				Name:     "readonly",
+				Content:  []byte("important \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC),
+				Mode:     0444,
+			},
+		},
+	},
+	{
+		// created by Zip 3.0 under Linux
+		Name: "unix.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "hello",
+				Content:  []byte("world \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/bar",
+				Content:  []byte("foo \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)),
+				Mode:     0666,
+			},
+			{
+				Name:     "dir/empty/",
+				Content:  []byte{},
+				Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
+				Mode:     fs.ModeDir | 0777,
+			},
+			{
+				Name:     "readonly",
+				Content:  []byte("important \r\n"),
+				Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)),
+				Mode:     0444,
+			},
+		},
+	},
+	{
+		// created by Go, before we wrote the "optional" data
+		// descriptor signatures (which are required by macOS).
+		// Use obscured file to avoid Apple’s notarization service
+		// rejecting the toolchain due to an inability to unzip this archive.
+		// See golang.org/issue/34986
+		Name:     "go-no-datadesc-sig.zip.base64",
+		Obscured: true,
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		// created by Go, after we wrote the "optional" data
+		// descriptor signatures (which are required by macOS)
+		Name: "go-with-datadesc-sig.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name:   "Bad-CRC32-in-data-descriptor",
+		Source: returnCorruptCRC32Zip,
+		File: []ZipTestFile{
+			{
+				Name:       "foo.txt",
+				Content:    []byte("foo\n"),
+				Modified:   time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:       0666,
+				ContentErr: ErrChecksum,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	// Tests that we verify (and accept valid) crc32s on files
+	// with crc32s in their file header (not in data descriptors)
+	{
+		Name: "crc32-not-streamed.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "foo.txt",
+				Content:  []byte("foo\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	// Tests that we verify (and reject invalid) crc32s on files
+	// with crc32s in their file header (not in data descriptors)
+	{
+		Name:   "crc32-not-streamed.zip",
+		Source: returnCorruptNotStreamedZip,
+		File: []ZipTestFile{
+			{
+				Name:       "foo.txt",
+				Content:    []byte("foo\n"),
+				Modified:   time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+				Mode:       0644,
+				ContentErr: ErrChecksum,
+			},
+			{
+				Name:     "bar.txt",
+				Content:  []byte("bar\n"),
+				Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "zip64.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "README",
+				Content:  []byte("This small file is in ZIP64 format.\n"),
+				Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC),
+				Mode:     0644,
+			},
+		},
+	},
+	// Another zip64 file with different Extras fields. (golang.org/issue/7069)
+	{
+		Name: "zip64-2.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "README",
+				Content:  []byte("This small file is in ZIP64 format.\n"),
+				Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	// Largest possible non-zip64 file, with no zip64 header.
+	{
+		Name:   "big.zip",
+		Source: returnBigZipBytes,
+		File: []ZipTestFile{
+			{
+				Name:     "big.file",
+				Content:  nil,
+				Size:     1<<32 - 1,
+				Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "utf8-7zip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-infozip.zip",
+		File: []ZipTestFile{
+			{
+				Name:    "世界",
+				Content: []byte{},
+				Mode:    0644,
+				// Name is valid UTF-8, but format does not have UTF-8 flag set.
+				// We don't do UTF-8 detection for multi-byte runes due to
+				// false-positives with other encodings (e.g., Shift-JIS).
+				// Format says encoding is not UTF-8, so we trust it.
+				NonUTF8:  true,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-osx.zip",
+		File: []ZipTestFile{
+			{
+				Name:    "世界",
+				Content: []byte{},
+				Mode:    0644,
+				// Name is valid UTF-8, but format does not have UTF-8 set.
+				NonUTF8:  true,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-winrar.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "utf8-winzip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "世界",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)),
+			},
+		},
+	},
+	{
+		Name: "time-7zip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-infozip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "time-osx.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0644,
+			},
+		},
+	},
+	{
+		Name: "time-win7.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-winrar.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-winzip.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-go.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "test.txt",
+				Content:  []byte{},
+				Size:     1<<32 - 1,
+				Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+				Mode:     0666,
+			},
+		},
+	},
+	{
+		Name: "time-22738.zip",
+		File: []ZipTestFile{
+			{
+				Name:     "file",
+				Content:  []byte{},
+				Mode:     0666,
+				Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)),
+				ModTime:  time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC),
+			},
+		},
+	},
+}
+
+func TestReader(t *testing.T) {
+	for _, zt := range tests {
+		t.Run(zt.Name, func(t *testing.T) {
+			readTestZip(t, zt)
+		})
+	}
+}
+
+func readTestZip(t *testing.T, zt ZipTest) {
+	var z *Reader
+	var err error
+	var raw []byte
+	if zt.Source != nil {
+		rat, size := zt.Source()
+		z, err = NewReader(rat, size)
+		raw = make([]byte, size)
+		if _, err := rat.ReadAt(raw, 0); err != nil {
+			t.Errorf("ReadAt error=%v", err)
+			return
+		}
+	} else {
+		path := filepath.Join("testdata", zt.Name)
+		if zt.Obscured {
+			tf, err := obscuretestdata.DecodeToTempFile(path)
+			if err != nil {
+				t.Errorf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
+				return
+			}
+			defer os.Remove(tf)
+			path = tf
+		}
+		var rc *ReadCloser
+		rc, err = OpenReader(path)
+		if err == nil {
+			defer rc.Close()
+			z = &rc.Reader
+		}
+		var err2 error
+		raw, err2 = ioutil.ReadFile(path)
+		if err2 != nil {
+			t.Errorf("ReadFile(%s) error=%v", path, err2)
+			return
+		}
+	}
+	if err != zt.Error {
+		t.Errorf("error=%v, want %v", err, zt.Error)
+		return
+	}
+
+	// bail if file is not zip
+	if err == ErrFormat {
+		return
+	}
+
+	// bail here if no Files expected to be tested
+	// (there may actually be files in the zip, but we don't care)
+	if zt.File == nil {
+		return
+	}
+
+	if z.Comment != zt.Comment {
+		t.Errorf("comment=%q, want %q", z.Comment, zt.Comment)
+	}
+	if len(z.File) != len(zt.File) {
+		t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File))
+	}
+
+	// test read of each file
+	for i, ft := range zt.File {
+		readTestFile(t, zt, ft, z.File[i], raw)
+	}
+	if t.Failed() {
+		return
+	}
+
+	// test simultaneous reads
+	n := 0
+	done := make(chan bool)
+	for i := 0; i < 5; i++ {
+		for j, ft := range zt.File {
+			go func(j int, ft ZipTestFile) {
+				readTestFile(t, zt, ft, z.File[j], raw)
+				done <- true
+			}(j, ft)
+			n++
+		}
+	}
+	for ; n > 0; n-- {
+		<-done
+	}
+}
+
+func equalTimeAndZone(t1, t2 time.Time) bool {
+	name1, offset1 := t1.Zone()
+	name2, offset2 := t2.Zone()
+	return t1.Equal(t2) && name1 == name2 && offset1 == offset2
+}
+
+func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File, raw []byte) {
+	if f.Name != ft.Name {
+		t.Errorf("name=%q, want %q", f.Name, ft.Name)
+	}
+	if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) {
+		t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified)
+	}
+	if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) {
+		t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime)
+	}
+
+	testFileMode(t, f, ft.Mode)
+
+	size := uint64(f.UncompressedSize)
+	if size == uint32max {
+		size = f.UncompressedSize64
+	} else if size != f.UncompressedSize64 {
+		t.Errorf("%v: UncompressedSize=%#x does not match UncompressedSize64=%#x", f.Name, size, f.UncompressedSize64)
+	}
+
+	// Check that OpenRaw returns the correct byte segment
+	rw, err := f.OpenRaw()
+	if err != nil {
+		t.Errorf("%v: OpenRaw error=%v", f.Name, err)
+		return
+	}
+	start, err := f.DataOffset()
+	if err != nil {
+		t.Errorf("%v: DataOffset error=%v", f.Name, err)
+		return
+	}
+	got, err := ioutil.ReadAll(rw)
+	if err != nil {
+		t.Errorf("%v: OpenRaw ReadAll error=%v", f.Name, err)
+		return
+	}
+	end := uint64(start) + f.CompressedSize64
+	want := raw[start:end]
+	if !bytes.Equal(got, want) {
+		t.Logf("got %q", got)
+		t.Logf("want %q", want)
+		t.Errorf("%v: OpenRaw returned unexpected bytes", f.Name)
+		return
+	}
+
+	r, err := f.Open()
+	if err != nil {
+		t.Errorf("%v", err)
+		return
+	}
+
+	// For very large files, just check that the size is correct.
+	// The content is expected to be all zeros.
+	// Don't bother uncompressing: too big.
+	if ft.Content == nil && ft.File == "" && ft.Size > 0 {
+		if size != ft.Size {
+			t.Errorf("%v: uncompressed size %#x, want %#x", ft.Name, size, ft.Size)
+		}
+		r.Close()
+		return
+	}
+
+	var b bytes.Buffer
+	_, err = io.Copy(&b, r)
+	if err != ft.ContentErr {
+		t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr)
+	}
+	if err != nil {
+		return
+	}
+	r.Close()
+
+	if g := uint64(b.Len()); g != size {
+		t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size)
+	}
+
+	var c []byte
+	if ft.Content != nil {
+		c = ft.Content
+	} else if c, err = ioutil.ReadFile("testdata/" + ft.File); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if b.Len() != len(c) {
+		t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c))
+		return
+	}
+
+	for i, b := range b.Bytes() {
+		if b != c[i] {
+			t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i])
+			return
+		}
+	}
+}
+
+func testFileMode(t *testing.T, f *File, want fs.FileMode) {
+	mode := f.Mode()
+	if want == 0 {
+		t.Errorf("%s mode: got %v, want none", f.Name, mode)
+	} else if mode != want {
+		t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
+	}
+}
+
+func TestInvalidFiles(t *testing.T) {
+	const size = 1024 * 70 // 70kb
+	b := make([]byte, size)
+
+	// zeroes
+	_, err := NewReader(bytes.NewReader(b), size)
+	if err != ErrFormat {
+		t.Errorf("zeroes: error=%v, want %v", err, ErrFormat)
+	}
+
+	// repeated directoryEndSignatures
+	sig := make([]byte, 4)
+	binary.LittleEndian.PutUint32(sig, directoryEndSignature)
+	for i := 0; i < size-4; i += 4 {
+		copy(b[i:i+4], sig)
+	}
+	_, err = NewReader(bytes.NewReader(b), size)
+	if err != ErrFormat {
+		t.Errorf("sigs: error=%v, want %v", err, ErrFormat)
+	}
+
+	// negative size
+	_, err = NewReader(bytes.NewReader([]byte("foobar")), -1)
+	if err == nil {
+		t.Errorf("archive/zip.NewReader: expected error when negative size is passed")
+	}
+}
+
+func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) {
+	data, err := ioutil.ReadFile(filepath.Join("testdata", fileName))
+	if err != nil {
+		panic("Error reading " + fileName + ": " + err.Error())
+	}
+	corrupter(data)
+	return bytes.NewReader(data), int64(len(data))
+}
+
+func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) {
+	return messWith("go-with-datadesc-sig.zip", func(b []byte) {
+		// Corrupt one of the CRC32s in the data descriptor:
+		b[0x2d]++
+	})
+}
+
+func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) {
+	return messWith("crc32-not-streamed.zip", func(b []byte) {
+		// Corrupt foo.txt's final crc32 byte, in both
+		// the file header and TOC. (0x7e -> 0x7f)
+		b[0x11]++
+		b[0x9d]++
+
+		// TODO(bradfitz): add a new test that only corrupts
+		// one of these values, and verify that that's also an
+		// error. Currently, the reader code doesn't verify the
+		// fileheader and TOC's crc32 match if they're both
+		// non-zero and only the second line above, the TOC,
+		// is what matters.
+	})
+}
+
+// rZipBytes returns the bytes of a recursive zip file, without
+// putting it on disk and triggering certain virus scanners.
+func rZipBytes() []byte {
+	s := `
+0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4
+0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f
+0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00
+0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00
+0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00
+0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00
+0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8
+0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f
+0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e
+0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb
+00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff
+00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42
+00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14
+00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21
+00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb
+00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff
+0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a
+0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3
+0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06
+0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00
+0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf
+0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06
+0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01
+0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89
+0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00
+0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a
+00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00
+00001b0 00 00 6d 01 00 00 00 00`
+	s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+	s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func returnRecursiveZip() (r io.ReaderAt, size int64) {
+	b := rZipBytes()
+	return bytes.NewReader(b), int64(len(b))
+}
+
+// biggestZipBytes returns the bytes of a zip file biggest.zip
+// that contains a zip file bigger.zip that contains a zip file
+// big.zip that contains big.file, which contains 2³²-1 zeros.
+// The big.zip file is interesting because it has no zip64 header,
+// much like the innermost zip files in the well-known 42.zip.
+//
+// biggest.zip was generated by changing isZip64 to use > uint32max
+// instead of >= uint32max and then running this program:
+//
+//	package main
+//
+//	import (
+//		"archive/zip"
+//		"bytes"
+//		"io"
+//		"log"
+//		"os"
+//	)
+//
+//	type zeros struct{}
+//
+//	func (zeros) Read(b []byte) (int, error) {
+//		for i := range b {
+//			b[i] = 0
+//		}
+//		return len(b), nil
+//	}
+//
+//	func main() {
+//		bigZip := makeZip("big.file", io.LimitReader(zeros{}, 1<<32-1))
+//		if err := os.WriteFile("/tmp/big.zip", bigZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//
+//		biggerZip := makeZip("big.zip", bytes.NewReader(bigZip))
+//		if err := os.WriteFile("/tmp/bigger.zip", biggerZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//
+//		biggestZip := makeZip("bigger.zip", bytes.NewReader(biggerZip))
+//		if err := os.WriteFile("/tmp/biggest.zip", biggestZip, 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//	}
+//
+//	func makeZip(name string, r io.Reader) []byte {
+//		var buf bytes.Buffer
+//		w := zip.NewWriter(&buf)
+//		wf, err := w.Create(name)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		if _, err = io.Copy(wf, r); err != nil {
+//			log.Fatal(err)
+//		}
+//		if err := w.Close(); err != nil {
+//			log.Fatal(err)
+//		}
+//		return buf.Bytes()
+//	}
+//
+// The 4 GB of zeros compresses to 4 MB, which compresses to 20 kB,
+// which compresses to 1252 bytes (in the hex dump below).
+//
+// It's here in hex for the same reason as rZipBytes above: to avoid
+// problems with on-disk virus scanners or other zip processors.
+//
+func biggestZipBytes() []byte {
+	s := `
+0000000 50 4b 03 04 14 00 08 00 08 00 00 00 00 00 00 00
+0000010 00 00 00 00 00 00 00 00 00 00 0a 00 00 00 62 69
+0000020 67 67 65 72 2e 7a 69 70 ec dc 6b 4c 53 67 18 07
+0000030 f0 16 c5 ca 65 2e cb b8 94 20 61 1f 44 33 c7 cd
+0000040 c0 86 4a b5 c0 62 8a 61 05 c6 cd 91 b2 54 8c 1b
+0000050 63 8b 03 9c 1b 95 52 5a e3 a0 19 6c b2 05 59 44
+0000060 64 9d 73 83 71 11 46 61 14 b9 1d 14 09 4a c3 60
+0000070 2e 4c 6e a5 60 45 02 62 81 95 b6 94 9e 9e 77 e7
+0000080 d0 43 b6 f8 71 df 96 3c e7 a4 69 ce bf cf e9 79
+0000090 ce ef 79 3f bf f1 31 db b6 bb 31 76 92 e7 f3 07
+00000a0 8b fc 9c ca cc 08 cc cb cc 5e d2 1c 88 d9 7e bb
+00000b0 4f bb 3a 3f 75 f1 5d 7f 8f c2 68 67 77 8f 25 ff
+00000c0 84 e2 93 2d ef a4 95 3d 71 4e 2c b9 b0 87 c3 be
+00000d0 3d f8 a7 60 24 61 c5 ef ae 9e c8 6c 6d 4e 69 c8
+00000e0 67 65 34 f8 37 76 2d 76 5c 54 f3 95 65 49 c7 0f
+00000f0 18 71 4b 7e 5b 6a d1 79 47 61 41 b0 4e 2a 74 45
+0000100 43 58 12 b2 5a a5 c6 7d 68 55 88 d4 98 75 18 6d
+0000110 08 d1 1f 8f 5a 9e 96 ee 45 cf a4 84 4e 4b e8 50
+0000120 a7 13 d9 06 de 52 81 97 36 b2 d7 b8 fc 2b 5f 55
+0000130 23 1f 32 59 cf 30 27 fb e2 8a b9 de 45 dd 63 9c
+0000140 4b b5 8b 96 4c 7a 62 62 cc a1 a7 cf fa f1 fe dd
+0000150 54 62 11 bf 36 78 b3 c7 b1 b5 f2 61 4d 4e dd 66
+0000160 32 2e e6 70 34 5f f4 c9 e6 6c 43 6f da 6b c6 c3
+0000170 09 2c ce 09 57 7f d2 7e b4 23 ba 7c 1b 99 bc 22
+0000180 3e f1 de 91 2f e3 9c 1b 82 cc c2 84 39 aa e6 de
+0000190 b4 69 fc cc cb 72 a6 61 45 f0 d3 1d 26 19 7c 8d
+00001a0 29 c8 66 02 be 77 6a f9 3d 34 79 17 19 c8 96 24
+00001b0 a3 ac e4 dd 3b 1a 8e c6 fe 96 38 6b bf 67 5a 23
+00001c0 f4 16 f4 e6 8a b4 fc c2 cd bf 95 66 1d bb 35 aa
+00001d0 92 7d 66 d8 08 8d a5 1f 54 2a af 09 cf 61 ff d2
+00001e0 85 9d 8f b6 d7 88 07 4a 86 03 db 64 f3 d9 92 73
+00001f0 df ec a7 fc 23 4c 8d 83 79 63 2a d9 fd 8d b3 c8
+0000200 8f 7e d4 19 85 e6 8d 1c 76 f0 8b 58 32 fd 9a d6
+0000210 85 e2 48 ad c3 d5 60 6f 7e 22 dd ef 09 49 7c 7f
+0000220 3a 45 c3 71 b7 df f3 4c 63 fb b5 d9 31 5f 6e d6
+0000230 24 1d a4 4a fe 32 a7 5c 16 48 5c 3e 08 6b 8a d3
+0000240 25 1d a2 12 a5 59 24 ea 20 5f 52 6d ad 94 db 6b
+0000250 94 b9 5d eb 4b a7 5c 44 bb 1e f2 3c 6b cf 52 c9
+0000260 e9 e5 ba 06 b9 c4 e5 0a d0 00 0d d0 00 0d d0 00
+0000270 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d
+0000280 d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0
+0000290 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00
+00002a0 0d d0 00 cd ff 9e 46 86 fa a7 7d 3a 43 d7 8e 10
+00002b0 52 e9 be e6 6e cf eb 9e 85 4d 65 ce cc 30 c1 44
+00002c0 c0 4e af bc 9c 6c 4b a0 d7 54 ff 1d d5 5c 89 fb
+00002d0 b5 34 7e c4 c2 9e f5 a0 f6 5b 7e 6e ca 73 c7 ef
+00002e0 5d be de f9 e8 81 eb a5 0a a5 63 54 2c d7 1c d1
+00002f0 89 17 85 f8 16 94 f2 8a b2 a3 f5 b6 6d df 75 cd
+0000300 90 dd 64 bd 5d 55 4e f2 55 19 1b b7 cc ef 1b ea
+0000310 2e 05 9c f4 aa 1e a8 cd a6 82 c7 59 0f 5e 9d e0
+0000320 bb fc 6c d6 99 23 eb 36 ad c6 c5 e1 d8 e1 e2 3e
+0000330 d9 90 5a f7 91 5d 6f bc 33 6d 98 47 d2 7c 2e 2f
+0000340 99 a4 25 72 85 49 2c be 0b 5b af 8f e5 6e 81 a6
+0000350 a3 5a 6f 39 53 3a ab 7a 8b 1e 26 f7 46 6c 7d 26
+0000360 53 b3 22 31 94 d3 83 f2 18 4d f5 92 33 27 53 97
+0000370 0f d3 e6 55 9c a6 c5 31 87 6f d3 f3 ae 39 6f 56
+0000380 10 7b ab 7e d0 b4 ca f2 b8 05 be 3f 0e 6e 5a 75
+0000390 ab 0c f5 37 0e ba 8e 75 71 7a aa ed 7a dd 6a 63
+00003a0 be 9b a0 97 27 6a 6f e7 d3 8b c4 7c ec d3 91 56
+00003b0 d9 ac 5e bf 16 42 2f 00 1f 93 a2 23 87 bd e2 59
+00003c0 a0 de 1a 66 c8 62 eb 55 8f 91 17 b4 61 42 7a 50
+00003d0 40 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40
+00003e0 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40 03
+00003f0 34 40 03 34 40 03 34 ff 85 86 90 8b ea 67 90 0d
+0000400 e1 42 1b d2 61 d6 79 ec fd 3e 44 28 a4 51 6c 5c
+0000410 fc d2 72 ca ba 82 18 46 16 61 cd 93 a9 0f d1 24
+0000420 17 99 e2 2c 71 16 84 0c c8 7a 13 0f 9a 5e c5 f0
+0000430 79 64 e2 12 4d c8 82 a1 81 19 2d aa 44 6d 87 54
+0000440 84 71 c1 f6 d4 ca 25 8c 77 b9 08 c7 c8 5e 10 8a
+0000450 8f 61 ed 8c ba 30 1f 79 9a c7 60 34 2b b9 8c f8
+0000460 18 a6 83 1b e3 9f ad 79 fe fd 1b 8b f1 fc 41 6f
+0000470 d4 13 1f e3 b8 83 ba 64 92 e7 eb e4 77 05 8f ba
+0000480 fa 3b 00 00 ff ff 50 4b 07 08 a6 18 b1 91 5e 04
+0000490 00 00 e4 47 00 00 50 4b 01 02 14 00 14 00 08 00
+00004a0 08 00 00 00 00 00 a6 18 b1 91 5e 04 00 00 e4 47
+00004b0 00 00 0a 00 00 00 00 00 00 00 00 00 00 00 00 00
+00004c0 00 00 00 00 62 69 67 67 65 72 2e 7a 69 70 50 4b
+00004d0 05 06 00 00 00 00 01 00 01 00 38 00 00 00 96 04
+00004e0 00 00 00 00`
+	s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+	s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+func returnBigZipBytes() (r io.ReaderAt, size int64) {
+	b := biggestZipBytes()
+	for i := 0; i < 2; i++ {
+		r, err := NewReader(bytes.NewReader(b), int64(len(b)))
+		if err != nil {
+			panic(err)
+		}
+		f, err := r.File[0].Open()
+		if err != nil {
+			panic(err)
+		}
+		b, err = ioutil.ReadAll(f)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return bytes.NewReader(b), int64(len(b))
+}
+
+func TestIssue8186(t *testing.T) {
+	// Directory headers & data found in the TOC of a JAR file.
+	dirEnts := []string{
+		"PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00",
+		"PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF",
+		"PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA",
+	}
+	for i, s := range dirEnts {
+		var f File
+		err := readDirectoryHeader(&f, strings.NewReader(s))
+		if err != nil {
+			t.Errorf("error reading #%d: %v", i, err)
+		}
+	}
+}
+
+// Verify we return ErrUnexpectedEOF when length is short.
+func TestIssue10957(t *testing.T) {
+	data := []byte("PK\x03\x040000000PK\x01\x0200000" +
+		"0000000000000000000\x00" +
+		"\x00\x00\x00\x00\x00000000000000PK\x01" +
+		"\x020000000000000000000" +
+		"00000\v\x00\x00\x00\x00\x00000000000" +
+		"00000000000000PK\x01\x0200" +
+		"00000000000000000000" +
+		"00\v\x00\x00\x00\x00\x00000000000000" +
+		"00000000000PK\x01\x020000<" +
+		"0\x00\x0000000000000000\v\x00\v" +
+		"\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" +
+		"00000000PK\x01\x0200000000" +
+		"0000000000000000\v\x00\x00\x00" +
+		"\x00\x0000PK\x05\x06000000\x05\x000000" +
+		"\v\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, f := range z.File {
+		r, err := f.Open()
+		if err != nil {
+			continue
+		}
+		if f.UncompressedSize64 < 1e6 {
+			n, err := io.Copy(ioutil.Discard, r)
+			if i == 3 && err != io.ErrUnexpectedEOF {
+				t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
+			}
+			if err == nil && uint64(n) != f.UncompressedSize64 {
+				t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64)
+			}
+		}
+		r.Close()
+	}
+}
+
+// Verify that this particular malformed zip file is rejected.
+func TestIssue10956(t *testing.T) {
+	data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" +
+		"0000PK\x05\x06000000000000" +
+		"0000\v\x00000\x00\x00\x00\x00\x00\x00\x000")
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err == nil {
+		t.Errorf("got nil error, want ErrFormat")
+	}
+	if r != nil {
+		t.Errorf("got non-nil Reader, want nil")
+	}
+}
+
+// Verify we return ErrUnexpectedEOF when reading truncated data descriptor.
+func TestIssue11146(t *testing.T) {
+	data := []byte("PK\x03\x040000000000000000" +
+		"000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" +
+		"0000000000000000PK\x01\x02" +
+		"0000\b0\b\x00000000000000" +
+		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" +
+		"\x00\x0000\x01\x0000008\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := z.File[0].Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = ioutil.ReadAll(r)
+	if err != io.ErrUnexpectedEOF {
+		t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
+	}
+	r.Close()
+}
+
+// Verify we do not treat non-zip64 archives as zip64
+func TestIssue12449(t *testing.T) {
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+		0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
+		0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+		0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+		0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+		0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
+		0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
+		0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+		0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
+		0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
+		0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
+		0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
+		0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
+		0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+		0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+		0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+		0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
+		0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
+		0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
+		0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
+		0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
+		0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
+		0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
+	}
+	// Read in the archive.
+	_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	if err != nil {
+		t.Errorf("Error reading the archive: %v", err)
+	}
+}
+
+func TestFS(t *testing.T) {
+	for _, test := range []struct {
+		file string
+		want []string
+	}{
+		{
+			"testdata/unix.zip",
+			[]string{"hello", "dir/bar", "readonly"},
+		},
+		{
+			"testdata/subdir.zip",
+			[]string{"a/b/c"},
+		},
+	} {
+		t.Run(test.file, func(t *testing.T) {
+			t.Parallel()
+			z, err := OpenReader(test.file)
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer z.Close()
+			if err := fstest.TestFS(z, test.want...); err != nil {
+				t.Error(err)
+			}
+		})
+	}
+}
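
TestFS above is what ties this backport to the new io/fs machinery: the Reader's Open method makes a zip archive usable as an fs.FS, so the backported io/fs helpers apply to it directly. A minimal sketch of that usage, reading one entry from the same testdata archive the test uses (the entry name "hello" comes from the test's expected file list; the rest is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/website/internal/backport/archive/zip"
	"golang.org/x/website/internal/backport/io/fs"
)

func main() {
	// OpenReader returns a *ReadCloser whose embedded Reader
	// implements fs.FS via its Open method.
	z, err := zip.OpenReader("testdata/unix.zip") // path relative to the package dir
	if err != nil {
		log.Fatal(err)
	}
	defer z.Close()

	// Stat an entry through the fs.FS interface...
	fi, err := fs.Stat(z, "hello")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fi.Name(), fi.Size(), fi.ModTime())

	// ...and read it through the same interface.
	f, err := z.Open("hello")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	data, err := ioutil.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes\n", len(data))
}
```
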
+
+func TestFSModTime(t *testing.T) {
+	t.Parallel()
+	z, err := OpenReader("testdata/subdir.zip")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer z.Close()
+
+	for _, test := range []struct {
+		name string
+		want time.Time
+	}{
+		{
+			"a",
+			time.Date(2021, 4, 19, 12, 29, 56, 0, timeZone(-7*time.Hour)).UTC(),
+		},
+		{
+			"a/b/c",
+			time.Date(2021, 4, 19, 12, 29, 59, 0, timeZone(-7*time.Hour)).UTC(),
+		},
+	} {
+		fi, err := fs.Stat(z, test.name)
+		if err != nil {
+			t.Errorf("%s: %v", test.name, err)
+			continue
+		}
+		if got := fi.ModTime(); !got.Equal(test.want) {
+			t.Errorf("%s: got modtime %v, want %v", test.name, got, test.want)
+		}
+	}
+}
+
+func TestCVE202127919(t *testing.T) {
+	// Archive containing only the file "../test.txt"
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+		0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
+		0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
+		0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
+		0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
+		0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
+		0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
+		0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
+		0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
+		0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
+		0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
+		0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
+		0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
+		0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
+		0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
+		0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
+	}
+	r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	if err != nil {
+		t.Fatalf("Error reading the archive: %v", err)
+	}
+	_, err = r.Open("test.txt")
+	if err != nil {
+		t.Errorf("Error reading file: %v", err)
+	}
+}
+
+func TestReadDataDescriptor(t *testing.T) {
+	tests := []struct {
+		desc    string
+		in      []byte
+		zip64   bool
+		want    *dataDescriptor
+		wantErr error
+	}{{
+		desc: "valid 32 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, // compressed size
+			0x08, 0x09, 0x0a, 0x0b, // uncompressed size
+		},
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x07060504,
+			uncompressedSize: 0x0b0a0908,
+		},
+	}, {
+		desc: "valid 32 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, // compressed size
+			0x08, 0x09, 0x0a, 0x0b, // uncompressed size
+		},
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x07060504,
+			uncompressedSize: 0x0b0a0908,
+		},
+	}, {
+		desc: "valid 64 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
+		},
+		zip64: true,
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x0b0a090807060504,
+			uncompressedSize: 0x131211100f0e0d0c,
+		},
+	}, {
+		desc: "valid 64 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
+		},
+		zip64: true,
+		want: &dataDescriptor{
+			crc32:            0x03020100,
+			compressedSize:   0x0b0a090807060504,
+			uncompressedSize: 0x131211100f0e0d0c,
+		},
+	}, {
+		desc: "invalid 32 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, // unexpected end
+		},
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 32 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, // unexpected end
+		},
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 64 bit with signature",
+		in: []byte{
+			0x50, 0x4b, 0x07, 0x08, // signature
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
+		},
+		zip64:   true,
+		wantErr: io.ErrUnexpectedEOF,
+	}, {
+		desc: "invalid 64 bit without signature",
+		in: []byte{
+			0x00, 0x01, 0x02, 0x03, // crc32
+			0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
+			0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
+		},
+		zip64:   true,
+		wantErr: io.ErrUnexpectedEOF,
+	}}
+
+	for _, test := range tests {
+		t.Run(test.desc, func(t *testing.T) {
+			r := bytes.NewReader(test.in)
+
+			desc, err := readDataDescriptor(r, test.zip64)
+			if err != test.wantErr {
+				t.Fatalf("got err %v; want nil", err)
+			}
+			if test.want == nil {
+				return
+			}
+			if desc == nil {
+				t.Fatalf("got nil DataDescriptor; want non-nil")
+			}
+			if desc.crc32 != test.want.crc32 {
+				t.Errorf("got CRC32 %#x; want %#x", desc.crc32, test.want.crc32)
+			}
+			if desc.compressedSize != test.want.compressedSize {
+				t.Errorf("got CompressedSize %#x; want %#x", desc.compressedSize, test.want.compressedSize)
+			}
+			if desc.uncompressedSize != test.want.uncompressedSize {
+				t.Errorf("got UncompressedSize %#x; want %#x", desc.uncompressedSize, test.want.uncompressedSize)
+			}
+		})
+	}
+}
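
The table above encodes the data descriptor wire format: an optional 4-byte signature, then little-endian crc32 and sizes, 4 bytes each in the 32-bit form and 8 bytes each in the zip64 form. A standalone sketch of decoding the 12-byte 32-bit body; the helper is illustrative and is not the package's internal readDataDescriptor:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseDataDescriptor32 decodes the 12-byte 32-bit data descriptor body,
// after any 0x08074b50 signature has already been stripped. It exists
// only to illustrate the layout exercised by the tests above.
func parseDataDescriptor32(b []byte) (crc32, compSize, uncompSize uint32, ok bool) {
	if len(b) < 12 {
		return 0, 0, 0, false
	}
	return binary.LittleEndian.Uint32(b[0:4]),
		binary.LittleEndian.Uint32(b[4:8]),
		binary.LittleEndian.Uint32(b[8:12]),
		true
}

func main() {
	in := []byte{
		0x00, 0x01, 0x02, 0x03, // crc32
		0x04, 0x05, 0x06, 0x07, // compressed size
		0x08, 0x09, 0x0a, 0x0b, // uncompressed size
	}
	crc, cs, us, ok := parseDataDescriptor32(in)
	fmt.Printf("%#x %#x %#x %v\n", crc, cs, us, ok) // 0x3020100 0x7060504 0xb0a0908 true
}
```
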
diff --git a/internal/backport/archive/zip/register.go b/internal/backport/archive/zip/register.go
new file mode 100644
index 0000000..51e9c3e
--- /dev/null
+++ b/internal/backport/archive/zip/register.go
@@ -0,0 +1,148 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"compress/flate"
+	"errors"
+	"io"
+	"io/ioutil"
+	"sync"
+)
+
+// A Compressor returns a new compressing writer, writing to w.
+// The WriteCloser's Close method must be used to flush pending data to w.
+// The Compressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned writer will be used only by
+// one goroutine at a time.
+type Compressor func(w io.Writer) (io.WriteCloser, error)
+
+// A Decompressor returns a new decompressing reader, reading from r.
+// The ReadCloser's Close method must be used to release associated resources.
+// The Decompressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned reader will be used only by
+// one goroutine at a time.
+type Decompressor func(r io.Reader) io.ReadCloser
+
+var flateWriterPool sync.Pool
+
+func newFlateWriter(w io.Writer) io.WriteCloser {
+	fw, ok := flateWriterPool.Get().(*flate.Writer)
+	if ok {
+		fw.Reset(w)
+	} else {
+		fw, _ = flate.NewWriter(w, 5)
+	}
+	return &pooledFlateWriter{fw: fw}
+}
+
+type pooledFlateWriter struct {
+	mu sync.Mutex // guards Close and Write
+	fw *flate.Writer
+}
+
+func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.fw == nil {
+		return 0, errors.New("Write after Close")
+	}
+	return w.fw.Write(p)
+}
+
+func (w *pooledFlateWriter) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var err error
+	if w.fw != nil {
+		err = w.fw.Close()
+		flateWriterPool.Put(w.fw)
+		w.fw = nil
+	}
+	return err
+}
+
+var flateReaderPool sync.Pool
+
+func newFlateReader(r io.Reader) io.ReadCloser {
+	fr, ok := flateReaderPool.Get().(io.ReadCloser)
+	if ok {
+		fr.(flate.Resetter).Reset(r, nil)
+	} else {
+		fr = flate.NewReader(r)
+	}
+	return &pooledFlateReader{fr: fr}
+}
+
+type pooledFlateReader struct {
+	mu sync.Mutex // guards Close and Read
+	fr io.ReadCloser
+}
+
+func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.fr == nil {
+		return 0, errors.New("Read after Close")
+	}
+	return r.fr.Read(p)
+}
+
+func (r *pooledFlateReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.fr != nil {
+		err = r.fr.Close()
+		flateReaderPool.Put(r.fr)
+		r.fr = nil
+	}
+	return err
+}
+
+var (
+	compressors   sync.Map // map[uint16]Compressor
+	decompressors sync.Map // map[uint16]Decompressor
+)
+
+func init() {
+	compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
+	compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))
+
+	decompressors.Store(Store, Decompressor(ioutil.NopCloser))
+	decompressors.Store(Deflate, Decompressor(newFlateReader))
+}
+
+// RegisterDecompressor allows custom decompressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterDecompressor(method uint16, dcomp Decompressor) {
+	if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
+		panic("decompressor already registered")
+	}
+}
+
+// RegisterCompressor registers custom compressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterCompressor(method uint16, comp Compressor) {
+	if _, dup := compressors.LoadOrStore(method, comp); dup {
+		panic("compressor already registered")
+	}
+}
+
+func compressor(method uint16) Compressor {
+	ci, ok := compressors.Load(method)
+	if !ok {
+		return nil
+	}
+	return ci.(Compressor)
+}
+
+func decompressor(method uint16) Decompressor {
+	di, ok := decompressors.Load(method)
+	if !ok {
+		return nil
+	}
+	return di.(Decompressor)
+}
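
The two sync.Map registries above are keyed by the 16-bit compression method ID, with Store and Deflate pre-registered and duplicate registration rejected by a panic. A hedged sketch of registering a decompressor for a made-up vendor method ID; the 0xFF00 method number and the pass-through reader are inventions for illustration only:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/website/internal/backport/archive/zip"
)

// methodNone is a made-up vendor method ID used only for this sketch;
// real method numbers are assigned in the ZIP APPNOTE.
const methodNone uint16 = 0xFF00

func main() {
	// Register a pass-through decompressor for the made-up method.
	// Registering Store or Deflate again would panic: both are
	// already built in, and duplicate registration is rejected.
	zip.RegisterDecompressor(methodNone, func(r io.Reader) io.ReadCloser {
		return ioutil.NopCloser(r)
	})
	fmt.Println("registered decompressor for method", methodNone)
}
```
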
diff --git a/internal/backport/archive/zip/struct.go b/internal/backport/archive/zip/struct.go
new file mode 100644
index 0000000..d407427
--- /dev/null
+++ b/internal/backport/archive/zip/struct.go
@@ -0,0 +1,401 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package zip provides support for reading and writing ZIP archives.
+
+See: https://www.pkware.com/appnote
+
+This package does not support disk spanning.
+
+A note about ZIP64:
+
+To be backwards compatible the FileHeader has both 32 and 64 bit Size
+fields. The 64 bit fields will always contain the correct value and
+for normal archives both fields will be the same. For files requiring
+the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
+fields must be used instead.
+*/
+package zip
+
+import (
+	"path"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// Compression methods.
+const (
+	Store   uint16 = 0 // no compression
+	Deflate uint16 = 8 // DEFLATE compressed
+)
+
+const (
+	fileHeaderSignature      = 0x04034b50
+	directoryHeaderSignature = 0x02014b50
+	directoryEndSignature    = 0x06054b50
+	directory64LocSignature  = 0x07064b50
+	directory64EndSignature  = 0x06064b50
+	dataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder
+	fileHeaderLen            = 30         // + filename + extra
+	directoryHeaderLen       = 46         // + filename + extra + comment
+	directoryEndLen          = 22         // + comment
+	dataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size
+	dataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size
+	directory64LocLen        = 20         //
+	directory64EndLen        = 56         // + extra
+
+	// Constants for the first byte in CreatorVersion.
+	creatorFAT    = 0
+	creatorUnix   = 3
+	creatorNTFS   = 11
+	creatorVFAT   = 14
+	creatorMacOSX = 19
+
+	// Version numbers.
+	zipVersion20 = 20 // 2.0
+	zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
+
+	// Limits for non zip64 files.
+	uint16max = (1 << 16) - 1
+	uint32max = (1 << 32) - 1
+
+	// Extra header IDs.
+	//
+	// IDs 0..31 are reserved for official use by PKWARE.
+	// IDs above that range are defined by third-party vendors.
+	// Since ZIP lacked high precision timestamps (or an official specification
+	// of the timezone used for the date fields), many competing extra fields
+	// have been invented. Pervasive use effectively makes them "official".
+	//
+	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+	zip64ExtraID       = 0x0001 // Zip64 extended information
+	ntfsExtraID        = 0x000a // NTFS
+	unixExtraID        = 0x000d // UNIX
+	extTimeExtraID     = 0x5455 // Extended timestamp
+	infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
+)
+
+// FileHeader describes a file within a zip file.
+// See the zip spec for details.
+type FileHeader struct {
+	// Name is the name of the file.
+	//
+	// It must be a relative path, not start with a drive letter (such as "C:"),
+	// and must use forward slashes instead of back slashes. A trailing slash
+	// indicates that this file is a directory and should have no data.
+	//
+	// When reading zip files, the Name field is populated from
+	// the zip file directly and is not validated for correctness.
+	// It is the caller's responsibility to sanitize it as
+	// appropriate, including canonicalizing slash directions,
+	// validating that paths are relative, and preventing path
+	// traversal through filenames ("../../../").
+	Name string
+
+	// Comment is any arbitrary user-defined string shorter than 64KiB.
+	Comment string
+
+	// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
+	//
+	// By specification, the only other encoding permitted should be CP-437,
+	// but historically many ZIP readers interpret Name and Comment as whatever
+	// the system's local character encoding happens to be.
+	//
+	// This flag should only be set if the user intends to encode a non-portable
+	// ZIP file for a specific localized region. Otherwise, the Writer
+	// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
+	NonUTF8 bool
+
+	CreatorVersion uint16
+	ReaderVersion  uint16
+	Flags          uint16
+
+	// Method is the compression method. If zero, Store is used.
+	Method uint16
+
+	// Modified is the modified time of the file.
+	//
+	// When reading, an extended timestamp is preferred over the legacy MS-DOS
+	// date field, and the offset between the times is used as the timezone.
+	// If only the MS-DOS date is present, the timezone is assumed to be UTC.
+	//
+	// When writing, an extended timestamp (which is timezone-agnostic) is
+	// always emitted. The legacy MS-DOS date field is encoded according to the
+	// location of the Modified time.
+	Modified     time.Time
+	ModifiedTime uint16 // Deprecated: Legacy MS-DOS time; use Modified instead.
+	ModifiedDate uint16 // Deprecated: Legacy MS-DOS date; use Modified instead.
+
+	CRC32              uint32
+	CompressedSize     uint32 // Deprecated: Use CompressedSize64 instead.
+	UncompressedSize   uint32 // Deprecated: Use UncompressedSize64 instead.
+	CompressedSize64   uint64
+	UncompressedSize64 uint64
+	Extra              []byte
+	ExternalAttrs      uint32 // Meaning depends on CreatorVersion
+}
+
+// FileInfo returns an fs.FileInfo for the FileHeader.
+func (h *FileHeader) FileInfo() fs.FileInfo {
+	return headerFileInfo{h}
+}
+
+// headerFileInfo implements fs.FileInfo.
+type headerFileInfo struct {
+	fh *FileHeader
+}
+
+func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
+func (fi headerFileInfo) Size() int64 {
+	if fi.fh.UncompressedSize64 > 0 {
+		return int64(fi.fh.UncompressedSize64)
+	}
+	return int64(fi.fh.UncompressedSize)
+}
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time {
+	if fi.fh.Modified.IsZero() {
+		return fi.fh.ModTime()
+	}
+	return fi.fh.Modified.UTC()
+}
+func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode() & fs.ModeType }
+func (fi headerFileInfo) Sys() interface{}  { return fi.fh }
+
+func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
+
+// FileInfoHeader creates a partially-populated FileHeader from an
+// fs.FileInfo.
+// Because fs.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+// If compression is desired, callers should set the FileHeader.Method
+// field; it is unset by default.
+func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
+	size := fi.Size()
+	fh := &FileHeader{
+		Name:               fi.Name(),
+		UncompressedSize64: uint64(size),
+	}
+	fh.SetModTime(fi.ModTime())
+	fh.SetMode(fi.Mode())
+	if fh.UncompressedSize64 > uint32max {
+		fh.UncompressedSize = uint32max
+	} else {
+		fh.UncompressedSize = uint32(fh.UncompressedSize64)
+	}
+	return fh, nil
+}
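
As the doc comment notes, FileInfoHeader records only the base name and leaves Method unset, so callers usually fix up Name and pick a compression method before writing. A short sketch that instead builds a FileHeader by hand and writes a single entry; the entry name, timestamp, and contents are invented:

```go
package main

import (
	"bytes"
	"log"
	"time"

	"golang.org/x/website/internal/backport/archive/zip"
)

func main() {
	hdr := &zip.FileHeader{
		Name:   "docs/readme.txt", // archive-relative path, forward slashes only
		Method: zip.Deflate,       // the zero value would mean Store
	}
	hdr.SetModTime(time.Date(2021, time.May, 1, 12, 0, 0, 0, time.UTC))
	hdr.SetMode(0644)

	buf := new(bytes.Buffer)
	zw := zip.NewWriter(buf)
	w, err := zw.CreateHeader(hdr)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello from the backport sketch\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}
```
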
+
+type directoryEnd struct {
+	diskNbr            uint32 // unused
+	dirDiskNbr         uint32 // unused
+	dirRecordsThisDisk uint64 // unused
+	directoryRecords   uint64
+	directorySize      uint64
+	directoryOffset    uint64 // relative to file
+	commentLen         uint16
+	comment            string
+}
+
+// timeZone returns a *time.Location based on the provided offset.
+// If the offset is non-sensible, then this uses an offset of zero.
+func timeZone(offset time.Duration) *time.Location {
+	const (
+		minOffset   = -12 * time.Hour  // E.g., Baker island at -12:00
+		maxOffset   = +14 * time.Hour  // E.g., Line island at +14:00
+		offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
+	)
+	offset = offset.Round(offsetAlias)
+	if offset < minOffset || maxOffset < offset {
+		offset = 0
+	}
+	return time.FixedZone("", int(offset/time.Second))
+}
+
+// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
+func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
+	return time.Date(
+		// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
+		int(dosDate>>9+1980),
+		time.Month(dosDate>>5&0xf),
+		int(dosDate&0x1f),
+
+		// time bits 0-4: second/2; 5-10: minute; 11-15: hour
+		int(dosTime>>11),
+		int(dosTime>>5&0x3f),
+		int(dosTime&0x1f*2),
+		0, // nanoseconds
+
+		time.UTC,
+	)
+}
+
+// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
+func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
+	fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
+	fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
+	return
+}
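
These two helpers pack timestamps into the legacy 16-bit MS-DOS fields described in the comments: 2-second resolution, years counted from 1980, and the bit layout shown inline. A worked, standalone illustration of that packing and its inverse (it mirrors the unexported helpers rather than calling them):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2021, time.April, 19, 12, 29, 58, 0, time.UTC)

	// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
	dosDate := uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
	// time bits 0-4: second/2; 5-10: minute; 11-15: hour
	dosTime := uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)

	// Decoding reverses the packing; seconds come back rounded down
	// to a multiple of two.
	back := time.Date(
		int(dosDate>>9+1980), time.Month(dosDate>>5&0xf), int(dosDate&0x1f),
		int(dosTime>>11), int(dosTime>>5&0x3f), int(dosTime&0x1f*2), 0, time.UTC)

	fmt.Printf("%#04x %#04x %v\n", dosDate, dosTime, back) // 0x5293 0x63bd 2021-04-19 12:29:58 +0000 UTC
}
```
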
+
+// ModTime returns the modification time in UTC using the legacy
+// ModifiedDate and ModifiedTime fields.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) ModTime() time.Time {
+	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
+}
+
+// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
+// to the given time in UTC.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) SetModTime(t time.Time) {
+	t = t.UTC() // Convert to UTC for compatibility
+	h.Modified = t
+	h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
+}
+
+const (
+	// Unix constants. The specification doesn't mention them,
+	// but these seem to be the values agreed on by tools.
+	s_IFMT   = 0xf000
+	s_IFSOCK = 0xc000
+	s_IFLNK  = 0xa000
+	s_IFREG  = 0x8000
+	s_IFBLK  = 0x6000
+	s_IFDIR  = 0x4000
+	s_IFCHR  = 0x2000
+	s_IFIFO  = 0x1000
+	s_ISUID  = 0x800
+	s_ISGID  = 0x400
+	s_ISVTX  = 0x200
+
+	msdosDir      = 0x10
+	msdosReadOnly = 0x01
+)
+
+// Mode returns the permission and mode bits for the FileHeader.
+func (h *FileHeader) Mode() (mode fs.FileMode) {
+	switch h.CreatorVersion >> 8 {
+	case creatorUnix, creatorMacOSX:
+		mode = unixModeToFileMode(h.ExternalAttrs >> 16)
+	case creatorNTFS, creatorVFAT, creatorFAT:
+		mode = msdosModeToFileMode(h.ExternalAttrs)
+	}
+	if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
+		mode |= fs.ModeDir
+	}
+	return mode
+}
+
+// SetMode changes the permission and mode bits for the FileHeader.
+func (h *FileHeader) SetMode(mode fs.FileMode) {
+	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
+	h.ExternalAttrs = fileModeToUnixMode(mode) << 16
+
+	// set MSDOS attributes too, as the original zip does.
+	if mode&fs.ModeDir != 0 {
+		h.ExternalAttrs |= msdosDir
+	}
+	if mode&0200 == 0 {
+		h.ExternalAttrs |= msdosReadOnly
+	}
+}
+
+// isZip64 reports whether the file size exceeds the 32 bit limit
+func (h *FileHeader) isZip64() bool {
+	return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
+}
+
+func (f *FileHeader) hasDataDescriptor() bool {
+	return f.Flags&0x8 != 0
+}
+
+func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
+	if m&msdosDir != 0 {
+		mode = fs.ModeDir | 0777
+	} else {
+		mode = 0666
+	}
+	if m&msdosReadOnly != 0 {
+		mode &^= 0222
+	}
+	return mode
+}
+
+func fileModeToUnixMode(mode fs.FileMode) uint32 {
+	var m uint32
+	switch mode & fs.ModeType {
+	default:
+		m = s_IFREG
+	case fs.ModeDir:
+		m = s_IFDIR
+	case fs.ModeSymlink:
+		m = s_IFLNK
+	case fs.ModeNamedPipe:
+		m = s_IFIFO
+	case fs.ModeSocket:
+		m = s_IFSOCK
+	case fs.ModeDevice:
+		m = s_IFBLK
+	case fs.ModeDevice | fs.ModeCharDevice:
+		m = s_IFCHR
+	}
+	if mode&fs.ModeSetuid != 0 {
+		m |= s_ISUID
+	}
+	if mode&fs.ModeSetgid != 0 {
+		m |= s_ISGID
+	}
+	if mode&fs.ModeSticky != 0 {
+		m |= s_ISVTX
+	}
+	return m | uint32(mode&0777)
+}
+
+func unixModeToFileMode(m uint32) fs.FileMode {
+	mode := fs.FileMode(m & 0777)
+	switch m & s_IFMT {
+	case s_IFBLK:
+		mode |= fs.ModeDevice
+	case s_IFCHR:
+		mode |= fs.ModeDevice | fs.ModeCharDevice
+	case s_IFDIR:
+		mode |= fs.ModeDir
+	case s_IFIFO:
+		mode |= fs.ModeNamedPipe
+	case s_IFLNK:
+		mode |= fs.ModeSymlink
+	case s_IFREG:
+		// nothing to do
+	case s_IFSOCK:
+		mode |= fs.ModeSocket
+	}
+	if m&s_ISGID != 0 {
+		mode |= fs.ModeSetgid
+	}
+	if m&s_ISUID != 0 {
+		mode |= fs.ModeSetuid
+	}
+	if m&s_ISVTX != 0 {
+		mode |= fs.ModeSticky
+	}
+	return mode
+}
+
+// dataDescriptor holds the data descriptor that optionally follows the file
+// contents in the zip file.
+type dataDescriptor struct {
+	crc32            uint32
+	compressedSize   uint64
+	uncompressedSize uint64
+}
diff --git a/internal/backport/archive/zip/testdata/crc32-not-streamed.zip b/internal/backport/archive/zip/testdata/crc32-not-streamed.zip
new file mode 100644
index 0000000..f268d88
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/crc32-not-streamed.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/dd.zip b/internal/backport/archive/zip/testdata/dd.zip
new file mode 100644
index 0000000..e53378b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/dd.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64 b/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64
new file mode 100644
index 0000000..1c2c071
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/go-no-datadesc-sig.zip.base64
@@ -0,0 +1 @@
+UEsDBBQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAZm9vLnR4dFVUBQAD3lVZT3V4CwABBPUBAAAEFAAAAGZvbwqoZTJ+BAAAAAQAAABQSwMEFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGABiYXIudHh0VVQFAAPgVVlPdXgLAAEE9QEAAAQUAAAAYmFyCumzogQEAAAABAAAAFBLAQIUAxQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAAAAAAAAAAACkgQAAAABmb28udHh0VVQFAAPeVVlPdXgLAAEE9QEAAAQUAAAAUEsBAhQDFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGAAAAAAAAAAAAKSBTQAAAGJhci50eHRVVAUAA+BVWU91eAsAAQT1AQAABBQAAABQSwUGAAAAAAIAAgCaAAAAmgAAAAAA
diff --git a/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip b/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip
new file mode 100644
index 0000000..bcfe121
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/go-with-datadesc-sig.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/gophercolor16x16.png b/internal/backport/archive/zip/testdata/gophercolor16x16.png
new file mode 100644
index 0000000..48854ff
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/gophercolor16x16.png
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/readme.notzip b/internal/backport/archive/zip/testdata/readme.notzip
new file mode 100644
index 0000000..8173727
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/readme.notzip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/readme.zip b/internal/backport/archive/zip/testdata/readme.zip
new file mode 100644
index 0000000..5642a67
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/readme.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/subdir.zip b/internal/backport/archive/zip/testdata/subdir.zip
new file mode 100644
index 0000000..324d06b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/subdir.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/symlink.zip b/internal/backport/archive/zip/testdata/symlink.zip
new file mode 100644
index 0000000..af84693
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/symlink.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/test-trailing-junk.zip b/internal/backport/archive/zip/testdata/test-trailing-junk.zip
new file mode 100644
index 0000000..42281b4
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/test-trailing-junk.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/test.zip b/internal/backport/archive/zip/testdata/test.zip
new file mode 100644
index 0000000..03890c0
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/test.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-22738.zip b/internal/backport/archive/zip/testdata/time-22738.zip
new file mode 100644
index 0000000..eb85b57
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-22738.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-7zip.zip b/internal/backport/archive/zip/testdata/time-7zip.zip
new file mode 100644
index 0000000..4f74819
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-7zip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-go.zip b/internal/backport/archive/zip/testdata/time-go.zip
new file mode 100644
index 0000000..f008805
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-go.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-infozip.zip b/internal/backport/archive/zip/testdata/time-infozip.zip
new file mode 100644
index 0000000..8e63948
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-infozip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-osx.zip b/internal/backport/archive/zip/testdata/time-osx.zip
new file mode 100644
index 0000000..e82c5c2
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-osx.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-win7.zip b/internal/backport/archive/zip/testdata/time-win7.zip
new file mode 100644
index 0000000..8ba222b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-win7.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-winrar.zip b/internal/backport/archive/zip/testdata/time-winrar.zip
new file mode 100644
index 0000000..a8a19b0
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-winrar.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/time-winzip.zip b/internal/backport/archive/zip/testdata/time-winzip.zip
new file mode 100644
index 0000000..f6e8f8b
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/time-winzip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/unix.zip b/internal/backport/archive/zip/testdata/unix.zip
new file mode 100644
index 0000000..ce1a981
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/unix.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-7zip.zip b/internal/backport/archive/zip/testdata/utf8-7zip.zip
new file mode 100644
index 0000000..0e97884
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-7zip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-infozip.zip b/internal/backport/archive/zip/testdata/utf8-infozip.zip
new file mode 100644
index 0000000..25a8926
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-infozip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-osx.zip b/internal/backport/archive/zip/testdata/utf8-osx.zip
new file mode 100644
index 0000000..9b0c058
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-osx.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-winrar.zip b/internal/backport/archive/zip/testdata/utf8-winrar.zip
new file mode 100644
index 0000000..4bad6c3
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-winrar.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/utf8-winzip.zip b/internal/backport/archive/zip/testdata/utf8-winzip.zip
new file mode 100644
index 0000000..909d52e
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/utf8-winzip.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/winxp.zip b/internal/backport/archive/zip/testdata/winxp.zip
new file mode 100644
index 0000000..3919322
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/winxp.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/zip64-2.zip b/internal/backport/archive/zip/testdata/zip64-2.zip
new file mode 100644
index 0000000..f844e35
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/zip64-2.zip
Binary files differ
diff --git a/internal/backport/archive/zip/testdata/zip64.zip b/internal/backport/archive/zip/testdata/zip64.zip
new file mode 100644
index 0000000..a2ee1fa
--- /dev/null
+++ b/internal/backport/archive/zip/testdata/zip64.zip
Binary files differ
diff --git a/internal/backport/archive/zip/writer.go b/internal/backport/archive/zip/writer.go
new file mode 100644
index 0000000..3b23cc3
--- /dev/null
+++ b/internal/backport/archive/zip/writer.go
@@ -0,0 +1,634 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"hash"
+	"hash/crc32"
+	"io"
+	"strings"
+	"unicode/utf8"
+)
+
+var (
+	errLongName  = errors.New("zip: FileHeader.Name too long")
+	errLongExtra = errors.New("zip: FileHeader.Extra too long")
+)
+
+// Writer implements a zip file writer.
+type Writer struct {
+	cw          *countWriter
+	dir         []*header
+	last        *fileWriter
+	closed      bool
+	compressors map[uint16]Compressor
+	comment     string
+
+	// testHookCloseSizeOffset if non-nil is called with the size
+	// and offset of the central directory at Close.
+	testHookCloseSizeOffset func(size, offset uint64)
+}
+
+type header struct {
+	*FileHeader
+	offset uint64
+	raw    bool
+}
+
+// NewWriter returns a new Writer writing a zip file to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
+}
+
+// SetOffset sets the offset of the beginning of the zip data within the
+// underlying writer. It should be used when the zip data is appended to an
+// existing file, such as a binary executable.
+// It must be called before any data is written.
+func (w *Writer) SetOffset(n int64) {
+	if w.cw.count != 0 {
+		panic("zip: SetOffset called after data was written")
+	}
+	w.cw.count = n
+}
+
+// Flush flushes any buffered data to the underlying writer.
+// Calling Flush is not normally necessary; calling Close is sufficient.
+func (w *Writer) Flush() error {
+	return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// SetComment sets the end-of-central-directory comment field.
+// It can only be called before Close.
+func (w *Writer) SetComment(comment string) error {
+	if len(comment) > uint16max {
+		return errors.New("zip: Writer.Comment too long")
+	}
+	w.comment = comment
+	return nil
+}
+
+// Close finishes writing the zip file by writing the central directory.
+// It does not close the underlying writer.
+func (w *Writer) Close() error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+		w.last = nil
+	}
+	if w.closed {
+		return errors.New("zip: writer closed twice")
+	}
+	w.closed = true
+
+	// write central directory
+	start := w.cw.count
+	for _, h := range w.dir {
+		var buf [directoryHeaderLen]byte
+		b := writeBuf(buf[:])
+		b.uint32(uint32(directoryHeaderSignature))
+		b.uint16(h.CreatorVersion)
+		b.uint16(h.ReaderVersion)
+		b.uint16(h.Flags)
+		b.uint16(h.Method)
+		b.uint16(h.ModifiedTime)
+		b.uint16(h.ModifiedDate)
+		b.uint32(h.CRC32)
+		if h.isZip64() || h.offset >= uint32max {
+			// the file needs a zip64 header. store maxint in both
+			// 32 bit size fields (and offset later) to signal that the
+			// zip64 extra header should be used.
+			b.uint32(uint32max) // compressed size
+			b.uint32(uint32max) // uncompressed size
+
+			// append a zip64 extra block to Extra
+			var buf [28]byte // 2x uint16 + 3x uint64
+			eb := writeBuf(buf[:])
+			eb.uint16(zip64ExtraID)
+			eb.uint16(24) // size = 3x uint64
+			eb.uint64(h.UncompressedSize64)
+			eb.uint64(h.CompressedSize64)
+			eb.uint64(h.offset)
+			h.Extra = append(h.Extra, buf[:]...)
+		} else {
+			b.uint32(h.CompressedSize)
+			b.uint32(h.UncompressedSize)
+		}
+
+		b.uint16(uint16(len(h.Name)))
+		b.uint16(uint16(len(h.Extra)))
+		b.uint16(uint16(len(h.Comment)))
+		b = b[4:] // skip disk number start and internal file attr (2x uint16)
+		b.uint32(h.ExternalAttrs)
+		if h.offset > uint32max {
+			b.uint32(uint32max)
+		} else {
+			b.uint32(uint32(h.offset))
+		}
+		if _, err := w.cw.Write(buf[:]); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(w.cw, h.Name); err != nil {
+			return err
+		}
+		if _, err := w.cw.Write(h.Extra); err != nil {
+			return err
+		}
+		if _, err := io.WriteString(w.cw, h.Comment); err != nil {
+			return err
+		}
+	}
+	end := w.cw.count
+
+	records := uint64(len(w.dir))
+	size := uint64(end - start)
+	offset := uint64(start)
+
+	if f := w.testHookCloseSizeOffset; f != nil {
+		f(size, offset)
+	}
+
+	if records >= uint16max || size >= uint32max || offset >= uint32max {
+		var buf [directory64EndLen + directory64LocLen]byte
+		b := writeBuf(buf[:])
+
+		// zip64 end of central directory record
+		b.uint32(directory64EndSignature)
+		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
+		b.uint16(zipVersion45)           // version made by
+		b.uint16(zipVersion45)           // version needed to extract
+		b.uint32(0)                      // number of this disk
+		b.uint32(0)                      // number of the disk with the start of the central directory
+		b.uint64(records)                // total number of entries in the central directory on this disk
+		b.uint64(records)                // total number of entries in the central directory
+		b.uint64(size)                   // size of the central directory
+		b.uint64(offset)                 // offset of start of central directory with respect to the starting disk number
+
+		// zip64 end of central directory locator
+		b.uint32(directory64LocSignature)
+		b.uint32(0)           // number of the disk with the start of the zip64 end of central directory
+		b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
+		b.uint32(1)           // total number of disks
+
+		if _, err := w.cw.Write(buf[:]); err != nil {
+			return err
+		}
+
+		// store max values in the regular end record to signal
+		// that the zip64 values should be used instead
+		records = uint16max
+		size = uint32max
+		offset = uint32max
+	}
+
+	// write end record
+	var buf [directoryEndLen]byte
+	b := writeBuf(buf[:])
+	b.uint32(uint32(directoryEndSignature))
+	b = b[4:]                        // skip over disk number and first disk number (2x uint16)
+	b.uint16(uint16(records))        // number of entries this disk
+	b.uint16(uint16(records))        // number of entries total
+	b.uint32(uint32(size))           // size of directory
+	b.uint32(uint32(offset))         // start of directory
+	b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
+	if _, err := w.cw.Write(buf[:]); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w.cw, w.comment); err != nil {
+		return err
+	}
+
+	return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// Create adds a file to the zip file using the provided name.
+// It returns a Writer to which the file contents should be written.
+// The file contents will be compressed using the Deflate method.
+// The name must be a relative path: it must not start with a drive
+// letter (e.g. C:) or leading slash, and only forward slashes are
+// allowed. To create a directory instead of a file, add a trailing
+// slash to the name.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, or Close.
+func (w *Writer) Create(name string) (io.Writer, error) {
+	header := &FileHeader{
+		Name:   name,
+		Method: Deflate,
+	}
+	return w.CreateHeader(header)
+}
+
+// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
+// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
+// or any other common encoding).
+func detectUTF8(s string) (valid, require bool) {
+	for i := 0; i < len(s); {
+		r, size := utf8.DecodeRuneInString(s[i:])
+		i += size
+		// Officially, ZIP uses CP-437, but many readers use the system's
+	// local character encoding. Most encodings are compatible with a large
+		// subset of CP-437, which itself is ASCII-like.
+		//
+		// Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
+		// characters with localized currency and overline characters.
+		if r < 0x20 || r > 0x7d || r == 0x5c {
+			if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
+				return false, false
+			}
+			require = true
+		}
+	}
+	return true, require
+}
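
detectUTF8 feeds the decision further down in CreateHeader about the ZIP language-encoding flag (bit 0x800): CP-437-compatible names leave it clear, while names that are valid UTF-8 and actually need it get it set. A small sketch observing that through the public API; the file names are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/website/internal/backport/archive/zip"
)

func main() {
	buf := new(bytes.Buffer)
	zw := zip.NewWriter(buf)

	for _, name := range []string{"ascii.txt", "こんにちわ.txt"} {
		hdr := &zip.FileHeader{Name: name, Method: zip.Store}
		if _, err := zw.CreateHeader(hdr); err != nil {
			log.Fatal(err)
		}
		// Bit 0x800 is the ZIP "language encoding" (UTF-8) flag.
		fmt.Printf("%q utf8 flag set: %v\n", name, hdr.Flags&0x800 != 0)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```
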
+
+// prepare performs the bookkeeping operations required at the start of
+// CreateHeader and CreateRaw.
+func (w *Writer) prepare(fh *FileHeader) error {
+	if w.last != nil && !w.last.closed {
+		if err := w.last.close(); err != nil {
+			return err
+		}
+	}
+	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 for the confusion this causes.
+		return errors.New("archive/zip: invalid duplicate FileHeader")
+	}
+	return nil
+}
+
+// CreateHeader adds a file to the zip archive using the provided FileHeader
+// for the file metadata. Writer takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling CreateHeader.
+//
+// This returns a Writer to which the file contents should be written.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, CreateRaw, or Close.
+func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
+	if err := w.prepare(fh); err != nil {
+		return nil, err
+	}
+
+	// The ZIP format has a sad state of affairs regarding character encoding.
+	// Officially, the name and comment fields are supposed to be encoded
+	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+	// flag bit is set. However, there are several problems:
+	//
+	//	* Many ZIP readers still do not support UTF-8.
+	//	* If the UTF-8 flag is cleared, several readers simply interpret the
+	//	name and comment fields as whatever the local system encoding is.
+	//
+	// In order to avoid breaking readers without UTF-8 support,
+	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+	// However, if the strings require multibyte UTF-8 encoding and are
+	// valid UTF-8 strings, then we set the UTF-8 bit.
+	//
+	// For the case where the user explicitly wants to specify the encoding
+	// as UTF-8, they will need to set the flag bit themselves.
+	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+	switch {
+	case fh.NonUTF8:
+		fh.Flags &^= 0x800
+	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+		fh.Flags |= 0x800
+	}
+
+	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
+	fh.ReaderVersion = zipVersion20
+
+	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
+	if !fh.Modified.IsZero() {
+		// Contrary to the FileHeader.SetModTime method, we intentionally
+		// do not convert to UTC, because we assume the user intends to encode
+		// the date using the specified timezone. A user may want this control
+		// because many legacy ZIP readers interpret the timestamp according
+		// to the local timezone.
+		//
+		// The timezone is only non-UTC if the user sets the Modified
+		// field directly. All other approaches set UTC.
+		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+		// Use "extended timestamp" format since this is what Info-ZIP uses.
+		// Nearly every major ZIP implementation uses a different format,
+		// but at least most seem to be able to understand the other formats.
+		//
+		// This format happens to be identical for both local and central header
+		// if modification time is the only timestamp being encoded.
+		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+		mt := uint32(fh.Modified.Unix())
+		eb := writeBuf(mbuf[:])
+		eb.uint16(extTimeExtraID)
+		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
+		eb.uint8(1)   // Flags: ModTime
+		eb.uint32(mt) // ModTime
+		fh.Extra = append(fh.Extra, mbuf[:]...)
+	}
+
+	var (
+		ow io.Writer
+		fw *fileWriter
+	)
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(w.cw.count),
+	}
+
+	if strings.HasSuffix(fh.Name, "/") {
+		// Set the compression method to Store to ensure data length is truly zero,
+		// which the writeHeader method always encodes for the size fields.
+		// This is necessary as most compression formats have non-zero lengths
+		// even when compressing an empty string.
+		fh.Method = Store
+		fh.Flags &^= 0x8 // we will not write a data descriptor
+
+		// Explicitly clear sizes as they have no meaning for directories.
+		fh.CompressedSize = 0
+		fh.CompressedSize64 = 0
+		fh.UncompressedSize = 0
+		fh.UncompressedSize64 = 0
+
+		ow = dirWriter{}
+	} else {
+		fh.Flags |= 0x8 // we will write a data descriptor
+
+		fw = &fileWriter{
+			zipw:      w.cw,
+			compCount: &countWriter{w: w.cw},
+			crc32:     crc32.NewIEEE(),
+		}
+		comp := w.compressor(fh.Method)
+		if comp == nil {
+			return nil, ErrAlgorithm
+		}
+		var err error
+		fw.comp, err = comp(fw.compCount)
+		if err != nil {
+			return nil, err
+		}
+		fw.rawCount = &countWriter{w: fw.comp}
+		fw.header = h
+		ow = fw
+	}
+	w.dir = append(w.dir, h)
+	if err := writeHeader(w.cw, h); err != nil {
+		return nil, err
+	}
+	// If we're creating a directory, fw is nil.
+	w.last = fw
+	return ow, nil
+}
+
+func writeHeader(w io.Writer, h *header) error {
+	const maxUint16 = 1<<16 - 1
+	if len(h.Name) > maxUint16 {
+		return errLongName
+	}
+	if len(h.Extra) > maxUint16 {
+		return errLongExtra
+	}
+
+	var buf [fileHeaderLen]byte
+	b := writeBuf(buf[:])
+	b.uint32(uint32(fileHeaderSignature))
+	b.uint16(h.ReaderVersion)
+	b.uint16(h.Flags)
+	b.uint16(h.Method)
+	b.uint16(h.ModifiedTime)
+	b.uint16(h.ModifiedDate)
+	// In raw mode (caller does the compression), the values are either
+	// written here or in the trailing data descriptor based on the header
+	// flags.
+	if h.raw && !h.hasDataDescriptor() {
+		b.uint32(h.CRC32)
+		b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
+		b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
+	} else {
+		// When this package handles the compression, these values are
+		// always written to the trailing data descriptor.
+		b.uint32(0) // crc32
+		b.uint32(0) // compressed size
+		b.uint32(0) // uncompressed size
+	}
+	b.uint16(uint16(len(h.Name)))
+	b.uint16(uint16(len(h.Extra)))
+	if _, err := w.Write(buf[:]); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w, h.Name); err != nil {
+		return err
+	}
+	_, err := w.Write(h.Extra)
+	return err
+}
+
+func min64(x, y uint64) uint64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// CreateRaw adds a file to the zip archive using the provided FileHeader and
+// returns a Writer to which the file contents should be written. The file's
+// contents must be written to the io.Writer before the next call to Create,
+// CreateHeader, CreateRaw, or Close.
+//
+// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
+func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
+	if err := w.prepare(fh); err != nil {
+		return nil, err
+	}
+
+	fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
+	fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
+
+	h := &header{
+		FileHeader: fh,
+		offset:     uint64(w.cw.count),
+		raw:        true,
+	}
+	w.dir = append(w.dir, h)
+	if err := writeHeader(w.cw, h); err != nil {
+		return nil, err
+	}
+
+	if strings.HasSuffix(fh.Name, "/") {
+		w.last = nil
+		return dirWriter{}, nil
+	}
+
+	fw := &fileWriter{
+		header: h,
+		zipw:   w.cw,
+	}
+	w.last = fw
+	return fw, nil
+}
+
+// Copy copies the file f (obtained from a Reader) into w. It copies the raw
+// form directly, bypassing decompression, compression, and validation.
+func (w *Writer) Copy(f *File) error {
+	r, err := f.OpenRaw()
+	if err != nil {
+		return err
+	}
+	fw, err := w.CreateRaw(&f.FileHeader)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(fw, r)
+	return err
+}
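
Copy pairs with CreateRaw to move entries between archives without a decompress/recompress round trip. A hedged sketch that filters one archive into another; the archive paths and the .log filter are purely illustrative:

```go
package main

import (
	"log"
	"os"
	"strings"

	"golang.org/x/website/internal/backport/archive/zip"
)

func main() {
	src, err := zip.OpenReader("in.zip") // illustrative input archive
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	out, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := zip.NewWriter(out)
	for _, f := range src.File {
		if strings.HasSuffix(f.Name, ".log") {
			continue // drop log files; the filter is just an example
		}
		// Copy moves the already-compressed bytes straight across,
		// bypassing decompression and recompression.
		if err := zw.Copy(f); err != nil {
			log.Fatal(err)
		}
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```
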
+
+// RegisterCompressor registers or overrides a custom compressor for a specific
+// method ID. If a compressor for a given method is not found, Writer will
+// default to looking up the compressor at the package level.
+func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
+	if w.compressors == nil {
+		w.compressors = make(map[uint16]Compressor)
+	}
+	w.compressors[method] = comp
+}
+
+func (w *Writer) compressor(method uint16) Compressor {
+	comp := w.compressors[method]
+	if comp == nil {
+		comp = compressor(method)
+	}
+	return comp
+}
+
+type dirWriter struct{}
+
+func (dirWriter) Write(b []byte) (int, error) {
+	if len(b) == 0 {
+		return 0, nil
+	}
+	return 0, errors.New("zip: write to directory")
+}
+
+type fileWriter struct {
+	*header
+	zipw      io.Writer
+	rawCount  *countWriter
+	comp      io.WriteCloser
+	compCount *countWriter
+	crc32     hash.Hash32
+	closed    bool
+}
+
+func (w *fileWriter) Write(p []byte) (int, error) {
+	if w.closed {
+		return 0, errors.New("zip: write to closed file")
+	}
+	if w.raw {
+		return w.zipw.Write(p)
+	}
+	w.crc32.Write(p)
+	return w.rawCount.Write(p)
+}
+
+func (w *fileWriter) close() error {
+	if w.closed {
+		return errors.New("zip: file closed twice")
+	}
+	w.closed = true
+	if w.raw {
+		return w.writeDataDescriptor()
+	}
+	if err := w.comp.Close(); err != nil {
+		return err
+	}
+
+	// update FileHeader
+	fh := w.header.FileHeader
+	fh.CRC32 = w.crc32.Sum32()
+	fh.CompressedSize64 = uint64(w.compCount.count)
+	fh.UncompressedSize64 = uint64(w.rawCount.count)
+
+	if fh.isZip64() {
+		fh.CompressedSize = uint32max
+		fh.UncompressedSize = uint32max
+		fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
+	} else {
+		fh.CompressedSize = uint32(fh.CompressedSize64)
+		fh.UncompressedSize = uint32(fh.UncompressedSize64)
+	}
+
+	return w.writeDataDescriptor()
+}
+
+func (w *fileWriter) writeDataDescriptor() error {
+	if !w.hasDataDescriptor() {
+		return nil
+	}
+	// Write data descriptor. This is more complicated than one would
+	// think, see e.g. comments in zipfile.c:putextended() and
+	// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
+	// The approach here is to write 8 byte sizes if needed without
+	// adding a zip64 extra in the local header (too late anyway).
+	var buf []byte
+	if w.isZip64() {
+		buf = make([]byte, dataDescriptor64Len)
+	} else {
+		buf = make([]byte, dataDescriptorLen)
+	}
+	b := writeBuf(buf)
+	b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
+	b.uint32(w.CRC32)
+	if w.isZip64() {
+		b.uint64(w.CompressedSize64)
+		b.uint64(w.UncompressedSize64)
+	} else {
+		b.uint32(w.CompressedSize)
+		b.uint32(w.UncompressedSize)
+	}
+	_, err := w.zipw.Write(buf)
+	return err
+}
+
+type countWriter struct {
+	w     io.Writer
+	count int64
+}
+
+func (w *countWriter) Write(p []byte) (int, error) {
+	n, err := w.w.Write(p)
+	w.count += int64(n)
+	return n, err
+}
+
+type nopCloser struct {
+	io.Writer
+}
+
+func (w nopCloser) Close() error {
+	return nil
+}
+
+type writeBuf []byte
+
+func (b *writeBuf) uint8(v uint8) {
+	(*b)[0] = v
+	*b = (*b)[1:]
+}
+
+func (b *writeBuf) uint16(v uint16) {
+	binary.LittleEndian.PutUint16(*b, v)
+	*b = (*b)[2:]
+}
+
+func (b *writeBuf) uint32(v uint32) {
+	binary.LittleEndian.PutUint32(*b, v)
+	*b = (*b)[4:]
+}
+
+func (b *writeBuf) uint64(v uint64) {
+	binary.LittleEndian.PutUint64(*b, v)
+	*b = (*b)[8:]
+}
diff --git a/internal/backport/archive/zip/writer_test.go b/internal/backport/archive/zip/writer_test.go
new file mode 100644
index 0000000..f001529
--- /dev/null
+++ b/internal/backport/archive/zip/writer_test.go
@@ -0,0 +1,605 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+	"bytes"
+	"compress/flate"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/website/internal/backport/io/fs"
+)
+
+// TODO(adg): a more sophisticated test suite
+
+type WriteTest struct {
+	Name   string
+	Data   []byte
+	Method uint16
+	Mode   fs.FileMode
+}
+
+var writeTests = []WriteTest{
+	{
+		Name:   "foo",
+		Data:   []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."),
+		Method: Store,
+		Mode:   0666,
+	},
+	{
+		Name:   "bar",
+		Data:   nil, // large data set in the test
+		Method: Deflate,
+		Mode:   0644,
+	},
+	{
+		Name:   "setuid",
+		Data:   []byte("setuid file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSetuid,
+	},
+	{
+		Name:   "setgid",
+		Data:   []byte("setgid file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSetgid,
+	},
+	{
+		Name:   "symlink",
+		Data:   []byte("../link/target"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeSymlink,
+	},
+	{
+		Name:   "device",
+		Data:   []byte("device file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeDevice,
+	},
+	{
+		Name:   "chardevice",
+		Data:   []byte("char device file"),
+		Method: Deflate,
+		Mode:   0755 | fs.ModeDevice | fs.ModeCharDevice,
+	},
+}
+
+func TestWriter(t *testing.T) {
+	largeData := make([]byte, 1<<17)
+	if _, err := rand.Read(largeData); err != nil {
+		t.Fatal("rand.Read failed:", err)
+	}
+	writeTests[1].Data = largeData
+	defer func() {
+		writeTests[1].Data = nil
+	}()
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+// TestWriterComment is a test for EOCD comment read/write.
+func TestWriterComment(t *testing.T) {
+	var tests = []struct {
+		comment string
+		ok      bool
+	}{
+		{"hi, hello", true},
+		{"hi, こんにちわ", true},
+		{strings.Repeat("a", uint16max), true},
+		{strings.Repeat("a", uint16max+1), false},
+	}
+
+	for _, test := range tests {
+		// write a zip file
+		buf := new(bytes.Buffer)
+		w := NewWriter(buf)
+		if err := w.SetComment(test.comment); err != nil {
+			if test.ok {
+				t.Fatalf("SetComment: unexpected error %v", err)
+			}
+			continue
+		} else {
+			if !test.ok {
+				t.Fatalf("SetComment: unexpected success, want error")
+			}
+		}
+
+		if err := w.Close(); test.ok == (err != nil) {
+			t.Fatal(err)
+		}
+
+		if w.closed != test.ok {
+			t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
+		}
+
+		// skip read test in failure cases
+		if !test.ok {
+			continue
+		}
+
+		// read it back
+		r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+		if err != nil {
+			t.Fatal(err)
+		}
+		if r.Comment != test.comment {
+			t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
+		}
+	}
+}
+
+func TestWriterUTF8(t *testing.T) {
+	var utf8Tests = []struct {
+		name    string
+		comment string
+		nonUTF8 bool
+		flags   uint16
+	}{
+		{
+			name:    "hi, hello",
+			comment: "in the world",
+			flags:   0x8,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the world",
+			flags:   0x808,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the world",
+			nonUTF8: true,
+			flags:   0x8,
+		},
+		{
+			name:    "hi, hello",
+			comment: "in the 世界",
+			flags:   0x808,
+		},
+		{
+			name:    "hi, こんにちわ",
+			comment: "in the 世界",
+			flags:   0x808,
+		},
+		{
+			name:    "the replacement rune is �",
+			comment: "the replacement rune is �",
+			flags:   0x808,
+		},
+		{
+			// Name is Japanese encoded in Shift JIS.
+			name:    "\x93\xfa\x96{\x8c\xea.txt",
+			comment: "in the 世界",
+			flags:   0x008, // UTF-8 must not be set
+		},
+	}
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, test := range utf8Tests {
+		h := &FileHeader{
+			Name:    test.name,
+			Comment: test.comment,
+			NonUTF8: test.nonUTF8,
+			Method:  Deflate,
+		}
+		w, err := w.CreateHeader(h)
+		if err != nil {
+			t.Fatal(err)
+		}
+		w.Write([]byte{})
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, test := range utf8Tests {
+		flags := r.File[i].Flags
+		if flags != test.flags {
+			t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
+		}
+	}
+}
+
+func TestWriterTime(t *testing.T) {
+	var buf bytes.Buffer
+	h := &FileHeader{
+		Name:     "test.txt",
+		Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+	}
+	w := NewWriter(&buf)
+	if _, err := w.CreateHeader(h); err != nil {
+		t.Fatalf("unexpected CreateHeader error: %v", err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("unexpected Close error: %v", err)
+	}
+
+	want, err := ioutil.ReadFile("testdata/time-go.zip")
+	if err != nil {
+		t.Fatalf("unexpected ReadFile error: %v", err)
+	}
+	if got := buf.Bytes(); !bytes.Equal(got, want) {
+		fmt.Printf("%x\n%x\n", got, want)
+		t.Error("contents of time-go.zip differ")
+	}
+}
+
+func TestWriterOffset(t *testing.T) {
+	largeData := make([]byte, 1<<17)
+	if _, err := rand.Read(largeData); err != nil {
+		t.Fatal("rand.Read failed:", err)
+	}
+	writeTests[1].Data = largeData
+	defer func() {
+		writeTests[1].Data = nil
+	}()
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
+	n, _ := buf.Write(existingData)
+	w := NewWriter(buf)
+	w.SetOffset(int64(n))
+
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+func TestWriterFlush(t *testing.T) {
+	var buf bytes.Buffer
+	w := NewWriter(struct{ io.Writer }{&buf})
+	_, err := w.Create("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if buf.Len() > 0 {
+		t.Fatalf("Unexpected %d bytes already in buffer", buf.Len())
+	}
+	if err := w.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	if buf.Len() == 0 {
+		t.Fatal("No bytes written after Flush")
+	}
+}
+
+func TestWriterDir(t *testing.T) {
+	w := NewWriter(ioutil.Discard)
+	dw, err := w.Create("dir/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := dw.Write(nil); err != nil {
+		t.Errorf("Write(nil) to directory: got %v, want nil", err)
+	}
+	if _, err := dw.Write([]byte("hello")); err == nil {
+		t.Error(`Write("hello") to directory: got nil error, want non-nil`)
+	}
+}
+
+func TestWriterDirAttributes(t *testing.T) {
+	var buf bytes.Buffer
+	w := NewWriter(&buf)
+	if _, err := w.CreateHeader(&FileHeader{
+		Name:               "dir/",
+		Method:             Deflate,
+		CompressedSize64:   1234,
+		UncompressedSize64: 5678,
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+	b := buf.Bytes()
+
+	var sig [4]byte
+	binary.LittleEndian.PutUint32(sig[:], uint32(fileHeaderSignature))
+
+	idx := bytes.Index(b, sig[:])
+	if idx == -1 {
+		t.Fatal("file header not found")
+	}
+	b = b[idx:]
+
+	if !bytes.Equal(b[6:10], []byte{0, 0, 0, 0}) { // FileHeader.Flags: 0, FileHeader.Method: 0
+		t.Errorf("unexpected method and flags: %v", b[6:10])
+	}
+
+	if !bytes.Equal(b[14:26], make([]byte, 12)) { // FileHeader.{CRC32,CompressedSize,UncompressedSize} all zero.
+		t.Errorf("expected CRC32, compressed size, and uncompressed size to be 0, got: %v", b[14:26])
+	}
+
+	binary.LittleEndian.PutUint32(sig[:], uint32(dataDescriptorSignature))
+	if bytes.Index(b, sig[:]) != -1 {
+		t.Error("there should be no data descriptor")
+	}
+}
+
+func TestWriterCopy(t *testing.T) {
+	// make a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, src.File[i], &wt)
+	}
+
+	// make a new zip file copying the old compressed data.
+	buf2 := new(bytes.Buffer)
+	dst := NewWriter(buf2)
+	for _, f := range src.File {
+		if err := dst.Copy(f); err != nil {
+			t.Fatal(err)
+		}
+	}
+	if err := dst.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read the new one back
+	r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
+func TestWriterCreateRaw(t *testing.T) {
+	files := []struct {
+		name             string
+		content          []byte
+		method           uint16
+		flags            uint16
+		crc32            uint32
+		uncompressedSize uint64
+		compressedSize   uint64
+	}{
+		{
+			name:    "small store w desc",
+			content: []byte("gophers"),
+			method:  Store,
+			flags:   0x8,
+		},
+		{
+			name:    "small deflate wo desc",
+			content: bytes.Repeat([]byte("abcdefg"), 2048),
+			method:  Deflate,
+		},
+	}
+
+	// write a zip file
+	archive := new(bytes.Buffer)
+	w := NewWriter(archive)
+
+	for i := range files {
+		f := &files[i]
+		f.crc32 = crc32.ChecksumIEEE(f.content)
+		size := uint64(len(f.content))
+		f.uncompressedSize = size
+		f.compressedSize = size
+
+		var compressedContent []byte
+		if f.method == Deflate {
+			var buf bytes.Buffer
+			w, err := flate.NewWriter(&buf, flate.BestSpeed)
+			if err != nil {
+				t.Fatalf("flate.NewWriter err = %v", err)
+			}
+			_, err = w.Write(f.content)
+			if err != nil {
+				t.Fatalf("flate Write err = %v", err)
+			}
+			err = w.Close()
+			if err != nil {
+				t.Fatalf("flate Writer.Close err = %v", err)
+			}
+			compressedContent = buf.Bytes()
+			f.compressedSize = uint64(len(compressedContent))
+		}
+
+		h := &FileHeader{
+			Name:               f.name,
+			Method:             f.method,
+			Flags:              f.flags,
+			CRC32:              f.crc32,
+			CompressedSize64:   f.compressedSize,
+			UncompressedSize64: f.uncompressedSize,
+		}
+		w, err := w.CreateRaw(h)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if compressedContent != nil {
+			_, err = w.Write(compressedContent)
+		} else {
+			_, err = w.Write(f.content)
+		}
+		if err != nil {
+			t.Fatalf("%s Write got %v; want nil", f.name, err)
+		}
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, want := range files {
+		got := r.File[i]
+		if got.Name != want.name {
+			t.Errorf("got Name %s; want %s", got.Name, want.name)
+		}
+		if got.Method != want.method {
+			t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method)
+		}
+		if got.Flags != want.flags {
+			t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags)
+		}
+		if got.CRC32 != want.crc32 {
+			t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32)
+		}
+		if got.CompressedSize64 != want.compressedSize {
+			t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize)
+		}
+		if got.UncompressedSize64 != want.uncompressedSize {
+			t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize)
+		}
+
+		r, err := got.Open()
+		if err != nil {
+			t.Errorf("%s: Open err = %v", got.Name, err)
+			continue
+		}
+
+		buf, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Errorf("%s: ReadAll err = %v", got.Name, err)
+			continue
+		}
+
+		if !bytes.Equal(buf, want.content) {
+			t.Errorf("%v: ReadAll returned unexpected bytes", got.Name)
+		}
+	}
+}
+
+func testCreate(t *testing.T, w *Writer, wt *WriteTest) {
+	header := &FileHeader{
+		Name:   wt.Name,
+		Method: wt.Method,
+	}
+	if wt.Mode != 0 {
+		header.SetMode(wt.Mode)
+	}
+	f, err := w.CreateHeader(header)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = f.Write(wt.Data)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testReadFile(t *testing.T, f *File, wt *WriteTest) {
+	if f.Name != wt.Name {
+		t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
+	}
+	testFileMode(t, f, wt.Mode)
+	rc, err := f.Open()
+	if err != nil {
+		t.Fatalf("opening %s: %v", f.Name, err)
+	}
+	b, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatalf("reading %s: %v", f.Name, err)
+	}
+	err = rc.Close()
+	if err != nil {
+		t.Fatalf("closing %s: %v", f.Name, err)
+	}
+	if !bytes.Equal(b, wt.Data) {
+		t.Errorf("File contents %q, want %q", b, wt.Data)
+	}
+}
+
+func BenchmarkCompressedZipGarbage(b *testing.B) {
+	bigBuf := bytes.Repeat([]byte("a"), 1<<20)
+
+	runOnce := func(buf *bytes.Buffer) {
+		buf.Reset()
+		zw := NewWriter(buf)
+		for j := 0; j < 3; j++ {
+			w, _ := zw.CreateHeader(&FileHeader{
+				Name:   "foo",
+				Method: Deflate,
+			})
+			w.Write(bigBuf)
+		}
+		zw.Close()
+	}
+
+	b.ReportAllocs()
+	// Run once and then reset the timer.
+	// This effectively discards the very large initial flate setup cost,
+	// as well as the initialization of bigBuf.
+	runOnce(&bytes.Buffer{})
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		var buf bytes.Buffer
+		for pb.Next() {
+			runOnce(&buf)
+		}
+	})
+}
diff --git a/internal/backport/archive/zip/zip_test.go b/internal/backport/archive/zip/zip_test.go
new file mode 100644
index 0000000..17f5331
--- /dev/null
+++ b/internal/backport/archive/zip/zip_test.go
@@ -0,0 +1,625 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that involve both reading and writing.
+
+package zip
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestModTime(t *testing.T) {
+	var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC)
+	fh := new(FileHeader)
+	fh.SetModTime(testTime)
+	outTime := fh.ModTime()
+	if !outTime.Equal(testTime) {
+		t.Errorf("times don't match: got %s, want %s", outTime, testTime)
+	}
+}
+
+func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) {
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.Name, fh.Name; got != want {
+		t.Errorf("Name: got %s, want %s\n", got, want)
+	}
+	if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want {
+		t.Errorf("UncompressedSize: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want {
+		t.Errorf("UncompressedSize64: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want {
+		t.Errorf("ModifiedTime: got %d, want %d\n", got, want)
+	}
+	if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want {
+		t.Errorf("ModifiedDate: got %d, want %d\n", got, want)
+	}
+
+	if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh {
+		t.Errorf("Sys didn't return original *FileHeader")
+	}
+}
+
+func TestFileHeaderRoundTrip(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t)
+}
+
+func TestFileHeaderRoundTrip64(t *testing.T) {
+	fh := &FileHeader{
+		Name:               "foo.txt",
+		UncompressedSize64: 9876543210,
+		ModifiedTime:       1234,
+		ModifiedDate:       5678,
+	}
+	testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
+}
+
+func TestFileHeaderRoundTripModified(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		Modified:         time.Now().Local(),
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.Modified, fh.Modified.UTC(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+	if got, want := fi.ModTime(), fh.Modified.UTC(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+}
+
+func TestFileHeaderRoundTripWithoutModified(t *testing.T) {
+	fh := &FileHeader{
+		Name:             "foo.txt",
+		UncompressedSize: 987654321,
+		ModifiedTime:     1234,
+		ModifiedDate:     5678,
+	}
+	fi := fh.FileInfo()
+	fh2, err := FileInfoHeader(fi)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := fh2.ModTime(), fh.ModTime(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+	if got, want := fi.ModTime(), fh.ModTime(); got != want {
+		t.Errorf("Modified: got %s, want %s\n", got, want)
+	}
+}
+
+type repeatedByte struct {
+	off int64
+	b   byte
+	n   int64
+}
+
+// rleBuffer is a run-length-encoded byte buffer.
+// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
+// allowing random-access reads.
+type rleBuffer struct {
+	buf []repeatedByte
+}
+
+func (r *rleBuffer) Size() int64 {
+	if len(r.buf) == 0 {
+		return 0
+	}
+	last := &r.buf[len(r.buf)-1]
+	return last.off + last.n
+}
+
+func (r *rleBuffer) Write(p []byte) (n int, err error) {
+	var rp *repeatedByte
+	if len(r.buf) > 0 {
+		rp = &r.buf[len(r.buf)-1]
+		// Fast path, if p is entirely the same byte repeated.
+		if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
+			if bytes.Count(p, []byte{lastByte}) == len(p) {
+				rp.n += int64(len(p))
+				return len(p), nil
+			}
+		}
+	}
+
+	for _, b := range p {
+		if rp == nil || rp.b != b {
+			r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
+			rp = &r.buf[len(r.buf)-1]
+		} else {
+			rp.n++
+		}
+	}
+	return len(p), nil
+}
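+
+// Illustrative sketch (not part of the upstream file): runs of equal bytes
+// coalesce, so after
+//
+//	var r rleBuffer
+//	r.Write([]byte("aaab"))
+//	r.Write([]byte("bb"))
+//
+// r.buf holds two entries: {off: 0, b: 'a', n: 3} and {off: 3, b: 'b', n: 3}.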
+
+func min(x, y int64) int64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func memset(a []byte, b byte) {
+	if len(a) == 0 {
+		return
+	}
+	// Double, until we reach power of 2 >= len(a), same as bytes.Repeat,
+	// but without allocation.
+	a[0] = b
+	for i, l := 1, len(a); i < l; i *= 2 {
+		copy(a[i:], a[:i])
+	}
+}
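+
+// For example (illustrative, not part of the upstream file), with len(a) == 5
+// the loop above runs with i = 1, 2, 4, doubling the filled prefix (capped at
+// len(a)) until the whole slice holds b.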
+
+func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
+	if len(p) == 0 {
+		return
+	}
+	skipParts := sort.Search(len(r.buf), func(i int) bool {
+		part := &r.buf[i]
+		return part.off+part.n > off
+	})
+	parts := r.buf[skipParts:]
+	if len(parts) > 0 {
+		skipBytes := off - parts[0].off
+		for _, part := range parts {
+			repeat := int(min(part.n-skipBytes, int64(len(p)-n)))
+			memset(p[n:n+repeat], part.b)
+			n += repeat
+			if n == len(p) {
+				return
+			}
+			skipBytes = 0
+		}
+	}
+	if n != len(p) {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// Just testing the rleBuffer defined above, which the Zip64 tests in this file use. Not used by the zip code.
+func TestRLEBuffer(t *testing.T) {
+	b := new(rleBuffer)
+	var all []byte
+	writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
+	for _, w := range writes {
+		b.Write([]byte(w))
+		all = append(all, w...)
+	}
+	if len(b.buf) != 10 {
+		t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
+	}
+
+	for i := 0; i < len(all); i++ {
+		for j := 0; j < len(all)-i; j++ {
+			buf := make([]byte, j)
+			n, err := b.ReadAt(buf, int64(i))
+			if err != nil || n != len(buf) {
+				t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
+			}
+			if !bytes.Equal(buf, all[i:i+j]) {
+				t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
+			}
+		}
+	}
+}
+
+// fakeHash32 is a dummy Hash32 that always returns 0.
+type fakeHash32 struct {
+	hash.Hash32
+}
+
+func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
+func (fakeHash32) Sum32() uint32               { return 0 }
+
+// suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
+// to 'keep' bytes of data written to it. Call Suffix to get the
+// suffix bytes.
+type suffixSaver struct {
+	keep  int
+	buf   []byte
+	start int
+	size  int64
+}
+
+func (ss *suffixSaver) Size() int64 { return ss.size }
+
+var errDiscardedBytes = errors.New("ReadAt of discarded bytes")
+
+func (ss *suffixSaver) ReadAt(p []byte, off int64) (n int, err error) {
+	back := ss.size - off
+	if back > int64(ss.keep) {
+		return 0, errDiscardedBytes
+	}
+	suf := ss.Suffix()
+	n = copy(p, suf[len(suf)-int(back):])
+	if n != len(p) {
+		err = io.EOF
+	}
+	return
+}
+
+func (ss *suffixSaver) Suffix() []byte {
+	if len(ss.buf) < ss.keep {
+		return ss.buf
+	}
+	buf := make([]byte, ss.keep)
+	n := copy(buf, ss.buf[ss.start:])
+	copy(buf[n:], ss.buf[:])
+	return buf
+}
+
+func (ss *suffixSaver) Write(p []byte) (n int, err error) {
+	n = len(p)
+	ss.size += int64(len(p))
+	if len(ss.buf) < ss.keep {
+		space := ss.keep - len(ss.buf)
+		add := len(p)
+		if add > space {
+			add = space
+		}
+		ss.buf = append(ss.buf, p[:add]...)
+		p = p[add:]
+	}
+	for len(p) > 0 {
+		n := copy(ss.buf[ss.start:], p)
+		p = p[n:]
+		ss.start += n
+		if ss.start == ss.keep {
+			ss.start = 0
+		}
+	}
+	return
+}
+
+// generatesZip64 reports whether f wrote a zip64 file.
+// f is also responsible for closing w.
+func generatesZip64(t *testing.T, f func(w *Writer)) bool {
+	ss := &suffixSaver{keep: 10 << 20}
+	w := NewWriter(ss)
+	f(w)
+	return suffixIsZip64(t, ss)
+}
+
+type sizedReaderAt interface {
+	io.ReaderAt
+	Size() int64
+}
+
+func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
+	d := make([]byte, 1024)
+	if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
+		t.Fatalf("ReadAt: %v", err)
+	}
+
+	sigOff := findSignatureInBlock(d)
+	if sigOff == -1 {
+		t.Errorf("failed to find signature in block")
+		return false
+	}
+
+	dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
+	if err != nil {
+		t.Fatalf("findDirectory64End: %v", err)
+	}
+	if dirOff == -1 {
+		return false
+	}
+
+	d = make([]byte, directory64EndLen)
+	if _, err := zip.ReadAt(d, dirOff); err != nil {
+		t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
+	}
+
+	b := readBuf(d)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		return false
+	}
+
+	size := b.uint64()
+	if size != directory64EndLen-12 {
+		t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
+	}
+	return true
+}
+
+func testZip64(t testing.TB, size int64) *rleBuffer {
+	const chunkSize = 1024
+	chunks := int(size / chunkSize)
+	// write size bytes plus "END\n" to a zip file
+	buf := new(rleBuffer)
+	w := NewWriter(buf)
+	f, err := w.CreateHeader(&FileHeader{
+		Name:   "huge.txt",
+		Method: Store,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.(*fileWriter).crc32 = fakeHash32{}
+	chunk := make([]byte, chunkSize)
+	for i := range chunk {
+		chunk[i] = '.'
+	}
+	for i := 0; i < chunks; i++ {
+		_, err := f.Write(chunk)
+		if err != nil {
+			t.Fatal("write chunk:", err)
+		}
+	}
+	if frag := int(size % chunkSize); frag > 0 {
+		_, err := f.Write(chunk[:frag])
+		if err != nil {
+			t.Fatal("write chunk:", err)
+		}
+	}
+	end := []byte("END\n")
+	_, err = f.Write(end)
+	if err != nil {
+		t.Fatal("write end:", err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read back zip file and check that we get to the end of it
+	r, err := NewReader(buf, int64(buf.Size()))
+	if err != nil {
+		t.Fatal("reader:", err)
+	}
+	f0 := r.File[0]
+	rc, err := f0.Open()
+	if err != nil {
+		t.Fatal("opening:", err)
+	}
+	rc.(*checksumReader).hash = fakeHash32{}
+	for i := 0; i < chunks; i++ {
+		_, err := io.ReadFull(rc, chunk)
+		if err != nil {
+			t.Fatal("read:", err)
+		}
+	}
+	if frag := int(size % chunkSize); frag > 0 {
+		_, err := io.ReadFull(rc, chunk[:frag])
+		if err != nil {
+			t.Fatal("read:", err)
+		}
+	}
+	gotEnd, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatal("read end:", err)
+	}
+	if !bytes.Equal(gotEnd, end) {
+		t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
+	}
+	err = rc.Close()
+	if err != nil {
+		t.Fatal("closing:", err)
+	}
+	if size+int64(len("END\n")) >= 1<<32-1 {
+		if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
+			t.Errorf("UncompressedSize %#x, want %#x", got, want)
+		}
+	}
+
+	if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
+		t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
+	}
+
+	return buf
+}
+
+// Issue 9857
+func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
+	if !suffixIsZip64(t, buf) {
+		t.Fatal("not a zip64")
+	}
+}
+
+func testValidHeader(h *FileHeader, t *testing.T) {
+	var buf bytes.Buffer
+	z := NewWriter(&buf)
+
+	f, err := z.CreateHeader(h)
+	if err != nil {
+		t.Fatalf("error creating header: %v", err)
+	}
+	if _, err := f.Write([]byte("hi")); err != nil {
+		t.Fatalf("error writing content: %v", err)
+	}
+	if err := z.Close(); err != nil {
+		t.Fatalf("error closing zip writer: %v", err)
+	}
+
+	b := buf.Bytes()
+	zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
+	if err != nil {
+		t.Fatalf("got %v, expected nil", err)
+	}
+	zh := zf.File[0].FileHeader
+	if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
+		t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
+	}
+}
+
+// Issue 4302.
+func TestHeaderInvalidTagAndSize(t *testing.T) {
+	const timeFormat = "20060102T150405.000.txt"
+
+	ts := time.Now()
+	filename := ts.Format(timeFormat)
+
+	h := FileHeader{
+		Name:   filename,
+		Method: Deflate,
+		Extra:  []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
+	}
+	h.SetModTime(ts)
+
+	testValidHeader(&h, t)
+}
+
+func TestHeaderTooShort(t *testing.T) {
+	h := FileHeader{
+		Name:   "foo.txt",
+		Method: Deflate,
+		Extra:  []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
+	}
+	testValidHeader(&h, t)
+}
+
+func TestHeaderTooLongErr(t *testing.T) {
+	var headerTests = []struct {
+		name    string
+		extra   []byte
+		wanterr error
+	}{
+		{
+			name:    strings.Repeat("x", 1<<16),
+			extra:   []byte{},
+			wanterr: errLongName,
+		},
+		{
+			name:    "long_extra",
+			extra:   bytes.Repeat([]byte{0xff}, 1<<16),
+			wanterr: errLongExtra,
+		},
+	}
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	w := NewWriter(buf)
+
+	for _, test := range headerTests {
+		h := &FileHeader{
+			Name:  test.name,
+			Extra: test.extra,
+		}
+		_, err := w.CreateHeader(h)
+		if err != test.wanterr {
+			t.Errorf("error=%v, want %v", err, test.wanterr)
+		}
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestHeaderIgnoredSize(t *testing.T) {
+	h := FileHeader{
+		Name:   "foo.txt",
+		Method: Deflate,
+		Extra:  []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
+	}
+	testValidHeader(&h, t)
+}
+
+// Issue 4393. It is valid to have an extra data header
+// which contains no body.
+func TestZeroLengthHeader(t *testing.T) {
+	h := FileHeader{
+		Name:   "extadata.txt",
+		Method: Deflate,
+		Extra: []byte{
+			85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
+			85, 120, 0, 0, // tag 30805 size 0
+		},
+	}
+	testValidHeader(&h, t)
+}
+
+// Just benchmarking how fast the Zip64 test above is. Not related to
+// our zip performance, since the test above disabled CRC32 and flate.
+func BenchmarkZip64Test(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		testZip64(b, 1<<26)
+	}
+}
+
+func BenchmarkZip64TestSizes(b *testing.B) {
+	for _, size := range []int64{1 << 12, 1 << 20, 1 << 26} {
+		b.Run(fmt.Sprint(size), func(b *testing.B) {
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					testZip64(b, size)
+				}
+			})
+		})
+	}
+}
+
+func TestSuffixSaver(t *testing.T) {
+	const keep = 10
+	ss := &suffixSaver{keep: keep}
+	ss.Write([]byte("abc"))
+	if got := string(ss.Suffix()); got != "abc" {
+		t.Errorf("got = %q; want abc", got)
+	}
+	ss.Write([]byte("defghijklmno"))
+	if got := string(ss.Suffix()); got != "fghijklmno" {
+		t.Errorf("got = %q; want fghijklmno", got)
+	}
+	if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
+		t.Errorf("Size = %d; want %d", got, want)
+	}
+	buf := make([]byte, ss.Size())
+	for off := int64(0); off < ss.Size(); off++ {
+		for size := 1; size <= int(ss.Size()-off); size++ {
+			readBuf := buf[:size]
+			n, err := ss.ReadAt(readBuf, off)
+			if off < ss.Size()-keep {
+				if err != errDiscardedBytes {
+					t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
+				}
+				continue
+			}
+			want := "abcdefghijklmno"[off : off+int64(size)]
+			got := string(readBuf[:n])
+			if err != nil || got != want {
+				t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
+			}
+		}
+	}
+
+}
+
+type zeros struct{}
+
+func (zeros) Read(p []byte) (int, error) {
+	for i := range p {
+		p[i] = 0
+	}
+	return len(p), nil
+}
diff --git a/internal/backport/fmtsort/export_test.go b/internal/backport/fmtsort/export_test.go
new file mode 100644
index 0000000..25cbb5d
--- /dev/null
+++ b/internal/backport/fmtsort/export_test.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort
+
+import "reflect"
+
+func Compare(a, b reflect.Value) int {
+	return compare(a, b)
+}
diff --git a/internal/backport/fmtsort/sort.go b/internal/backport/fmtsort/sort.go
new file mode 100644
index 0000000..7127ba6
--- /dev/null
+++ b/internal/backport/fmtsort/sort.go
@@ -0,0 +1,220 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtsort provides a general stable ordering mechanism
+// for maps, on behalf of the fmt and text/template packages.
+// It is not guaranteed to be efficient and works only for types
+// that are valid map keys.
+package fmtsort
+
+import (
+	"reflect"
+	"sort"
+)
+
+// Note: Throughout this package we avoid calling reflect.Value.Interface as
+// it is not always legal to do so and it's easier to avoid the issue than to face it.
+
+// SortedMap represents a map's keys and values. The keys and values are
+// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
+type SortedMap struct {
+	Key   []reflect.Value
+	Value []reflect.Value
+}
+
+func (o *SortedMap) Len() int           { return len(o.Key) }
+func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
+func (o *SortedMap) Swap(i, j int) {
+	o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
+	o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
+}
+
+// Sort accepts a map and returns a SortedMap that has the same keys and
+// values but in a stable sorted order according to the keys, modulo issues
+// raised by unorderable key values such as NaNs.
+//
+// The ordering rules are more general than with Go's < operator:
+//
+//  - when applicable, nil compares low
+//  - ints, floats, and strings order by <
+//  - NaN compares less than non-NaN floats
+//  - bool compares false before true
+//  - complex compares real, then imag
+//  - pointers compare by machine address
+//  - channel values compare by machine address
+//  - structs compare each field in turn
+//  - arrays compare each element in turn.
+//    Otherwise identical arrays compare by length.
+//  - interface values compare first by reflect.Type describing the concrete type
+//    and then by concrete value as described in the previous rules.
+//
+func Sort(mapValue reflect.Value) *SortedMap {
+	if mapValue.Type().Kind() != reflect.Map {
+		return nil
+	}
+	// Note: this code is arranged to not panic even in the presence
+	// of a concurrent map update. The runtime is responsible for
+	// yelling loudly if that happens. See issue 33275.
+	n := mapValue.Len()
+	key := make([]reflect.Value, 0, n)
+	value := make([]reflect.Value, 0, n)
+	iter := mapValue.MapRange()
+	for iter.Next() {
+		key = append(key, iter.Key())
+		value = append(value, iter.Value())
+	}
+	sorted := &SortedMap{
+		Key:   key,
+		Value: value,
+	}
+	sort.Stable(sorted)
+	return sorted
+}
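+
+// Illustrative sketch (not part of the upstream file): a caller passes a map
+// through reflect and walks the parallel Key/Value slices in sorted key order:
+//
+//	sm := Sort(reflect.ValueOf(map[int]string{2: "b", 1: "a"}))
+//	for i := range sm.Key {
+//		// visits key 1 ("a"), then key 2 ("b")
+//		fmt.Println(sm.Key[i], sm.Value[i])
+//	}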
+
+// compare compares two values of the same type. It returns -1, 0, 1
+// according to whether a < b (-1), a == b (0), or a > b (1).
+// If the types differ, it returns -1.
+// See the comment on Sort for the comparison rules.
+func compare(aVal, bVal reflect.Value) int {
+	aType, bType := aVal.Type(), bVal.Type()
+	if aType != bType {
+		return -1 // No good answer possible, but don't return 0: they're not equal.
+	}
+	switch aVal.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		a, b := aVal.Int(), bVal.Int()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		a, b := aVal.Uint(), bVal.Uint()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.String:
+		a, b := aVal.String(), bVal.String()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Float32, reflect.Float64:
+		return floatCompare(aVal.Float(), bVal.Float())
+	case reflect.Complex64, reflect.Complex128:
+		a, b := aVal.Complex(), bVal.Complex()
+		if c := floatCompare(real(a), real(b)); c != 0 {
+			return c
+		}
+		return floatCompare(imag(a), imag(b))
+	case reflect.Bool:
+		a, b := aVal.Bool(), bVal.Bool()
+		switch {
+		case a == b:
+			return 0
+		case a:
+			return 1
+		default:
+			return -1
+		}
+	case reflect.Ptr, reflect.UnsafePointer:
+		a, b := aVal.Pointer(), bVal.Pointer()
+		switch {
+		case a < b:
+			return -1
+		case a > b:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Chan:
+		if c, ok := nilCompare(aVal, bVal); ok {
+			return c
+		}
+		ap, bp := aVal.Pointer(), bVal.Pointer()
+		switch {
+		case ap < bp:
+			return -1
+		case ap > bp:
+			return 1
+		default:
+			return 0
+		}
+	case reflect.Struct:
+		for i := 0; i < aVal.NumField(); i++ {
+			if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
+				return c
+			}
+		}
+		return 0
+	case reflect.Array:
+		for i := 0; i < aVal.Len(); i++ {
+			if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
+				return c
+			}
+		}
+		return 0
+	case reflect.Interface:
+		if c, ok := nilCompare(aVal, bVal); ok {
+			return c
+		}
+		c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
+		if c != 0 {
+			return c
+		}
+		return compare(aVal.Elem(), bVal.Elem())
+	default:
+		// Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
+		panic("bad type in compare: " + aType.String())
+	}
+}
+
+// nilCompare checks whether either value is nil. If not, the boolean is false.
+// If either value is nil, the boolean is true and the integer is the comparison
+// value. The comparison is defined to be 0 if both are nil, otherwise the one
+// nil value compares low. Both arguments must represent a chan, func,
+// interface, map, pointer, or slice.
+func nilCompare(aVal, bVal reflect.Value) (int, bool) {
+	if aVal.IsNil() {
+		if bVal.IsNil() {
+			return 0, true
+		}
+		return -1, true
+	}
+	if bVal.IsNil() {
+		return 1, true
+	}
+	return 0, false
+}
+
+// floatCompare compares two floating-point values. NaNs compare low.
+func floatCompare(a, b float64) int {
+	switch {
+	case isNaN(a):
+		return -1 // No good answer if b is a NaN so don't bother checking.
+	case isNaN(b):
+		return 1
+	case a < b:
+		return -1
+	case a > b:
+		return 1
+	}
+	return 0
+}
+
+func isNaN(a float64) bool {
+	return a != a
+}
diff --git a/internal/backport/fmtsort/sort_test.go b/internal/backport/fmtsort/sort_test.go
new file mode 100644
index 0000000..4e249d9
--- /dev/null
+++ b/internal/backport/fmtsort/sort_test.go
@@ -0,0 +1,269 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort_test
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"testing"
+	"unsafe"
+
+	"golang.org/x/website/internal/backport/fmtsort"
+)
+
+var compareTests = [][]reflect.Value{
+	ct(reflect.TypeOf(int(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int8(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int16(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int32(0)), -1, 0, 1),
+	ct(reflect.TypeOf(int64(0)), -1, 0, 1),
+	ct(reflect.TypeOf(uint(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint8(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint16(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint32(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uint64(0)), 0, 1, 5),
+	ct(reflect.TypeOf(uintptr(0)), 0, 1, 5),
+	ct(reflect.TypeOf(string("")), "", "a", "ab"),
+	ct(reflect.TypeOf(float32(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+	ct(reflect.TypeOf(float64(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+	ct(reflect.TypeOf(complex64(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+	ct(reflect.TypeOf(complex128(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+	ct(reflect.TypeOf(false), false, true),
+	ct(reflect.TypeOf(&ints[0]), &ints[0], &ints[1], &ints[2]),
+	ct(reflect.TypeOf(unsafe.Pointer(&ints[0])), unsafe.Pointer(&ints[0]), unsafe.Pointer(&ints[1]), unsafe.Pointer(&ints[2])),
+	ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]),
+	ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}),
+	ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}),
+	ct(reflect.TypeOf(interface{}(interface{}(0))), iFace, 1, 2, 3),
+}
+
+var iFace interface{}
+
+func ct(typ reflect.Type, args ...interface{}) []reflect.Value {
+	value := make([]reflect.Value, len(args))
+	for i, v := range args {
+		x := reflect.ValueOf(v)
+		if !x.IsValid() { // Make it a typed nil.
+			x = reflect.Zero(typ)
+		} else {
+			x = x.Convert(typ)
+		}
+		value[i] = x
+	}
+	return value
+}
+
+func TestCompare(t *testing.T) {
+	for _, test := range compareTests {
+		for i, v0 := range test {
+			for j, v1 := range test {
+				c := fmtsort.Compare(v0, v1)
+				var expect int
+				switch {
+				case i == j:
+					expect = 0
+					// NaNs are tricky.
+					if typ := v0.Type(); (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && math.IsNaN(v0.Float()) {
+						expect = -1
+					}
+				case i < j:
+					expect = -1
+				case i > j:
+					expect = 1
+				}
+				if c != expect {
+					t.Errorf("%s: compare(%v,%v)=%d; expect %d", v0.Type(), v0, v1, c, expect)
+				}
+			}
+		}
+	}
+}
+
+type sortTest struct {
+	data  interface{} // Always a map.
+	print string      // Printed result using our custom printer.
+}
+
+var sortTests = []sortTest{
+	{
+		map[int]string{7: "bar", -3: "foo"},
+		"-3:foo 7:bar",
+	},
+	{
+		map[uint8]string{7: "bar", 3: "foo"},
+		"3:foo 7:bar",
+	},
+	{
+		map[string]string{"7": "bar", "3": "foo"},
+		"3:foo 7:bar",
+	},
+	{
+		map[float64]string{7: "bar", -3: "foo", math.NaN(): "nan", math.Inf(0): "inf"},
+		"NaN:nan -3:foo 7:bar +Inf:inf",
+	},
+	{
+		map[complex128]string{7 + 2i: "bar2", 7 + 1i: "bar", -3: "foo", complex(math.NaN(), 0i): "nan", complex(math.Inf(0), 0i): "inf"},
+		"(NaN+0i):nan (-3+0i):foo (7+1i):bar (7+2i):bar2 (+Inf+0i):inf",
+	},
+	{
+		map[bool]string{true: "true", false: "false"},
+		"false:false true:true",
+	},
+	{
+		chanMap(),
+		"CHAN0:0 CHAN1:1 CHAN2:2",
+	},
+	{
+		pointerMap(),
+		"PTR0:0 PTR1:1 PTR2:2",
+	},
+	{
+		unsafePointerMap(),
+		"UNSAFEPTR0:0 UNSAFEPTR1:1 UNSAFEPTR2:2",
+	},
+	{
+		map[toy]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+		"{3 4}:34 {7 1}:71 {7 2}:72",
+	},
+	{
+		map[[2]int]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+		"[3 4]:34 [7 1]:71 [7 2]:72",
+	},
+}
+
+func sprint(data interface{}) string {
+	om := fmtsort.Sort(reflect.ValueOf(data))
+	if om == nil {
+		return "nil"
+	}
+	b := new(strings.Builder)
+	for i, key := range om.Key {
+		if i > 0 {
+			b.WriteRune(' ')
+		}
+		b.WriteString(sprintKey(key))
+		b.WriteRune(':')
+		b.WriteString(fmt.Sprint(om.Value[i]))
+	}
+	return b.String()
+}
+
+// sprintKey formats a reflect.Value but gives reproducible values for some
+// problematic types such as pointers. Note that it only does special handling
+// for the troublesome types used in the test cases; it is not a general
+// printer.
+func sprintKey(key reflect.Value) string {
+	switch str := key.Type().String(); str {
+	case "*int":
+		ptr := key.Interface().(*int)
+		for i := range ints {
+			if ptr == &ints[i] {
+				return fmt.Sprintf("PTR%d", i)
+			}
+		}
+		return "PTR???"
+	case "unsafe.Pointer":
+		ptr := key.Interface().(unsafe.Pointer)
+		for i := range ints {
+			if ptr == unsafe.Pointer(&ints[i]) {
+				return fmt.Sprintf("UNSAFEPTR%d", i)
+			}
+		}
+		return "UNSAFEPTR???"
+	case "chan int":
+		c := key.Interface().(chan int)
+		for i := range chans {
+			if c == chans[i] {
+				return fmt.Sprintf("CHAN%d", i)
+			}
+		}
+		return "CHAN???"
+	default:
+		return fmt.Sprint(key)
+	}
+}
+
+var (
+	ints  [3]int
+	chans = [3]chan int{make(chan int), make(chan int), make(chan int)}
+)
+
+func pointerMap() map[*int]string {
+	m := make(map[*int]string)
+	for i := 2; i >= 0; i-- {
+		m[&ints[i]] = fmt.Sprint(i)
+	}
+	return m
+}
+
+func unsafePointerMap() map[unsafe.Pointer]string {
+	m := make(map[unsafe.Pointer]string)
+	for i := 2; i >= 0; i-- {
+		m[unsafe.Pointer(&ints[i])] = fmt.Sprint(i)
+	}
+	return m
+}
+
+func chanMap() map[chan int]string {
+	m := make(map[chan int]string)
+	for i := 2; i >= 0; i-- {
+		m[chans[i]] = fmt.Sprint(i)
+	}
+	return m
+}
+
+type toy struct {
+	A int // Exported.
+	b int // Unexported.
+}
+
+func TestOrder(t *testing.T) {
+	for _, test := range sortTests {
+		got := sprint(test.data)
+		if got != test.print {
+			t.Errorf("%s: got %q, want %q", reflect.TypeOf(test.data), got, test.print)
+		}
+	}
+}
+
+func TestInterface(t *testing.T) {
+	// A map containing multiple concrete types should be sorted by type,
+	// then value. However, the relative ordering of types is unspecified,
+	// so test this by checking the presence of sorted subgroups.
+	m := map[interface{}]string{
+		[2]int{1, 0}:             "",
+		[2]int{0, 1}:             "",
+		true:                     "",
+		false:                    "",
+		3.1:                      "",
+		2.1:                      "",
+		1.1:                      "",
+		math.NaN():               "",
+		3:                        "",
+		2:                        "",
+		1:                        "",
+		"c":                      "",
+		"b":                      "",
+		"a":                      "",
+		struct{ x, y int }{1, 0}: "",
+		struct{ x, y int }{0, 1}: "",
+	}
+	got := sprint(m)
+	typeGroups := []string{
+		"NaN: 1.1: 2.1: 3.1:", // float64
+		"false: true:",        // bool
+		"1: 2: 3:",            // int
+		"a: b: c:",            // string
+		"[0 1]: [1 0]:",       // [2]int
+		"{0 1}: {1 0}:",       // struct{ x int; y int }
+	}
+	for _, g := range typeGroups {
+		if !strings.Contains(got, g) {
+			t.Errorf("sorted map should contain %q", g)
+		}
+	}
+}
diff --git a/internal/backport/html/template/attr.go b/internal/backport/html/template/attr.go
new file mode 100644
index 0000000..22922e6
--- /dev/null
+++ b/internal/backport/html/template/attr.go
@@ -0,0 +1,175 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"strings"
+)
+
+// attrTypeMap[n] describes the value of the given attribute.
+// If an attribute affects (or can mask) the encoding or interpretation of
+// other content, or affects the contents, idempotency, or credentials of a
+// network message, then the value in this map is contentTypeUnsafe.
+// This map is derived from HTML5, specifically
+// https://www.w3.org/TR/html5/Overview.html#attributes-1
+// as well as "%URI"-typed attributes from
+// https://www.w3.org/TR/html4/index/attributes.html
+var attrTypeMap = map[string]contentType{
+	"accept":          contentTypePlain,
+	"accept-charset":  contentTypeUnsafe,
+	"action":          contentTypeURL,
+	"alt":             contentTypePlain,
+	"archive":         contentTypeURL,
+	"async":           contentTypeUnsafe,
+	"autocomplete":    contentTypePlain,
+	"autofocus":       contentTypePlain,
+	"autoplay":        contentTypePlain,
+	"background":      contentTypeURL,
+	"border":          contentTypePlain,
+	"checked":         contentTypePlain,
+	"cite":            contentTypeURL,
+	"challenge":       contentTypeUnsafe,
+	"charset":         contentTypeUnsafe,
+	"class":           contentTypePlain,
+	"classid":         contentTypeURL,
+	"codebase":        contentTypeURL,
+	"cols":            contentTypePlain,
+	"colspan":         contentTypePlain,
+	"content":         contentTypeUnsafe,
+	"contenteditable": contentTypePlain,
+	"contextmenu":     contentTypePlain,
+	"controls":        contentTypePlain,
+	"coords":          contentTypePlain,
+	"crossorigin":     contentTypeUnsafe,
+	"data":            contentTypeURL,
+	"datetime":        contentTypePlain,
+	"default":         contentTypePlain,
+	"defer":           contentTypeUnsafe,
+	"dir":             contentTypePlain,
+	"dirname":         contentTypePlain,
+	"disabled":        contentTypePlain,
+	"draggable":       contentTypePlain,
+	"dropzone":        contentTypePlain,
+	"enctype":         contentTypeUnsafe,
+	"for":             contentTypePlain,
+	"form":            contentTypeUnsafe,
+	"formaction":      contentTypeURL,
+	"formenctype":     contentTypeUnsafe,
+	"formmethod":      contentTypeUnsafe,
+	"formnovalidate":  contentTypeUnsafe,
+	"formtarget":      contentTypePlain,
+	"headers":         contentTypePlain,
+	"height":          contentTypePlain,
+	"hidden":          contentTypePlain,
+	"high":            contentTypePlain,
+	"href":            contentTypeURL,
+	"hreflang":        contentTypePlain,
+	"http-equiv":      contentTypeUnsafe,
+	"icon":            contentTypeURL,
+	"id":              contentTypePlain,
+	"ismap":           contentTypePlain,
+	"keytype":         contentTypeUnsafe,
+	"kind":            contentTypePlain,
+	"label":           contentTypePlain,
+	"lang":            contentTypePlain,
+	"language":        contentTypeUnsafe,
+	"list":            contentTypePlain,
+	"longdesc":        contentTypeURL,
+	"loop":            contentTypePlain,
+	"low":             contentTypePlain,
+	"manifest":        contentTypeURL,
+	"max":             contentTypePlain,
+	"maxlength":       contentTypePlain,
+	"media":           contentTypePlain,
+	"mediagroup":      contentTypePlain,
+	"method":          contentTypeUnsafe,
+	"min":             contentTypePlain,
+	"multiple":        contentTypePlain,
+	"name":            contentTypePlain,
+	"novalidate":      contentTypeUnsafe,
+	// Skip handler names from
+	// https://www.w3.org/TR/html5/webappapis.html#event-handlers-on-elements,-document-objects,-and-window-objects
+	// since we have special handling in attrType.
+	"open":        contentTypePlain,
+	"optimum":     contentTypePlain,
+	"pattern":     contentTypeUnsafe,
+	"placeholder": contentTypePlain,
+	"poster":      contentTypeURL,
+	"profile":     contentTypeURL,
+	"preload":     contentTypePlain,
+	"pubdate":     contentTypePlain,
+	"radiogroup":  contentTypePlain,
+	"readonly":    contentTypePlain,
+	"rel":         contentTypeUnsafe,
+	"required":    contentTypePlain,
+	"reversed":    contentTypePlain,
+	"rows":        contentTypePlain,
+	"rowspan":     contentTypePlain,
+	"sandbox":     contentTypeUnsafe,
+	"spellcheck":  contentTypePlain,
+	"scope":       contentTypePlain,
+	"scoped":      contentTypePlain,
+	"seamless":    contentTypePlain,
+	"selected":    contentTypePlain,
+	"shape":       contentTypePlain,
+	"size":        contentTypePlain,
+	"sizes":       contentTypePlain,
+	"span":        contentTypePlain,
+	"src":         contentTypeURL,
+	"srcdoc":      contentTypeHTML,
+	"srclang":     contentTypePlain,
+	"srcset":      contentTypeSrcset,
+	"start":       contentTypePlain,
+	"step":        contentTypePlain,
+	"style":       contentTypeCSS,
+	"tabindex":    contentTypePlain,
+	"target":      contentTypePlain,
+	"title":       contentTypePlain,
+	"type":        contentTypeUnsafe,
+	"usemap":      contentTypeURL,
+	"value":       contentTypeUnsafe,
+	"width":       contentTypePlain,
+	"wrap":        contentTypePlain,
+	"xmlns":       contentTypeURL,
+}
+
+// attrType returns a conservative (upper-bound on authority) guess at the
+// type of the lowercase named attribute.
+func attrType(name string) contentType {
+	if strings.HasPrefix(name, "data-") {
+		// Strip data- so that custom attribute heuristics below are
+		// widely applied.
+		// Treat data-action as URL below.
+		name = name[5:]
+	} else if colon := strings.IndexRune(name, ':'); colon != -1 {
+		if name[:colon] == "xmlns" {
+			return contentTypeURL
+		}
+		// Treat svg:href and xlink:href as href below.
+		name = name[colon+1:]
+	}
+	if t, ok := attrTypeMap[name]; ok {
+		return t
+	}
+	// Treat partial event handler names as script.
+	if strings.HasPrefix(name, "on") {
+		return contentTypeJS
+	}
+
+	// Heuristics to prevent "javascript:..." injection in custom
+	// data attributes and custom attributes like g:tweetUrl.
+	// https://www.w3.org/TR/html5/dom.html#embedding-custom-non-visible-data-with-the-data-*-attributes
+	// "Custom data attributes are intended to store custom data
+	//  private to the page or application, for which there are no
+	//  more appropriate attributes or elements."
+	// Developers seem to store URL content in data URLs that start
+	// or end with "URI" or "URL".
+	if strings.Contains(name, "src") ||
+		strings.Contains(name, "uri") ||
+		strings.Contains(name, "url") {
+		return contentTypeURL
+	}
+	return contentTypePlain
+}
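+
+// Illustrative sketch (not part of the upstream file): per the table and
+// heuristics above,
+//
+//	attrType("href")      == contentTypeURL   // table lookup
+//	attrType("onclick")   == contentTypeJS    // "on" prefix => event handler
+//	attrType("data-href") == contentTypeURL   // "data-" stripped, then table lookup
+//	attrType("title")     == contentTypePlain // table lookup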
diff --git a/internal/backport/html/template/attr_string.go b/internal/backport/html/template/attr_string.go
new file mode 100644
index 0000000..babe70c
--- /dev/null
+++ b/internal/backport/html/template/attr_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type attr"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _attr_name = "attrNoneattrScriptattrScriptTypeattrStyleattrURLattrSrcset"
+
+var _attr_index = [...]uint8{0, 8, 18, 32, 41, 48, 58}
+
+func (i attr) String() string {
+	if i >= attr(len(_attr_index)-1) {
+		return "attr(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _attr_name[_attr_index[i]:_attr_index[i+1]]
+}
diff --git a/internal/backport/html/template/clone_test.go b/internal/backport/html/template/clone_test.go
new file mode 100644
index 0000000..07e8ef3
--- /dev/null
+++ b/internal/backport/html/template/clone_test.go
@@ -0,0 +1,280 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+	"sync"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+func TestAddParseTreeHTML(t *testing.T) {
+	root := Must(New("root").Parse(`{{define "a"}} {{.}} {{template "b"}} {{.}} "></a>{{end}}`))
+	tree, err := parse.Parse("t", `{{define "b"}}<a href="{{end}}`, "", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	added := Must(root.AddParseTree("b", tree["b"]))
+	b := new(bytes.Buffer)
+	err = added.ExecuteTemplate(b, "a", "1>0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` 1&gt;0 <a href=" 1%3e0 "></a>`; got != want {
+		t.Errorf("got %q want %q", got, want)
+	}
+}
+
+func TestClone(t *testing.T) {
+	// The {{.}} will be executed with data "<i>*/" in different contexts.
+	// In the t0 template, it will be in a text context.
+	// In the t1 template, it will be in a URL context.
+	// In the t2 template, it will be in a JavaScript context.
+	// In the t3 template, it will be in a CSS context.
+	const tmpl = `{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}`
+	b := new(bytes.Buffer)
+
+	// Create an incomplete template t0.
+	t0 := Must(New("t0").Parse(tmpl))
+
+	// Clone t0 as t1.
+	t1 := Must(t0.Clone())
+	Must(t1.Parse(`{{define "lhs"}} <a href=" {{end}}`))
+	Must(t1.Parse(`{{define "rhs"}} "></a> {{end}}`))
+
+	// Execute t1.
+	b.Reset()
+	if err := t1.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <a href=" %3ci%3e*/ "></a> `; got != want {
+		t.Errorf("t1: got %q want %q", got, want)
+	}
+
+	// Clone t0 as t2.
+	t2 := Must(t0.Clone())
+	Must(t2.Parse(`{{define "lhs"}} <p onclick="javascript: {{end}}`))
+	Must(t2.Parse(`{{define "rhs"}} "></p> {{end}}`))
+
+	// Execute t2.
+	b.Reset()
+	if err := t2.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <p onclick="javascript: &#34;\u003ci\u003e*/&#34; "></p> `; got != want {
+		t.Errorf("t2: got %q want %q", got, want)
+	}
+
+	// Clone t0 as t3, but do not execute t3 yet.
+	t3 := Must(t0.Clone())
+	Must(t3.Parse(`{{define "lhs"}} <style> {{end}}`))
+	Must(t3.Parse(`{{define "rhs"}} </style> {{end}}`))
+
+	// Complete t0.
+	Must(t0.Parse(`{{define "lhs"}} ( {{end}}`))
+	Must(t0.Parse(`{{define "rhs"}} ) {{end}}`))
+
+	// Clone t0 as t4. Redefining the "lhs" template should not fail.
+	t4 := Must(t0.Clone())
+	if _, err := t4.Parse(`{{define "lhs"}} OK {{end}}`); err != nil {
+		t.Errorf(`redefine "lhs": got err %v want nil`, err)
+	}
+	// Cloning t1 should fail as it has been executed.
+	if _, err := t1.Clone(); err == nil {
+		t.Error("cloning t1: got nil err want non-nil")
+	}
+	// Redefining the "lhs" template in t1 should fail as it has been executed.
+	if _, err := t1.Parse(`{{define "lhs"}} OK {{end}}`); err == nil {
+		t.Error(`redefine "lhs": got nil err want non-nil`)
+	}
+
+	// Execute t0.
+	b.Reset()
+	if err := t0.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` ( &lt;i&gt;*/ ) `; got != want {
+		t.Errorf("t0: got %q want %q", got, want)
+	}
+
+	// Clone t0. This should fail, as t0 has already executed.
+	if _, err := t0.Clone(); err == nil {
+		t.Error(`t0.Clone(): got nil err want non-nil`)
+	}
+
+	// Similarly, cloning sub-templates should fail.
+	if _, err := t0.Lookup("a").Clone(); err == nil {
+		t.Error(`t0.Lookup("a").Clone(): got nil err want non-nil`)
+	}
+	if _, err := t0.Lookup("lhs").Clone(); err == nil {
+		t.Error(`t0.Lookup("lhs").Clone(): got nil err want non-nil`)
+	}
+
+	// Execute t3.
+	b.Reset()
+	if err := t3.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b.String(), ` <style> ZgotmplZ </style> `; got != want {
+		t.Errorf("t3: got %q want %q", got, want)
+	}
+}
+
+func TestTemplates(t *testing.T) {
+	names := []string{"t0", "a", "lhs", "rhs"}
+	// Some template definitions borrowed from TestClone.
+	const tmpl = `
+		{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}
+		{{define "lhs"}} <a href=" {{end}}
+		{{define "rhs"}} "></a> {{end}}`
+	t0 := Must(New("t0").Parse(tmpl))
+	templates := t0.Templates()
+	if len(templates) != len(names) {
+		t.Errorf("expected %d templates; got %d", len(names), len(templates))
+	}
+	for _, name := range names {
+		found := false
+		for _, tmpl := range templates {
+			if name == tmpl.text.Name() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			t.Error("could not find template", name)
+		}
+	}
+}
+
+// This used to crash; https://golang.org/issue/3281
+func TestCloneCrash(t *testing.T) {
+	t1 := New("all")
+	Must(t1.New("t1").Parse(`{{define "foo"}}foo{{end}}`))
+	t1.Clone()
+}
+
+// Ensure that this guarantee from the docs is upheld:
+// "Further calls to Parse in the copy will add templates
+// to the copy but not to the original."
+func TestCloneThenParse(t *testing.T) {
+	t0 := Must(New("t0").Parse(`{{define "a"}}{{template "embedded"}}{{end}}`))
+	t1 := Must(t0.Clone())
+	Must(t1.Parse(`{{define "embedded"}}t1{{end}}`))
+	if len(t0.Templates())+1 != len(t1.Templates()) {
+		t.Error("adding a template to a clone added it to the original")
+	}
+	// double check that the embedded template isn't available in the original
+	err := t0.ExecuteTemplate(ioutil.Discard, "a", nil)
+	if err == nil {
+		t.Error("expected 'no such template' error")
+	}
+}
+
+// https://golang.org/issue/5980
+func TestFuncMapWorksAfterClone(t *testing.T) {
+	funcs := FuncMap{"customFunc": func() (string, error) {
+		return "", errors.New("issue5980")
+	}}
+
+	// get the expected error output (no clone)
+	uncloned := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
+	wantErr := uncloned.Execute(ioutil.Discard, nil)
+
+	// toClone must be the same as uncloned. It has to be recreated from scratch,
+	// since cloning cannot occur after execution.
+	toClone := Must(New("").Funcs(funcs).Parse("{{customFunc}}"))
+	cloned := Must(toClone.Clone())
+	gotErr := cloned.Execute(ioutil.Discard, nil)
+
+	if wantErr.Error() != gotErr.Error() {
+		t.Errorf("clone error message mismatch want %q got %q", wantErr, gotErr)
+	}
+}
+
+// https://golang.org/issue/16101
+func TestTemplateCloneExecuteRace(t *testing.T) {
+	const (
+		input   = `<title>{{block "a" .}}a{{end}}</title><body>{{block "b" .}}b{{end}}<body>`
+		overlay = `{{define "b"}}A{{end}}`
+	)
+	outer := Must(New("outer").Parse(input))
+	tmpl := Must(Must(outer.Clone()).Parse(overlay))
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 100; i++ {
+				if err := tmpl.Execute(ioutil.Discard, "data"); err != nil {
+					panic(err)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestTemplateCloneLookup(t *testing.T) {
+	// Template.escape makes an assumption that the template associated
+	// with t.Name() is t. Check that this holds.
+	tmpl := Must(New("x").Parse("a"))
+	tmpl = Must(tmpl.Clone())
+	if tmpl.Lookup(tmpl.Name()) != tmpl {
+		t.Error("after Clone, tmpl.Lookup(tmpl.Name()) != tmpl")
+	}
+}
+
+func TestCloneGrowth(t *testing.T) {
+	tmpl := Must(New("root").Parse(`<title>{{block "B". }}Arg{{end}}</title>`))
+	tmpl = Must(tmpl.Clone())
+	Must(tmpl.Parse(`{{define "B"}}Text{{end}}`))
+	for i := 0; i < 10; i++ {
+		tmpl.Execute(ioutil.Discard, nil)
+	}
+	if len(tmpl.DefinedTemplates()) > 200 {
+		t.Fatalf("too many templates: %v", len(tmpl.DefinedTemplates()))
+	}
+}
+
+// https://golang.org/issue/17735
+func TestCloneRedefinedName(t *testing.T) {
+	const base = `
+{{ define "a" -}}<title>{{ template "b" . -}}</title>{{ end -}}
+{{ define "b" }}{{ end -}}
+`
+	const page = `{{ template "a" . }}`
+
+	t1 := Must(New("a").Parse(base))
+
+	for i := 0; i < 2; i++ {
+		t2 := Must(t1.Clone())
+		t2 = Must(t2.New(fmt.Sprintf("%d", i)).Parse(page))
+		err := t2.Execute(ioutil.Discard, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// Issue 24791.
+func TestClonePipe(t *testing.T) {
+	a := Must(New("a").Parse(`{{define "a"}}{{range $v := .A}}{{$v}}{{end}}{{end}}`))
+	data := struct{ A []string }{A: []string{"hi"}}
+	b := Must(a.Clone())
+	var buf strings.Builder
+	if err := b.Execute(&buf, &data); err != nil {
+		t.Fatal(err)
+	}
+	if got, want := buf.String(), "hi"; got != want {
+		t.Errorf("got %q want %q", got, want)
+	}
+}
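
For reference, a minimal sketch (not part of the backported files) of the base/overlay pattern the Clone tests above exercise, using the standard html/template import for brevity; the backport exposes the same API, and the template names here are illustrative.

  package main

  import (
  	"html/template"
  	"os"
  )

  func main() {
  	// Parse the shared layout once. A block gives an overridable default.
  	base := template.Must(template.New("layout").Parse(
  		`<title>{{block "title" .}}default{{end}}</title>`))

  	// Each page clones the base and overrides "title"; the base itself
  	// is never executed, so it remains cloneable for the next page.
  	page := template.Must(template.Must(base.Clone()).Parse(
  		`{{define "title"}}About{{end}}`))

  	if err := page.Execute(os.Stdout, nil); err != nil {
  		panic(err)
  	}
  	// Prints: <title>About</title>
  }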
diff --git a/internal/backport/html/template/content.go b/internal/backport/html/template/content.go
new file mode 100644
index 0000000..6ba87a9
--- /dev/null
+++ b/internal/backport/html/template/content.go
@@ -0,0 +1,185 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Strings of content from a trusted source.
+type (
+	// CSS encapsulates known safe content that matches any of:
+	//   1. The CSS3 stylesheet production, such as `p { color: purple }`.
+	//   2. The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
+	//   3. CSS3 declaration productions, such as `color: red; margin: 2px`.
+	//   4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
+	// See https://www.w3.org/TR/css3-syntax/#parsing and
+	// https://web.archive.org/web/20090211114933/http://w3.org/TR/css3-syntax#style
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	CSS string
+
+	// HTML encapsulates a known safe HTML document fragment.
+	// It should not be used for HTML from a third-party, or HTML with
+	// unclosed tags or comments. The outputs of a sound HTML sanitizer
+	// and a template escaped by this package are fine for use with HTML.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	HTML string
+
+	// HTMLAttr encapsulates an HTML attribute from a trusted source,
+	// for example, ` dir="ltr"`.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	HTMLAttr string
+
+	// JS encapsulates a known safe EcmaScript5 Expression, for example,
+	// `(x + y * z())`.
+	// Template authors are responsible for ensuring that typed expressions
+	// do not break the intended precedence and that there is no
+	// statement/expression ambiguity as when passing an expression like
+	// "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
+	// valid Program with a very different meaning.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	//
+	// Using JS to include valid but untrusted JSON is not safe.
+	// A safe alternative is to parse the JSON with json.Unmarshal and then
+	// pass the resultant object into the template, where it will be
+	// converted to sanitized JSON when presented in a JavaScript context.
+	JS string
+
+	// JSStr encapsulates a sequence of characters meant to be embedded
+	// between quotes in a JavaScript expression.
+	// The string must match a series of StringCharacters:
+	//   StringCharacter :: SourceCharacter but not `\` or LineTerminator
+	//                    | EscapeSequence
+	// Note that LineContinuations are not allowed.
+	// JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	JSStr string
+
+	// URL encapsulates a known safe URL or URL substring (see RFC 3986).
+	// A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
+	// from a trusted source should go in the page, but by default dynamic
+	// `javascript:` URLs are filtered out since they are a frequently
+	// exploited injection vector.
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	URL string
+
+	// Srcset encapsulates a known safe srcset attribute
+	// (see https://w3c.github.io/html/semantics-embedded-content.html#element-attrdef-img-srcset).
+	//
+	// Use of this type presents a security risk:
+	// the encapsulated content should come from a trusted source,
+	// as it will be included verbatim in the template output.
+	Srcset string
+)
+
+type contentType uint8
+
+const (
+	contentTypePlain contentType = iota
+	contentTypeCSS
+	contentTypeHTML
+	contentTypeHTMLAttr
+	contentTypeJS
+	contentTypeJSStr
+	contentTypeURL
+	contentTypeSrcset
+	// contentTypeUnsafe is used in attr.go for values that affect how
+	// embedded content and network messages are formed, vetted,
+	// or interpreted; or which credentials network messages carry.
+	contentTypeUnsafe
+)
+
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+		// Avoid creating a reflect.Value if it's not a pointer.
+		return a
+	}
+	v := reflect.ValueOf(a)
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+var (
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// stringify converts its arguments to a string and reports the type of the content.
+// All pointers are dereferenced, as in the text/template package.
+func stringify(args ...interface{}) (string, contentType) {
+	if len(args) == 1 {
+		switch s := indirect(args[0]).(type) {
+		case string:
+			return s, contentTypePlain
+		case CSS:
+			return string(s), contentTypeCSS
+		case HTML:
+			return string(s), contentTypeHTML
+		case HTMLAttr:
+			return string(s), contentTypeHTMLAttr
+		case JS:
+			return string(s), contentTypeJS
+		case JSStr:
+			return string(s), contentTypeJSStr
+		case URL:
+			return string(s), contentTypeURL
+		case Srcset:
+			return string(s), contentTypeSrcset
+		}
+	}
+	i := 0
+	for _, arg := range args {
+		// We skip untyped nil arguments for backward compatibility.
+		// Without this they would be output as <nil>, escaped.
+		// See issue 25875.
+		if arg == nil {
+			continue
+		}
+
+		args[i] = indirectToStringerOrError(arg)
+		i++
+	}
+	return fmt.Sprint(args[:i]...), contentTypePlain
+}
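
For reference, a minimal sketch (not part of the backported files) of what these typed strings change in practice, using the standard html/template import for brevity; the backport exposes the same API. A plain string is contextually escaped, while a template.HTML value from a trusted source is emitted verbatim.

  package main

  import (
  	"html/template"
  	"os"
  )

  func main() {
  	t := template.Must(template.New("x").Parse(`<p>{{.}}</p>` + "\n"))

  	// Plain strings are escaped for the HTML text context.
  	t.Execute(os.Stdout, "<b>hi</b>") // <p>&lt;b&gt;hi&lt;/b&gt;</p>

  	// HTML from a trusted source passes through unchanged.
  	t.Execute(os.Stdout, template.HTML("<b>hi</b>")) // <p><b>hi</b></p>
  }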
diff --git a/internal/backport/html/template/content_test.go b/internal/backport/html/template/content_test.go
new file mode 100644
index 0000000..e6f47a2
--- /dev/null
+++ b/internal/backport/html/template/content_test.go
@@ -0,0 +1,458 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+)
+
+func TestTypedContent(t *testing.T) {
+	data := []interface{}{
+		`<b> "foo%" O'Reilly &bar;`,
+		CSS(`a[href =~ "//example.com"]#foo`),
+		HTML(`Hello, <b>World</b> &amp;tc!`),
+		HTMLAttr(` dir="ltr"`),
+		JS(`c && alert("Hello, World!");`),
+		JSStr(`Hello, World & O'Reilly\u0021`),
+		URL(`greeting=H%69,&addressee=(World)`),
+		Srcset(`greeting=H%69,&addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`),
+		URL(`,foo/,`),
+	}
+
+	// For each content sensitive escaper, see how it does on
+	// each of the typed strings above.
+	tests := []struct {
+		// A template containing a single {{.}}.
+		input string
+		want  []string
+	}{
+		{
+			`<style>{{.}} { color: blue }</style>`,
+			[]string{
+				`ZgotmplZ`,
+				// Allowed but not escaped.
+				`a[href =~ "//example.com"]#foo`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`<div style="{{.}}">`,
+			[]string{
+				`ZgotmplZ`,
+				// Allowed and HTML escaped.
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`{{.}}`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Not escaped.
+				`Hello, <b>World</b> &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<a{{.}}>`,
+			[]string{
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				// Allowed and HTML escaped.
+				` dir="ltr"`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+				`ZgotmplZ`,
+			},
+		},
+		{
+			`<a title={{.}}>`,
+			[]string{
+				`&lt;b&gt;&#32;&#34;foo%&#34;&#32;O&#39;Reilly&#32;&amp;bar;`,
+				`a[href&#32;&#61;~&#32;&#34;//example.com&#34;]#foo`,
+				// Tags stripped, spaces escaped, entity not re-escaped.
+				`Hello,&#32;World&#32;&amp;tc!`,
+				`&#32;dir&#61;&#34;ltr&#34;`,
+				`c&#32;&amp;&amp;&#32;alert(&#34;Hello,&#32;World!&#34;);`,
+				`Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\u0021`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<a title='{{.}}'>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Tags stripped, entity not re-escaped.
+				`Hello, World &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<textarea>{{.}}</textarea>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Angle brackets escaped to prevent injection of close tags, entity not re-escaped.
+				`Hello, &lt;b&gt;World&lt;/b&gt; &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<script>alert({{.}})</script>`,
+			[]string{
+				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
+				`"a[href =~ \"//example.com\"]#foo"`,
+				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
+				`" dir=\"ltr\""`,
+				// Not escaped.
+				`c && alert("Hello, World!");`,
+				// Escape sequence not over-escaped.
+				`"Hello, World & O'Reilly\u0021"`,
+				`"greeting=H%69,\u0026addressee=(World)"`,
+				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+				`",foo/,"`,
+			},
+		},
+		{
+			`<button onclick="alert({{.}})">`,
+			[]string{
+				`&#34;\u003cb\u003e \&#34;foo%\&#34; O&#39;Reilly \u0026bar;&#34;`,
+				`&#34;a[href =~ \&#34;//example.com\&#34;]#foo&#34;`,
+				`&#34;Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!&#34;`,
+				`&#34; dir=\&#34;ltr\&#34;&#34;`,
+				// Not JS escaped but HTML escaped.
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				// Escape sequence not over-escaped.
+				`&#34;Hello, World &amp; O&#39;Reilly\u0021&#34;`,
+				`&#34;greeting=H%69,\u0026addressee=(World)&#34;`,
+				`&#34;greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w&#34;`,
+				`&#34;,foo/,&#34;`,
+			},
+		},
+		{
+			`<script>alert("{{.}}")</script>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<script type="text/javascript">alert("{{.}}")</script>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<script type="text/javascript">alert({{.}})</script>`,
+			[]string{
+				`"\u003cb\u003e \"foo%\" O'Reilly \u0026bar;"`,
+				`"a[href =~ \"//example.com\"]#foo"`,
+				`"Hello, \u003cb\u003eWorld\u003c/b\u003e \u0026amp;tc!"`,
+				`" dir=\"ltr\""`,
+				// Not escaped.
+				`c && alert("Hello, World!");`,
+				// Escape sequence not over-escaped.
+				`"Hello, World & O'Reilly\u0021"`,
+				`"greeting=H%69,\u0026addressee=(World)"`,
+				`"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+				`",foo/,"`,
+			},
+		},
+		{
+			// Not treated as JS. The output is the same as for <div>{{.}}</div>.
+			`<script type="golang.org/x/website/internal/backport/text/template">{{.}}</script>`,
+			[]string{
+				`&lt;b&gt; &#34;foo%&#34; O&#39;Reilly &amp;bar;`,
+				`a[href =~ &#34;//example.com&#34;]#foo`,
+				// Not escaped.
+				`Hello, <b>World</b> &amp;tc!`,
+				` dir=&#34;ltr&#34;`,
+				`c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+				`Hello, World &amp; O&#39;Reilly\u0021`,
+				`greeting=H%69,&amp;addressee=(World)`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<button onclick='alert("{{.}}")'>`,
+			[]string{
+				`\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
+				`a[href =~ \u0022\/\/example.com\u0022]#foo`,
+				`Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
+				` dir=\u0022ltr\u0022`,
+				`c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+				// Escape sequence not over-escaped.
+				`Hello, World \u0026 O\u0027Reilly\u0021`,
+				`greeting=H%69,\u0026addressee=(World)`,
+				`greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+				`,foo\/,`,
+			},
+		},
+		{
+			`<a href="?q={{.}}">`,
+			[]string{
+				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+				`%20dir%3d%22ltr%22`,
+				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
+				`greeting=H%69,&amp;addressee=%28World%29`,
+				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<style>body { background: url('?img={{.}}') }</style>`,
+			[]string{
+				`%3cb%3e%20%22foo%25%22%20O%27Reilly%20%26bar%3b`,
+				`a%5bhref%20%3d~%20%22%2f%2fexample.com%22%5d%23foo`,
+				`Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+				`%20dir%3d%22ltr%22`,
+				`c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+				`Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+				// Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
+				`greeting=H%69,&addressee=%28World%29`,
+				`greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+				`,foo/,`,
+			},
+		},
+		{
+			`<img srcset="{{.}}">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				// Commas are not escaped.
+				`Hello,#ZgotmplZ`,
+				// Leading spaces are not percent escapes.
+				` dir=%22ltr%22`,
+				// Spaces after commas are not percent escaped.
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				// Metadata is not escaped.
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset={{.}}>`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				// Spaces are HTML escaped, not %-escaped.
+				`&#32;dir&#61;%22ltr%22`,
+				`#ZgotmplZ,&#32;World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting&#61;H%69%2c&amp;addressee&#61;%28World%29`,
+				`greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+				// Commas are escaped.
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="{{.}} 2x, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ {{.}}, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/?q={{.}} 2x, https://golang.org/ 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ 2x, {{.}} 500.5w">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+		{
+			`<img srcset="http://godoc.org/ 2x, https://golang.org/ {{.}}">`,
+			[]string{
+				`#ZgotmplZ`,
+				`#ZgotmplZ`,
+				`Hello,#ZgotmplZ`,
+				` dir=%22ltr%22`,
+				`#ZgotmplZ, World!%22%29;`,
+				`Hello,#ZgotmplZ`,
+				`greeting=H%69%2c&amp;addressee=%28World%29`,
+				`greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+				`%2cfoo/%2c`,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		tmpl := Must(New("x").Parse(test.input))
+		pre := strings.Index(test.input, "{{.}}")
+		post := len(test.input) - (pre + 5)
+		var b bytes.Buffer
+		for i, x := range data {
+			b.Reset()
+			if err := tmpl.Execute(&b, x); err != nil {
+				t.Errorf("%q with %v: %s", test.input, x, err)
+				continue
+			}
+			if want, got := test.want[i], b.String()[pre:b.Len()-post]; want != got {
+				t.Errorf("%q with %v:\nwant\n\t%q,\ngot\n\t%q\n", test.input, x, want, got)
+				continue
+			}
+		}
+	}
+}
+
+// Test that we print using the String method. Was issue 3073.
+type myStringer struct {
+	v int
+}
+
+func (s *myStringer) String() string {
+	return fmt.Sprintf("string=%d", s.v)
+}
+
+type errorer struct {
+	v int
+}
+
+func (s *errorer) Error() string {
+	return fmt.Sprintf("error=%d", s.v)
+}
+
+func TestStringer(t *testing.T) {
+	s := &myStringer{3}
+	b := new(bytes.Buffer)
+	tmpl := Must(New("x").Parse("{{.}}"))
+	if err := tmpl.Execute(b, s); err != nil {
+		t.Fatal(err)
+	}
+	var expect = "string=3"
+	if b.String() != expect {
+		t.Errorf("expected %q got %q", expect, b.String())
+	}
+	e := &errorer{7}
+	b.Reset()
+	if err := tmpl.Execute(b, e); err != nil {
+		t.Fatal(err)
+	}
+	expect = "error=7"
+	if b.String() != expect {
+		t.Errorf("expected %q got %q", expect, b.String())
+	}
+}
+
+// https://golang.org/issue/5982
+func TestEscapingNilNonemptyInterfaces(t *testing.T) {
+	tmpl := Must(New("x").Parse("{{.E}}"))
+
+	got := new(bytes.Buffer)
+	testData := struct{ E error }{} // any non-empty interface here will do; error is just ready at hand
+	tmpl.Execute(got, testData)
+
+	// A non-empty interface should print like an empty interface.
+	want := new(bytes.Buffer)
+	data := struct{ E interface{} }{}
+	tmpl.Execute(want, data)
+
+	if !bytes.Equal(want.Bytes(), got.Bytes()) {
+		t.Errorf("expected %q got %q", string(want.Bytes()), string(got.Bytes()))
+	}
+}
diff --git a/internal/backport/html/template/context.go b/internal/backport/html/template/context.go
new file mode 100644
index 0000000..1aacec4
--- /dev/null
+++ b/internal/backport/html/template/context.go
@@ -0,0 +1,265 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// context describes the state an HTML parser must be in when it reaches the
+// portion of HTML produced by evaluating a particular template node.
+//
+// The zero value of type context is the start context for a template that
+// produces an HTML fragment as defined at
+// https://www.w3.org/TR/html5/syntax.html#the-end
+// where the context element is null.
+type context struct {
+	state   state
+	delim   delim
+	urlPart urlPart
+	jsCtx   jsCtx
+	attr    attr
+	element element
+	n       parse.Node // for range break/continue
+	err     *Error
+}
+
+func (c context) String() string {
+	var err error
+	if c.err != nil {
+		err = c.err
+	}
+	return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, err)
+}
+
+// eq reports whether two contexts are equal.
+func (c context) eq(d context) bool {
+	return c.state == d.state &&
+		c.delim == d.delim &&
+		c.urlPart == d.urlPart &&
+		c.jsCtx == d.jsCtx &&
+		c.attr == d.attr &&
+		c.element == d.element &&
+		c.err == d.err
+}
+
+// mangle produces an identifier that includes a suffix that distinguishes it
+// from template names mangled with different contexts.
+func (c context) mangle(templateName string) string {
+	// The mangled name for the default context is the input templateName.
+	if c.state == stateText {
+		return templateName
+	}
+	s := templateName + "$htmltemplate_" + c.state.String()
+	if c.delim != delimNone {
+		s += "_" + c.delim.String()
+	}
+	if c.urlPart != urlPartNone {
+		s += "_" + c.urlPart.String()
+	}
+	if c.jsCtx != jsCtxRegexp {
+		s += "_" + c.jsCtx.String()
+	}
+	if c.attr != attrNone {
+		s += "_" + c.attr.String()
+	}
+	if c.element != elementNone {
+		s += "_" + c.element.String()
+	}
+	return s
+}
+
+// state describes a high-level HTML parser state.
+//
+// It bounds the top of the element stack, and by extension the HTML insertion
+// mode, but also contains state that does not correspond to anything in the
+// HTML5 parsing algorithm because a single token production in the HTML
+// grammar may contain embedded actions in a template. For instance, the quoted
+// HTML attribute produced by
+//     <div title="Hello {{.World}}">
+// is a single token in HTML's grammar but in a template spans several nodes.
+type state uint8
+
+//go:generate stringer -type state
+
+const (
+	// stateText is parsed character data. An HTML parser is in
+	// this state when its parse position is outside an HTML tag,
+	// directive, comment, and special element body.
+	stateText state = iota
+	// stateTag occurs before an HTML attribute or the end of a tag.
+	stateTag
+	// stateAttrName occurs inside an attribute name.
+	// It occurs between the ^'s in ` ^name^ = value`.
+	stateAttrName
+	// stateAfterName occurs after an attr name has ended but before any
+	// equals sign. It occurs between the ^'s in ` name^ ^= value`.
+	stateAfterName
+	// stateBeforeValue occurs after the equals sign but before the value.
+	// It occurs between the ^'s in ` name =^ ^value`.
+	stateBeforeValue
+	// stateHTMLCmt occurs inside an <!-- HTML comment -->.
+	stateHTMLCmt
+	// stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
+	// as described at https://www.w3.org/TR/html5/syntax.html#elements-0
+	stateRCDATA
+	// stateAttr occurs inside an HTML attribute whose content is text.
+	stateAttr
+	// stateURL occurs inside an HTML attribute whose content is a URL.
+	stateURL
+	// stateSrcset occurs inside an HTML srcset attribute.
+	stateSrcset
+	// stateJS occurs inside an event handler or script element.
+	stateJS
+	// stateJSDqStr occurs inside a JavaScript double quoted string.
+	stateJSDqStr
+	// stateJSSqStr occurs inside a JavaScript single quoted string.
+	stateJSSqStr
+	// stateJSRegexp occurs inside a JavaScript regexp literal.
+	stateJSRegexp
+	// stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+	stateJSBlockCmt
+	// stateJSLineCmt occurs inside a JavaScript // line comment.
+	stateJSLineCmt
+	// stateCSS occurs inside a <style> element or style attribute.
+	stateCSS
+	// stateCSSDqStr occurs inside a CSS double quoted string.
+	stateCSSDqStr
+	// stateCSSSqStr occurs inside a CSS single quoted string.
+	stateCSSSqStr
+	// stateCSSDqURL occurs inside a CSS double quoted url("...").
+	stateCSSDqURL
+	// stateCSSSqURL occurs inside a CSS single quoted url('...').
+	stateCSSSqURL
+	// stateCSSURL occurs inside a CSS unquoted url(...).
+	stateCSSURL
+	// stateCSSBlockCmt occurs inside a CSS /* block comment */.
+	stateCSSBlockCmt
+	// stateCSSLineCmt occurs inside a CSS // line comment.
+	stateCSSLineCmt
+	// stateError is an infectious error state outside any valid
+	// HTML/CSS/JS construct.
+	stateError
+	// stateDead marks unreachable code after a {{break}} or {{continue}}.
+	stateDead
+)
+
+// isComment is true for any state that contains content meant for template
+// authors & maintainers, not for end-users or machines.
+func isComment(s state) bool {
+	switch s {
+	case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
+		return true
+	}
+	return false
+}
+
+// isInTag reports whether s occurs solely inside an HTML tag.
+func isInTag(s state) bool {
+	switch s {
+	case stateTag, stateAttrName, stateAfterName, stateBeforeValue, stateAttr:
+		return true
+	}
+	return false
+}
+
+// delim is the delimiter that will end the current HTML attribute.
+type delim uint8
+
+//go:generate stringer -type delim
+
+const (
+	// delimNone occurs outside any attribute.
+	delimNone delim = iota
+	// delimDoubleQuote occurs when a double quote (") closes the attribute.
+	delimDoubleQuote
+	// delimSingleQuote occurs when a single quote (') closes the attribute.
+	delimSingleQuote
+	// delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
+	// closes the attribute.
+	delimSpaceOrTagEnd
+)
+
+// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
+// encoding strategies.
+type urlPart uint8
+
+//go:generate stringer -type urlPart
+
+const (
+	// urlPartNone occurs when not in a URL, or possibly at the start:
+	// ^ in "^http://auth/path?k=v#frag".
+	urlPartNone urlPart = iota
+	// urlPartPreQuery occurs in the scheme, authority, or path; between the
+	// ^s in "h^ttp://auth/path^?k=v#frag".
+	urlPartPreQuery
+	// urlPartQueryOrFrag occurs in the query portion between the ^s in
+	// "http://auth/path?^k=v#frag^".
+	urlPartQueryOrFrag
+	// urlPartUnknown occurs due to joining of contexts both before and
+	// after the query separator.
+	urlPartUnknown
+)
+
+// jsCtx determines whether a '/' starts a regular expression literal or a
+// division operator.
+type jsCtx uint8
+
+//go:generate stringer -type jsCtx
+
+const (
+	// jsCtxRegexp occurs where a '/' would start a regexp literal.
+	jsCtxRegexp jsCtx = iota
+	// jsCtxDivOp occurs where a '/' would start a division operator.
+	jsCtxDivOp
+	// jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
+	jsCtxUnknown
+)
+
+// element identifies the HTML element when inside a start tag or special body.
+// Certain HTML elements (for example <script> and <style>) have bodies that are
+// treated differently from stateText so the element type is necessary to
+// transition into the correct context at the end of a tag and to identify the
+// end delimiter for the body.
+type element uint8
+
+//go:generate stringer -type element
+
+const (
+	// elementNone occurs outside a special tag or special element body.
+	elementNone element = iota
+	// elementScript corresponds to the raw text <script> element
+	// with JS MIME type or no type attribute.
+	elementScript
+	// elementStyle corresponds to the raw text <style> element.
+	elementStyle
+	// elementTextarea corresponds to the RCDATA <textarea> element.
+	elementTextarea
+	// elementTitle corresponds to the RCDATA <title> element.
+	elementTitle
+)
+
+//go:generate stringer -type attr
+
+// attr identifies the current HTML attribute when inside the attribute,
+// that is, starting from stateAttrName until stateTag/stateText (exclusive).
+type attr uint8
+
+const (
+	// attrNone corresponds to a normal attribute or no attribute.
+	attrNone attr = iota
+	// attrScript corresponds to an event handler attribute.
+	attrScript
+	// attrScriptType corresponds to the type attribute in a script HTML element.
+	attrScriptType
+	// attrStyle corresponds to the style attribute whose value is CSS.
+	attrStyle
+	// attrURL corresponds to an attribute whose value is a URL.
+	attrURL
+	// attrSrcset corresponds to a srcset attribute.
+	attrSrcset
+)
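
For reference, a rough sketch (not part of the backported files) of what this context tracking buys: the same action receives a different escaper chain depending on where it appears. It uses the standard html/template import for brevity; the backport exposes the same API, and the escaped forms in the comments follow the expectations in content_test.go.

  package main

  import (
  	"html/template"
  	"os"
  )

  func main() {
  	// The same {{.}} appears in three contexts: HTML text, a URL query,
  	// and a JavaScript expression. Each occurrence gets the escapers for
  	// its context (htmlescaper, urlescaper|attrescaper per the aliases in
  	// doc.go, plus a JS value escaper).
  	t := template.Must(template.New("x").Parse(
  		`<p>{{.}}</p><a href="?q={{.}}">x</a><script>var v = {{.}};</script>`))
  	t.Execute(os.Stdout, `a & "b"`)
  	// HTML text:  a &amp; &#34;b&#34;
  	// URL query:  a%20%26%20%22b%22
  	// JS value:   "a \u0026 \"b\"" (JSON-style quoting)
  }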
diff --git a/internal/backport/html/template/css.go b/internal/backport/html/template/css.go
new file mode 100644
index 0000000..eb92fc9
--- /dev/null
+++ b/internal/backport/html/template/css.go
@@ -0,0 +1,260 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// endsWithCSSKeyword reports whether b ends with an ident that
+// case-insensitively matches the lower-case kw.
+func endsWithCSSKeyword(b []byte, kw string) bool {
+	i := len(b) - len(kw)
+	if i < 0 {
+		// Too short.
+		return false
+	}
+	if i != 0 {
+		r, _ := utf8.DecodeLastRune(b[:i])
+		if isCSSNmchar(r) {
+			// Too long.
+			return false
+		}
+	}
+	// Many CSS keywords, such as "!important", can have characters encoded,
+	// but the URI production does not allow that according to
+	// https://www.w3.org/TR/css3-syntax/#TOK-URI
+	// This does not attempt to recognize encoded keywords. For example,
+	// given "\75\72\6c" and "url" this returns false.
+	return string(bytes.ToLower(b[i:])) == kw
+}
+
+// isCSSNmchar reports whether rune is allowed anywhere in a CSS identifier.
+func isCSSNmchar(r rune) bool {
+	// Based on the CSS3 nmchar production but ignores multi-rune escape
+	// sequences.
+	// https://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
+	return 'a' <= r && r <= 'z' ||
+		'A' <= r && r <= 'Z' ||
+		'0' <= r && r <= '9' ||
+		r == '-' ||
+		r == '_' ||
+		// Non-ASCII cases below.
+		0x80 <= r && r <= 0xd7ff ||
+		0xe000 <= r && r <= 0xfffd ||
+		0x10000 <= r && r <= 0x10ffff
+}
+
+// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
+// If there is no change, it returns the input, otherwise it returns a slice
+// backed by a new array.
+// https://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
+func decodeCSS(s []byte) []byte {
+	i := bytes.IndexByte(s, '\\')
+	if i == -1 {
+		return s
+	}
+	// The UTF-8 sequence for a codepoint is never longer than 1 + the
+	// number of hex digits needed to represent that codepoint, so len(s)
+	// is an upper bound on the output length.
+	b := make([]byte, 0, len(s))
+	for len(s) != 0 {
+		i := bytes.IndexByte(s, '\\')
+		if i == -1 {
+			i = len(s)
+		}
+		b, s = append(b, s[:i]...), s[i:]
+		if len(s) < 2 {
+			break
+		}
+		// https://www.w3.org/TR/css3-syntax/#SUBTOK-escape
+		// escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
+		if isHex(s[1]) {
+			// https://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
+			//   unicode ::= '\' [0-9a-fA-F]{1,6} wc?
+			j := 2
+			for j < len(s) && j < 7 && isHex(s[j]) {
+				j++
+			}
+			r := hexDecode(s[1:j])
+			if r > unicode.MaxRune {
+				r, j = r/16, j-1
+			}
+			n := utf8.EncodeRune(b[len(b):cap(b)], r)
+			// The optional space at the end allows a hex
+			// sequence to be followed by a literal hex.
+			// string(decodeCSS([]byte(`\A B`))) == "\nB"
+			b, s = b[:len(b)+n], skipCSSSpace(s[j:])
+		} else {
+			// `\\` decodes to `\` and `\"` to `"`.
+			_, n := utf8.DecodeRune(s[1:])
+			b, s = append(b, s[1:1+n]...), s[1+n:]
+		}
+	}
+	return b
+}
+
+// isHex reports whether the given character is a hex digit.
+func isHex(c byte) bool {
+	return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
+}
+
+// hexDecode decodes a short hex digit sequence: "10" -> 16.
+func hexDecode(s []byte) rune {
+	n := '\x00'
+	for _, c := range s {
+		n <<= 4
+		switch {
+		case '0' <= c && c <= '9':
+			n |= rune(c - '0')
+		case 'a' <= c && c <= 'f':
+			n |= rune(c-'a') + 10
+		case 'A' <= c && c <= 'F':
+			n |= rune(c-'A') + 10
+		default:
+			panic(fmt.Sprintf("Bad hex digit in %q", s))
+		}
+	}
+	return n
+}
+
+// skipCSSSpace returns a suffix of c, skipping over a single space.
+func skipCSSSpace(c []byte) []byte {
+	if len(c) == 0 {
+		return c
+	}
+	// wc ::= #x9 | #xA | #xC | #xD | #x20
+	switch c[0] {
+	case '\t', '\n', '\f', ' ':
+		return c[1:]
+	case '\r':
+		// This differs from CSS3's wc production because it contains a
+		// probable spec error whereby wc contains all the single byte
+		// sequences in nl (newline) but not CRLF.
+		if len(c) >= 2 && c[1] == '\n' {
+			return c[2:]
+		}
+		return c[1:]
+	}
+	return c
+}
+
+// isCSSSpace reports whether b is a CSS space char as defined in wc.
+func isCSSSpace(b byte) bool {
+	switch b {
+	case '\t', '\n', '\f', '\r', ' ':
+		return true
+	}
+	return false
+}
+
+// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
+func cssEscaper(args ...interface{}) string {
+	s, _ := stringify(args...)
+	var b strings.Builder
+	r, w, written := rune(0), 0, 0
+	for i := 0; i < len(s); i += w {
+		// See comment in htmlEscaper.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		var repl string
+		switch {
+		case int(r) < len(cssReplacementTable) && cssReplacementTable[r] != "":
+			repl = cssReplacementTable[r]
+		default:
+			continue
+		}
+		if written == 0 {
+			b.Grow(len(s))
+		}
+		b.WriteString(s[written:i])
+		b.WriteString(repl)
+		written = i + w
+		if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
+			b.WriteByte(' ')
+		}
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
+
+var cssReplacementTable = []string{
+	0:    `\0`,
+	'\t': `\9`,
+	'\n': `\a`,
+	'\f': `\c`,
+	'\r': `\d`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\22`,
+	'&':  `\26`,
+	'\'': `\27`,
+	'(':  `\28`,
+	')':  `\29`,
+	'+':  `\2b`,
+	'/':  `\2f`,
+	':':  `\3a`,
+	';':  `\3b`,
+	'<':  `\3c`,
+	'>':  `\3e`,
+	'\\': `\\`,
+	'{':  `\7b`,
+	'}':  `\7d`,
+}
+
+var expressionBytes = []byte("expression")
+var mozBindingBytes = []byte("mozbinding")
+
+// cssValueFilter allows innocuous CSS values in the output including CSS
+// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
+// (inherit, blue), and colors (#888).
+// It filters out unsafe values, such as those that affect token boundaries,
+// and anything that might execute scripts.
+func cssValueFilter(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeCSS {
+		return s
+	}
+	b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)
+
+	// CSS3 error handling is specified as honoring string boundaries per
+	// https://www.w3.org/TR/css3-syntax/#error-handling :
+	//     Malformed declarations. User agents must handle unexpected
+	//     tokens encountered while parsing a declaration by reading until
+	//     the end of the declaration, while observing the rules for
+	//     matching pairs of (), [], {}, "", and '', and correctly handling
+	//     escapes. For example, a malformed declaration may be missing a
+	//     property, colon (:) or value.
+	// So we need to make sure that values do not have mismatched bracket
+	// or quote characters to prevent the browser from restarting parsing
+	// inside a string that might embed JavaScript source.
+	for i, c := range b {
+		switch c {
+		case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
+			return filterFailsafe
+		case '-':
+			// Disallow <!-- or -->.
+			// -- should not appear in valid identifiers.
+			if i != 0 && b[i-1] == '-' {
+				return filterFailsafe
+			}
+		default:
+			if c < utf8.RuneSelf && isCSSNmchar(rune(c)) {
+				id = append(id, c)
+			}
+		}
+	}
+	id = bytes.ToLower(id)
+	if bytes.Contains(id, expressionBytes) || bytes.Contains(id, mozBindingBytes) {
+		return filterFailsafe
+	}
+	return string(b)
+}
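
For reference, a minimal sketch (not part of the backported files) of the value filtering above as seen through the public API, using the standard html/template import for brevity; the backport exposes the same API.

  package main

  import (
  	"html/template"
  	"os"
  )

  func main() {
  	t := template.Must(template.New("x").Parse(`<p style="color: {{.}}">x</p>` + "\n"))

  	// A plain keyword value is allowed through unchanged.
  	t.Execute(os.Stdout, "red") // <p style="color: red">x</p>

  	// Values that could alter token boundaries or run script are replaced
  	// with the failsafe token.
  	t.Execute(os.Stdout, "expression(alert(1337))") // <p style="color: ZgotmplZ">x</p>
  }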
diff --git a/internal/backport/html/template/css_test.go b/internal/backport/html/template/css_test.go
new file mode 100644
index 0000000..a735638
--- /dev/null
+++ b/internal/backport/html/template/css_test.go
@@ -0,0 +1,281 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func TestEndsWithCSSKeyword(t *testing.T) {
+	tests := []struct {
+		css, kw string
+		want    bool
+	}{
+		{"", "url", false},
+		{"url", "url", true},
+		{"URL", "url", true},
+		{"Url", "url", true},
+		{"url", "important", false},
+		{"important", "important", true},
+		{"image-url", "url", false},
+		{"imageurl", "url", false},
+		{"image url", "url", true},
+	}
+	for _, test := range tests {
+		got := endsWithCSSKeyword([]byte(test.css), test.kw)
+		if got != test.want {
+			t.Errorf("want %t but got %t for css=%v, kw=%v", test.want, got, test.css, test.kw)
+		}
+	}
+}
+
+func TestIsCSSNmchar(t *testing.T) {
+	tests := []struct {
+		rune rune
+		want bool
+	}{
+		{0, false},
+		{'0', true},
+		{'9', true},
+		{'A', true},
+		{'Z', true},
+		{'a', true},
+		{'z', true},
+		{'_', true},
+		{'-', true},
+		{':', false},
+		{';', false},
+		{' ', false},
+		{0x7f, false},
+		{0x80, true},
+		{0x1234, true},
+		{0xd800, false},
+		{0xdc00, false},
+		{0xfffe, false},
+		{0x10000, true},
+		{0x110000, false},
+	}
+	for _, test := range tests {
+		got := isCSSNmchar(test.rune)
+		if got != test.want {
+			t.Errorf("%q: want %t but got %t", string(test.rune), test.want, got)
+		}
+	}
+}
+
+func TestDecodeCSS(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{``, ``},
+		{`foo`, `foo`},
+		{`foo\`, `foo`},
+		{`foo\\`, `foo\`},
+		{`\`, ``},
+		{`\A`, "\n"},
+		{`\a`, "\n"},
+		{`\0a`, "\n"},
+		{`\00000a`, "\n"},
+		{`\000000a`, "\u0000a"},
+		{`\1234 5`, "\u1234" + "5"},
+		{`\1234\20 5`, "\u1234" + " 5"},
+		{`\1234\A 5`, "\u1234" + "\n5"},
+		{"\\1234\t5", "\u1234" + "5"},
+		{"\\1234\n5", "\u1234" + "5"},
+		{"\\1234\r\n5", "\u1234" + "5"},
+		{`\12345`, "\U00012345"},
+		{`\\`, `\`},
+		{`\\ `, `\ `},
+		{`\"`, `"`},
+		{`\'`, `'`},
+		{`\.`, `.`},
+		{`\. .`, `. .`},
+		{
+			`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e  fox jumps\2028over the \3c canine class=\22lazy\22 \3e dog\3c/canine\3e`,
+			"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>",
+		},
+	}
+	for _, test := range tests {
+		got1 := string(decodeCSS([]byte(test.css)))
+		if got1 != test.want {
+			t.Errorf("%q: want\n\t%q\nbut got\n\t%q", test.css, test.want, got1)
+		}
+		recoded := cssEscaper(got1)
+		if got2 := string(decodeCSS([]byte(recoded))); got2 != test.want {
+			t.Errorf("%q: escape & decode not dual for %q", test.css, recoded)
+		}
+	}
+}
+
+func TestHexDecode(t *testing.T) {
+	for i := 0; i < 0x200000; i += 101 /* coprime with 16 */ {
+		s := strconv.FormatInt(int64(i), 16)
+		if got := int(hexDecode([]byte(s))); got != i {
+			t.Errorf("%s: want %d but got %d", s, i, got)
+		}
+		s = strings.ToUpper(s)
+		if got := int(hexDecode([]byte(s))); got != i {
+			t.Errorf("%s: want %d but got %d", s, i, got)
+		}
+	}
+}
+
+func TestSkipCSSSpace(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{"", ""},
+		{"foo", "foo"},
+		{"\n", ""},
+		{"\r\n", ""},
+		{"\r", ""},
+		{"\t", ""},
+		{" ", ""},
+		{"\f", ""},
+		{" foo", "foo"},
+		{"  foo", " foo"},
+		{`\20`, `\20`},
+	}
+	for _, test := range tests {
+		got := string(skipCSSSpace([]byte(test.css)))
+		if got != test.want {
+			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+		}
+	}
+}
+
+func TestCSSEscaper(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	want := ("\\0\x01\x02\x03\x04\x05\x06\x07" +
+		"\x08\\9 \\a\x0b\\c \\d\x0E\x0F" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17" +
+		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !\22#$%\26\27\28\29*\2b,-.\2f ` +
+		`0123456789\3a\3b\3c=\3e?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\\]^_` +
+		"`abcdefghijklmno" +
+		`pqrstuvwxyz\7b|\7d~` + "\u007f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	got := cssEscaper(input)
+	if got != want {
+		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+
+	got = string(decodeCSS([]byte(got)))
+	if input != got {
+		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", input, got)
+	}
+}
+
+func TestCSSValueFilter(t *testing.T) {
+	tests := []struct {
+		css, want string
+	}{
+		{"", ""},
+		{"foo", "foo"},
+		{"0", "0"},
+		{"0px", "0px"},
+		{"-5px", "-5px"},
+		{"1.25in", "1.25in"},
+		{"+.33em", "+.33em"},
+		{"100%", "100%"},
+		{"12.5%", "12.5%"},
+		{".foo", ".foo"},
+		{"#bar", "#bar"},
+		{"corner-radius", "corner-radius"},
+		{"-moz-corner-radius", "-moz-corner-radius"},
+		{"#000", "#000"},
+		{"#48f", "#48f"},
+		{"#123456", "#123456"},
+		{"U+00-FF, U+980-9FF", "U+00-FF, U+980-9FF"},
+		{"color: red", "color: red"},
+		{"<!--", "ZgotmplZ"},
+		{"-->", "ZgotmplZ"},
+		{"<![CDATA[", "ZgotmplZ"},
+		{"]]>", "ZgotmplZ"},
+		{"</style", "ZgotmplZ"},
+		{`"`, "ZgotmplZ"},
+		{`'`, "ZgotmplZ"},
+		{"`", "ZgotmplZ"},
+		{"\x00", "ZgotmplZ"},
+		{"/* foo */", "ZgotmplZ"},
+		{"//", "ZgotmplZ"},
+		{"[href=~", "ZgotmplZ"},
+		{"expression(alert(1337))", "ZgotmplZ"},
+		{"-expression(alert(1337))", "ZgotmplZ"},
+		{"expression", "ZgotmplZ"},
+		{"Expression", "ZgotmplZ"},
+		{"EXPRESSION", "ZgotmplZ"},
+		{"-moz-binding", "ZgotmplZ"},
+		{"-expr\x00ession(alert(1337))", "ZgotmplZ"},
+		{`-expr\0ession(alert(1337))`, "ZgotmplZ"},
+		{`-express\69on(alert(1337))`, "ZgotmplZ"},
+		{`-express\69 on(alert(1337))`, "ZgotmplZ"},
+		{`-exp\72 ession(alert(1337))`, "ZgotmplZ"},
+		{`-exp\52 ession(alert(1337))`, "ZgotmplZ"},
+		{`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+		{`-expre\0000073sion`, "-expre\x073sion"},
+		{`@import url evil.css`, "ZgotmplZ"},
+	}
+	for _, test := range tests {
+		got := cssValueFilter(test.css)
+		if got != test.want {
+			t.Errorf("%q: want %q but got %q", test.css, test.want, got)
+		}
+	}
+}
+
+func BenchmarkCSSEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkCSSEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssEscaper("The quick, brown fox jumps over the lazy dog.")
+	}
+}
+
+func BenchmarkDecodeCSS(b *testing.B) {
+	s := []byte(`The \3c i\3equick\3c/i\3e,\d\A\3cspan style=\27 color:brown\27\3e brown\3c/span\3e fox jumps\2028over the \3c canine class=\22lazy\22 \3edog\3c/canine\3e`)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		decodeCSS(s)
+	}
+}
+
+func BenchmarkDecodeCSSNoSpecials(b *testing.B) {
+	s := []byte("The quick, brown fox jumps over the lazy dog.")
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		decodeCSS(s)
+	}
+}
+
+func BenchmarkCSSValueFilter(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssValueFilter(`  e\78preS\0Sio/**/n(alert(1337))`)
+	}
+}
+
+func BenchmarkCSSValueFilterOk(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		cssValueFilter(`Times New Roman`)
+	}
+}
diff --git a/internal/backport/html/template/delim_string.go b/internal/backport/html/template/delim_string.go
new file mode 100644
index 0000000..6d80e09
--- /dev/null
+++ b/internal/backport/html/template/delim_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type delim"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _delim_name = "delimNonedelimDoubleQuotedelimSingleQuotedelimSpaceOrTagEnd"
+
+var _delim_index = [...]uint8{0, 9, 25, 41, 59}
+
+func (i delim) String() string {
+	if i >= delim(len(_delim_index)-1) {
+		return "delim(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _delim_name[_delim_index[i]:_delim_index[i+1]]
+}
diff --git a/internal/backport/html/template/doc.go b/internal/backport/html/template/doc.go
new file mode 100644
index 0000000..ed2a7e2
--- /dev/null
+++ b/internal/backport/html/template/doc.go
@@ -0,0 +1,241 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template (html/template) implements data-driven templates for
+generating HTML output safe against code injection. It provides the
+same interface as package text/template and should be used instead of
+text/template whenever the output is HTML.
+
+The documentation here focuses on the security features of the package.
+For information about how to program the templates themselves, see the
+documentation for text/template.
+
+Introduction
+
+This package wraps package text/template so you can share its template API
+to parse and execute HTML templates safely.
+
+  tmpl, err := template.New("name").Parse(...)
+  // Error checking elided
+  err = tmpl.Execute(out, data)
+
+If successful, tmpl will now be injection-safe. Otherwise, err is an error
+defined in the docs for ErrorCode.
+
+HTML templates treat data values as plain text which should be encoded so they
+can be safely embedded in an HTML document. The escaping is contextual, so
+actions can appear within JavaScript, CSS, and URI contexts.
+
+The security model used by this package assumes that template authors are
+trusted, while Execute's data parameter is not. More details are
+provided below.
+
+Example
+
+  import "golang.org/x/website/internal/backport/text/template"
+  ...
+  t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+  err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces
+
+  Hello, <script>alert('you have been pwned')</script>!
+
+but the contextual autoescaping in html/template
+
+  import "html/template"
+  ...
+  t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+  err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
+
+produces safe, escaped HTML output
+
+  Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
+
+
+Contexts
+
+This package understands HTML, CSS, JavaScript, and URIs. It adds sanitizing
+functions to each simple action pipeline, so given the excerpt
+
+  <a href="/search?q={{.}}">{{.}}</a>
+
+At parse time each {{.}} is overwritten to add escaping functions as necessary.
+In this case it becomes
+
+  <a href="/search?q={{. | urlescaper | attrescaper}}">{{. | htmlescaper}}</a>
+
+where urlescaper, attrescaper, and htmlescaper are aliases for internal escaping
+functions.
+
+For these internal escaping functions, if an action pipeline evaluates to
+a nil interface value, it is treated as though it were an empty string.
+
+Namespaced and data- attributes
+
+Attributes with a namespace are treated as if they had no namespace.
+Given the excerpt
+
+  <a my:href="{{.}}"></a>
+
+At parse time the attribute will be treated as if it were just "href".
+So at parse time the template becomes:
+
+  <a my:href="{{. | urlescaper | attrescaper}}"></a>
+
+Similarly to attributes with namespaces, attributes with a "data-" prefix are
+treated as if they had no "data-" prefix. So given
+
+  <a data-href="{{.}}"></a>
+
+At parse time this becomes
+
+  <a data-href="{{. | urlescaper | attrescaper}}"></a>
+
+If an attribute has both a namespace and a "data-" prefix, only the namespace
+will be removed when determining the context. For example
+
+  <a my:data-href="{{.}}"></a>
+
+This is handled as if "my:data-href" was just "data-href" and not "href" as
+it would be if the "data-" prefix were to be ignored too. Thus at parse
+time this becomes just
+
+  <a my:data-href="{{. | attrescaper}}"></a>
+
+As a special case, attributes with the namespace "xmlns" are always treated
+as containing URLs. Given the excerpts
+
+  <a xmlns:title="{{.}}"></a>
+  <a xmlns:href="{{.}}"></a>
+  <a xmlns:onclick="{{.}}"></a>
+
+At parse time they become:
+
+  <a xmlns:title="{{. | urlescaper | attrescaper}}"></a>
+  <a xmlns:href="{{. | urlescaper | attrescaper}}"></a>
+  <a xmlns:onclick="{{. | urlescaper | attrescaper}}"></a>
+
+Errors
+
+See the documentation of ErrorCode for details.
+
+
+A fuller picture
+
+The rest of this package comment may be skipped on first reading; it includes
+details necessary to understand escaping contexts and error messages. Most users
+will not need to understand these details.
+
+
+Contexts
+
+Assuming {{.}} is `O'Reilly: How are <i>you</i>?`, the table below shows
+how {{.}} appears when used in the context to the left.
+
+  Context                          {{.}} After
+  {{.}}                            O'Reilly: How are &lt;i&gt;you&lt;/i&gt;?
+  <a title='{{.}}'>                O&#39;Reilly: How are you?
+  <a href="/{{.}}">                O&#39;Reilly: How are %3ci%3eyou%3c/i%3e?
+  <a href="?q={{.}}">              O&#39;Reilly%3a%20How%20are%3ci%3e...%3f
+  <a onx='f("{{.}}")'>             O\x27Reilly: How are \x3ci\x3eyou...?
+  <a onx='f({{.}})'>               "O\x27Reilly: How are \x3ci\x3eyou...?"
+  <a onx='pattern = /{{.}}/;'>     O\x27Reilly: How are \x3ci\x3eyou...\x3f
+
+If used in an unsafe context, then the value might be filtered out:
+
+  Context                          {{.}} After
+  <a href="{{.}}">                 #ZgotmplZ
+
+since "O'Reilly:" is not an allowed protocol like "http:".
+
+
+If {{.}} is the innocuous word, `left`, then it can appear more widely,
+
+  Context                              {{.}} After
+  {{.}}                                left
+  <a title='{{.}}'>                    left
+  <a href='{{.}}'>                     left
+  <a href='/{{.}}'>                    left
+  <a href='?dir={{.}}'>                left
+  <a style="border-{{.}}: 4px">        left
+  <a style="align: {{.}}">             left
+  <a style="background: '{{.}}'">      left
+  <a style="background: url('{{.}}')"> left
+  <style>p.{{.}} {color:red}</style>   left
+
+Non-string values can be used in JavaScript contexts.
+If {{.}} is
+
+  struct{A,B string}{ "foo", "bar" }
+
+in the escaped template
+
+  <script>var pair = {{.}};</script>
+
+then the template output is
+
+  <script>var pair = {"A": "foo", "B": "bar"};</script>
+
+See package json to understand how non-string content is marshaled for
+embedding in JavaScript contexts.
+
+
+Typed Strings
+
+By default, this package assumes that all pipelines produce a plain text string.
+It adds escaping pipeline stages necessary to correctly and safely embed that
+plain text string in the appropriate context.
+
+When a data value is not plain text, you can make sure it is not over-escaped
+by marking it with its type.
+
+Types HTML, JS, URL, and others from content.go can carry safe content that is
+exempted from escaping.
+
+The template
+
+  Hello, {{.}}!
+
+can be invoked with
+
+  tmpl.Execute(out, template.HTML(`<b>World</b>`))
+
+to produce
+
+  Hello, <b>World</b>!
+
+instead of the
+
+  Hello, &lt;b&gt;World&lt;/b&gt;!
+
+that would have been produced if {{.}} was a regular string.
+
+
+Security Model
+
+https://rawgit.com/mikesamuel/sanitized-jquery-templates/trunk/safetemplate.html#problem_definition defines "safe" as used by this package.
+
+This package assumes that template authors are trusted, that Execute's data
+parameter is not, and seeks to preserve the properties below in the face
+of untrusted data:
+
+Structure Preservation Property:
+"... when a template author writes an HTML tag in a safe templating language,
+the browser will interpret the corresponding portion of the output as a tag
+regardless of the values of untrusted data, and similarly for other structures
+such as attribute boundaries and JS and CSS string boundaries."
+
+Code Effect Property:
+"... only code specified by the template author should run as a result of
+injecting the template output into a page and all code specified by the
+template author should run as a result of the same."
+
+Least Surprise Property:
+"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript, who
+knows that contextual autoescaping happens should be able to look at a {{.}}
+and correctly infer what sanitization happens."
+*/
+package template
diff --git a/internal/backport/html/template/element_string.go b/internal/backport/html/template/element_string.go
new file mode 100644
index 0000000..4573e08
--- /dev/null
+++ b/internal/backport/html/template/element_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type element"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _element_name = "elementNoneelementScriptelementStyleelementTextareaelementTitle"
+
+var _element_index = [...]uint8{0, 11, 24, 36, 51, 63}
+
+func (i element) String() string {
+	if i >= element(len(_element_index)-1) {
+		return "element(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _element_name[_element_index[i]:_element_index[i+1]]
+}
diff --git a/internal/backport/html/template/error.go b/internal/backport/html/template/error.go
new file mode 100644
index 0000000..0eae11f
--- /dev/null
+++ b/internal/backport/html/template/error.go
@@ -0,0 +1,234 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// Error describes a problem encountered during template Escaping.
+type Error struct {
+	// ErrorCode describes the kind of error.
+	ErrorCode ErrorCode
+	// Node is the node that caused the problem, if known.
+	// If not nil, it overrides Name and Line.
+	Node parse.Node
+	// Name is the name of the template in which the error was encountered.
+	Name string
+	// Line is the line number of the error in the template source or 0.
+	Line int
+	// Description is a human-readable description of the problem.
+	Description string
+}
+
+// ErrorCode is a code for a kind of error.
+type ErrorCode int
+
+// We define codes for each error that manifests while escaping templates, but
+// escaped templates may also fail at runtime.
+//
+// Output: "ZgotmplZ"
+// Example:
+//   <img src="{{.X}}">
+//   where {{.X}} evaluates to `javascript:...`
+// Discussion:
+//   "ZgotmplZ" is a special value that indicates that unsafe content reached a
+//   CSS or URL context at runtime. The output of the example will be
+//     <img src="#ZgotmplZ">
+//   If the data comes from a trusted source, use content types to exempt it
+//   from filtering: URL(`javascript:...`).
+const (
+	// OK indicates the lack of an error.
+	OK ErrorCode = iota
+
+	// ErrAmbigContext: "... appears in an ambiguous context within a URL"
+	// Example:
+	//   <a href="
+	//      {{if .C}}
+	//        /path/
+	//      {{else}}
+	//        /search?q=
+	//      {{end}}
+	//      {{.X}}
+	//   ">
+	// Discussion:
+	//   {{.X}} is in an ambiguous URL context since, depending on {{.C}},
+	//   it may be either a URL suffix or a query parameter.
+	//   Moving {{.X}} into the condition removes the ambiguity:
+	//   <a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
+	ErrAmbigContext
+
+	// ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
+	//   "... in unquoted attr", "... in attribute name"
+	// Example:
+	//   <a href = /search?q=foo>
+	//   <href=foo>
+	//   <form na<e=...>
+	//   <option selected<
+	// Discussion:
+	//   This is often due to a typo in an HTML element, but some runes
+	//   are banned in tag names, attribute names, and unquoted attribute
+	//   values because they can tickle parser ambiguities.
+	//   Quoting all attributes is the best policy.
+	ErrBadHTML
+
+	// ErrBranchEnd: "{{if}} branches end in different contexts"
+	// Example:
+	//   {{if .C}}<a href="{{end}}{{.X}}
+	// Discussion:
+	//   Package html/template statically examines each path through an
+	//   {{if}}, {{range}}, or {{with}} to escape any following pipelines.
+	//   The example is ambiguous since {{.X}} might be an HTML text node,
+	//   or a URL prefix in an HTML attribute. The context of {{.X}} is
+	//   used to figure out how to escape it, but that context depends on
+	//   the run-time value of {{.C}} which is not statically known.
+	//
+	//   The problem is usually something like missing quotes or angle
+	//   brackets, or can be avoided by refactoring to put the two contexts
+	//   into different branches of an if, range or with. If the problem
+	//   is in a {{range}} over a collection that should never be empty,
+	//   adding a dummy {{else}} can help.
+	ErrBranchEnd
+
+	// ErrEndContext: "... ends in a non-text context: ..."
+	// Examples:
+	//   <div
+	//   <div title="no close quote>
+	//   <script>f()
+	// Discussion:
+	//   Executed templates should produce a DocumentFragment of HTML.
+	//   Templates that end without closing tags will trigger this error.
+	//   Templates that should not be used in an HTML context or that
+	//   produce incomplete Fragments should not be executed directly.
+	//
+	//   {{define "main"}} <script>{{template "helper"}}</script> {{end}}
+	//   {{define "helper"}} document.write(' <div title=" ') {{end}}
+	//
+	//   "helper" does not produce a valid document fragment, so should
+	//   not be Executed directly.
+	ErrEndContext
+
+	// ErrNoSuchTemplate: "no such template ..."
+	// Examples:
+	//   {{define "main"}}<div {{template "attrs"}}>{{end}}
+	//   {{define "attrs"}}href="{{.URL}}"{{end}}
+	// Discussion:
+	//   Package html/template looks through template calls to compute the
+	//   context.
+	//   Here the {{.URL}} in "attrs" must be treated as a URL when called
+	//   from "main", but you will get this error if "attrs" is not defined
+	//   when "main" is parsed.
+	ErrNoSuchTemplate
+
+	// ErrOutputContext: "cannot compute output context for template ..."
+	// Examples:
+	//   {{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
+	// Discussion:
+	//   A recursive template does not end in the same context in which it
+	//   starts, and a reliable output context cannot be computed.
+	//   Look for typos in the named template.
+	//   If the template should not be called in the named start context,
+	//   look for calls to that template in unexpected contexts.
+	//   Maybe refactor recursive templates to not be recursive.
+	ErrOutputContext
+
+	// ErrPartialCharset: "unfinished JS regexp charset in ..."
+	// Example:
+	//     <script>var pattern = /foo[{{.Chars}}]/</script>
+	// Discussion:
+	//   Package html/template does not support interpolation into regular
+	//   expression literal character sets.
+	ErrPartialCharset
+
+	// ErrPartialEscape: "unfinished escape sequence in ..."
+	// Example:
+	//   <script>alert("\{{.X}}")</script>
+	// Discussion:
+	//   Package html/template does not support actions following a
+	//   backslash.
+	//   This is usually an error and there are better solutions; for
+	//   example
+	//     <script>alert("{{.X}}")</script>
+	//   should work, and if {{.X}} is a partial escape sequence such as
+	//   "xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
+	ErrPartialEscape
+
+	// ErrRangeLoopReentry: "on range loop re-entry: ..."
+	// Example:
+	//   <script>var x = [{{range .}}'{{.}},{{end}}]</script>
+	// Discussion:
+	//   If an iteration through a range would cause it to end in a
+	//   different context than an earlier pass, there is no single context.
+	//   In the example, a quote is missing, so it is not clear
+	//   whether {{.}} is meant to be inside a JS string or in a JS value
+	//   context. The second iteration would produce something like
+	//
+	//     <script>var x = ['firstValue,'secondValue]</script>
+	ErrRangeLoopReentry
+
+	// ErrSlashAmbig: '/' could start a division or regexp.
+	// Example:
+	//   <script>
+	//     {{if .C}}var x = 1{{end}}
+	//     /-{{.N}}/i.test(x) ? doThis : doThat();
+	//   </script>
+	// Discussion:
+	//   The example above could produce `var x = 1/-2/i.test(s)...`
+	//   in which the first '/' is a mathematical division operator or it
+	//   could produce `/-2/i.test(s)` in which the first '/' starts a
+	//   regexp literal.
+	//   Look for missing semicolons inside branches, and maybe add
+	//   parentheses to make it clear which interpretation you intend.
+	ErrSlashAmbig
+
+	// ErrPredefinedEscaper: "predefined escaper ... disallowed in template"
+	// Example:
+	//   <div class={{. | html}}>Hello<div>
+	// Discussion:
+	//   Package html/template already contextually escapes all pipelines to
+	//   produce HTML output safe against code injection. Manually escaping
+	//   pipeline output using the predefined escapers "html" or "urlquery" is
+	//   unnecessary, and may affect the correctness or safety of the escaped
+	//   pipeline output in Go 1.8 and earlier.
+	//
+	//   In most cases, such as the given example, this error can be resolved by
+	//   simply removing the predefined escaper from the pipeline and letting the
+	//   contextual autoescaper handle the escaping of the pipeline. In other
+	//   instances, where the predefined escaper occurs in the middle of a
+	//   pipeline where subsequent commands expect escaped input, e.g.
+	//     {{.X | html | makeALink}}
+	//   where makeALink does
+	//     return `<a href="`+input+`">link</a>`
+	//   consider refactoring the surrounding template to make use of the
+	//   contextual autoescaper, i.e.
+	//     <a href="{{.X}}">link</a>
+	//
+	//   To ease migration to Go 1.9 and beyond, "html" and "urlquery" will
+	//   continue to be allowed as the last command in a pipeline. However, if the
+	//   pipeline occurs in an unquoted attribute value context, "html" is
+	//   disallowed. Avoid using "html" and "urlquery" entirely in new templates.
+	ErrPredefinedEscaper
+)
+
+func (e *Error) Error() string {
+	switch {
+	case e.Node != nil:
+		loc, _ := (*parse.Tree)(nil).ErrorContext(e.Node)
+		return fmt.Sprintf("html/template:%s: %s", loc, e.Description)
+	case e.Line != 0:
+		return fmt.Sprintf("html/template:%s:%d: %s", e.Name, e.Line, e.Description)
+	case e.Name != "":
+		return fmt.Sprintf("html/template:%s: %s", e.Name, e.Description)
+	}
+	return "html/template: " + e.Description
+}
+
+// errorf creates an error given a format string f and args.
+// The template Name still needs to be supplied.
+func errorf(k ErrorCode, node parse.Node, line int, f string, args ...interface{}) *Error {
+	return &Error{k, node, "", line, fmt.Sprintf(f, args...)}
+}
diff --git a/internal/backport/html/template/escape.go b/internal/backport/html/template/escape.go
new file mode 100644
index 0000000..dfdfea5
--- /dev/null
+++ b/internal/backport/html/template/escape.go
@@ -0,0 +1,962 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"html"
+	"io"
+
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// escapeTemplate rewrites the named template, which must be
+// associated with t, to guarantee that the output of any of the named
+// templates is properly escaped. If no error is returned, then the named templates have
+// been modified. Otherwise the named templates have been rendered
+// unusable.
+func escapeTemplate(tmpl *Template, node parse.Node, name string) error {
+	c, _ := tmpl.esc.escapeTree(context{}, node, name, 0)
+	var err error
+	if c.err != nil {
+		err, c.err.Name = c.err, name
+	} else if c.state != stateText {
+		err = &Error{ErrEndContext, nil, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
+	}
+	if err != nil {
+		// Prevent execution of unsafe templates.
+		if t := tmpl.set[name]; t != nil {
+			t.escapeErr = err
+			t.text.Tree = nil
+			t.Tree = nil
+		}
+		return err
+	}
+	tmpl.esc.commit()
+	if t := tmpl.set[name]; t != nil {
+		t.escapeErr = escapeOK
+		t.Tree = t.text.Tree
+	}
+	return nil
+}
+
+// evalArgs formats the list of arguments into a string. It is equivalent to
+// fmt.Sprint(args...), except that it dereferences all pointers.
+func evalArgs(args ...interface{}) string {
+	// Optimization for simple common case of a single string argument.
+	if len(args) == 1 {
+		if s, ok := args[0].(string); ok {
+			return s
+		}
+	}
+	for i, arg := range args {
+		args[i] = indirectToStringerOrError(arg)
+	}
+	return fmt.Sprint(args...)
+}
+
+// funcMap maps command names to functions that render their inputs safe.
+var funcMap = template.FuncMap{
+	"_html_template_attrescaper":     attrEscaper,
+	"_html_template_commentescaper":  commentEscaper,
+	"_html_template_cssescaper":      cssEscaper,
+	"_html_template_cssvaluefilter":  cssValueFilter,
+	"_html_template_htmlnamefilter":  htmlNameFilter,
+	"_html_template_htmlescaper":     htmlEscaper,
+	"_html_template_jsregexpescaper": jsRegexpEscaper,
+	"_html_template_jsstrescaper":    jsStrEscaper,
+	"_html_template_jsvalescaper":    jsValEscaper,
+	"_html_template_nospaceescaper":  htmlNospaceEscaper,
+	"_html_template_rcdataescaper":   rcdataEscaper,
+	"_html_template_srcsetescaper":   srcsetFilterAndEscaper,
+	"_html_template_urlescaper":      urlEscaper,
+	"_html_template_urlfilter":       urlFilter,
+	"_html_template_urlnormalizer":   urlNormalizer,
+	"_eval_args_":                    evalArgs,
+}
+
+// escaper collects type inferences about templates and changes needed to make
+// templates injection safe.
+type escaper struct {
+	// ns is the nameSpace that this escaper is associated with.
+	ns *nameSpace
+	// output[templateName] is the output context for a templateName that
+	// has been mangled to include its input context.
+	output map[string]context
+	// derived[c.mangle(name)] maps to a template derived from the template
+	// named name, for the start context c.
+	derived map[string]*template.Template
+	// called[templateName] is a set of called mangled template names.
+	called map[string]bool
+	// xxxNodeEdits are the accumulated edits to apply during commit.
+	// Such edits are not applied immediately in case a template set
+	// executes a given template in different escaping contexts.
+	actionNodeEdits   map[*parse.ActionNode][]string
+	templateNodeEdits map[*parse.TemplateNode]string
+	textNodeEdits     map[*parse.TextNode][]byte
+	// rangeContext holds context about the current range loop.
+	rangeContext *rangeContext
+}
+
+// rangeContext holds information about the current range loop.
+type rangeContext struct {
+	outer     *rangeContext // outer loop
+	breaks    []context     // context at each break action
+	continues []context     // context at each continue action
+}
+
+// makeEscaper creates a blank escaper for the given set.
+func makeEscaper(n *nameSpace) escaper {
+	return escaper{
+		n,
+		map[string]context{},
+		map[string]*template.Template{},
+		map[string]bool{},
+		map[*parse.ActionNode][]string{},
+		map[*parse.TemplateNode]string{},
+		map[*parse.TextNode][]byte{},
+		nil,
+	}
+}
+
+// filterFailsafe is an innocuous word that is emitted in place of unsafe values
+// by sanitizer functions. It is not a keyword in any programming language,
+// contains no special characters, is not empty, and when it appears in output
+// it is distinct enough that a developer can find the source of the problem
+// via a search engine.
+const filterFailsafe = "ZgotmplZ"
+
+// escape escapes a template node.
+func (e *escaper) escape(c context, n parse.Node) context {
+	switch n := n.(type) {
+	case *parse.ActionNode:
+		return e.escapeAction(c, n)
+	case *parse.BreakNode:
+		c.n = n
+		e.rangeContext.breaks = append(e.rangeContext.breaks, c)
+		return context{state: stateDead}
+	case *parse.CommentNode:
+		return c
+	case *parse.ContinueNode:
+		c.n = n
+		e.rangeContext.continues = append(e.rangeContext.continues, c)
+		return context{state: stateDead}
+	case *parse.IfNode:
+		return e.escapeBranch(c, &n.BranchNode, "if")
+	case *parse.ListNode:
+		return e.escapeList(c, n)
+	case *parse.RangeNode:
+		return e.escapeBranch(c, &n.BranchNode, "range")
+	case *parse.TemplateNode:
+		return e.escapeTemplate(c, n)
+	case *parse.TextNode:
+		return e.escapeText(c, n)
+	case *parse.WithNode:
+		return e.escapeBranch(c, &n.BranchNode, "with")
+	}
+	panic("escaping " + n.String() + " is unimplemented")
+}
+
+// escapeAction escapes an action template node.
+func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+	if len(n.Pipe.Decl) != 0 {
+		// A local variable assignment, not an interpolation.
+		return c
+	}
+	c = nudge(c)
+	// Check for disallowed use of predefined escapers in the pipeline.
+	for pos, idNode := range n.Pipe.Cmds {
+		node, ok := idNode.Args[0].(*parse.IdentifierNode)
+		if !ok {
+			// A predefined escaper "esc" will never be found as an identifier in a
+			// Chain or Field node, since:
+			// - "esc.x ..." is invalid, since predefined escapers return strings, and
+			//   strings do not have methods, keys or fields.
+			// - "... .esc" is invalid, since predefined escapers are global functions,
+			//   not methods or fields of any types.
+			// Therefore, it is safe to ignore these two node types.
+			continue
+		}
+		ident := node.Ident
+		if _, ok := predefinedEscapers[ident]; ok {
+			if pos < len(n.Pipe.Cmds)-1 ||
+				c.state == stateAttr && c.delim == delimSpaceOrTagEnd && ident == "html" {
+				return context{
+					state: stateError,
+					err:   errorf(ErrPredefinedEscaper, n, n.Line, "predefined escaper %q disallowed in template", ident),
+				}
+			}
+		}
+	}
+	s := make([]string, 0, 3)
+	switch c.state {
+	case stateError:
+		return c
+	case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
+		switch c.urlPart {
+		case urlPartNone:
+			s = append(s, "_html_template_urlfilter")
+			fallthrough
+		case urlPartPreQuery:
+			switch c.state {
+			case stateCSSDqStr, stateCSSSqStr:
+				s = append(s, "_html_template_cssescaper")
+			default:
+				s = append(s, "_html_template_urlnormalizer")
+			}
+		case urlPartQueryOrFrag:
+			s = append(s, "_html_template_urlescaper")
+		case urlPartUnknown:
+			return context{
+				state: stateError,
+				err:   errorf(ErrAmbigContext, n, n.Line, "%s appears in an ambiguous context within a URL", n),
+			}
+		default:
+			panic(c.urlPart.String())
+		}
+	case stateJS:
+		s = append(s, "_html_template_jsvalescaper")
+		// A slash after a value starts a div operator.
+		c.jsCtx = jsCtxDivOp
+	case stateJSDqStr, stateJSSqStr:
+		s = append(s, "_html_template_jsstrescaper")
+	case stateJSRegexp:
+		s = append(s, "_html_template_jsregexpescaper")
+	case stateCSS:
+		s = append(s, "_html_template_cssvaluefilter")
+	case stateText:
+		s = append(s, "_html_template_htmlescaper")
+	case stateRCDATA:
+		s = append(s, "_html_template_rcdataescaper")
+	case stateAttr:
+		// Handled below in delim check.
+	case stateAttrName, stateTag:
+		c.state = stateAttrName
+		s = append(s, "_html_template_htmlnamefilter")
+	case stateSrcset:
+		s = append(s, "_html_template_srcsetescaper")
+	default:
+		if isComment(c.state) {
+			s = append(s, "_html_template_commentescaper")
+		} else {
+			panic("unexpected state " + c.state.String())
+		}
+	}
+	switch c.delim {
+	case delimNone:
+		// No extra-escaping needed for raw text content.
+	case delimSpaceOrTagEnd:
+		s = append(s, "_html_template_nospaceescaper")
+	default:
+		s = append(s, "_html_template_attrescaper")
+	}
+	e.editActionNode(n, s)
+	return c
+}
+
+// ensurePipelineContains ensures that the pipeline ends with the commands with
+// the identifiers in s in order. If the pipeline ends with a predefined escaper
+// (i.e. "html" or "urlquery"), merge it with the identifiers in s.
+func ensurePipelineContains(p *parse.PipeNode, s []string) {
+	if len(s) == 0 {
+		// Do not rewrite pipeline if we have no escapers to insert.
+		return
+	}
+	// Precondition: p.Cmds contains at most one predefined escaper and the
+	// escaper will be present at p.Cmds[len(p.Cmds)-1]. This precondition is
+	// always true because of the checks in escapeAction.
+	pipelineLen := len(p.Cmds)
+	if pipelineLen > 0 {
+		lastCmd := p.Cmds[pipelineLen-1]
+		if idNode, ok := lastCmd.Args[0].(*parse.IdentifierNode); ok {
+			if esc := idNode.Ident; predefinedEscapers[esc] {
+				// Pipeline ends with a predefined escaper.
+				if len(p.Cmds) == 1 && len(lastCmd.Args) > 1 {
+					// Special case: pipeline is of the form {{ esc arg1 arg2 ... argN }},
+					// where esc is the predefined escaper, and arg1...argN are its arguments.
+					// Convert this into the equivalent form
+					// {{ _eval_args_ arg1 arg2 ... argN | esc }}, so that esc can be easily
+					// merged with the escapers in s.
+					lastCmd.Args[0] = parse.NewIdentifier("_eval_args_").SetTree(nil).SetPos(lastCmd.Args[0].Position())
+					p.Cmds = appendCmd(p.Cmds, newIdentCmd(esc, p.Position()))
+					pipelineLen++
+				}
+				// If any of the commands in s that we are about to insert is equivalent
+				// to the predefined escaper, use the predefined escaper instead.
+				dup := false
+				for i, escaper := range s {
+					if escFnsEq(esc, escaper) {
+						s[i] = idNode.Ident
+						dup = true
+					}
+				}
+				if dup {
+					// The predefined escaper will already be inserted along with the
+					// escapers in s, so do not copy it to the rewritten pipeline.
+					pipelineLen--
+				}
+			}
+		}
+	}
+	// Rewrite the pipeline, creating the escapers in s at the end of the pipeline.
+	newCmds := make([]*parse.CommandNode, pipelineLen, pipelineLen+len(s))
+	insertedIdents := make(map[string]bool)
+	for i := 0; i < pipelineLen; i++ {
+		cmd := p.Cmds[i]
+		newCmds[i] = cmd
+		if idNode, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
+			insertedIdents[normalizeEscFn(idNode.Ident)] = true
+		}
+	}
+	for _, name := range s {
+		if !insertedIdents[normalizeEscFn(name)] {
+			// When two templates share an underlying parse tree via the use of
+			// AddParseTree and one template is executed after the other, this check
+			// ensures that escapers that were already inserted into the pipeline on
+			// the first escaping pass do not get inserted again.
+			newCmds = appendCmd(newCmds, newIdentCmd(name, p.Position()))
+		}
+	}
+	p.Cmds = newCmds
+}
+
+// predefinedEscapers contains template predefined escapers that are equivalent
+// to some contextual escapers. Keep in sync with equivEscapers.
+var predefinedEscapers = map[string]bool{
+	"html":     true,
+	"urlquery": true,
+}
+
+// equivEscapers matches contextual escapers to equivalent predefined
+// template escapers.
+var equivEscapers = map[string]string{
+	// The following pairs of HTML escapers provide equivalent security
+	// guarantees, since they all escape '\000', '\'', '"', '&', '<', and '>'.
+	"_html_template_attrescaper":   "html",
+	"_html_template_htmlescaper":   "html",
+	"_html_template_rcdataescaper": "html",
+	// These two URL escapers produce URLs safe for embedding in a URL query by
+	// percent-encoding all the reserved characters specified in RFC 3986 Section
+	// 2.2
+	"_html_template_urlescaper": "urlquery",
+	// These two functions are not actually equivalent; urlquery is stricter as it
+	// escapes reserved characters (e.g. '#'), while _html_template_urlnormalizer
+	// does not. It is therefore only safe to replace _html_template_urlnormalizer
+	// with urlquery (this happens in ensurePipelineContains), but not the otherI've
+	// way around. We keep this entry around to preserve the behavior of templates
+	// written before Go 1.9, which might depend on this substitution taking place.
+	"_html_template_urlnormalizer": "urlquery",
+}
+
+// escFnsEq reports whether the two escaping functions are equivalent.
+func escFnsEq(a, b string) bool {
+	return normalizeEscFn(a) == normalizeEscFn(b)
+}
+
+// normalizeEscFn(a) is equal to normalizeEscFn(b) for any pair of names of
+// escaper functions a and b that are equivalent.
+func normalizeEscFn(e string) string {
+	if norm := equivEscapers[e]; norm != "" {
+		return norm
+	}
+	return e
+}
+
+// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
+// for all x.
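+//
+// For example (an illustrative note): the output of _html_template_cssescaper
+// contains none of the characters that _html_template_attrescaper would
+// escape, so applying the attribute escaper afterwards is a no-op.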
+var redundantFuncs = map[string]map[string]bool{
+	"_html_template_commentescaper": {
+		"_html_template_attrescaper":    true,
+		"_html_template_nospaceescaper": true,
+		"_html_template_htmlescaper":    true,
+	},
+	"_html_template_cssescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_jsregexpescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_jsstrescaper": {
+		"_html_template_attrescaper": true,
+	},
+	"_html_template_urlescaper": {
+		"_html_template_urlnormalizer": true,
+	},
+}
+
+// appendCmd appends the given command to the end of the command pipeline
+// unless it is redundant with the last command.
+func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
+	if n := len(cmds); n != 0 {
+		last, okLast := cmds[n-1].Args[0].(*parse.IdentifierNode)
+		next, okNext := cmd.Args[0].(*parse.IdentifierNode)
+		if okLast && okNext && redundantFuncs[last.Ident][next.Ident] {
+			return cmds
+		}
+	}
+	return append(cmds, cmd)
+}
+
+// newIdentCmd produces a command containing a single identifier node.
+func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode {
+	return &parse.CommandNode{
+		NodeType: parse.NodeCommand,
+		Args:     []parse.Node{parse.NewIdentifier(identifier).SetTree(nil).SetPos(pos)}, // TODO: SetTree.
+	}
+}
+
+// nudge returns the context that would result from following empty string
+// transitions from the input context.
+// For example, parsing:
+//     `<a href=`
+// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
+//     `<a href=x`
+// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
+// There are two transitions that happen when the 'x' is seen:
+// (1) Transition from a before-value state to a start-of-value state without
+//     consuming any character.
+// (2) Consume 'x' and transition past the first value character.
+// In this case, nudging produces the context after (1) happens.
+func nudge(c context) context {
+	switch c.state {
+	case stateTag:
+		// In `<foo {{.}}`, the action should emit an attribute.
+		c.state = stateAttrName
+	case stateBeforeValue:
+		// In `<foo bar={{.}}`, the action is an undelimited value.
+		c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
+	case stateAfterName:
+		// In `<foo bar {{.}}`, the action is an attribute name.
+		c.state, c.attr = stateAttrName, attrNone
+	}
+	return c
+}
+
+// join joins the two contexts of a branch template node. The result is an
+// error context if either of the input contexts are error contexts, or if the
+// input contexts differ.
+func join(a, b context, node parse.Node, nodeName string) context {
+	if a.state == stateError {
+		return a
+	}
+	if b.state == stateError {
+		return b
+	}
+	if a.state == stateDead {
+		return b
+	}
+	if b.state == stateDead {
+		return a
+	}
+	if a.eq(b) {
+		return a
+	}
+
+	c := a
+	c.urlPart = b.urlPart
+	if c.eq(b) {
+		// The contexts differ only by urlPart.
+		c.urlPart = urlPartUnknown
+		return c
+	}
+
+	c = a
+	c.jsCtx = b.jsCtx
+	if c.eq(b) {
+		// The contexts differ only by jsCtx.
+		c.jsCtx = jsCtxUnknown
+		return c
+	}
+
+	// Allow a nudged context to join with an unnudged one.
+	// This means that
+	//   <p title={{if .C}}{{.}}{{end}}
+	// ends in an unquoted value state even though the else branch
+	// ends in stateBeforeValue.
+	if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
+		if e := join(c, d, node, nodeName); e.state != stateError {
+			return e
+		}
+	}
+
+	return context{
+		state: stateError,
+		err:   errorf(ErrBranchEnd, node, 0, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
+	}
+}
+
+// escapeBranch escapes a branch template node: "if", "range" and "with".
+func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
+	if nodeName == "range" {
+		e.rangeContext = &rangeContext{outer: e.rangeContext}
+	}
+	c0 := e.escapeList(c, n.List)
+	if nodeName == "range" {
+		if c0.state != stateError {
+			c0 = joinRange(c0, e.rangeContext)
+		}
+		e.rangeContext = e.rangeContext.outer
+		if c0.state == stateError {
+			return c0
+		}
+
+		// The "true" branch of a "range" node can execute multiple times.
+		// We check that executing n.List once results in the same context
+		// as executing n.List twice.
+		e.rangeContext = &rangeContext{outer: e.rangeContext}
+		c1, _ := e.escapeListConditionally(c0, n.List, nil)
+		c0 = join(c0, c1, n, nodeName)
+		if c0.state == stateError {
+			e.rangeContext = e.rangeContext.outer
+			// Make clear that this is a problem on loop re-entry
+			// since developers tend to overlook that branch when
+			// debugging templates.
+			c0.err.Line = n.Line
+			c0.err.Description = "on range loop re-entry: " + c0.err.Description
+			return c0
+		}
+		c0 = joinRange(c0, e.rangeContext)
+		e.rangeContext = e.rangeContext.outer
+		if c0.state == stateError {
+			return c0
+		}
+	}
+	c1 := e.escapeList(c, n.ElseList)
+	return join(c0, c1, n, nodeName)
+}
+
+func joinRange(c0 context, rc *rangeContext) context {
+	// Merge contexts at break and continue statements into overall body context.
+	// In theory we could treat breaks differently from continues, but for now it is
+	// enough to treat them both as going back to the start of the loop (which may then stop).
+	for _, c := range rc.breaks {
+		c0 = join(c0, c, c.n, "range")
+		if c0.state == stateError {
+			c0.err.Line = c.n.(*parse.BreakNode).Line
+			c0.err.Description = "at range loop break: " + c0.err.Description
+			return c0
+		}
+	}
+	for _, c := range rc.continues {
+		c0 = join(c0, c, c.n, "range")
+		if c0.state == stateError {
+			c0.err.Line = c.n.(*parse.ContinueNode).Line
+			c0.err.Description = "at range loop continue: " + c0.err.Description
+			return c0
+		}
+	}
+	return c0
+}
+
+// escapeList escapes a list template node.
+func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+	if n == nil {
+		return c
+	}
+	for _, m := range n.Nodes {
+		c = e.escape(c, m)
+		if c.state == stateDead {
+			break
+		}
+	}
+	return c
+}
+
+// escapeListConditionally escapes a list node but only preserves edits and
+// inferences in e if the inferences and output context satisfy filter.
+// It returns the best guess at an output context, and the result of the filter
+// which is the same as whether e was updated.
+func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+	e1 := makeEscaper(e.ns)
+	e1.rangeContext = e.rangeContext
+	// Make type inferences available to f.
+	for k, v := range e.output {
+		e1.output[k] = v
+	}
+	c = e1.escapeList(c, n)
+	ok := filter != nil && filter(&e1, c)
+	if ok {
+		// Copy inferences and edits from e1 back into e.
+		for k, v := range e1.output {
+			e.output[k] = v
+		}
+		for k, v := range e1.derived {
+			e.derived[k] = v
+		}
+		for k, v := range e1.called {
+			e.called[k] = v
+		}
+		for k, v := range e1.actionNodeEdits {
+			e.editActionNode(k, v)
+		}
+		for k, v := range e1.templateNodeEdits {
+			e.editTemplateNode(k, v)
+		}
+		for k, v := range e1.textNodeEdits {
+			e.editTextNode(k, v)
+		}
+	}
+	return c, ok
+}
+
+// escapeTemplate escapes a {{template}} call node.
+func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
+	c, name := e.escapeTree(c, n, n.Name, n.Line)
+	if name != n.Name {
+		e.editTemplateNode(n, name)
+	}
+	return c
+}
+
+// escapeTree escapes the named template starting in the given context as
+// necessary and returns its output context.
+func (e *escaper) escapeTree(c context, node parse.Node, name string, line int) (context, string) {
+	// Mangle the template name with the input context to produce a reliable
+	// identifier.
+	dname := c.mangle(name)
+	e.called[dname] = true
+	if out, ok := e.output[dname]; ok {
+		// Already escaped.
+		return out, dname
+	}
+	t := e.template(name)
+	if t == nil {
+		// Two cases: The template exists but is empty, or has never been mentioned at
+		// all. Distinguish the cases in the error messages.
+		if e.ns.set[name] != nil {
+			return context{
+				state: stateError,
+				err:   errorf(ErrNoSuchTemplate, node, line, "%q is an incomplete or empty template", name),
+			}, dname
+		}
+		return context{
+			state: stateError,
+			err:   errorf(ErrNoSuchTemplate, node, line, "no such template %q", name),
+		}, dname
+	}
+	if dname != name {
+		// Use any template derived during an earlier call to escapeTemplate
+		// with different top level templates, or clone if necessary.
+		dt := e.template(dname)
+		if dt == nil {
+			dt = template.New(dname)
+			dt.Tree = &parse.Tree{Name: dname, Root: t.Root.CopyList()}
+			e.derived[dname] = dt
+		}
+		t = dt
+	}
+	return e.computeOutCtx(c, t), dname
+}
+
+// computeOutCtx takes a template and its start context and computes the output
+// context while storing any inferences in e.
+func (e *escaper) computeOutCtx(c context, t *template.Template) context {
+	// Propagate context over the body.
+	c1, ok := e.escapeTemplateBody(c, t)
+	if !ok {
+		// Look for a fixed point by assuming c1 as the output context.
+		if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
+			c1, ok = c2, true
+		}
+		// Use c1 as the error context if neither assumption worked.
+	}
+	if !ok && c1.state != stateError {
+		return context{
+			state: stateError,
+			err:   errorf(ErrOutputContext, t.Tree.Root, 0, "cannot compute output context for template %s", t.Name()),
+		}
+	}
+	return c1
+}
+
+// escapeTemplateBody escapes the given template assuming the given output
+// context, and returns the best guess at the output context and whether the
+// assumption was correct.
+func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
+	filter := func(e1 *escaper, c1 context) bool {
+		if c1.state == stateError {
+			// Do not update the input escaper, e.
+			return false
+		}
+		if !e1.called[t.Name()] {
+			// If t is not recursively called, then c1 is an
+			// accurate output context.
+			return true
+		}
+		// c1 is accurate if it matches our assumed output context.
+		return c.eq(c1)
+	}
+	// We need to assume an output context so that recursive template calls
+	// take the fast path out of escapeTree instead of infinitely recursing.
+	// Naively assuming that the input context is the same as the output
+	// works >90% of the time.
+	e.output[t.Name()] = c
+	return e.escapeListConditionally(c, t.Tree.Root, filter)
+}
+
+// delimEnds maps each delim to a string of characters that terminate it.
+var delimEnds = [...]string{
+	delimDoubleQuote: `"`,
+	delimSingleQuote: "'",
+	// Determined empirically by running the below in various browsers.
+	// var div = document.createElement("DIV");
+	// for (var i = 0; i < 0x10000; ++i) {
+	//   div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
+	//   if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
+	//     document.write("<p>U+" + i.toString(16));
+	// }
+	delimSpaceOrTagEnd: " \t\n\f\r>",
+}
+
+var doctypeBytes = []byte("<!DOCTYPE")
+
+// escapeText escapes a text template node.
+func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+	s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
+	for i != len(s) {
+		c1, nread := contextAfterText(c, s[i:])
+		i1 := i + nread
+		if c.state == stateText || c.state == stateRCDATA {
+			end := i1
+			if c1.state != c.state {
+				for j := end - 1; j >= i; j-- {
+					if s[j] == '<' {
+						end = j
+						break
+					}
+				}
+			}
+			for j := i; j < end; j++ {
+				if s[j] == '<' && !bytes.HasPrefix(bytes.ToUpper(s[j:]), doctypeBytes) {
+					b.Write(s[written:j])
+					b.WriteString("&lt;")
+					written = j + 1
+				}
+			}
+		} else if isComment(c.state) && c.delim == delimNone {
+			switch c.state {
+			case stateJSBlockCmt:
+				// https://es5.github.com/#x7.4:
+				// "Comments behave like white space and are
+				// discarded except that, if a MultiLineComment
+				// contains a line terminator character, then
+				// the entire comment is considered to be a
+				// LineTerminator for purposes of parsing by
+				// the syntactic grammar."
+				if bytes.ContainsAny(s[written:i1], "\n\r\u2028\u2029") {
+					b.WriteByte('\n')
+				} else {
+					b.WriteByte(' ')
+				}
+			case stateCSSBlockCmt:
+				b.WriteByte(' ')
+			}
+			written = i1
+		}
+		if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+			// Preserve the portion between written and the comment start.
+			cs := i1 - 2
+			if c1.state == stateHTMLCmt {
+				// "<!--" instead of "/*" or "//"
+				cs -= 2
+			}
+			b.Write(s[written:cs])
+			written = i1
+		}
+		if i == i1 && c.state == c1.state {
+			panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+		}
+		c, i = c1, i1
+	}
+
+	if written != 0 && c.state != stateError {
+		if !isComment(c.state) || c.delim != delimNone {
+			b.Write(n.Text[written:])
+		}
+		e.editTextNode(n, b.Bytes())
+	}
+	return c
+}
+
+// contextAfterText starts in context c, consumes some tokens from the front of
+// s, then returns the context after those tokens and the unprocessed suffix.
+func contextAfterText(c context, s []byte) (context, int) {
+	if c.delim == delimNone {
+		c1, i := tSpecialTagEnd(c, s)
+		if i == 0 {
+			// A special end tag (`</script>`) has been seen and
+			// all content preceding it has been consumed.
+			return c1, 0
+		}
+		// Consider all content up to any end tag.
+		return transitionFunc[c.state](c, s[:i])
+	}
+
+	// We are at the beginning of an attribute value.
+
+	i := bytes.IndexAny(s, delimEnds[c.delim])
+	if i == -1 {
+		i = len(s)
+	}
+	if c.delim == delimSpaceOrTagEnd {
+		// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
+		// lists the runes below as error characters.
+		// Error out because HTML parsers may differ on whether
+		// "<a id= onclick=f("     ends inside id's or onclick's value,
+		// "<a class=`foo "        ends inside a value,
+		// "<a style=font:'Arial'" needs open-quote fixup.
+		// IE treats '`' as a quotation character.
+		if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
+			return context{
+				state: stateError,
+				err:   errorf(ErrBadHTML, nil, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
+			}, len(s)
+		}
+	}
+	if i == len(s) {
+		// Remain inside the attribute.
+		// Decode the value so non-HTML rules can easily handle
+		//     <button onclick="alert(&quot;Hi!&quot;)">
+		// without having to entity decode token boundaries.
+		for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
+			c1, i1 := transitionFunc[c.state](c, u)
+			c, u = c1, u[i1:]
+		}
+		return c, len(s)
+	}
+
+	element := c.element
+
+	// If this is a non-JS "type" attribute inside "script" tag, do not treat the contents as JS.
+	if c.state == stateAttr && c.element == elementScript && c.attr == attrScriptType && !isJSType(string(s[:i])) {
+		element = elementNone
+	}
+
+	if c.delim != delimSpaceOrTagEnd {
+		// Consume any quote.
+		i++
+	}
+	// On exiting an attribute, we discard all state information
+	// except the state and element.
+	return context{state: stateTag, element: element}, i
+}
+
+// editActionNode records a change to an action pipeline for later commit.
+func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
+	if _, ok := e.actionNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.actionNodeEdits[n] = cmds
+}
+
+// editTemplateNode records a change to a {{template}} callee for later commit.
+func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
+	if _, ok := e.templateNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.templateNodeEdits[n] = callee
+}
+
+// editTextNode records a change to a text node for later commit.
+func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
+	if _, ok := e.textNodeEdits[n]; ok {
+		panic(fmt.Sprintf("node %s shared between templates", n))
+	}
+	e.textNodeEdits[n] = text
+}
+
+// commit applies changes to actions and template calls needed to contextually
+// autoescape content and adds any derived templates to the set.
+func (e *escaper) commit() {
+	for name := range e.output {
+		e.template(name).Funcs(funcMap)
+	}
+	// Any template from the name space associated with this escaper can be used
+	// to add derived templates to the underlying text/template name space.
+	tmpl := e.arbitraryTemplate()
+	for _, t := range e.derived {
+		if _, err := tmpl.text.AddParseTree(t.Name(), t.Tree); err != nil {
+			panic("error adding derived template")
+		}
+	}
+	for n, s := range e.actionNodeEdits {
+		ensurePipelineContains(n.Pipe, s)
+	}
+	for n, name := range e.templateNodeEdits {
+		n.Name = name
+	}
+	for n, s := range e.textNodeEdits {
+		n.Text = s
+	}
+	// Reset state that is specific to this commit so that the same changes are
+	// not re-applied to the template on subsequent calls to commit.
+	e.called = make(map[string]bool)
+	e.actionNodeEdits = make(map[*parse.ActionNode][]string)
+	e.templateNodeEdits = make(map[*parse.TemplateNode]string)
+	e.textNodeEdits = make(map[*parse.TextNode][]byte)
+}
+
+// template returns the named template given a mangled template name.
+func (e *escaper) template(name string) *template.Template {
+	// Any template from the name space associated with this escaper can be used
+	// to look up templates in the underlying text/template name space.
+	t := e.arbitraryTemplate().text.Lookup(name)
+	if t == nil {
+		t = e.derived[name]
+	}
+	return t
+}
+
+// arbitraryTemplate returns an arbitrary template from the name space
+// associated with e and panics if no templates are found.
+func (e *escaper) arbitraryTemplate() *Template {
+	for _, t := range e.ns.set {
+		return t
+	}
+	panic("no templates in name space")
+}
+
+// Forwarding functions so that clients need only import this package
+// to reach the general escaping functions of text/template.
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	template.HTMLEscape(w, b)
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	return template.HTMLEscapeString(s)
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return template.HTMLEscaper(args...)
+}
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	template.JSEscape(w, b)
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	return template.JSEscapeString(s)
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return template.JSEscaper(args...)
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return template.URLQueryEscaper(args...)
+}
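+
+// For example (an illustrative note, not additional API):
+// HTMLEscapeString("<b>O'Reilly</b>") returns "&lt;b&gt;O&#39;Reilly&lt;/b&gt;".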
diff --git a/internal/backport/html/template/escape_test.go b/internal/backport/html/template/escape_test.go
new file mode 100644
index 0000000..8145269
--- /dev/null
+++ b/internal/backport/html/template/escape_test.go
@@ -0,0 +1,1994 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+type badMarshaler struct{}
+
+func (x *badMarshaler) MarshalJSON() ([]byte, error) {
+	// Keys in valid JSON must be double quoted as must all strings.
+	return []byte("{ foo: 'not quite valid JSON' }"), nil
+}
+
+type goodMarshaler struct{}
+
+func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
+	return []byte(`{ "<foo>": "O'Reilly" }`), nil
+}
+
+func TestEscape(t *testing.T) {
+	data := struct {
+		F, T    bool
+		C, G, H string
+		A, E    []string
+		B, M    json.Marshaler
+		N       int
+		U       interface{} // untyped nil
+		Z       *int        // typed nil
+		W       HTML
+	}{
+		F: false,
+		T: true,
+		C: "<Cincinnati>",
+		G: "<Goodbye>",
+		H: "<Hello>",
+		A: []string{"<a>", "<b>"},
+		E: []string{},
+		N: 42,
+		B: &badMarshaler{},
+		M: &goodMarshaler{},
+		U: nil,
+		Z: nil,
+		W: HTML(`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`),
+	}
+	pdata := &data
+
+	tests := []struct {
+		name   string
+		input  string
+		output string
+	}{
+		{
+			"if",
+			"{{if .T}}Hello{{end}}, {{.C}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"else",
+			"{{if .F}}{{.H}}{{else}}{{.G}}{{end}}!",
+			"&lt;Goodbye&gt;!",
+		},
+		{
+			"overescaping1",
+			"Hello, {{.C | html}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"overescaping2",
+			"Hello, {{html .C}}!",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"overescaping3",
+			"{{with .C}}{{$msg := .}}Hello, {{$msg}}!{{end}}",
+			"Hello, &lt;Cincinnati&gt;!",
+		},
+		{
+			"assignment",
+			"{{if $x := .H}}{{$x}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"withBody",
+			"{{with .H}}{{.}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"withElse",
+			"{{with .E}}{{.}}{{else}}{{.H}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"rangeBody",
+			"{{range .A}}{{.}}{{end}}",
+			"&lt;a&gt;&lt;b&gt;",
+		},
+		{
+			"rangeElse",
+			"{{range .E}}{{.}}{{else}}{{.H}}{{end}}",
+			"&lt;Hello&gt;",
+		},
+		{
+			"nonStringValue",
+			"{{.T}}",
+			"true",
+		},
+		{
+			"untypedNilValue",
+			"{{.U}}",
+			"",
+		},
+		{
+			"typedNilValue",
+			"{{.Z}}",
+			"&lt;nil&gt;",
+		},
+		{
+			"constant",
+			`<a href="/search?q={{"'a<b'"}}">`,
+			`<a href="/search?q=%27a%3cb%27">`,
+		},
+		{
+			"multipleAttrs",
+			"<a b=1 c={{.H}}>",
+			"<a b=1 c=&lt;Hello&gt;>",
+		},
+		{
+			"urlStartRel",
+			`<a href='{{"/foo/bar?a=b&c=d"}}'>`,
+			`<a href='/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"urlStartAbsOk",
+			`<a href='{{"http://example.com/foo/bar?a=b&c=d"}}'>`,
+			`<a href='http://example.com/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"protocolRelativeURLStart",
+			`<a href='{{"//example.com:8000/foo/bar?a=b&c=d"}}'>`,
+			`<a href='//example.com:8000/foo/bar?a=b&amp;c=d'>`,
+		},
+		{
+			"pathRelativeURLStart",
+			`<a href="{{"/javascript:80/foo/bar"}}">`,
+			`<a href="/javascript:80/foo/bar">`,
+		},
+		{
+			"dangerousURLStart",
+			`<a href='{{"javascript:alert(%22pwned%22)"}}'>`,
+			`<a href='#ZgotmplZ'>`,
+		},
+		{
+			"dangerousURLStart2",
+			`<a href='  {{"javascript:alert(%22pwned%22)"}}'>`,
+			`<a href='  #ZgotmplZ'>`,
+		},
+		{
+			"nonHierURL",
+			`<a href={{"mailto:Muhammed \"The Greatest\" Ali <m.ali@example.com>"}}>`,
+			`<a href=mailto:Muhammed%20%22The%20Greatest%22%20Ali%20%3cm.ali@example.com%3e>`,
+		},
+		{
+			"urlPath",
+			`<a href='http://{{"javascript:80"}}/foo'>`,
+			`<a href='http://javascript:80/foo'>`,
+		},
+		{
+			"urlQuery",
+			`<a href='/search?q={{.H}}'>`,
+			`<a href='/search?q=%3cHello%3e'>`,
+		},
+		{
+			"urlFragment",
+			`<a href='/faq#{{.H}}'>`,
+			`<a href='/faq#%3cHello%3e'>`,
+		},
+		{
+			"urlBranch",
+			`<a href="{{if .F}}/foo?a=b{{else}}/bar{{end}}">`,
+			`<a href="/bar">`,
+		},
+		{
+			"urlBranchConflictMoot",
+			`<a href="{{if .T}}/foo?a={{else}}/bar#{{end}}{{.C}}">`,
+			`<a href="/foo?a=%3cCincinnati%3e">`,
+		},
+		{
+			"jsStrValue",
+			"<button onclick='alert({{.H}})'>",
+			`<button onclick='alert(&#34;\u003cHello\u003e&#34;)'>`,
+		},
+		{
+			"jsNumericValue",
+			"<button onclick='alert({{.N}})'>",
+			`<button onclick='alert( 42 )'>`,
+		},
+		{
+			"jsBoolValue",
+			"<button onclick='alert({{.T}})'>",
+			`<button onclick='alert( true )'>`,
+		},
+		{
+			"jsNilValueTyped",
+			"<button onclick='alert(typeof{{.Z}})'>",
+			`<button onclick='alert(typeof null )'>`,
+		},
+		{
+			"jsNilValueUntyped",
+			"<button onclick='alert(typeof{{.U}})'>",
+			`<button onclick='alert(typeof null )'>`,
+		},
+		{
+			"jsObjValue",
+			"<button onclick='alert({{.A}})'>",
+			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+		},
+		{
+			"jsObjValueScript",
+			"<script>alert({{.A}})</script>",
+			`<script>alert(["\u003ca\u003e","\u003cb\u003e"])</script>`,
+		},
+		{
+			"jsObjValueNotOverEscaped",
+			"<button onclick='alert({{.A | html}})'>",
+			`<button onclick='alert([&#34;\u003ca\u003e&#34;,&#34;\u003cb\u003e&#34;])'>`,
+		},
+		{
+			"jsStr",
+			"<button onclick='alert(&quot;{{.H}}&quot;)'>",
+			`<button onclick='alert(&quot;\u003cHello\u003e&quot;)'>`,
+		},
+		{
+			"badMarshaler",
+			`<button onclick='alert(1/{{.B}}in numbers)'>`,
+			`<button onclick='alert(1/ /* json: error calling MarshalJSON for type *template.badMarshaler: invalid character &#39;f&#39; looking for beginning of object key string */null in numbers)'>`,
+		},
+		{
+			"jsMarshaler",
+			`<button onclick='alert({{.M}})'>`,
+			`<button onclick='alert({&#34;\u003cfoo\u003e&#34;:&#34;O&#39;Reilly&#34;})'>`,
+		},
+		{
+			"jsStrNotUnderEscaped",
+			"<button onclick='alert({{.C | urlquery}})'>",
+			// URL escaped, then quoted for JS.
+			`<button onclick='alert(&#34;%3CCincinnati%3E&#34;)'>`,
+		},
+		{
+			"jsRe",
+			`<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
+			`<button onclick='alert(/foo\u002bbar/.test(""))'>`,
+		},
+		{
+			"jsReBlank",
+			`<script>alert(/{{""}}/.test(""));</script>`,
+			`<script>alert(/(?:)/.test(""));</script>`,
+		},
+		{
+			"jsReAmbigOk",
+			`<script>{{if true}}var x = 1{{end}}</script>`,
+			// The {if} ends in an ambiguous jsCtx but there is
+			// no slash following so we shouldn't care.
+			`<script>var x = 1</script>`,
+		},
+		{
+			"styleBidiKeywordPassed",
+			`<p style="dir: {{"ltr"}}">`,
+			`<p style="dir: ltr">`,
+		},
+		{
+			"styleBidiPropNamePassed",
+			`<p style="border-{{"left"}}: 0; border-{{"right"}}: 1in">`,
+			`<p style="border-left: 0; border-right: 1in">`,
+		},
+		{
+			"styleExpressionBlocked",
+			`<p style="width: {{"expression(alert(1337))"}}">`,
+			`<p style="width: ZgotmplZ">`,
+		},
+		{
+			"styleTagSelectorPassed",
+			`<style>{{"p"}} { color: pink }</style>`,
+			`<style>p { color: pink }</style>`,
+		},
+		{
+			"styleIDPassed",
+			`<style>p{{"#my-ID"}} { font: Arial }</style>`,
+			`<style>p#my-ID { font: Arial }</style>`,
+		},
+		{
+			"styleClassPassed",
+			`<style>p{{".my_class"}} { font: Arial }</style>`,
+			`<style>p.my_class { font: Arial }</style>`,
+		},
+		{
+			"styleQuantityPassed",
+			`<a style="left: {{"2em"}}; top: {{0}}">`,
+			`<a style="left: 2em; top: 0">`,
+		},
+		{
+			"stylePctPassed",
+			`<table style=width:{{"100%"}}>`,
+			`<table style=width:100%>`,
+		},
+		{
+			"styleColorPassed",
+			`<p style="color: {{"#8ff"}}; background: {{"#000"}}">`,
+			`<p style="color: #8ff; background: #000">`,
+		},
+		{
+			"styleObfuscatedExpressionBlocked",
+			`<p style="width: {{"  e\\78preS\x00Sio/**/n(alert(1337))"}}">`,
+			`<p style="width: ZgotmplZ">`,
+		},
+		{
+			"styleMozBindingBlocked",
+			`<p style="{{"-moz-binding(alert(1337))"}}: ...">`,
+			`<p style="ZgotmplZ: ...">`,
+		},
+		{
+			"styleObfuscatedMozBindingBlocked",
+			`<p style="{{"  -mo\\7a-B\x00I/**/nding(alert(1337))"}}: ...">`,
+			`<p style="ZgotmplZ: ...">`,
+		},
+		{
+			"styleFontNameString",
+			`<p style='font-family: "{{"Times New Roman"}}"'>`,
+			`<p style='font-family: "Times New Roman"'>`,
+		},
+		{
+			"styleFontNameString",
+			`<p style='font-family: "{{"Times New Roman"}}", "{{"sans-serif"}}"'>`,
+			`<p style='font-family: "Times New Roman", "sans-serif"'>`,
+		},
+		{
+			"styleFontNameUnquoted",
+			`<p style='font-family: {{"Times New Roman"}}'>`,
+			`<p style='font-family: Times New Roman'>`,
+		},
+		{
+			"styleURLQueryEncoded",
+			`<p style="background: url(/img?name={{"O'Reilly Animal(1)<2>.png"}})">`,
+			`<p style="background: url(/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png)">`,
+		},
+		{
+			"styleQuotedURLQueryEncoded",
+			`<p style="background: url('/img?name={{"O'Reilly Animal(1)<2>.png"}}')">`,
+			`<p style="background: url('/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png')">`,
+		},
+		{
+			"styleStrQueryEncoded",
+			`<p style="background: '/img?name={{"O'Reilly Animal(1)<2>.png"}}'">`,
+			`<p style="background: '/img?name=O%27Reilly%20Animal%281%29%3c2%3e.png'">`,
+		},
+		{
+			"styleURLBadProtocolBlocked",
+			`<a style="background: url('{{"javascript:alert(1337)"}}')">`,
+			`<a style="background: url('#ZgotmplZ')">`,
+		},
+		{
+			"styleStrBadProtocolBlocked",
+			`<a style="background: '{{"vbscript:alert(1337)"}}'">`,
+			`<a style="background: '#ZgotmplZ'">`,
+		},
+		{
+			"styleStrEncodedProtocolEncoded",
+			`<a style="background: '{{"javascript\\3a alert(1337)"}}'">`,
+			// The CSS string 'javascript\\3a alert(1337)' does not contain a colon.
+			`<a style="background: 'javascript\\3a alert\28 1337\29 '">`,
+		},
+		{
+			"styleURLGoodProtocolPassed",
+			`<a style="background: url('{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}')">`,
+			`<a style="background: url('http://oreilly.com/O%27Reilly%20Animals%281%29%3c2%3e;%7b%7d.html')">`,
+		},
+		{
+			"styleStrGoodProtocolPassed",
+			`<a style="background: '{{"http://oreilly.com/O'Reilly Animals(1)<2>;{}.html"}}'">`,
+			`<a style="background: 'http\3a\2f\2foreilly.com\2fO\27Reilly Animals\28 1\29\3c 2\3e\3b\7b\7d.html'">`,
+		},
+		{
+			"styleURLEncodedForHTMLInAttr",
+			`<a style="background: url('{{"/search?img=foo&size=icon"}}')">`,
+			`<a style="background: url('/search?img=foo&amp;size=icon')">`,
+		},
+		{
+			"styleURLNotEncodedForHTMLInCdata",
+			`<style>body { background: url('{{"/search?img=foo&size=icon"}}') }</style>`,
+			`<style>body { background: url('/search?img=foo&size=icon') }</style>`,
+		},
+		{
+			"styleURLMixedCase",
+			`<p style="background: URL(#{{.H}})">`,
+			`<p style="background: URL(#%3cHello%3e)">`,
+		},
+		{
+			"stylePropertyPairPassed",
+			`<a style='{{"color: red"}}'>`,
+			`<a style='color: red'>`,
+		},
+		{
+			"styleStrSpecialsEncoded",
+			`<a style="font-family: '{{"/**/'\";:// \\"}}', &quot;{{"/**/'\";:// \\"}}&quot;">`,
+			`<a style="font-family: '\2f**\2f\27\22\3b\3a\2f\2f  \\', &quot;\2f**\2f\27\22\3b\3a\2f\2f  \\&quot;">`,
+		},
+		{
+			"styleURLSpecialsEncoded",
+			`<a style="border-image: url({{"/**/'\";:// \\"}}), url(&quot;{{"/**/'\";:// \\"}}&quot;), url('{{"/**/'\";:// \\"}}'), 'http://www.example.com/?q={{"/**/'\";:// \\"}}''">`,
+			`<a style="border-image: url(/**/%27%22;://%20%5c), url(&quot;/**/%27%22;://%20%5c&quot;), url('/**/%27%22;://%20%5c'), 'http://www.example.com/?q=%2f%2a%2a%2f%27%22%3b%3a%2f%2f%20%5c''">`,
+		},
+		{
+			"HTML comment",
+			"<b>Hello, <!-- name of world -->{{.C}}</b>",
+			"<b>Hello, &lt;Cincinnati&gt;</b>",
+		},
+		{
+			"HTML comment not first < in text node.",
+			"<<!-- -->!--",
+			"&lt;!--",
+		},
+		{
+			"HTML normalization 1",
+			"a < b",
+			"a &lt; b",
+		},
+		{
+			"HTML normalization 2",
+			"a << b",
+			"a &lt;&lt; b",
+		},
+		{
+			"HTML normalization 3",
+			"a<<!-- --><!-- -->b",
+			"a&lt;b",
+		},
+		{
+			"HTML doctype not normalized",
+			"<!DOCTYPE html>Hello, World!",
+			"<!DOCTYPE html>Hello, World!",
+		},
+		{
+			"HTML doctype not case-insensitive",
+			"<!doCtYPE htMl>Hello, World!",
+			"<!doCtYPE htMl>Hello, World!",
+		},
+		{
+			"No doctype injection",
+			`<!{{"DOCTYPE"}}`,
+			"&lt;!DOCTYPE",
+		},
+		{
+			"Split HTML comment",
+			"<b>Hello, <!-- name of {{if .T}}city -->{{.C}}{{else}}world -->{{.W}}{{end}}</b>",
+			"<b>Hello, &lt;Cincinnati&gt;</b>",
+		},
+		{
+			"JS line comment",
+			"<script>for (;;) { if (c()) break// foo not a label\n" +
+				"foo({{.T}});}</script>",
+			"<script>for (;;) { if (c()) break\n" +
+				"foo( true );}</script>",
+		},
+		{
+			"JS multiline block comment",
+			"<script>for (;;) { if (c()) break/* foo not a label\n" +
+				" */foo({{.T}});}</script>",
+			// Newline separates break from call. If newline
+			// removed, then break will consume label leaving
+			// code invalid.
+			"<script>for (;;) { if (c()) break\n" +
+				"foo( true );}</script>",
+		},
+		{
+			"JS single-line block comment",
+			"<script>for (;;) {\n" +
+				"if (c()) break/* foo a label */foo;" +
+				"x({{.T}});}</script>",
+			// Newline separates break from call. If newline
+			// removed, then break will consume label leaving
+			// code invalid.
+			"<script>for (;;) {\n" +
+				"if (c()) break foo;" +
+				"x( true );}</script>",
+		},
+		{
+			"JS block comment flush with mathematical division",
+			"<script>var a/*b*//c\nd</script>",
+			"<script>var a /c\nd</script>",
+		},
+		{
+			"JS mixed comments",
+			"<script>var a/*b*///c\nd</script>",
+			"<script>var a \nd</script>",
+		},
+		{
+			"CSS comments",
+			"<style>p// paragraph\n" +
+				`{border: 1px/* color */{{"#00f"}}}</style>`,
+			"<style>p\n" +
+				"{border: 1px #00f}</style>",
+		},
+		{
+			"JS attr block comment",
+			`<a onclick="f(&quot;&quot;); /* alert({{.H}}) */">`,
+			// Attribute comment tests should pass if the comments
+			// are successfully elided.
+			`<a onclick="f(&quot;&quot;); /* alert() */">`,
+		},
+		{
+			"JS attr line comment",
+			`<a onclick="// alert({{.G}})">`,
+			`<a onclick="// alert()">`,
+		},
+		{
+			"CSS attr block comment",
+			`<a style="/* color: {{.H}} */">`,
+			`<a style="/* color:  */">`,
+		},
+		{
+			"CSS attr line comment",
+			`<a style="// color: {{.G}}">`,
+			`<a style="// color: ">`,
+		},
+		{
+			"HTML substitution commented out",
+			"<p><!-- {{.H}} --></p>",
+			"<p></p>",
+		},
+		{
+			"Comment ends flush with start",
+			"<!--{{.}}--><script>/*{{.}}*///{{.}}\n</script><style>/*{{.}}*///{{.}}\n</style><a onclick='/*{{.}}*///{{.}}' style='/*{{.}}*///{{.}}'>",
+			"<script> \n</script><style> \n</style><a onclick='/**///' style='/**///'>",
+		},
+		{
+			"typed HTML in text",
+			`{{.W}}`,
+			`&iexcl;<b class="foo">Hello</b>, <textarea>O'World</textarea>!`,
+		},
+		{
+			"typed HTML in attribute",
+			`<div title="{{.W}}">`,
+			`<div title="&iexcl;Hello, O&#39;World!">`,
+		},
+		{
+			"typed HTML in script",
+			`<button onclick="alert({{.W}})">`,
+			`<button onclick="alert(&#34;\u0026iexcl;\u003cb class=\&#34;foo\&#34;\u003eHello\u003c/b\u003e, \u003ctextarea\u003eO&#39;World\u003c/textarea\u003e!&#34;)">`,
+		},
+		{
+			"typed HTML in RCDATA",
+			`<textarea>{{.W}}</textarea>`,
+			`<textarea>&iexcl;&lt;b class=&#34;foo&#34;&gt;Hello&lt;/b&gt;, &lt;textarea&gt;O&#39;World&lt;/textarea&gt;!</textarea>`,
+		},
+		{
+			"range in textarea",
+			"<textarea>{{range .A}}{{.}}{{end}}</textarea>",
+			"<textarea>&lt;a&gt;&lt;b&gt;</textarea>",
+		},
+		{
+			"No tag injection",
+			`{{"10$"}}<{{"script src,evil.org/pwnd.js"}}...`,
+			`10$&lt;script src,evil.org/pwnd.js...`,
+		},
+		{
+			"No comment injection",
+			`<{{"!--"}}`,
+			`&lt;!--`,
+		},
+		{
+			"No RCDATA end tag injection",
+			`<textarea><{{"/textarea "}}...</textarea>`,
+			`<textarea>&lt;/textarea ...</textarea>`,
+		},
+		{
+			"optional attrs",
+			`<img class="{{"iconClass"}}"` +
+				`{{if .T}} id="{{"<iconId>"}}"{{end}}` +
+				// Double quotes inside if/else.
+				` src=` +
+				`{{if .T}}"?{{"<iconPath>"}}"` +
+				`{{else}}"images/cleardot.gif"{{end}}` +
+				// Missing space before title, but it is not a
+				// part of the src attribute.
+				`{{if .T}}title="{{"<title>"}}"{{end}}` +
+				// Quotes outside if/else.
+				` alt="` +
+				`{{if .T}}{{"<alt>"}}` +
+				`{{else}}{{if .F}}{{"<title>"}}{{end}}` +
+				`{{end}}"` +
+				`>`,
+			`<img class="iconClass" id="&lt;iconId&gt;" src="?%3ciconPath%3e"title="&lt;title&gt;" alt="&lt;alt&gt;">`,
+		},
+		{
+			"conditional valueless attr name",
+			`<input{{if .T}} checked{{end}} name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"conditional dynamic valueless attr name 1",
+			`<input{{if .T}} {{"checked"}}{{end}} name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"conditional dynamic valueless attr name 2",
+			`<input {{if .T}}{{"checked"}} {{end}}name=n>`,
+			`<input checked name=n>`,
+		},
+		{
+			"dynamic attribute name",
+			`<img on{{"load"}}="alert({{"loaded"}})">`,
+			// Treated as JS since quotes are inserted.
+			`<img onload="alert(&#34;loaded&#34;)">`,
+		},
+		{
+			"bad dynamic attribute name 1",
+			// Allow checked, selected, disabled, but not JS or
+			// CSS attributes.
+			`<input {{"onchange"}}="{{"doEvil()"}}">`,
+			`<input ZgotmplZ="doEvil()">`,
+		},
+		{
+			"bad dynamic attribute name 2",
+			`<div {{"sTyle"}}="{{"color: expression(alert(1337))"}}">`,
+			`<div ZgotmplZ="color: expression(alert(1337))">`,
+		},
+		{
+			"bad dynamic attribute name 3",
+			// Allow title or alt, but not a URL.
+			`<img {{"src"}}="{{"javascript:doEvil()"}}">`,
+			`<img ZgotmplZ="javascript:doEvil()">`,
+		},
+		{
+			"bad dynamic attribute name 4",
+			// Structure preservation requires values to associate
+			// with a consistent attribute.
+			`<input checked {{""}}="Whose value am I?">`,
+			`<input checked ZgotmplZ="Whose value am I?">`,
+		},
+		{
+			"dynamic element name",
+			`<h{{3}}><table><t{{"head"}}>...</h{{3}}>`,
+			`<h3><table><thead>...</h3>`,
+		},
+		{
+			"bad dynamic element name",
+			// Dynamic element names are typically used to switch
+			// between (thead, tfoot, tbody), (ul, ol), (th, td),
+			// and other replaceable sets.
+			// We do not currently easily support (ul, ol).
+			// If we do change to support that, this test should
+			// catch failures to filter out special tag names which
+			// would violate the structure preservation property --
+			// if any special tag name could be substituted, then
+			// the content could be raw text/RCDATA for some inputs
+			// and regular HTML content for others.
+			`<{{"script"}}>{{"doEvil()"}}</{{"script"}}>`,
+			`&lt;script>doEvil()&lt;/script>`,
+		},
+		{
+			"srcset bad URL in second position",
+			`<img srcset="{{"/not-an-image#,javascript:alert(1)"}}">`,
+			// The second URL is also filtered.
+			`<img srcset="/not-an-image#,#ZgotmplZ">`,
+		},
+		{
+			"srcset buffer growth",
+			`<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
+			`<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
+		},
+	}
+
+	for _, test := range tests {
+		tmpl := New(test.name)
+		tmpl = Must(tmpl.Parse(test.input))
+		// Check for bug 6459: Tree field was not set in Parse.
+		if tmpl.Tree != tmpl.text.Tree {
+			t.Errorf("%s: tree not set properly", test.name)
+			continue
+		}
+		b := new(bytes.Buffer)
+		if err := tmpl.Execute(b, data); err != nil {
+			t.Errorf("%s: template execution failed: %s", test.name, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+			continue
+		}
+		b.Reset()
+		if err := tmpl.Execute(b, pdata); err != nil {
+			t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
+			continue
+		}
+		if tmpl.Tree != tmpl.text.Tree {
+			t.Errorf("%s: tree mismatch", test.name)
+			continue
+		}
+	}
+}
+
+func TestEscapeMap(t *testing.T) {
+	data := map[string]string{
+		"html":     `<h1>Hi!</h1>`,
+		"urlquery": `http://www.foo.com/index.html?title=main`,
+	}
+	for _, test := range [...]struct {
+		desc, input, output string
+	}{
+		// covering issue 20323
+		{
+			"field with predefined escaper name 1",
+			`{{.html | print}}`,
+			`&lt;h1&gt;Hi!&lt;/h1&gt;`,
+		},
+		// covering issue 20323
+		{
+			"field with predefined escaper name 2",
+			`{{.urlquery | print}}`,
+			`http://www.foo.com/index.html?title=main`,
+		},
+	} {
+		tmpl := Must(New("").Parse(test.input))
+		b := new(bytes.Buffer)
+		if err := tmpl.Execute(b, data); err != nil {
+			t.Errorf("%s: template execution failed: %s", test.desc, err)
+			continue
+		}
+		if w, g := test.output, b.String(); w != g {
+			t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.desc, w, g)
+			continue
+		}
+	}
+}
+
+func TestEscapeSet(t *testing.T) {
+	type dataItem struct {
+		Children []*dataItem
+		X        string
+	}
+
+	data := dataItem{
+		Children: []*dataItem{
+			{X: "foo"},
+			{X: "<bar>"},
+			{
+				Children: []*dataItem{
+					{X: "baz"},
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		inputs map[string]string
+		want   string
+	}{
+		// The trivial set.
+		{
+			map[string]string{
+				"main": ``,
+			},
+			``,
+		},
+		// A template called in the start context.
+		{
+			map[string]string{
+				"main":   `Hello, {{template "helper"}}!`,
+				"helper": `{{"<World>"}}`,
+			},
+			`Hello, &lt;World&gt;!`,
+		},
+		// A template called in a context other than the start.
+		{
+			map[string]string{
+				"main": `<a onclick='a = {{template "helper"}};'>`,
+				// Not a valid top level HTML template.
+				// "<b" is not a full tag.
+				"helper": `{{"<a>"}}<b`,
+			},
+			`<a onclick='a = &#34;\u003ca\u003e&#34;<b;'>`,
+		},
+		// A recursive template that ends in its start context.
+		{
+			map[string]string{
+				"main": `{{range .Children}}{{template "main" .}}{{else}}{{.X}} {{end}}`,
+			},
+			`foo &lt;bar&gt; baz `,
+		},
+		// A recursive helper template that ends in its start context.
+		{
+			map[string]string{
+				"main":   `{{template "helper" .}}`,
+				"helper": `{{if .Children}}<ul>{{range .Children}}<li>{{template "main" .}}</li>{{end}}</ul>{{else}}{{.X}}{{end}}`,
+			},
+			`<ul><li>foo</li><li>&lt;bar&gt;</li><li><ul><li>baz</li></ul></li></ul>`,
+		},
+		// Co-recursive templates that end in their start context.
+		{
+			map[string]string{
+				"main":   `<blockquote>{{range .Children}}{{template "helper" .}}{{end}}</blockquote>`,
+				"helper": `{{if .Children}}{{template "main" .}}{{else}}{{.X}}<br>{{end}}`,
+			},
+			`<blockquote>foo<br>&lt;bar&gt;<br><blockquote>baz<br></blockquote></blockquote>`,
+		},
+		// A template that is called in two different contexts.
+		{
+			map[string]string{
+				"main":   `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
+				"helper": `{{11}} of {{"<100>"}}`,
+			},
+			`<button onclick="title='11 of \u003c100\u003e'; ...">11 of &lt;100&gt;</button>`,
+		},
+		// A non-recursive template that ends in a different context.
+		// helper starts in jsCtxRegexp and ends in jsCtxDivOp.
+		{
+			map[string]string{
+				"main":   `<script>var x={{template "helper"}}/{{"42"}};</script>`,
+				"helper": "{{126}}",
+			},
+			`<script>var x= 126 /"42";</script>`,
+		},
+		// A recursive template that ends in a similar context.
+		{
+			map[string]string{
+				"main":      `<script>var x=[{{template "countdown" 4}}];</script>`,
+				"countdown": `{{.}}{{if .}},{{template "countdown" . | pred}}{{end}}`,
+			},
+			`<script>var x=[ 4 , 3 , 2 , 1 , 0 ];</script>`,
+		},
+		// A recursive template that ends in a different context.
+		/*
+			{
+				map[string]string{
+					"main":   `<a href="/foo{{template "helper" .}}">`,
+					"helper": `{{if .Children}}{{range .Children}}{{template "helper" .}}{{end}}{{else}}?x={{.X}}{{end}}`,
+				},
+				`<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
+			},
+		*/
+	}
+
+	// pred is a template function that returns the predecessor of a
+	// natural number for testing recursive templates.
+	fns := FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
+		if len(a) == 1 {
+			if i, _ := a[0].(int); i > 0 {
+				return i - 1, nil
+			}
+		}
+		return nil, fmt.Errorf("undefined pred(%v)", a)
+	}}
+
+	for _, test := range tests {
+		source := ""
+		for name, body := range test.inputs {
+			source += fmt.Sprintf("{{define %q}}%s{{end}} ", name, body)
+		}
+		tmpl, err := New("root").Funcs(fns).Parse(source)
+		if err != nil {
+			t.Errorf("error parsing %q: %v", source, err)
+			continue
+		}
+		var b bytes.Buffer
+
+		if err := tmpl.ExecuteTemplate(&b, "main", data); err != nil {
+			t.Errorf("%q executing %v", err.Error(), tmpl.Lookup("main"))
+			continue
+		}
+		if got := b.String(); test.want != got {
+			t.Errorf("want\n\t%q\ngot\n\t%q", test.want, got)
+		}
+	}
+
+}
+
+func TestErrors(t *testing.T) {
+	tests := []struct {
+		input string
+		err   string
+	}{
+		// Non-error cases.
+		{
+			"{{if .Cond}}<a>{{else}}<b>{{end}}",
+			"",
+		},
+		{
+			"{{if .Cond}}<a>{{end}}",
+			"",
+		},
+		{
+			"{{if .Cond}}{{else}}<b>{{end}}",
+			"",
+		},
+		{
+			"{{with .Cond}}<div>{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a>{{end}}",
+			"",
+		},
+		{
+			"<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
+			"",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
+			"",
+		},
+		// Error cases.
+		{
+			"{{if .Cond}}<a{{end}}",
+			"z:1:5: {{if}} branches",
+		},
+		{
+			"{{if .Cond}}\n{{else}}\n<a{{end}}",
+			"z:1:5: {{if}} branches",
+		},
+		{
+			// Missing quote in the else branch.
+			`{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`,
+			"z:1:5: {{if}} branches",
+		},
+		{
+			// Different kind of attribute: href implies a URL.
+			"<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>",
+			"z:1:8: {{if}} branches",
+		},
+		{
+			"\n{{with .X}}<a{{end}}",
+			"z:2:7: {{with}} branches",
+		},
+		{
+			"\n{{with .X}}<a>{{else}}<a{{end}}",
+			"z:2:7: {{with}} branches",
+		},
+		{
+			"{{range .Items}}<a{{end}}",
+			`z:1: on range loop re-entry: "<" in attribute name: "<a"`,
+		},
+		{
+			"\n{{range .Items}} x='<a{{end}}",
+			"z:2:8: on range loop re-entry: {{range}} branches",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
+			"z:1:29: at range loop break: {{range}} branches end in different contexts",
+		},
+		{
+			"{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
+			"z:1:29: at range loop continue: {{range}} branches end in different contexts",
+		},
+		{
+			"<a b=1 c={{.H}}",
+			"z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+		},
+		{
+			"<script>foo();",
+			"z: ends in a non-text context: {stateJS",
+		},
+		{
+			`<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
+			"z:1:47: {{.H}} appears in an ambiguous context within a URL",
+		},
+		{
+			`<a onclick="alert('Hello \`,
+			`unfinished escape sequence in JS string: "Hello \\"`,
+		},
+		{
+			`<a onclick='alert("Hello\, World\`,
+			`unfinished escape sequence in JS string: "Hello\\, World\\"`,
+		},
+		{
+			`<a onclick='alert(/x+\`,
+			`unfinished escape sequence in JS string: "x+\\"`,
+		},
+		{
+			`<a onclick="/foo[\]/`,
+			`unfinished JS regexp charset: "foo[\\]/"`,
+		},
+		{
+			// It is ambiguous whether 1.5 should be 1\.5 or 1.5.
+			// Either `var x = 1/- 1.5 /i.test(x)`
+			// where `i.test(x)` is a method call of reference i,
+			// or `/-1\.5/i.test(x)` which is a method call on a
+			// case insensitive regular expression.
+			`<script>{{if false}}var x = 1{{end}}/-{{"1.5"}}/i.test(x)</script>`,
+			`'/' could start a division or regexp: "/-"`,
+		},
+		{
+			`{{template "foo"}}`,
+			"z:1:11: no such template \"foo\"",
+		},
+		{
+			`<div{{template "y"}}>` +
+				// Illegal starting in stateTag but not in stateText.
+				`{{define "y"}} foo<b{{end}}`,
+			`"<" in attribute name: " foo<b"`,
+		},
+		{
+			`<script>reverseList = [{{template "t"}}]</script>` +
+				// Missing " after recursive call.
+				`{{define "t"}}{{if .Tail}}{{template "t" .Tail}}{{end}}{{.Head}}",{{end}}`,
+			`: cannot compute output context for template t$htmltemplate_stateJS_elementScript`,
+		},
+		{
+			`<input type=button value=onclick=>`,
+			`html/template:z: "=" in unquoted attr: "onclick="`,
+		},
+		{
+			`<input type=button value= onclick=>`,
+			`html/template:z: "=" in unquoted attr: "onclick="`,
+		},
+		{
+			`<input type=button value= 1+1=2>`,
+			`html/template:z: "=" in unquoted attr: "1+1=2"`,
+		},
+		{
+			"<a class=`foo>",
+			"html/template:z: \"`\" in unquoted attr: \"`foo\"",
+		},
+		{
+			`<a style=font:'Arial'>`,
+			`html/template:z: "'" in unquoted attr: "font:'Arial'"`,
+		},
+		{
+			`<a=foo>`,
+			`: expected space, attr name, or end of tag, but got "=foo>"`,
+		},
+		{
+			`Hello, {{. | urlquery | print}}!`,
+			// urlquery is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "urlquery" disallowed in template`,
+		},
+		{
+			`Hello, {{. | html | print}}!`,
+			// html is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`Hello, {{html . | print}}!`,
+			// A direct call to html is disallowed if it is not the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`<div class={{. | html}}>Hello<div>`,
+			// html is disallowed in a pipeline that is in an unquoted attribute context,
+			// even if it is the last command in the pipeline.
+			`predefined escaper "html" disallowed in template`,
+		},
+		{
+			`Hello, {{. | urlquery | html}}!`,
+			// html is allowed since it is the last command in the pipeline, but urlquery is not.
+			`predefined escaper "urlquery" disallowed in template`,
+		},
+	}
+	for _, test := range tests {
+		buf := new(bytes.Buffer)
+		tmpl, err := New("z").Parse(test.input)
+		if err != nil {
+			t.Errorf("input=%q: unexpected parse error %s\n", test.input, err)
+			continue
+		}
+		err = tmpl.Execute(buf, nil)
+		var got string
+		if err != nil {
+			got = err.Error()
+		}
+		if test.err == "" {
+			if got != "" {
+				t.Errorf("input=%q: unexpected error %q", test.input, got)
+			}
+			continue
+		}
+		if !strings.Contains(got, test.err) {
+			t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err)
+			continue
+		}
+		// Check that we get the same error if we call Execute again.
+		if err := tmpl.Execute(buf, nil); err == nil || err.Error() != got {
+			t.Errorf("input=%q: unexpected error on second call %q", test.input, err)
+
+		}
+	}
+}
+
+func TestEscapeText(t *testing.T) {
+	tests := []struct {
+		input  string
+		output context
+	}{
+		{
+			``,
+			context{},
+		},
+		{
+			`Hello, World!`,
+			context{},
+		},
+		{
+			// An orphaned "<" is OK.
+			`I <3 Ponies!`,
+			context{},
+		},
+		{
+			`<a`,
+			context{state: stateTag},
+		},
+		{
+			`<a `,
+			context{state: stateTag},
+		},
+		{
+			`<a>`,
+			context{state: stateText},
+		},
+		{
+			`<a href`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a on`,
+			context{state: stateAttrName, attr: attrScript},
+		},
+		{
+			`<a href `,
+			context{state: stateAfterName, attr: attrURL},
+		},
+		{
+			`<a style  =  `,
+			context{state: stateBeforeValue, attr: attrStyle},
+		},
+		{
+			`<a href=`,
+			context{state: stateBeforeValue, attr: attrURL},
+		},
+		{
+			`<a href=x`,
+			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href=x `,
+			context{state: stateTag},
+		},
+		{
+			`<a href=>`,
+			context{state: stateText},
+		},
+		{
+			`<a href=x>`,
+			context{state: stateText},
+		},
+		{
+			`<a href ='`,
+			context{state: stateURL, delim: delimSingleQuote, attr: attrURL},
+		},
+		{
+			`<a href=''`,
+			context{state: stateTag},
+		},
+		{
+			`<a href= "`,
+			context{state: stateURL, delim: delimDoubleQuote, attr: attrURL},
+		},
+		{
+			`<a href=""`,
+			context{state: stateTag},
+		},
+		{
+			`<a title="`,
+			context{state: stateAttr, delim: delimDoubleQuote},
+		},
+		{
+			`<a HREF='http:`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a Href='/`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href='"`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="'`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href='&apos;`,
+			context{state: stateURL, delim: delimSingleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="&quot;`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href="&#34;`,
+			context{state: stateURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<a href=&quot;`,
+			context{state: stateURL, delim: delimSpaceOrTagEnd, urlPart: urlPartPreQuery, attr: attrURL},
+		},
+		{
+			`<img alt="1">`,
+			context{state: stateText},
+		},
+		{
+			`<img alt="1>"`,
+			context{state: stateTag},
+		},
+		{
+			`<img alt="1>">`,
+			context{state: stateText},
+		},
+		{
+			`<input checked type="checkbox"`,
+			context{state: stateTag},
+		},
+		{
+			`<a onclick="`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="//foo`,
+			context{state: stateJSLineCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\n",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\r\n",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			"<a onclick='//\u2028",
+			context{state: stateJS, delim: delimSingleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/*`,
+			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/*/`,
+			context{state: stateJSBlockCmt, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/**/`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onkeypress="&quot;`,
+			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick='&quot;foo&quot;`,
+			context{state: stateJS, delim: delimSingleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick=&#39;foo&#39;`,
+			context{state: stateJS, delim: delimSpaceOrTagEnd, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick=&#39;foo`,
+			context{state: stateJSSqStr, delim: delimSpaceOrTagEnd, attr: attrScript},
+		},
+		{
+			`<a onclick="&quot;foo'`,
+			context{state: stateJSDqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo&quot;`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<A ONCLICK="'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo'`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo\'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="'foo\'`,
+			context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo/`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<script>/foo/ /=`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<a onclick="1 /foo`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="1 /*c*/ /foo`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo[/]`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo\/`,
+			context{state: stateJSRegexp, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<a onclick="/foo/`,
+			context{state: stateJS, delim: delimDoubleQuote, jsCtx: jsCtxDivOp, attr: attrScript},
+		},
+		{
+			`<input checked style="`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="//`,
+			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="//</script>`,
+			context{state: stateCSSLineCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			"<a style='//\n",
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			"<a style='//\r",
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/*`,
+			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/*/`,
+			context{state: stateCSSBlockCmt, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="/**/`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: '`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: &quot;`,
+			context{state: stateCSSDqStr, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: '/foo?img=`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
+		},
+		{
+			`<a style="background: '/`,
+			context{state: stateCSSSqStr, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(&#x22;/`,
+			context{state: stateCSSDqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/`,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/)`,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('/ `,
+			context{state: stateCSSSqURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(/`,
+			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartPreQuery, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( `,
+			context{state: stateCSSURL, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( /image?name=`,
+			context{state: stateCSSURL, delim: delimDoubleQuote, urlPart: urlPartQueryOrFrag, attr: attrStyle},
+		},
+		{
+			`<a style="background: url(x)`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url('x'`,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<a style="background: url( x `,
+			context{state: stateCSS, delim: delimDoubleQuote, attr: attrStyle},
+		},
+		{
+			`<!-- foo`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!-->`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!--->`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<!-- foo -->`,
+			context{state: stateText},
+		},
+		{
+			`<script`,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script src="foo.js" `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script src='foo.js' `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script type=text/javascript `,
+			context{state: stateTag, element: elementScript},
+		},
+		{
+			`<script>`,
+			context{state: stateJS, jsCtx: jsCtxRegexp, element: elementScript},
+		},
+		{
+			`<script>foo`,
+			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+		},
+		{
+			`<script>foo</script>`,
+			context{state: stateText},
+		},
+		{
+			`<script>foo</script><!--`,
+			context{state: stateHTMLCmt},
+		},
+		{
+			`<script>document.write("<p>foo</p>");`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<script>document.write("<p>foo<\/script>");`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<script>document.write("<script>alert(1)</script>");`,
+			context{state: stateText},
+		},
+		{
+			`<script type="golang.org/x/website/internal/backport/text/template">`,
+			context{state: stateText},
+		},
+		// covering issue 19968
+		{
+			`<script type="TEXT/JAVASCRIPT">`,
+			context{state: stateJS, element: elementScript},
+		},
+		// covering issue 19965
+		{
+			`<script TYPE="golang.org/x/website/internal/backport/text/template">`,
+			context{state: stateText},
+		},
+		{
+			`<script type="notjs">`,
+			context{state: stateText},
+		},
+		{
+			`<Script>`,
+			context{state: stateJS, element: elementScript},
+		},
+		{
+			`<SCRIPT>foo`,
+			context{state: stateJS, jsCtx: jsCtxDivOp, element: elementScript},
+		},
+		{
+			`<textarea>value`,
+			context{state: stateRCDATA, element: elementTextarea},
+		},
+		{
+			`<textarea>value</TEXTAREA>`,
+			context{state: stateText},
+		},
+		{
+			`<textarea name=html><b`,
+			context{state: stateRCDATA, element: elementTextarea},
+		},
+		{
+			`<title>value`,
+			context{state: stateRCDATA, element: elementTitle},
+		},
+		{
+			`<style>value`,
+			context{state: stateCSS, element: elementStyle},
+		},
+		{
+			`<a xlink:href`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlns`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlns:foo`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a xmlnsxyz`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a data-url`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a data-iconUri`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a data-urlItem`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a g:url`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:iconUri`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:urlItem`,
+			context{state: stateAttrName, attr: attrURL},
+		},
+		{
+			`<a g:value`,
+			context{state: stateAttrName},
+		},
+		{
+			`<a svg:style='`,
+			context{state: stateCSS, delim: delimSingleQuote, attr: attrStyle},
+		},
+		{
+			`<svg:font-face`,
+			context{state: stateTag},
+		},
+		{
+			`<svg:a svg:onclick="`,
+			context{state: stateJS, delim: delimDoubleQuote, attr: attrScript},
+		},
+		{
+			`<svg:a svg:onclick="x()">`,
+			context{},
+		},
+	}
+
+	for _, test := range tests {
+		b, e := []byte(test.input), makeEscaper(nil)
+		c := e.escapeText(context{}, &parse.TextNode{NodeType: parse.NodeText, Text: b})
+		if !test.output.eq(c) {
+			t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
+			continue
+		}
+		if test.input != string(b) {
+			t.Errorf("input %q: text node was modified: want %q got %q", test.input, test.input, b)
+			continue
+		}
+	}
+}
+
+func TestEnsurePipelineContains(t *testing.T) {
+	tests := []struct {
+		input, output string
+		ids           []string
+	}{
+		{
+			"{{.X}}",
+			".X",
+			[]string{},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{},
+		},
+		{
+			"{{.X}}",
+			".X | html",
+			[]string{"html"},
+		},
+		{
+			"{{html .X}}",
+			"_eval_args_ .X | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{html .X .Y .Z}}",
+			"_eval_args_ .X .Y .Z | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{.X | print}}",
+			".X | print | urlquery",
+			[]string{"urlquery"},
+		},
+		{
+			"{{.X | print | urlquery}}",
+			".X | print | urlquery",
+			[]string{"urlquery"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | html | urlquery",
+			[]string{"html", "urlquery"},
+		},
+		{
+			"{{.X | print 2 | .f 3}}",
+			".X | print 2 | .f 3 | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		{
+			// covering issue 10801
+			"{{.X | println.x }}",
+			".X | println.x | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		{
+			// covering issue 10801
+			"{{.X | (print 12 | println).x }}",
+			".X | (print 12 | println).x | urlquery | html",
+			[]string{"urlquery", "html"},
+		},
+		// The following test cases ensure that the merging of internal escapers
+		// with the predefined "html" and "urlquery" escapers is correct.
+		{
+			"{{.X | urlquery}}",
+			".X | _html_template_urlfilter | urlquery",
+			[]string{"_html_template_urlfilter", "_html_template_urlnormalizer"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery | _html_template_urlfilter | _html_template_cssescaper",
+			[]string{"_html_template_urlfilter", "_html_template_cssescaper"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery",
+			[]string{"_html_template_urlnormalizer"},
+		},
+		{
+			"{{.X | urlquery}}",
+			".X | urlquery",
+			[]string{"_html_template_urlescaper"},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{"_html_template_htmlescaper"},
+		},
+		{
+			"{{.X | html}}",
+			".X | html",
+			[]string{"_html_template_rcdataescaper"},
+		},
+	}
+	for i, test := range tests {
+		tmpl := template.Must(template.New("test").Parse(test.input))
+		action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
+		if !ok {
+			t.Errorf("First node is not an action: %s", test.input)
+			continue
+		}
+		pipe := action.Pipe
+		originalIDs := make([]string, len(test.ids))
+		copy(originalIDs, test.ids)
+		ensurePipelineContains(pipe, test.ids)
+		got := pipe.String()
+		if got != test.output {
+			t.Errorf("#%d: %s, %v: want\n\t%s\ngot\n\t%s", i, test.input, originalIDs, test.output, got)
+		}
+	}
+}
+
+func TestEscapeMalformedPipelines(t *testing.T) {
+	tests := []string{
+		"{{ 0 | $ }}",
+		"{{ 0 | $ | urlquery }}",
+		"{{ 0 | (nil) }}",
+		"{{ 0 | (nil) | html }}",
+	}
+	for _, test := range tests {
+		var b bytes.Buffer
+		tmpl, err := New("test").Parse(test)
+		if err != nil {
+			t.Errorf("failed to parse set: %q", err)
+		}
+		err = tmpl.Execute(&b, nil)
+		if err == nil {
+			t.Errorf("Expected error for %q", test)
+		}
+	}
+}
+
+func TestEscapeErrorsNotIgnorable(t *testing.T) {
+	var b bytes.Buffer
+	tmpl, _ := New("dangerous").Parse("<a")
+	err := tmpl.Execute(&b, nil)
+	if err == nil {
+		t.Errorf("Expected error")
+	} else if b.Len() != 0 {
+		t.Errorf("Emitted output despite escaping failure")
+	}
+}
+
+func TestEscapeSetErrorsNotIgnorable(t *testing.T) {
+	var b bytes.Buffer
+	tmpl, err := New("root").Parse(`{{define "t"}}<a{{end}}`)
+	if err != nil {
+		t.Errorf("failed to parse set: %q", err)
+	}
+	err = tmpl.ExecuteTemplate(&b, "t", nil)
+	if err == nil {
+		t.Errorf("Expected error")
+	} else if b.Len() != 0 {
+		t.Errorf("Emitted output despite escaping failure")
+	}
+}
+
+func TestRedundantFuncs(t *testing.T) {
+	inputs := []interface{}{
+		"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+			"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+			` !"#$%&'()*+,-./` +
+			`0123456789:;<=>?` +
+			`@ABCDEFGHIJKLMNO` +
+			`PQRSTUVWXYZ[\]^_` +
+			"`abcdefghijklmno" +
+			"pqrstuvwxyz{|}~\x7f" +
+			"\u00A0\u0100\u2028\u2029\ufeff\ufdec\ufffd\uffff\U0001D11E" +
+			"&amp;%22\\",
+		CSS(`a[href =~ "//example.com"]#foo`),
+		HTML(`Hello, <b>World</b> &amp;tc!`),
+		HTMLAttr(` dir="ltr"`),
+		JS(`c && alert("Hello, World!");`),
+		JSStr(`Hello, World & O'Reilly\x21`),
+		URL(`greeting=H%69&addressee=(World)`),
+	}
+
+	for n0, m := range redundantFuncs {
+		f0 := funcMap[n0].(func(...interface{}) string)
+		for n1 := range m {
+			f1 := funcMap[n1].(func(...interface{}) string)
+			for _, input := range inputs {
+				want := f0(input)
+				if got := f1(want); want != got {
+					t.Errorf("%s %s with %T %q: want\n\t%q,\ngot\n\t%q", n0, n1, input, input, want, got)
+				}
+			}
+		}
+	}
+}
+
+func TestIndirectPrint(t *testing.T) {
+	a := 3
+	ap := &a
+	b := "hello"
+	bp := &b
+	bpp := &bp
+	tmpl := Must(New("t").Parse(`{{.}}`))
+	var buf bytes.Buffer
+	err := tmpl.Execute(&buf, ap)
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	} else if buf.String() != "3" {
+		t.Errorf(`Expected "3"; got %q`, buf.String())
+	}
+	buf.Reset()
+	err = tmpl.Execute(&buf, bpp)
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err)
+	} else if buf.String() != "hello" {
+		t.Errorf(`Expected "hello"; got %q`, buf.String())
+	}
+}
+
+// This is a test for issue 3272.
+func TestEmptyTemplateHTML(t *testing.T) {
+	page := Must(New("page").ParseFiles(os.DevNull))
+	if err := page.ExecuteTemplate(os.Stdout, "page", "nothing"); err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+type Issue7379 int
+
+func (Issue7379) SomeMethod(x int) string {
+	return fmt.Sprintf("<%d>", x)
+}
+
+// This is a test for issue 7379: type assertion error caused panic, and then
+// the code to handle the panic breaks escaping. It's hard to see the second
+// problem once the first is fixed, but its fix is trivial so we let that go. See
+// the discussion for issue 7379.
+func TestPipeToMethodIsEscaped(t *testing.T) {
+	tmpl := Must(New("x").Parse("<html>{{0 | .SomeMethod}}</html>\n"))
+	tryExec := func() string {
+		defer func() {
+			panicValue := recover()
+			if panicValue != nil {
+				t.Errorf("panicked: %v\n", panicValue)
+			}
+		}()
+		var b bytes.Buffer
+		tmpl.Execute(&b, Issue7379(0))
+		return b.String()
+	}
+	for i := 0; i < 3; i++ {
+		str := tryExec()
+		const expect = "<html>&lt;0&gt;</html>\n"
+		if str != expect {
+			t.Errorf("expected %q got %q", expect, str)
+		}
+	}
+}
+
+// Unlike text/template, html/template crashed if given an incomplete
+// template, that is, a template that had been named but not given any content.
+// This is issue #10204.
+func TestErrorOnUndefined(t *testing.T) {
+	tmpl := New("undefined")
+
+	err := tmpl.Execute(nil, nil)
+	if err == nil {
+		t.Error("expected error")
+	} else if !strings.Contains(err.Error(), "incomplete") {
+		t.Errorf("expected error about incomplete template; got %s", err)
+	}
+}
+
+// This covers issue #20842.
+func TestIdempotentExecute(t *testing.T) {
+	tmpl := Must(New("").
+		Parse(`{{define "main"}}<body>{{template "hello"}}</body>{{end}}`))
+	Must(tmpl.
+		Parse(`{{define "hello"}}Hello, {{"Ladies & Gentlemen!"}}{{end}}`))
+	got := new(bytes.Buffer)
+	var err error
+	// Ensure that "hello" produces the same output when executed twice.
+	want := "Hello, Ladies &amp; Gentlemen!"
+	for i := 0; i < 2; i++ {
+		err = tmpl.ExecuteTemplate(got, "hello", nil)
+		if err != nil {
+			t.Errorf("unexpected error: %s", err)
+		}
+		if got.String() != want {
+			t.Errorf("after executing template \"hello\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
+		}
+		got.Reset()
+	}
+	// Ensure that the implicit re-execution of "hello" during the execution of
+	// "main" does not cause the output of "hello" to change.
+	err = tmpl.ExecuteTemplate(got, "main", nil)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	// If the HTML escaper is added again to the action {{"Ladies & Gentlemen!"}},
+	// we would expect to see the ampersand overescaped to "&amp;amp;".
+	want = "<body>Hello, Ladies &amp; Gentlemen!</body>"
+	if got.String() != want {
+		t.Errorf("after executing template \"main\", got:\n\t%q\nwant:\n\t%q\n", got.String(), want)
+	}
+}
+
+func BenchmarkEscapedExecute(b *testing.B) {
+	tmpl := Must(New("t").Parse(`<a onclick="alert('{{.}}')">{{.}}</a>`))
+	var buf bytes.Buffer
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		tmpl.Execute(&buf, "foo & 'bar' & baz")
+		buf.Reset()
+	}
+}
+
+// Covers issue 22780.
+func TestOrphanedTemplate(t *testing.T) {
+	t1 := Must(New("foo").Parse(`<a href="{{.}}">link1</a>`))
+	t2 := Must(t1.New("foo").Parse(`bar`))
+
+	var b bytes.Buffer
+	const wantError = `template: "foo" is an incomplete or empty template`
+	if err := t1.Execute(&b, "javascript:alert(1)"); err == nil {
+		t.Fatal("expected error executing t1")
+	} else if gotError := err.Error(); gotError != wantError {
+		t.Fatalf("got t1 execution error:\n\t%s\nwant:\n\t%s", gotError, wantError)
+	}
+	b.Reset()
+	if err := t2.Execute(&b, nil); err != nil {
+		t.Fatalf("error executing t2: %s", err)
+	}
+	const want = "bar"
+	if got := b.String(); got != want {
+		t.Fatalf("t2 rendered %q, want %q", got, want)
+	}
+}
+
+// Covers issue 21844.
+func TestAliasedParseTreeDoesNotOverescape(t *testing.T) {
+	const (
+		tmplText = `{{.}}`
+		data     = `<baz>`
+		want     = `&lt;baz&gt;`
+	)
+	// Templates "foo" and "bar" both alias the same underlying parse tree.
+	tpl := Must(New("foo").Parse(tmplText))
+	if _, err := tpl.AddParseTree("bar", tpl.Tree); err != nil {
+		t.Fatalf("AddParseTree error: %v", err)
+	}
+	var b1, b2 bytes.Buffer
+	if err := tpl.ExecuteTemplate(&b1, "foo", data); err != nil {
+		t.Fatalf(`ExecuteTemplate failed for "foo": %v`, err)
+	}
+	if err := tpl.ExecuteTemplate(&b2, "bar", data); err != nil {
+		t.Fatalf(`ExecuteTemplate failed for "bar": %v`, err)
+	}
+	got1, got2 := b1.String(), b2.String()
+	if got1 != want {
+		t.Fatalf(`Template "foo" rendered %q, want %q`, got1, want)
+	}
+	if got1 != got2 {
+		t.Fatalf(`Template "foo" and "bar" rendered %q and %q respectively, expected equal values`, got1, got2)
+	}
+}
diff --git a/internal/backport/html/template/example_test.go b/internal/backport/html/template/example_test.go
new file mode 100644
index 0000000..dbb6511
--- /dev/null
+++ b/internal/backport/html/template/example_test.go
@@ -0,0 +1,182 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"golang.org/x/website/internal/backport/html/template"
+)
+
+func Example() {
+	const tpl = `
+<!DOCTYPE html>
+<html>
+	<head>
+		<meta charset="UTF-8">
+		<title>{{.Title}}</title>
+	</head>
+	<body>
+		{{range .Items}}<div>{{ . }}</div>{{else}}<div><strong>no rows</strong></div>{{end}}
+	</body>
+</html>`
+
+	check := func(err error) {
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	t, err := template.New("webpage").Parse(tpl)
+	check(err)
+
+	data := struct {
+		Title string
+		Items []string
+	}{
+		Title: "My page",
+		Items: []string{
+			"My photos",
+			"My blog",
+		},
+	}
+
+	err = t.Execute(os.Stdout, data)
+	check(err)
+
+	noItems := struct {
+		Title string
+		Items []string
+	}{
+		Title: "My another page",
+		Items: []string{},
+	}
+
+	err = t.Execute(os.Stdout, noItems)
+	check(err)
+
+	// Output:
+	// <!DOCTYPE html>
+	// <html>
+	// 	<head>
+	// 		<meta charset="UTF-8">
+	// 		<title>My page</title>
+	// 	</head>
+	// 	<body>
+	// 		<div>My photos</div><div>My blog</div>
+	// 	</body>
+	// </html>
+	// <!DOCTYPE html>
+	// <html>
+	// 	<head>
+	// 		<meta charset="UTF-8">
+	// 		<title>My another page</title>
+	// 	</head>
+	// 	<body>
+	// 		<div><strong>no rows</strong></div>
+	// 	</body>
+	// </html>
+
+}
+
+func Example_autoescaping() {
+	check := func(err error) {
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
+	check(err)
+	err = t.ExecuteTemplate(os.Stdout, "T", "<script>alert('you have been pwned')</script>")
+	check(err)
+	// Output:
+	// Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
+}
+
+func Example_escape() {
+	const s = `"Fran & Freddie's Diner" <tasty@example.com>`
+	v := []interface{}{`"Fran & Freddie's Diner"`, ' ', `<tasty@example.com>`}
+
+	fmt.Println(template.HTMLEscapeString(s))
+	template.HTMLEscape(os.Stdout, []byte(s))
+	fmt.Fprintln(os.Stdout, "")
+	fmt.Println(template.HTMLEscaper(v...))
+
+	fmt.Println(template.JSEscapeString(s))
+	template.JSEscape(os.Stdout, []byte(s))
+	fmt.Fprintln(os.Stdout, "")
+	fmt.Println(template.JSEscaper(v...))
+
+	fmt.Println(template.URLQueryEscaper(v...))
+
+	// Output:
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+	// &#34;Fran &amp; Freddie&#39;s Diner&#34;32&lt;tasty@example.com&gt;
+	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
+	// \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
+	// \"Fran \u0026 Freddie\'s Diner\"32\u003Ctasty@example.com\u003E
+	// %22Fran+%26+Freddie%27s+Diner%2232%3Ctasty%40example.com%3E
+
+}
+
+func ExampleTemplate_Delims() {
+	const text = "<<.Greeting>> {{.Name}}"
+
+	data := struct {
+		Greeting string
+		Name     string
+	}{
+		Greeting: "Hello",
+		Name:     "Joe",
+	}
+
+	t := template.Must(template.New("tpl").Delims("<<", ">>").Parse(text))
+
+	err := t.Execute(os.Stdout, data)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Output:
+	// Hello {{.Name}}
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+func ExampleTemplate_block() {
+	const (
+		master  = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
+		overlay = `{{define "list"}} {{join . ", "}}{{end}} `
+	)
+	var (
+		funcs     = template.FuncMap{"join": strings.Join}
+		guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
+	)
+	masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
+	if err != nil {
+		log.Fatal(err)
+	}
+	overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
+		log.Fatal(err)
+	}
+	// Output:
+	// Names:
+	// - Gamora
+	// - Groot
+	// - Nebula
+	// - Rocket
+	// - Star-Lord
+	// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
+}
diff --git a/internal/backport/html/template/examplefiles_test.go b/internal/backport/html/template/examplefiles_test.go
new file mode 100644
index 0000000..2efc8e5
--- /dev/null
+++ b/internal/backport/html/template/examplefiles_test.go
@@ -0,0 +1,227 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+// templateFile defines the contents of a template to be stored in a file, for testing.
+type templateFile struct {
+	name     string
+	contents string
+}
+
+func createTestDir(files []templateFile) string {
+	dir, err := ioutil.TempDir("", "template")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, file := range files {
+		f, err := os.Create(filepath.Join(dir, file.name))
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer f.Close()
+		_, err = io.WriteString(f, file.contents)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	return dir
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// Here we demonstrate loading a set of templates from a directory.
+func ExampleTemplate_glob() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
+		// T1.tmpl defines a template, T1, that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// T0.tmpl is the first name matched, so it becomes the starting template,
+	// the value returned by ParseGlob.
+	tmpl := template.Must(template.ParseGlob(pattern))
+
+	err := tmpl.Execute(os.Stdout, nil)
+	if err != nil {
+		log.Fatalf("template execution: %s", err)
+	}
+	// Output:
+	// T0 invokes T1: (T1 invokes T2: (This is T2))
+}
+
+// Here we demonstrate loading a set of templates from files in different directories.
+func ExampleTemplate_parsefiles() {
+	// Here we create different temporary directories and populate them with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir1 := createTestDir([]templateFile{
+		// T1.tmpl is a plain template file that just invokes T2.
+		{"T1.tmpl", `T1 invokes T2: ({{template "T2"}})`},
+	})
+
+	dir2 := createTestDir([]templateFile{
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+
+	// Clean up after the test; another quirk of running as an example.
+	defer func(dirs ...string) {
+		for _, dir := range dirs {
+			os.RemoveAll(dir)
+		}
+	}(dir1, dir2)
+
+	// Here starts the example proper.
+	// Let's parse only dir1/T1 and dir2/T2.
+	paths := []string{
+		filepath.Join(dir1, "T1.tmpl"),
+		filepath.Join(dir2, "T2.tmpl"),
+	}
+	tmpl := template.Must(template.ParseFiles(paths...))
+
+	err := tmpl.Execute(os.Stdout, nil)
+	if err != nil {
+		log.Fatalf("template execution: %s", err)
+	}
+	// Output:
+	// T1 invokes T2: (This is T2)
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// This example demonstrates one way to share some templates
+// and use them in different contexts. In this variant we add multiple driver
+// templates by hand to an existing bundle of templates.
+func ExampleTemplate_helpers() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T1.tmpl defines a template, T1, that invokes T2.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+		// T2.tmpl defines a template T2.
+		{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the helpers.
+	templates := template.Must(template.ParseGlob(pattern))
+	// Add one driver template to the bunch; we do this with an explicit template definition.
+	_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver1: ", err)
+	}
+	// Add another driver template.
+	_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
+	if err != nil {
+		log.Fatal("parsing driver2: ", err)
+	}
+	// We load all the templates before execution. This package does not require
+	// that behavior but html/template's escaping does, so it's a good habit.
+	err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
+	if err != nil {
+		log.Fatalf("driver1 execution: %s", err)
+	}
+	err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
+	if err != nil {
+		log.Fatalf("driver2 execution: %s", err)
+	}
+	// Output:
+	// Driver 1 calls T1: (T1 invokes T2: (This is T2))
+	// Driver 2 calls T2: (This is T2)
+}
+
+// The following example is duplicated in text/template; keep them in sync.
+
+// This example demonstrates how to use one group of driver
+// templates with distinct sets of helper templates.
+func ExampleTemplate_share() {
+	// Here we create a temporary directory and populate it with our sample
+	// template definition files; usually the template files would already
+	// exist in some location known to the program.
+	dir := createTestDir([]templateFile{
+		// T0.tmpl is a plain template file that just invokes T1.
+		{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
+		// T1.tmpl defines a template, T1, that invokes T2. Note that T2 is not defined.
+		{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+	})
+	// Clean up after the test; another quirk of running as an example.
+	defer os.RemoveAll(dir)
+
+	// pattern is the glob pattern used to find all the template files.
+	pattern := filepath.Join(dir, "*.tmpl")
+
+	// Here starts the example proper.
+	// Load the drivers.
+	drivers := template.Must(template.ParseGlob(pattern))
+
+	// We must define an implementation of the T2 template. First we clone
+	// the drivers, then add a definition of T2 to the template name space.
+
+	// 1. Clone the drivers to create a new name space from which to run them.
+	first, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning drivers: ", err)
+	}
+	// 2. Define T2, version A, and parse it.
+	_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Now repeat the whole thing, using a different version of T2.
+	// 1. Clone the drivers.
+	second, err := drivers.Clone()
+	if err != nil {
+		log.Fatal("cloning drivers: ", err)
+	}
+	// 2. Define T2, version B, and parse it.
+	_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
+	if err != nil {
+		log.Fatal("parsing T2: ", err)
+	}
+
+	// Execute the templates in the reverse order to verify the
+	// first is unaffected by the second.
+	err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
+	if err != nil {
+		log.Fatalf("second execution: %s", err)
+	}
+	err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
+	if err != nil {
+		log.Fatalf("first execution: %s", err)
+	}
+
+	// Output:
+	// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
+	// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
+}
diff --git a/internal/backport/html/template/exec_test.go b/internal/backport/html/template/exec_test.go
new file mode 100644
index 0000000..4342aa5
--- /dev/null
+++ b/internal/backport/html/template/exec_test.go
@@ -0,0 +1,1834 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for template execution, copied from text/template.
+
+package template
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+
+	"golang.org/x/website/internal/backport/text/template"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+	// Basics
+	True        bool
+	I           int
+	U16         uint16
+	X, S        string
+	FloatZero   float64
+	ComplexZero complex128
+	// Nested structs.
+	U *U
+	// Struct with String method.
+	V0     V
+	V1, V2 *V
+	// Struct with Error method.
+	W0     W
+	W1, W2 *W
+	// Slices
+	SI      []int
+	SICap   []int
+	SIEmpty []int
+	SB      []bool
+	// Arrays
+	AI [3]int
+	// Maps
+	MSI      map[string]int
+	MSIone   map[string]int // one element, for deterministic output
+	MSIEmpty map[string]int
+	MXI      map[interface{}]int
+	MII      map[int]int
+	MI32S    map[int32]string
+	MI64S    map[int64]string
+	MUI32S   map[uint32]string
+	MUI64S   map[uint64]string
+	MI8S     map[int8]string
+	MUI8S    map[uint8]string
+	SMSI     []map[string]int
+	// Empty interfaces; used to see if we can dig inside one.
+	Empty0 interface{} // nil
+	Empty1 interface{}
+	Empty2 interface{}
+	Empty3 interface{}
+	Empty4 interface{}
+	// Non-empty interfaces.
+	NonEmptyInterface         I
+	NonEmptyInterfacePtS      *I
+	NonEmptyInterfaceNil      I
+	NonEmptyInterfaceTypedNil I
+	// Stringer.
+	Str fmt.Stringer
+	Err error
+	// Pointers
+	PI  *int
+	PS  *string
+	PSI *[]int
+	NIL *int
+	// Function (not method)
+	BinaryFunc      func(string, string) string
+	VariadicFunc    func(...string) string
+	VariadicFuncInt func(int, ...string) string
+	NilOKFunc       func(*int) bool
+	ErrFunc         func() (string, error)
+	PanicFunc       func() string
+	// Template to test evaluation of templates.
+	Tmpl *Template
+	// Unexported field; cannot be accessed by template.
+	unexported int
+}
+
+type S []string
+
+func (S) Method0() string {
+	return "M0"
+}
+
+type U struct {
+	V string
+}
+
+type V struct {
+	j int
+}
+
+func (v *V) String() string {
+	if v == nil {
+		return "nilV"
+	}
+	return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+	k int
+}
+
+func (w *W) Error() string {
+	if w == nil {
+		return "nilW"
+	}
+	return fmt.Sprintf("[%d]", w.k)
+}
+
+var siVal = I(S{"a", "b"})
+
+var tVal = &T{
+	True:   true,
+	I:      17,
+	U16:    16,
+	X:      "x",
+	S:      "xyz",
+	U:      &U{"v"},
+	V0:     V{6666},
+	V1:     &V{7777}, // leave V2 as nil
+	W0:     W{888},
+	W1:     &W{999}, // leave W2 as nil
+	SI:     []int{3, 4, 5},
+	SICap:  make([]int, 5, 10),
+	AI:     [3]int{3, 4, 5},
+	SB:     []bool{true, false},
+	MSI:    map[string]int{"one": 1, "two": 2, "three": 3},
+	MSIone: map[string]int{"one": 1},
+	MXI:    map[interface{}]int{"one": 1},
+	MII:    map[int]int{1: 1},
+	MI32S:  map[int32]string{1: "one", 2: "two"},
+	MI64S:  map[int64]string{2: "i642", 3: "i643"},
+	MUI32S: map[uint32]string{2: "u322", 3: "u323"},
+	MUI64S: map[uint64]string{2: "ui642", 3: "ui643"},
+	MI8S:   map[int8]string{2: "i82", 3: "i83"},
+	MUI8S:  map[uint8]string{2: "u82", 3: "u83"},
+	SMSI: []map[string]int{
+		{"one": 1, "two": 2},
+		{"eleven": 11, "twelve": 12},
+	},
+	Empty1:                    3,
+	Empty2:                    "empty2",
+	Empty3:                    []int{7, 8},
+	Empty4:                    &U{"UinEmpty"},
+	NonEmptyInterface:         &T{X: "x"},
+	NonEmptyInterfacePtS:      &siVal,
+	NonEmptyInterfaceTypedNil: (*T)(nil),
+	Str:                       bytes.NewBuffer([]byte("foozle")),
+	Err:                       errors.New("erroozle"),
+	PI:                        newInt(23),
+	PS:                        newString("a string"),
+	PSI:                       newIntSlice(21, 22, 23),
+	BinaryFunc:                func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
+	VariadicFunc:              func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
+	VariadicFuncInt:           func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
+	NilOKFunc:                 func(s *int) bool { return s == nil },
+	ErrFunc:                   func() (string, error) { return "bla", nil },
+	PanicFunc:                 func() string { panic("test panic") },
+	Tmpl:                      Must(New("x").Parse("test template")), // "x" is the value of .X
+}
+
+var tSliceOfNil = []*T{nil}
+
+// A non-empty interface.
+type I interface {
+	Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+	return &n
+}
+
+func newString(s string) *string {
+	return &s
+}
+
+func newIntSlice(n ...int) *[]int {
+	p := new([]int)
+	*p = make([]int, len(n))
+	copy(*p, n)
+	return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+	return "M0"
+}
+
+func (t *T) Method1(a int) int {
+	return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+	return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) Method3(v interface{}) string {
+	return fmt.Sprintf("Method3: %v", v)
+}
+
+func (t *T) Copy() *T {
+	n := new(T)
+	*n = *t
+	return n
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+	v := make([]int, len(b))
+	for i, x := range b {
+		v[i] = x + a
+	}
+	return v
+}
+
+var myError = errors.New("my error")
+
+// MyError returns a value and an error according to its argument.
+func (t *T) MyError(error bool) (bool, error) {
+	if error {
+		return true, myError
+	}
+	return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+	return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+	if b {
+		return "true"
+	}
+	return ""
+}
+
+func typeOf(arg interface{}) string {
+	return fmt.Sprintf("%T", arg)
+}
+
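+// execTest describes a single execution test case: input is the template
+// source, output is the expected rendering, data is the value passed to
+// Execute, and ok reports whether execution is expected to succeed.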
+type execTest struct {
+	name   string
+	input  string
+	output string
+	data   interface{}
+	ok     bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
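+// For example, on a 64-bit platform bigInt is 0x7fffffffffffffff and bigUint
+// is 0x8000000000000000, one past the largest int.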
+var (
+	bigInt  = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+	bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+	// Trivial cases.
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"nil action", "{{nil}}", "", nil, false},
+
+	// Ideal constants.
+	{"ideal int", "{{typeOf 3}}", "int", 0, true},
+	{"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+	{"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+	{"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+	{"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+	{"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+	{"ideal nil without type", "{{nil}}", "", 0, false},
+
+	// Fields of structs.
+	{".X", "-{{.X}}-", "-x-", tVal, true},
+	{".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+	{".unexported", "{{.unexported}}", "", tVal, false},
+
+	// Fields on maps.
+	{"map .one", "{{.MSI.one}}", "1", tVal, true},
+	{"map .two", "{{.MSI.two}}", "2", tVal, true},
+	{"map .NO", "{{.MSI.NO}}", "", tVal, true}, // NOTE: <no value> in text/template
+	{"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+	{"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+	{"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+	// Dots of all kinds to test basic evaluation.
+	{"dot int", "<{{.}}>", "&lt;13>", 13, true},
+	{"dot uint", "<{{.}}>", "&lt;14>", uint(14), true},
+	{"dot float", "<{{.}}>", "&lt;15.1>", 15.1, true},
+	{"dot bool", "<{{.}}>", "&lt;true>", true, true},
+	{"dot complex", "<{{.}}>", "&lt;(16.2-17i)>", 16.2 - 17i, true},
+	{"dot string", "<{{.}}>", "&lt;hello>", "hello", true},
+	{"dot slice", "<{{.}}>", "&lt;[-1 -2 -3]>", []int{-1, -2, -3}, true},
+	{"dot map", "<{{.}}>", "&lt;map[two:22]>", map[string]int{"two": 22}, true},
+	{"dot struct", "<{{.}}>", "&lt;{7 seven}>", struct {
+		a int
+		b string
+	}{7, "seven"}, true},
+
+	// Variables.
+	{"$ int", "{{$}}", "123", 123, true},
+	{"$.I", "{{$.I}}", "17", tVal, true},
+	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
+	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+	{"simple assignment", "{{$x := 2}}{{$x = 3}}{{$x}}", "3", tVal, true},
+	{"nested assignment",
+		"{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{$x}}",
+		"3", tVal, true},
+	{"nested assignment changes the last declaration",
+		"{{$x := 1}}{{if true}}{{$x := 2}}{{if true}}{{$x = 3}}{{end}}{{end}}{{$x}}",
+		"1", tVal, true},
+
+	// Type with String method.
+	{"V{6666}.String()", "-{{.V0}}-", "-{6666}-", tVal, true}, //  NOTE: -<6666>- in text/template
+	{"&V{7777}.String()", "-{{.V1}}-", "-&lt;7777&gt;-", tVal, true},
+	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+	// Type with Error method.
+	{"W{888}.Error()", "-{{.W0}}-", "-{888}-", tVal, true}, // NOTE: -[888] in text/template
+	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+	// Pointers.
+	{"*int", "{{.PI}}", "23", tVal, true},
+	{"*string", "{{.PS}}", "a string", tVal, true},
+	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+	{"NIL", "{{.NIL}}", "&lt;nil&gt;", tVal, true},
+
+	// Empty interfaces holding values.
+	{"empty nil", "{{.Empty0}}", "", tVal, true}, // NOTE: <no value> in text/template
+	{"empty with int", "{{.Empty1}}", "3", tVal, true},
+	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+	// Edge cases with <no value> with an interface value
+	{"field on interface", "{{.foo}}", "", nil, true},                  // NOTE: <no value> in text/template
+	{"field on parenthesized interface", "{{(.).foo}}", "", nil, true}, // NOTE: <no value> in text/template
+
+	// Issue 31810: Parenthesized first element of pipeline with arguments.
+	// See also TestIssue31810.
+	{"unparenthesized non-function", "{{1 2}}", "", nil, false},
+	{"parenthesized non-function", "{{(1) 2}}", "", nil, false},
+	{"parenthesized non-function with no args", "{{(1)}}", "1", nil, true}, // This is fine.
+
+	// Method calls.
+	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: &lt;nil&gt;-", tVal, true},
+	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: &lt;nil&gt;-", tVal, true},
+	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+	{"method on chained var",
+		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method",
+		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+		"true", tVal, true},
+	{"chained method on variable",
+		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+		"true", tVal, true},
+	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
+	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
+	{"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
+	{"method on typed nil interface value", "{{.NonEmptyInterfaceTypedNil.Method0}}", "M0", tVal, true},
+
+	// Function call builtin.
+	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
+	{".VariadicFunc0", "{{call .VariadicFunc}}", "&lt;&gt;", tVal, true},
+	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "&lt;he&#43;llo&gt;", tVal, true},
+	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=&lt;he&#43;llo&gt;", tVal, true},
+	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
+	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
+	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
+	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
+	{"call nil", "{{call nil}}", "", tVal, false},
+
+	// Erroneous function calls (check args).
+	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
+	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
+	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
+	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
+	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
+	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
+	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
+	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
+
+	// Pipelines.
+	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-&lt;he&#43;&lt;llo&gt;&gt;-", tVal, true},
+
+	// Nil values aren't missing arguments.
+	{"nil pipeline", "{{ .Empty0 | call .NilOKFunc }}", "true", tVal, true},
+	{"nil call arg", "{{ call .NilOKFunc .Empty0 }}", "true", tVal, true},
+	{"bad nil pipeline", "{{ .Empty0 | .VariadicFunc }}", "", tVal, false},
+
+	// Parenthesized expressions
+	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
+
+	// Parenthesized expressions with field accesses
+	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
+	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
+	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
+	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
+
+	// If.
+	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
+	{"if on typed nil interface value", "{{if .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
+	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
+	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
+
+	// Print etc.
+	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+	{"print nil", `{{print nil}}`, "&lt;nil&gt;", tVal, true},
+	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+	{"printf complex", `{{printf "%g" 1+7i}}`, "(1&#43;7i)", tVal, true},
+	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+	// HTML.
+	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+	{"html", `{{html .PS}}`, "a string", tVal, true},
+	{"html typed nil", `{{html .NIL}}`, "&lt;nil&gt;", tVal, true},
+	{"html untyped nil", `{{html .Empty0}}`, "&lt;nil&gt;", tVal, true}, // NOTE: "&lt;no value&gt;" in text/template
+
+	// JavaScript.
+	{"js", `{{js .}}`, `It\&#39;d be nice.`, `It'd be nice.`, true},
+
+	// URL query.
+	{"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+	// Booleans
+	{"not", "{{not true}} {{not false}}", "false true", nil, true},
+	{"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+	{"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+	{"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+	{"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+	// Indexing.
+	{"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+	{"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+	{"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+	{"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+	{"slice[nil]", "{{index .SI nil}}", "", tVal, false},
+	{"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+	{"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+	{"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
+	{"map[nil]", "{{index .MSI nil}}", "", tVal, false},
+	{"map[``]", "{{index .MSI ``}}", "0", tVal, true},
+	{"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+	{"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+	{"nil[1]", "{{index nil 1}}", "", tVal, false},
+	{"map MI64S", "{{index .MI64S 2}}", "i642", tVal, true},
+	{"map MI32S", "{{index .MI32S 2}}", "two", tVal, true},
+	{"map MUI64S", "{{index .MUI64S 3}}", "ui643", tVal, true},
+	{"map MI8S", "{{index .MI8S 3}}", "i83", tVal, true},
+	{"map MUI8S", "{{index .MUI8S 2}}", "u82", tVal, true},
+	{"index of an interface field", "{{index .Empty3 0}}", "7", tVal, true},
+
+	// Slicing.
+	{"slice[:]", "{{slice .SI}}", "[3 4 5]", tVal, true},
+	{"slice[1:]", "{{slice .SI 1}}", "[4 5]", tVal, true},
+	{"slice[1:2]", "{{slice .SI 1 2}}", "[4]", tVal, true},
+	{"slice[-1:]", "{{slice .SI -1}}", "", tVal, false},
+	{"slice[1:-2]", "{{slice .SI 1 -2}}", "", tVal, false},
+	{"slice[1:2:-1]", "{{slice .SI 1 2 -1}}", "", tVal, false},
+	{"slice[2:1]", "{{slice .SI 2 1}}", "", tVal, false},
+	{"slice[2:2:1]", "{{slice .SI 2 2 1}}", "", tVal, false},
+	{"out of range", "{{slice .SI 4 5}}", "", tVal, false},
+	{"out of range", "{{slice .SI 2 2 5}}", "", tVal, false},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10}}", "[0 0 0 0]", tVal, true},
+	{"len(s) < indexes < cap(s)", "{{slice .SICap 6 10 10}}", "[0 0 0 0]", tVal, true},
+	{"indexes > cap(s)", "{{slice .SICap 10 11}}", "", tVal, false},
+	{"indexes > cap(s)", "{{slice .SICap 6 10 11}}", "", tVal, false},
+	{"array[:]", "{{slice .AI}}", "[3 4 5]", tVal, true},
+	{"array[1:]", "{{slice .AI 1}}", "[4 5]", tVal, true},
+	{"array[1:2]", "{{slice .AI 1 2}}", "[4]", tVal, true},
+	{"string[:]", "{{slice .S}}", "xyz", tVal, true},
+	{"string[0:1]", "{{slice .S 0 1}}", "x", tVal, true},
+	{"string[1:]", "{{slice .S 1}}", "yz", tVal, true},
+	{"string[1:2]", "{{slice .S 1 2}}", "y", tVal, true},
+	{"out of range", "{{slice .S 1 5}}", "", tVal, false},
+	{"3-index slice of string", "{{slice .S 1 2 2}}", "", tVal, false},
+	{"slice of an interface field", "{{slice .Empty3 0 1}}", "[7]", tVal, true},
+
+	// Len.
+	{"slice", "{{len .SI}}", "3", tVal, true},
+	{"map", "{{len .MSI }}", "3", tVal, true},
+	{"len of int", "{{len 3}}", "", tVal, false},
+	{"len of nothing", "{{len .Empty0}}", "", tVal, false},
+	{"len of an interface field", "{{len .Empty3}}", "2", tVal, true},
+
+	// With.
+	{"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+	{"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+	{"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+	{"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+	{"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0&#43;1.5i)", tVal, true},
+	{"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+	{"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+	{"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+	{"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+	{"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+	{"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+	{"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+	{"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+	{"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true},
+
+	// Range.
+	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
+	{"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
+	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "&lt;3>&lt;4>&lt;5>", tVal, true},
+	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "&lt;0=3>&lt;1=4>&lt;2=5>", tVal, true},
+	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "&lt;1>", tVal, true},
+	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "&lt;one=1>", tVal, true},
+	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
+	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "&lt;21>&lt;22>&lt;23>", tVal, true},
+	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+	// Cute examples.
+	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+	// Error handling.
+	{"error method, error", "{{.MyError true}}", "", tVal, false},
+	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
+
+	// Numbers
+	{"decimal", "{{print 1234}}", "1234", tVal, true},
+	{"decimal _", "{{print 12_34}}", "1234", tVal, true},
+	{"binary", "{{print 0b101}}", "5", tVal, true},
+	{"binary _", "{{print 0b_1_0_1}}", "5", tVal, true},
+	{"BINARY", "{{print 0B101}}", "5", tVal, true},
+	{"octal0", "{{print 0377}}", "255", tVal, true},
+	{"octal", "{{print 0o377}}", "255", tVal, true},
+	{"octal _", "{{print 0o_3_7_7}}", "255", tVal, true},
+	{"OCTAL", "{{print 0O377}}", "255", tVal, true},
+	{"hex", "{{print 0x123}}", "291", tVal, true},
+	{"hex _", "{{print 0x1_23}}", "291", tVal, true},
+	{"HEX", "{{print 0X123ABC}}", "1194684", tVal, true},
+	{"float", "{{print 123.4}}", "123.4", tVal, true},
+	{"float _", "{{print 0_0_1_2_3.4}}", "123.4", tVal, true},
+	{"hex float", "{{print +0x1.ep+2}}", "7.5", tVal, true},
+	{"hex float _", "{{print +0x_1.e_0p+0_2}}", "7.5", tVal, true},
+	{"HEX float", "{{print +0X1.EP+2}}", "7.5", tVal, true},
+	{"print multi", "{{print 1_2_3_4 7.5_00_00_00}}", "1234 7.5", tVal, true},
+	{"print multi2", "{{print 1234 0x0_1.e_0p+02}}", "1234 7.5", tVal, true},
+
+	// Fixed bugs.
+	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+	// Do not loop endlessly in indirect for non-empty interfaces.
+	// The bug appeared only with a pointer to an interface; execution looped forever.
+	{"bug1", "{{.Method0}}", "M0", &iVal, true},
+	// Was taking address of interface field, so method set was empty.
+	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+	// Struct values were not legal in with - mere oversight.
+	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+	// Nil interface values in if.
+	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+	// Stringer.
+	{"bug5", "{{.Str}}", "foozle", tVal, true},
+	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
+	// Args need to be indirected and dereferenced sometimes.
+	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+	// Legal parse but illegal execution: non-function should have no arguments.
+	{"bug7a", "{{3 2}}", "", tVal, false},
+	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
+	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
+	// Pipelined arg was not being type-checked.
+	{"bug8a", "{{3|oneArg}}", "", tVal, false},
+	{"bug8b", "{{4|dddArg 3}}", "", tVal, false},
+	// A bug was introduced that broke map lookups for lower-case names.
+	{"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
+	// Field chain starting with function did not work.
+	{"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
+	// Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
+	{"bug11", "{{valueString .PS}}", "", T{}, false},
+	// 0xef gave constant type float64. Issue 8622.
+	{"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
+	{"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
+	{"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
+	{"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
+	// Chained nodes did not work as arguments. Issue 8473.
+	{"bug13", "{{print (.Copy).I}}", "17", tVal, true},
+	// Didn't protect against nil or literal values in field chains.
+	{"bug14a", "{{(nil).True}}", "", tVal, false},
+	{"bug14b", "{{$x := nil}}{{$x.anything}}", "", tVal, false},
+	{"bug14c", `{{$x := (1.0)}}{{$y := ("hello")}}{{$x.anything}}{{$y.true}}`, "", tVal, false},
+	// Didn't call validateType on function results. Issue 10800.
+	{"bug15", "{{valueString returnInt}}", "", tVal, false},
+	// Variadic function corner cases. Issue 10946.
+	{"bug16a", "{{true|printf}}", "", tVal, false},
+	{"bug16b", "{{1|printf}}", "", tVal, false},
+	{"bug16c", "{{1.1|printf}}", "", tVal, false},
+	{"bug16d", "{{'x'|printf}}", "", tVal, false},
+	{"bug16e", "{{0i|printf}}", "", tVal, false},
+	{"bug16f", "{{true|twoArgs \"xxx\"}}", "", tVal, false},
+	{"bug16g", "{{\"aaa\" |twoArgs \"bbb\"}}", "twoArgs=bbbaaa", tVal, true},
+	{"bug16h", "{{1|oneArg}}", "", tVal, false},
+	{"bug16i", "{{\"aaa\"|oneArg}}", "oneArg=aaa", tVal, true},
+	{"bug16j", "{{1+2i|printf \"%v\"}}", "(1&#43;2i)", tVal, true},
+	{"bug16k", "{{\"aaa\"|printf }}", "aaa", tVal, true},
+	{"bug17a", "{{.NonEmptyInterface.X}}", "x", tVal, true},
+	{"bug17b", "-{{.NonEmptyInterface.Method1 1234}}-", "-1234-", tVal, true},
+	{"bug17c", "{{len .NonEmptyInterfacePtS}}", "2", tVal, true},
+	{"bug17d", "{{index .NonEmptyInterfacePtS 0}}", "a", tVal, true},
+	{"bug17e", "{{range .NonEmptyInterfacePtS}}-{{.}}-{{end}}", "-a--b-", tVal, true},
+
+	// More variadic function corner cases. Some runes would get evaluated
+	// as constant floats instead of ints. Issue 34483.
+	{"bug18a", "{{eq . '.'}}", "true", '.', true},
+	{"bug18b", "{{eq . 'e'}}", "true", 'e', true},
+	{"bug18c", "{{eq . 'P'}}", "true", 'P', true},
+}
+
+func zeroArgs() string {
+	return "zeroArgs"
+}
+
+func oneArg(a string) string {
+	return "oneArg=" + a
+}
+
+func twoArgs(a, b string) string {
+	return "twoArgs=" + a + b
+}
+
+func dddArg(a int, b ...string) string {
+	return fmt.Sprintln(a, b)
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a".
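+// A nil channel (returned when n is 0) is treated as an empty range by the
+// template engine; the "range nil count" case above relies on this to reach
+// its {{else}} branch.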
+func count(n int) chan string {
+	if n == 0 {
+		return nil
+	}
+	c := make(chan string)
+	go func() {
+		for i := 0; i < n; i++ {
+			c <- "abcdefghijklmnop"[i : i+1]
+		}
+		close(c)
+	}()
+	return c
+}
+
+// vfunc takes a V and a *V.
+func vfunc(V, *V) string {
+	return "vfunc"
+}
+
+// valueString takes a string, not a pointer.
+func valueString(v string) string {
+	return "value is ignored"
+}
+
+// returnInt returns an int.
+func returnInt() int {
+	return 7
+}
+
+func add(args ...int) int {
+	sum := 0
+	for _, x := range args {
+		sum += x
+	}
+	return sum
+}
+
+func echo(arg interface{}) interface{} {
+	return arg
+}
+
+func makemap(arg ...string) map[string]string {
+	if len(arg)%2 != 0 {
+		panic("bad makemap")
+	}
+	m := make(map[string]string)
+	for i := 0; i < len(arg); i += 2 {
+		m[arg[i]] = arg[i+1]
+	}
+	return m
+}
+
+func stringer(s fmt.Stringer) string {
+	return s.String()
+}
+
+func mapOfThree() interface{} {
+	return map[string]int{"three": 3}
+}
+
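+// testExecute runs each test case against a clone of the provided base
+// template (or against a freshly created template when it is nil) and checks
+// both the rendered output and the expected error behavior.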
+func testExecute(execTests []execTest, template *Template, t *testing.T) {
+	b := new(bytes.Buffer)
+	funcs := FuncMap{
+		"add":         add,
+		"count":       count,
+		"dddArg":      dddArg,
+		"echo":        echo,
+		"makemap":     makemap,
+		"mapOfThree":  mapOfThree,
+		"oneArg":      oneArg,
+		"returnInt":   returnInt,
+		"stringer":    stringer,
+		"twoArgs":     twoArgs,
+		"typeOf":      typeOf,
+		"valueString": valueString,
+		"vfunc":       vfunc,
+		"zeroArgs":    zeroArgs,
+	}
+	for _, test := range execTests {
+		var tmpl *Template
+		var err error
+		if template == nil {
+			tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
+		} else {
+			tmpl, err = template.Clone()
+			if err != nil {
+				t.Errorf("%s: clone error: %s", test.name, err)
+				continue
+			}
+			tmpl, err = tmpl.New(test.name).Funcs(funcs).Parse(test.input)
+		}
+		if err != nil {
+			t.Errorf("%s: parse error: %s", test.name, err)
+			continue
+		}
+		b.Reset()
+		err = tmpl.Execute(b, test.data)
+		switch {
+		case !test.ok && err == nil:
+			t.Errorf("%s: expected error; got none", test.name)
+			continue
+		case test.ok && err != nil:
+			t.Errorf("%s: unexpected execute error: %s", test.name, err)
+			continue
+		case !test.ok && err != nil:
+			// expected error, got one
+			if *debug {
+				fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+			}
+		}
+		result := b.String()
+		if result != test.output {
+			t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+		}
+	}
+}
+
+func TestExecute(t *testing.T) {
+	testExecute(execTests, nil, t)
+}
+
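+// delimPairs lists alternate delimiters for TestDelims, two entries at a
+// time: a left delimiter followed by its matching right delimiter.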
+var delimPairs = []string{
+	"", "", // default
+	"{{", "}}", // same as default
+	"|", "|", // same
+	"(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+	const hello = "Hello, world"
+	var value = struct{ Str string }{hello}
+	for i := 0; i < len(delimPairs); i += 2 {
+		text := ".Str"
+		left := delimPairs[i+0]
+		trueLeft := left
+		right := delimPairs[i+1]
+		trueRight := right
+		if left == "" { // default case
+			trueLeft = "{{"
+		}
+		if right == "" { // default case
+			trueRight = "}}"
+		}
+		text = trueLeft + text + trueRight
+		// Now add a comment
+		text += trueLeft + "/*comment*/" + trueRight
+		// Now add an action containing a string.
+		text += trueLeft + `"` + trueLeft + `"` + trueRight
+		// At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+		tmpl, err := New("delims").Delims(left, right).Parse(text)
+		if err != nil {
+			t.Fatalf("delim %q text %q parse err %s", left, text, err)
+		}
+		var b = new(bytes.Buffer)
+		err = tmpl.Execute(b, value)
+		if err != nil {
+			t.Fatalf("delim %q exec err %s", left, err)
+		}
+		if b.String() != hello+trueLeft {
+			t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+		}
+	}
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+	b := new(bytes.Buffer)
+	tmpl := New("error")
+	_, err := tmpl.Parse("{{.MyError true}}")
+	if err != nil {
+		t.Fatalf("parse error: %s", err)
+	}
+	err = tmpl.Execute(b, tVal)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	} else if !strings.Contains(err.Error(), myError.Error()) {
+		if *debug {
+			fmt.Printf("test execute error: %s\n", err)
+		}
+		t.Errorf("expected myError; got %s", err)
+	}
+}
+
+const execErrorText = `line 1
+line 2
+line 3
+{{template "one" .}}
+{{define "one"}}{{template "two" .}}{{end}}
+{{define "two"}}{{template "three" .}}{{end}}
+{{define "three"}}{{index "hi" $}}{{end}}`
+
+// Check that an error from a nested template contains all the relevant information.
+func TestExecError(t *testing.T) {
+	tmpl, err := New("top").Parse(execErrorText)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
+	got := err.Error()
+	if got != want {
+		t.Errorf("expected\n%q\ngot\n%q", want, got)
+	}
+}
+
+func TestJSEscaping(t *testing.T) {
+	testCases := []struct {
+		in, exp string
+	}{
+		{`a`, `a`},
+		{`'foo`, `\'foo`},
+		{`Go "jump" \`, `Go \"jump\" \\`},
+		{`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+		{"unprintable \uFDFF", `unprintable \uFDFF`},
+		{`<html>`, `\u003Chtml\u003E`},
+		{`no = in attributes`, `no \u003D in attributes`},
+		{`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
+	}
+	for _, tc := range testCases {
+		s := JSEscapeString(tc.in)
+		if s != tc.exp {
+			t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+		}
+	}
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+	Val         int
+	Left, Right *Tree
+}
+
+// Use different delimiters to test Template.Delims.
+// Also test the trimming of leading and trailing spaces.
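+// With Delims("(", ")") as used below, an action written as (- .Val -) is the
+// custom-delimiter equivalent of {{- .Val -}}: the minus markers trim the
+// whitespace around the action.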
+const treeTemplate = `
+	(- define "tree" -)
+	[
+		(- .Val -)
+		(- with .Left -)
+			(template "tree" . -)
+		(- end -)
+		(- with .Right -)
+			(- template "tree" . -)
+		(- end -)
+	]
+	(- end -)
+`
+
+func TestTree(t *testing.T) {
+	var tree = &Tree{
+		1,
+		&Tree{
+			2, &Tree{
+				3,
+				&Tree{
+					4, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				5,
+				&Tree{
+					6, nil, nil,
+				},
+				nil,
+			},
+		},
+		&Tree{
+			7,
+			&Tree{
+				8,
+				&Tree{
+					9, nil, nil,
+				},
+				nil,
+			},
+			&Tree{
+				10,
+				&Tree{
+					11, nil, nil,
+				},
+				nil,
+			},
+		},
+	}
+	tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
+	if err != nil {
+		t.Fatal("parse error:", err)
+	}
+	var b bytes.Buffer
+	const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+	// First by looking up the template.
+	err = tmpl.Lookup("tree").Execute(&b, tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result := b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+	// Then direct to execution.
+	b.Reset()
+	err = tmpl.ExecuteTemplate(&b, "tree", tree)
+	if err != nil {
+		t.Fatal("exec error:", err)
+	}
+	result = b.String()
+	if result != expect {
+		t.Errorf("expected %q got %q", expect, result)
+	}
+}
+
+func TestExecuteOnNewTemplate(t *testing.T) {
+	// This is issue 3872.
+	New("Name").Templates()
+	// This is issue 11379.
+	// new(Template).Templates() // TODO: crashes
+	// new(Template).Parse("") // TODO: crashes
+	// new(Template).New("abc").Parse("") // TODO: crashes
+	// new(Template).Execute(nil, nil)                // returns an error (but does not crash)
+	// new(Template).ExecuteTemplate(nil, "XXX", nil) // returns an error (but does not crash)
+}
+
+const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
+
+func TestMessageForExecuteEmpty(t *testing.T) {
+	// Test a truly empty template.
+	tmpl := New("empty")
+	var b bytes.Buffer
+	err := tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected initial error")
+	}
+	got := err.Error()
+	want := `template: "empty" is an incomplete or empty template` // NOTE: text/template has extra "empty: " in message
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+
+	// Add a non-empty template to check that the error is helpful.
+	tmpl = New("empty")
+	tests, err := New("").Parse(testTemplates)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl.AddParseTree("secondary", tests.Tree)
+	err = tmpl.Execute(&b, 0)
+	if err == nil {
+		t.Fatal("expected second error")
+	}
+	got = err.Error()
+	if got != want {
+		t.Errorf("expected error %s got %s", want, got)
+	}
+	// Make sure we can execute the secondary.
+	err = tmpl.ExecuteTemplate(&b, "secondary", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFinalForPrintf(t *testing.T) {
+	tmpl, err := New("").Parse(`{{"x" | printf}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	err = tmpl.Execute(&b, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+type cmpTest struct {
+	expr  string
+	truth string
+	ok    bool
+}
+
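+// cmpTests exercises the comparison built-ins (eq, ne, lt, le, gt, ge):
+// truth is the expected result of rendering {{if expr}}true{{else}}false{{end}}
+// and ok reports whether execution should succeed.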
+var cmpTests = []cmpTest{
+	{"eq true true", "true", true},
+	{"eq true false", "false", true},
+	{"eq 1+2i 1+2i", "true", true},
+	{"eq 1+2i 1+3i", "false", true},
+	{"eq 1.5 1.5", "true", true},
+	{"eq 1.5 2.5", "false", true},
+	{"eq 1 1", "true", true},
+	{"eq 1 2", "false", true},
+	{"eq `xy` `xy`", "true", true},
+	{"eq `xy` `xyz`", "false", true},
+	{"eq .Uthree .Uthree", "true", true},
+	{"eq .Uthree .Ufour", "false", true},
+	{"eq 3 4 5 6 3", "true", true},
+	{"eq 3 4 5 6 7", "false", true},
+	{"ne true true", "false", true},
+	{"ne true false", "true", true},
+	{"ne 1+2i 1+2i", "false", true},
+	{"ne 1+2i 1+3i", "true", true},
+	{"ne 1.5 1.5", "false", true},
+	{"ne 1.5 2.5", "true", true},
+	{"ne 1 1", "false", true},
+	{"ne 1 2", "true", true},
+	{"ne `xy` `xy`", "false", true},
+	{"ne `xy` `xyz`", "true", true},
+	{"ne .Uthree .Uthree", "false", true},
+	{"ne .Uthree .Ufour", "true", true},
+	{"lt 1.5 1.5", "false", true},
+	{"lt 1.5 2.5", "true", true},
+	{"lt 1 1", "false", true},
+	{"lt 1 2", "true", true},
+	{"lt `xy` `xy`", "false", true},
+	{"lt `xy` `xyz`", "true", true},
+	{"lt .Uthree .Uthree", "false", true},
+	{"lt .Uthree .Ufour", "true", true},
+	{"le 1.5 1.5", "true", true},
+	{"le 1.5 2.5", "true", true},
+	{"le 2.5 1.5", "false", true},
+	{"le 1 1", "true", true},
+	{"le 1 2", "true", true},
+	{"le 2 1", "false", true},
+	{"le `xy` `xy`", "true", true},
+	{"le `xy` `xyz`", "true", true},
+	{"le `xyz` `xy`", "false", true},
+	{"le .Uthree .Uthree", "true", true},
+	{"le .Uthree .Ufour", "true", true},
+	{"le .Ufour .Uthree", "false", true},
+	{"gt 1.5 1.5", "false", true},
+	{"gt 1.5 2.5", "false", true},
+	{"gt 1 1", "false", true},
+	{"gt 2 1", "true", true},
+	{"gt 1 2", "false", true},
+	{"gt `xy` `xy`", "false", true},
+	{"gt `xy` `xyz`", "false", true},
+	{"gt .Uthree .Uthree", "false", true},
+	{"gt .Uthree .Ufour", "false", true},
+	{"gt .Ufour .Uthree", "true", true},
+	{"ge 1.5 1.5", "true", true},
+	{"ge 1.5 2.5", "false", true},
+	{"ge 2.5 1.5", "true", true},
+	{"ge 1 1", "true", true},
+	{"ge 1 2", "false", true},
+	{"ge 2 1", "true", true},
+	{"ge `xy` `xy`", "true", true},
+	{"ge `xy` `xyz`", "false", true},
+	{"ge `xyz` `xy`", "true", true},
+	{"ge .Uthree .Uthree", "true", true},
+	{"ge .Uthree .Ufour", "false", true},
+	{"ge .Ufour .Uthree", "true", true},
+	// Mixing signed and unsigned integers.
+	{"eq .Uthree .Three", "true", true},
+	{"eq .Three .Uthree", "true", true},
+	{"le .Uthree .Three", "true", true},
+	{"le .Three .Uthree", "true", true},
+	{"ge .Uthree .Three", "true", true},
+	{"ge .Three .Uthree", "true", true},
+	{"lt .Uthree .Three", "false", true},
+	{"lt .Three .Uthree", "false", true},
+	{"gt .Uthree .Three", "false", true},
+	{"gt .Three .Uthree", "false", true},
+	{"eq .Ufour .Three", "false", true},
+	{"lt .Ufour .Three", "false", true},
+	{"gt .Ufour .Three", "true", true},
+	{"eq .NegOne .Uthree", "false", true},
+	{"eq .Uthree .NegOne", "false", true},
+	{"ne .NegOne .Uthree", "true", true},
+	{"ne .Uthree .NegOne", "true", true},
+	{"lt .NegOne .Uthree", "true", true},
+	{"lt .Uthree .NegOne", "false", true},
+	{"le .NegOne .Uthree", "true", true},
+	{"le .Uthree .NegOne", "false", true},
+	{"gt .NegOne .Uthree", "false", true},
+	{"gt .Uthree .NegOne", "true", true},
+	{"ge .NegOne .Uthree", "false", true},
+	{"ge .Uthree .NegOne", "true", true},
+	{"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule.
+	{"eq (index `x` 0) 'y'", "false", true},
+	{"eq .V1 .V2", "true", true},
+	{"eq .Ptr .Ptr", "true", true},
+	{"eq .Ptr .NilPtr", "false", true},
+	{"eq .NilPtr .NilPtr", "true", true},
+	{"eq .Iface1 .Iface1", "true", true},
+	{"eq .Iface1 .Iface2", "false", true},
+	{"eq .Iface2 .Iface2", "true", true},
+	// Errors
+	{"eq `xy` 1", "", false},       // Different types.
+	{"eq 2 2.0", "", false},        // Different types.
+	{"lt true true", "", false},    // Unordered types.
+	{"lt 1+0i 1+0i", "", false},    // Unordered types.
+	{"eq .Ptr 1", "", false},       // Incompatible types.
+	{"eq .Ptr .NegOne", "", false}, // Incompatible types.
+	{"eq .Map .Map", "", false},    // Uncomparable types.
+	{"eq .Map .V1", "", false},     // Uncomparable types.
+}
+
+func TestComparison(t *testing.T) {
+	b := new(bytes.Buffer)
+	var cmpStruct = struct {
+		Uthree, Ufour  uint
+		NegOne, Three  int
+		Ptr, NilPtr    *int
+		Map            map[int]int
+		V1, V2         V
+		Iface1, Iface2 fmt.Stringer
+	}{
+		Uthree: 3,
+		Ufour:  4,
+		NegOne: -1,
+		Three:  3,
+		Ptr:    new(int),
+		Iface1: b,
+	}
+	for _, test := range cmpTests {
+		text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
+		tmpl, err := New("empty").Parse(text)
+		if err != nil {
+			t.Fatalf("%q: %s", test.expr, err)
+		}
+		b.Reset()
+		err = tmpl.Execute(b, &cmpStruct)
+		if test.ok && err != nil {
+			t.Errorf("%s errored incorrectly: %s", test.expr, err)
+			continue
+		}
+		if !test.ok && err == nil {
+			t.Errorf("%s did not error", test.expr)
+			continue
+		}
+		if b.String() != test.truth {
+			t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
+		}
+	}
+}
+
+func TestMissingMapKey(t *testing.T) {
+	data := map[string]int{
+		"x": 99,
+	}
+	tmpl, err := New("t1").Parse("{{.x}} {{.y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b bytes.Buffer
+	// By default, just get "<no value>" // NOTE: not in html/template, get empty string
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := "99 "
+	got := b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Same if we set the option explicitly to the default.
+	tmpl.Option("missingkey=default")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("default:", err)
+	}
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Next we ask for a zero value
+	tmpl.Option("missingkey=zero")
+	b.Reset()
+	err = tmpl.Execute(&b, data)
+	if err != nil {
+		t.Fatal("zero:", err)
+	}
+	want = "99 0"
+	got = b.String()
+	if got != want {
+		t.Errorf("got %q; expected %q", got, want)
+	}
+	// Now we ask for an error.
+	tmpl.Option("missingkey=error")
+	err = tmpl.Execute(&b, data)
+	if err == nil {
+		t.Errorf("expected error; got none")
+	}
+	// same Option, but now a nil interface: ask for an error
+	err = tmpl.Execute(&b, nil)
+	t.Log(err)
+	if err == nil {
+		t.Errorf("expected error for nil-interface; got none")
+	}
+}
+
+// Test that the error message for a multiline unterminated string
+// refers to the line number of the opening quote.
+func TestUnterminatedStringError(t *testing.T) {
+	_, err := New("X").Parse("hello\n\n{{`unterminated\n\n\n\n}}\n some more\n\n")
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	str := err.Error()
+	if !strings.Contains(str, "X:3: unterminated raw quoted string") {
+		t.Fatalf("unexpected error: %s", str)
+	}
+}
+
+const alwaysErrorText = "always be failing"
+
+var alwaysError = errors.New(alwaysErrorText)
+
+type ErrorWriter int
+
+func (e ErrorWriter) Write(p []byte) (int, error) {
+	return 0, alwaysError
+}
+
+func TestExecuteGivesExecError(t *testing.T) {
+	// First, a non-execution error shouldn't be an ExecError.
+	tmpl, err := New("X").Parse("hello")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ErrorWriter(0), 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	if err.Error() != alwaysErrorText {
+		t.Errorf("expected %q error; got %q", alwaysErrorText, err)
+	}
+	// This one should be an ExecError.
+	tmpl, err = New("X").Parse("hello, {{.X.Y}}")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tmpl.Execute(ioutil.Discard, 0)
+	if err == nil {
+		t.Fatal("expected error; got none")
+	}
+	_, ok := err.(template.ExecError)
+	if !ok {
+		t.Fatalf("expected an ExecError; got %v", err)
+	}
+	expect := "field X in type int"
+	if !strings.Contains(err.Error(), expect) {
+		t.Errorf("expected %q; got %q", expect, err)
+	}
+}
+
+func funcNameTestFunc() int {
+	return 0
+}
+
+func TestGoodFuncNames(t *testing.T) {
+	names := []string{
+		"_",
+		"a",
+		"a1",
+		"a1",
+		"Ӵ",
+	}
+	for _, name := range names {
+		tmpl := New("X").Funcs(
+			FuncMap{
+				name: funcNameTestFunc,
+			},
+		)
+		if tmpl == nil {
+			t.Fatalf("nil result for %q", name)
+		}
+	}
+}
+
+func TestBadFuncNames(t *testing.T) {
+	names := []string{
+		"",
+		"2",
+		"a-b",
+	}
+	for _, name := range names {
+		testBadFuncName(name, t)
+	}
+}
+
+func testBadFuncName(name string, t *testing.T) {
+	t.Helper()
+	defer func() {
+		recover()
+	}()
+	New("X").Funcs(
+		FuncMap{
+			name: funcNameTestFunc,
+		},
+	)
+	// If we get here, the name did not cause a panic, which is how Funcs
+	// reports an error.
+	t.Errorf("%q succeeded incorrectly as function name", name)
+}
+
+func TestBlock(t *testing.T) {
+	const (
+		input   = `a({{block "inner" .}}bar({{.}})baz{{end}})b`
+		want    = `a(bar(hello)baz)b`
+		overlay = `{{define "inner"}}foo({{.}})bar{{end}}`
+		want2   = `a(foo(goodbye)bar)b`
+	)
+	tmpl, err := New("outer").Parse(input)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpl2, err := Must(tmpl.Clone()).Parse(overlay)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, "hello"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+
+	buf.Reset()
+	if err := tmpl2.Execute(&buf, "goodbye"); err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want2 {
+		t.Errorf("got %q, want %q", got, want2)
+	}
+}
+
+func TestEvalFieldErrors(t *testing.T) {
+	tests := []struct {
+		name, src string
+		value     interface{}
+		want      string
+	}{
+		{
+			// Check that calling an invalid field on nil pointer
+			// prints a field error instead of a distracting nil
+			// pointer error. https://golang.org/issue/15125
+			"MissingFieldOnNil",
+			"{{.MissingField}}",
+			(*T)(nil),
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"MissingFieldOnNonNil",
+			"{{.MissingField}}",
+			&T{},
+			"can't evaluate field MissingField in type *template.T",
+		},
+		{
+			"ExistingFieldOnNil",
+			"{{.X}}",
+			(*T)(nil),
+			"nil pointer evaluating *template.T.X",
+		},
+		{
+			"MissingKeyOnNilMap",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnNilMapPtr",
+			"{{.MissingKey}}",
+			(*map[string]string)(nil),
+			"nil pointer evaluating *map[string]string.MissingKey",
+		},
+		{
+			"MissingKeyOnMapPtrToNil",
+			"{{.MissingKey}}",
+			&map[string]string{},
+			"<nil>",
+		},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tmpl := Must(New("tmpl").Parse(tc.src))
+			err := tmpl.Execute(ioutil.Discard, tc.value)
+			got := "<nil>"
+			if err != nil {
+				got = err.Error()
+			}
+			if !strings.HasSuffix(got, tc.want) {
+				t.Fatalf("got error %q, want %q", got, tc.want)
+			}
+		})
+	}
+}
+
+func TestMaxExecDepth(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in -short mode")
+	}
+	tmpl := Must(New("tmpl").Parse(`{{template "tmpl" .}}`))
+	err := tmpl.Execute(ioutil.Discard, nil)
+	got := "<nil>"
+	if err != nil {
+		got = err.Error()
+	}
+	const want = "exceeded maximum template depth"
+	if !strings.Contains(got, want) {
+		t.Errorf("got error %q; want %q", got, want)
+	}
+}
+
+func TestAddrOfIndex(t *testing.T) {
+	// golang.org/issue/14916.
+	// Before index worked on reflect.Values, the .String could not be
+	// found on the (incorrectly unaddressable) V value,
+	// in contrast to range, which worked fine.
+	// Also testing that passing a reflect.Value to tmpl.Execute works.
+	texts := []string{
+		`{{range .}}{{.String}}{{end}}`,
+		`{{with index . 0}}{{.String}}{{end}}`,
+	}
+	for _, text := range texts {
+		tmpl := Must(New("tmpl").Parse(text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, reflect.ValueOf([]V{{1}}))
+		if err != nil {
+			t.Fatalf("%s: Execute: %v", text, err)
+		}
+		if buf.String() != "&lt;1&gt;" {
+			t.Fatalf("%s: template output = %q, want %q", text, &buf, "&lt;1&gt;")
+		}
+	}
+}
+
+func TestInterfaceValues(t *testing.T) {
+	// golang.org/issue/17714.
+	// Before index worked on reflect.Values, interface values
+	// were always implicitly promoted to the underlying value,
+	// except that nil interfaces were promoted to the zero reflect.Value.
+	// Eliminating a round trip to interface{} and back to reflect.Value
+	// eliminated this promotion, breaking these cases.
+	tests := []struct {
+		text string
+		out  string
+	}{
+		{`{{index .Nil 1}}`, "ERROR: index of untyped nil"},
+		{`{{index .Slice 2}}`, "2"},
+		{`{{index .Slice .Two}}`, "2"},
+		{`{{call .Nil 1}}`, "ERROR: call of nil"},
+		{`{{call .PlusOne 1}}`, "2"},
+		{`{{call .PlusOne .One}}`, "2"},
+		{`{{and (index .Slice 0) true}}`, "0"},
+		{`{{and .Zero true}}`, "0"},
+		{`{{and (index .Slice 1) false}}`, "false"},
+		{`{{and .One false}}`, "false"},
+		{`{{or (index .Slice 0) false}}`, "false"},
+		{`{{or .Zero false}}`, "false"},
+		{`{{or (index .Slice 1) true}}`, "1"},
+		{`{{or .One true}}`, "1"},
+		{`{{not (index .Slice 0)}}`, "true"},
+		{`{{not .Zero}}`, "true"},
+		{`{{not (index .Slice 1)}}`, "false"},
+		{`{{not .One}}`, "false"},
+		{`{{eq (index .Slice 0) .Zero}}`, "true"},
+		{`{{eq (index .Slice 1) .One}}`, "true"},
+		{`{{ne (index .Slice 0) .Zero}}`, "false"},
+		{`{{ne (index .Slice 1) .One}}`, "false"},
+		{`{{ge (index .Slice 0) .One}}`, "false"},
+		{`{{ge (index .Slice 1) .Zero}}`, "true"},
+		{`{{gt (index .Slice 0) .One}}`, "false"},
+		{`{{gt (index .Slice 1) .Zero}}`, "true"},
+		{`{{le (index .Slice 0) .One}}`, "true"},
+		{`{{le (index .Slice 1) .Zero}}`, "false"},
+		{`{{lt (index .Slice 0) .One}}`, "true"},
+		{`{{lt (index .Slice 1) .Zero}}`, "false"},
+	}
+
+	for _, tt := range tests {
+		tmpl := Must(New("tmpl").Parse(tt.text))
+		var buf bytes.Buffer
+		err := tmpl.Execute(&buf, map[string]interface{}{
+			"PlusOne": func(n int) int {
+				return n + 1
+			},
+			"Slice": []int{0, 1, 2, 3},
+			"One":   1,
+			"Two":   2,
+			"Nil":   nil,
+			"Zero":  0,
+		})
+		if strings.HasPrefix(tt.out, "ERROR:") {
+			e := strings.TrimSpace(strings.TrimPrefix(tt.out, "ERROR:"))
+			if err == nil || !strings.Contains(err.Error(), e) {
+				t.Errorf("%s: Execute: %v, want error %q", tt.text, err, e)
+			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("%s: Execute: %v", tt.text, err)
+			continue
+		}
+		if buf.String() != tt.out {
+			t.Errorf("%s: template output = %q, want %q", tt.text, &buf, tt.out)
+		}
+	}
+}
+
+// Check that panics during calls are recovered and returned as errors.
+func TestExecutePanicDuringCall(t *testing.T) {
+	funcs := map[string]interface{}{
+		"doPanic": func() string {
+			panic("custom panic string")
+		},
+	}
+	tests := []struct {
+		name    string
+		input   string
+		data    interface{}
+		wantErr string
+	}{
+		{
+			"direct func call panics",
+			"{{doPanic}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"indirect func call panics",
+			"{{call doPanic}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <doPanic>: error calling doPanic: custom panic string`,
+		},
+		{
+			"direct method call panics",
+			"{{.GetU}}", (*T)(nil),
+			`template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"indirect method call panics",
+			"{{call .GetU}}", (*T)(nil),
+			`template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`,
+		},
+		{
+			"func field call panics",
+			"{{call .PanicFunc}}", tVal,
+			`template: t:1:2: executing "t" at <call .PanicFunc>: error calling call: test panic`,
+		},
+		{
+			"method call on nil interface",
+			"{{.NonEmptyInterfaceNil.Method0}}", tVal,
+			`template: t:1:23: executing "t" at <.NonEmptyInterfaceNil.Method0>: nil pointer evaluating template.I.Method0`,
+		},
+	}
+	for _, tc := range tests {
+		b := new(bytes.Buffer)
+		tmpl, err := New("t").Funcs(funcs).Parse(tc.input)
+		if err != nil {
+			t.Fatalf("parse error: %s", err)
+		}
+		err = tmpl.Execute(b, tc.data)
+		if err == nil {
+			t.Errorf("%s: expected error; got none", tc.name)
+		} else if !strings.Contains(err.Error(), tc.wantErr) {
+			if *debug {
+				fmt.Printf("%s: test execute error: %s\n", tc.name, err)
+			}
+			t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err)
+		}
+	}
+}
+
+// Issue 31810. Check that a parenthesized first argument behaves properly.
+func TestIssue31810(t *testing.T) {
+	t.Skip("broken in html/template")
+
+	// A simple value with no arguments is fine.
+	var b bytes.Buffer
+	const text = "{{ (.)  }}"
+	tmpl, err := New("").Parse(text)
+	if err != nil {
+		t.Error(err)
+	}
+	err = tmpl.Execute(&b, "result")
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", text, b.String(), "result")
+	}
+
+	// Even a plain function fails; you need to use the call builtin.
+	f := func() string { return "result" }
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err == nil {
+		t.Error("expected error with no call, got none")
+	}
+
+	// Works if the function is explicitly called.
+	const textCall = "{{ (call .)  }}"
+	tmpl, err = New("").Parse(textCall)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b.Reset()
+	err = tmpl.Execute(&b, f)
+	if err != nil {
+		t.Error(err)
+	}
+	if b.String() != "result" {
+		t.Errorf("%s got %q, expected %q", textCall, b.String(), "result")
+	}
+}
+
+// Issue 39807. There was a race applying escapeTemplate.
+
+const raceText = `
+{{- define "jstempl" -}}
+var v = "v";
+{{- end -}}
+<script type="application/javascript">
+{{ template "jstempl" $ }}
+</script>
+`
+
+func TestEscapeRace(t *testing.T) {
+	tmpl := New("")
+	_, err := tmpl.New("templ.html").Parse(raceText)
+	if err != nil {
+		t.Fatal(err)
+	}
+	const count = 20
+	for i := 0; i < count; i++ {
+		_, err := tmpl.New(fmt.Sprintf("x%d.html", i)).Parse(`{{ template "templ.html" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < count; j++ {
+				sub := tmpl.Lookup(fmt.Sprintf("x%d.html", j))
+				if err := sub.Execute(ioutil.Discard, nil); err != nil {
+					t.Error(err)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestRecursiveExecute(t *testing.T) {
+	tmpl := New("")
+
+	recur := func() (HTML, error) {
+		var sb strings.Builder
+		if err := tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
+			t.Fatal(err)
+		}
+		return HTML(sb.String()), nil
+	}
+
+	m := FuncMap{
+		"recur": recur,
+	}
+
+	top, err := tmpl.New("x.html").Funcs(m).Parse(`{{recur}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := top.Execute(ioutil.Discard, nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// recursiveInvoker is for TestRecursiveExecuteViaMethod.
+type recursiveInvoker struct {
+	t    *testing.T
+	tmpl *Template
+}
+
+func (r *recursiveInvoker) Recur() (string, error) {
+	var sb strings.Builder
+	if err := r.tmpl.ExecuteTemplate(&sb, "subroutine", nil); err != nil {
+		r.t.Fatal(err)
+	}
+	return sb.String(), nil
+}
+
+func TestRecursiveExecuteViaMethod(t *testing.T) {
+	tmpl := New("")
+	top, err := tmpl.New("x.html").Parse(`{{.Recur}}`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = tmpl.New("subroutine").Parse(`<a href="/x?p={{"'a<b'"}}">`)
+	if err != nil {
+		t.Fatal(err)
+	}
+	r := &recursiveInvoker{
+		t:    t,
+		tmpl: tmpl,
+	}
+	if err := top.Execute(ioutil.Discard, r); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Issue 43295.
+func TestTemplateFuncsAfterClone(t *testing.T) {
+	s := `{{ f . }}`
+	want := "test"
+	orig := New("orig").Funcs(map[string]interface{}{
+		"f": func(in string) string {
+			return in
+		},
+	}).New("child")
+
+	overviewTmpl := Must(Must(orig.Clone()).Parse(s))
+	var out strings.Builder
+	if err := overviewTmpl.Execute(&out, want); err != nil {
+		t.Fatal(err)
+	}
+	if got := out.String(); got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
diff --git a/internal/backport/html/template/html.go b/internal/backport/html/template/html.go
new file mode 100644
index 0000000..356b829
--- /dev/null
+++ b/internal/backport/html/template/html.go
@@ -0,0 +1,265 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+// htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
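+// For example, a space is escaped to "&#32;" so the value cannot end the
+// attribute early: "a b" becomes "a&#32;b".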
+func htmlNospaceEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+	}
+	return htmlReplacer(s, htmlNospaceReplacementTable, false)
+}
+
+// attrEscaper escapes for inclusion in quoted attribute values.
+func attrEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(stripTags(s), htmlNormReplacementTable, true)
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// rcdataEscaper escapes for inclusion in an RCDATA element body.
+func rcdataEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return htmlReplacer(s, htmlNormReplacementTable, true)
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlEscaper escapes for inclusion in HTML text.
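+// For example, the text "1 < 2 & 3" becomes "1 &lt; 2 &amp; 3", while values
+// that are already typed HTML are passed through unchanged.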
+func htmlEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTML {
+		return s
+	}
+	return htmlReplacer(s, htmlReplacementTable, true)
+}
+
+// htmlReplacementTable contains the runes that need to be escaped
+// inside a quoted attribute value or in a text node.
+var htmlReplacementTable = []string{
+	// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
+	// U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
+	// CHARACTER character to the current attribute's value.
+	// "
+	// and similarly
+	// https://www.w3.org/TR/html5/syntax.html#before-attribute-value-state
+	0:    "\uFFFD",
+	'"':  "&#34;",
+	'&':  "&amp;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'>':  "&gt;",
+}
+
+// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
+// avoid over-encoding existing entities.
+var htmlNormReplacementTable = []string{
+	0:    "\uFFFD",
+	'"':  "&#34;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'>':  "&gt;",
+}
+
+// htmlNospaceReplacementTable contains the runes that need to be escaped
+// inside an unquoted attribute value.
+// The set of runes escaped is the union of the HTML specials and
+// those determined by running the JS below in browsers:
+// <div id=d></div>
+// <script>(function () {
+// var a = [], d = document.getElementById("d"), i, c, s;
+// for (i = 0; i < 0x10000; ++i) {
+//   c = String.fromCharCode(i);
+//   d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
+//   s = d.getElementsByTagName("SPAN")[0];
+//   if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
+// }
+// document.write(a.join(", "));
+// })()</script>
+var htmlNospaceReplacementTable = []string{
+	0:    "&#xfffd;",
+	'\t': "&#9;",
+	'\n': "&#10;",
+	'\v': "&#11;",
+	'\f': "&#12;",
+	'\r': "&#13;",
+	' ':  "&#32;",
+	'"':  "&#34;",
+	'&':  "&amp;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'=':  "&#61;",
+	'>':  "&gt;",
+	// A parse error in the attribute value (unquoted) and
+	// before attribute value states.
+	// Treated as a quoting character by IE.
+	'`': "&#96;",
+}
+
+// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
+// without '&' to avoid over-encoding existing entities.
+var htmlNospaceNormReplacementTable = []string{
+	0:    "&#xfffd;",
+	'\t': "&#9;",
+	'\n': "&#10;",
+	'\v': "&#11;",
+	'\f': "&#12;",
+	'\r': "&#13;",
+	' ':  "&#32;",
+	'"':  "&#34;",
+	'\'': "&#39;",
+	'+':  "&#43;",
+	'<':  "&lt;",
+	'=':  "&#61;",
+	'>':  "&gt;",
+	// A parse error in the attribute value (unquoted) and
+	// before attribute value states.
+	// Treated as a quoting character by IE.
+	'`': "&#96;",
+}
+
+// htmlReplacer returns s with runes replaced according to replacementTable.
+// When badRunes is true, certain bad runes are allowed through unescaped.
+func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
+	written, b := 0, new(strings.Builder)
+	r, w := rune(0), 0
+	for i := 0; i < len(s); i += w {
+		// Cannot use 'for range s' because we need to preserve the width
+		// of the runes in the input. If we see a decoding error, the input
+		// width will not be utf8.RuneLen(r) and we will overrun the buffer.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		if int(r) < len(replacementTable) {
+			if repl := replacementTable[r]; len(repl) != 0 {
+				if written == 0 {
+					b.Grow(len(s))
+				}
+				b.WriteString(s[written:i])
+				b.WriteString(repl)
+				written = i + w
+			}
+		} else if badRunes {
+			// No-op.
+			// IE does not allow these ranges in unquoted attrs.
+		} else if 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff {
+			if written == 0 {
+				b.Grow(len(s))
+			}
+			fmt.Fprintf(b, "%s&#x%x;", s[written:i], r)
+			written = i + w
+		}
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
+
+// stripTags takes a snippet of HTML and returns only the text content.
+// For example, `<b>&iexcl;Hi!</b> <script>...</script>` -> `&iexcl;Hi! `.
+func stripTags(html string) string {
+	var b bytes.Buffer
+	s, c, i, allText := []byte(html), context{}, 0, true
+	// Using the transition funcs helps us avoid mangling
+	// `<div title="1>2">` or `I <3 Ponies!`.
+	for i != len(s) {
+		if c.delim == delimNone {
+			st := c.state
+			// Use RCDATA instead of parsing into JS or CSS styles.
+			if c.element != elementNone && !isInTag(st) {
+				st = stateRCDATA
+			}
+			d, nread := transitionFunc[st](c, s[i:])
+			i1 := i + nread
+			if c.state == stateText || c.state == stateRCDATA {
+				// Emit text up to the start of the tag or comment.
+				j := i1
+				if d.state != c.state {
+					for j1 := j - 1; j1 >= i; j1-- {
+						if s[j1] == '<' {
+							j = j1
+							break
+						}
+					}
+				}
+				b.Write(s[i:j])
+			} else {
+				allText = false
+			}
+			c, i = d, i1
+			continue
+		}
+		i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
+		if i1 < i {
+			break
+		}
+		if c.delim != delimSpaceOrTagEnd {
+			// Consume any quote.
+			i1++
+		}
+		c, i = context{state: stateTag, element: c.element}, i1
+	}
+	if allText {
+		return html
+	} else if c.state == stateText || c.state == stateRCDATA {
+		b.Write(s[i:])
+	}
+	return b.String()
+}
+
+// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
+// a known-safe HTML attribute.
+func htmlNameFilter(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeHTMLAttr {
+		return s
+	}
+	if len(s) == 0 {
+		// Avoid violation of structure preservation.
+		// <input checked {{.K}}={{.V}}>.
+		// Without this, if .K is empty then .V is the value of
+		// checked, but otherwise .V is the value of the attribute
+		// named .K.
+		return filterFailsafe
+	}
+	s = strings.ToLower(s)
+	if t := attrType(s); t != contentTypePlain {
+		// TODO: Split attr and element name part filters so we can recognize known attributes.
+		return filterFailsafe
+	}
+	for _, r := range s {
+		switch {
+		case '0' <= r && r <= '9':
+		case 'a' <= r && r <= 'z':
+		default:
+			return filterFailsafe
+		}
+	}
+	return s
+}
+
+// commentEscaper returns the empty string regardless of input.
+// Comment content does not correspond to any parsed structure or
+// human-readable content, so the simplest and most secure policy is to drop
+// content interpolated into comments.
+// This approach is equally valid whether or not static comment content is
+// removed from the template.
+func commentEscaper(args ...interface{}) string {
+	return ""
+}
diff --git a/internal/backport/html/template/html_test.go b/internal/backport/html/template/html_test.go
new file mode 100644
index 0000000..f04ee04
--- /dev/null
+++ b/internal/backport/html/template/html_test.go
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"html"
+	"strings"
+	"testing"
+)
+
+func TestHTMLNospaceEscaper(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\ufdec\U0001D11E" +
+		"erroneous\x960") // keep at the end
+
+	want := ("&#xfffd;\x01\x02\x03\x04\x05\x06\x07" +
+		"\x08&#9;&#10;&#11;&#12;&#13;\x0E\x0F" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17" +
+		"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		`&#32;!&#34;#$%&amp;&#39;()*&#43;,-./` +
+		`0123456789:;&lt;&#61;&gt;?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		`&#96;abcdefghijklmno` +
+		`pqrstuvwxyz{|}~` + "\u007f" +
+		"\u00A0\u0100\u2028\u2029\ufeff&#xfdec;\U0001D11E" +
+		"erroneous&#xfffd;0") // keep at the end
+
+	got := htmlNospaceEscaper(input)
+	if got != want {
+		t.Errorf("encode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+
+	r := strings.NewReplacer("\x00", "\ufffd", "\x96", "\ufffd")
+	got, want = html.UnescapeString(got), r.Replace(input)
+	if want != got {
+		t.Errorf("decode: want\n\t%q\nbut got\n\t%q", want, got)
+	}
+}
+
+func TestStripTags(t *testing.T) {
+	tests := []struct {
+		input, want string
+	}{
+		{"", ""},
+		{"Hello, World!", "Hello, World!"},
+		{"foo&amp;bar", "foo&amp;bar"},
+		{`Hello <a href="www.example.com/">World</a>!`, "Hello World!"},
+		{"Foo <textarea>Bar</textarea> Baz", "Foo Bar Baz"},
+		{"Foo <!-- Bar --> Baz", "Foo  Baz"},
+		{"<", "<"},
+		{"foo < bar", "foo < bar"},
+		{`Foo<script type="text/javascript">alert(1337)</script>Bar`, "FooBar"},
+		{`Foo<div title="1>2">Bar`, "FooBar"},
+		{`I <3 Ponies!`, `I <3 Ponies!`},
+		{`<script>foo()</script>`, ``},
+	}
+
+	for _, test := range tests {
+		if got := stripTags(test.input); got != test.want {
+			t.Errorf("%q: want %q, got %q", test.input, test.want, got)
+		}
+	}
+}
+
+func BenchmarkHTMLNospaceEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		htmlNospaceEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkHTMLNospaceEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		htmlNospaceEscaper("The_quick,_brown_fox_jumps_over_the_lazy_dog.")
+	}
+}
+
+func BenchmarkStripTags(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stripTags("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkStripTagsNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stripTags("The quick, brown fox jumps over the lazy dog.")
+	}
+}
diff --git a/internal/backport/html/template/js.go b/internal/backport/html/template/js.go
new file mode 100644
index 0000000..ea9c183
--- /dev/null
+++ b/internal/backport/html/template/js.go
@@ -0,0 +1,431 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// nextJSCtx returns the context that determines whether a slash after the
+// given run of tokens starts a regular expression instead of a division
+// operator: / or /=.
+//
+// This assumes that the token run does not include any string tokens, comment
+// tokens, regular expression literal tokens, or division operators.
+//
+// This fails on some valid but nonsensical JavaScript programs like
+// "x = ++/foo/i", which is quite different from "x++/foo/i", but it is not
+// known to fail on any useful programs. It is based on the draft
+// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+// https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
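+//
+// For example, a trailing "return" or "{" yields jsCtxRegexp, while a
+// trailing identifier or numeric literal such as "x" or "0" yields
+// jsCtxDivOp (see TestNextJsCtx for more cases).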
+func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+	s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
+	if len(s) == 0 {
+		return preceding
+	}
+
+	// All cases below are in the single-byte UTF-8 group.
+	switch c, n := s[len(s)-1], len(s); c {
+	case '+', '-':
+		// ++ and -- are not regexp preceders, but + and - are whether
+		// they are used as infix or prefix operators.
+		start := n - 1
+		// Count the number of adjacent dashes or pluses.
+		for start > 0 && s[start-1] == c {
+			start--
+		}
+		if (n-start)&1 == 1 {
+			// Reached for trailing minus signs since "---" is the
+			// same as "-- -".
+			return jsCtxRegexp
+		}
+		return jsCtxDivOp
+	case '.':
+		// Handle "42."
+		if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
+			return jsCtxDivOp
+		}
+		return jsCtxRegexp
+	// Suffixes for all punctuators from section 7.7 of the language spec
+	// that only end binary operators not handled above.
+	case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':
+		return jsCtxRegexp
+	// Suffixes for all punctuators from section 7.7 of the language spec
+	// that are prefix operators not handled above.
+	case '!', '~':
+		return jsCtxRegexp
+	// Matches all the punctuators from section 7.7 of the language spec
+	// that are open brackets not handled above.
+	case '(', '[':
+		return jsCtxRegexp
+	// Matches all the punctuators from section 7.7 of the language spec
+	// that precede expression starts.
+	case ':', ';', '{':
+		return jsCtxRegexp
+	// CAVEAT: the close punctuators ('}', ']', ')') precede div ops and
+	// are handled in the default except for '}' which can precede a
+	// division op as in
+	//    ({ valueOf: function () { return 42 } } / 2
+	// which is valid, but, in practice, developers don't divide object
+	// literals, so our heuristic works well for code like
+	//    function () { ... }  /foo/.test(x) && sideEffect();
+	// The ')' punctuator can precede a regular expression as in
+	//     if (b) /foo/.test(x) && ...
+	// but this is much less likely than
+	//     (a + b) / c
+	case '}':
+		return jsCtxRegexp
+	default:
+		// Look for an IdentifierName and see if it is a keyword that
+		// can precede a regular expression.
+		j := n
+		for j > 0 && isJSIdentPart(rune(s[j-1])) {
+			j--
+		}
+		if regexpPrecederKeywords[string(s[j:])] {
+			return jsCtxRegexp
+		}
+	}
+	// Otherwise is a punctuator not listed above, or
+	// a string which precedes a div op, or an identifier
+	// which precedes a div op.
+	return jsCtxDivOp
+}
+
+// regexpPrecederKeywords is a set of reserved JS keywords that can precede a
+// regular expression in JS source.
+var regexpPrecederKeywords = map[string]bool{
+	"break":      true,
+	"case":       true,
+	"continue":   true,
+	"delete":     true,
+	"do":         true,
+	"else":       true,
+	"finally":    true,
+	"in":         true,
+	"instanceof": true,
+	"return":     true,
+	"throw":      true,
+	"try":        true,
+	"typeof":     true,
+	"void":       true,
+}
+
+var jsonMarshalType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+
+// indirectToJSONMarshaler returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of json.Marshaler.
+func indirectToJSONMarshaler(a interface{}) interface{} {
+	// text/template now supports passing untyped nil as a func call
+	// argument, so we must support it. Otherwise we'd panic below, as one
+	// cannot call the Type or Interface methods on an invalid
+	// reflect.Value. See golang.org/issue/18716.
+	if a == nil {
+		return nil
+	}
+
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(jsonMarshalType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
+// no side effects and no free variables other than NaN and Infinity.
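+//
+// For example, the int 42 renders as " 42 " (padded with spaces so the
+// output cannot run into an adjacent identifier or numeric literal), and
+// the string "foo" renders as `"foo"` (see TestJSValEscaper).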
+func jsValEscaper(args ...interface{}) string {
+	var a interface{}
+	if len(args) == 1 {
+		a = indirectToJSONMarshaler(args[0])
+		switch t := a.(type) {
+		case JS:
+			return string(t)
+		case JSStr:
+			// TODO: normalize quotes.
+			return `"` + string(t) + `"`
+		case json.Marshaler:
+			// Do not treat as a Stringer.
+		case fmt.Stringer:
+			a = t.String()
+		}
+	} else {
+		for i, arg := range args {
+			args[i] = indirectToJSONMarshaler(arg)
+		}
+		a = fmt.Sprint(args...)
+	}
+	// TODO: detect cycles before calling Marshal which loops infinitely on
+	// cyclic data. This may be an unacceptable DoS risk.
+	b, err := json.Marshal(a)
+	if err != nil {
+		// Put a space before comment so that if it is flush against
+		// a division operator it is not turned into a line comment:
+		//     x/{{y}}
+		// turning into
+		//     x//* error marshaling y:
+		//          second line of error message */null
+		return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
+	}
+
+	// TODO: maybe post-process output to prevent it from containing
+	// "<!--", "-->", "<![CDATA[", "]]>", or "</script"
+	// in case custom marshalers produce output containing those.
+	// Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
+	// supports ld+json content-type.
+	if len(b) == 0 {
+		// In `x=y/{{.}}*z`, a json.Marshaler that produces "" should
+		// not cause the output `x=y/*z`.
+		return " null "
+	}
+	first, _ := utf8.DecodeRune(b)
+	last, _ := utf8.DecodeLastRune(b)
+	var buf strings.Builder
+	// Prevent IdentifierNames and NumericLiterals from running into
+	// keywords: in, instanceof, typeof, void
+	pad := isJSIdentPart(first) || isJSIdentPart(last)
+	if pad {
+		buf.WriteByte(' ')
+	}
+	written := 0
+	// Make sure that json.Marshal escapes codepoints U+2028 & U+2029
+	// so it falls within the subset of JSON which is valid JS.
+	for i := 0; i < len(b); {
+		rune, n := utf8.DecodeRune(b[i:])
+		repl := ""
+		if rune == 0x2028 {
+			repl = `\u2028`
+		} else if rune == 0x2029 {
+			repl = `\u2029`
+		}
+		if repl != "" {
+			buf.Write(b[written:i])
+			buf.WriteString(repl)
+			written = i + n
+		}
+		i += n
+	}
+	if buf.Len() != 0 {
+		buf.Write(b[written:])
+		if pad {
+			buf.WriteByte(' ')
+		}
+		return buf.String()
+	}
+	return string(b)
+}
+
+// jsStrEscaper produces a string that can be included between quotes in
+// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
+// or in an HTML5 event handler attribute such as onclick.
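+// For example, "</script>" escapes to `\u003c\/script\u003e`, so the result
+// cannot close an enclosing <script> element (see TestJSStrEscaper).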
+func jsStrEscaper(args ...interface{}) string {
+	s, t := stringify(args...)
+	if t == contentTypeJSStr {
+		return replace(s, jsStrNormReplacementTable)
+	}
+	return replace(s, jsStrReplacementTable)
+}
+
+// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
+// specials so the result is treated literally when included in a regular
+// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
+// the literal text of {{.X}} followed by the string "bar".
+func jsRegexpEscaper(args ...interface{}) string {
+	s, _ := stringify(args...)
+	s = replace(s, jsRegexpReplacementTable)
+	if s == "" {
+		// /{{.X}}/ should not produce a line comment when .X == "".
+		return "(?:)"
+	}
+	return s
+}
+
+// replace replaces each rune r of s with replacementTable[r], provided that
+// r < len(replacementTable). If replacementTable[r] is the empty string then
+// no replacement is made.
+// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
+// `\u2029`.
+func replace(s string, replacementTable []string) string {
+	var b strings.Builder
+	r, w, written := rune(0), 0, 0
+	for i := 0; i < len(s); i += w {
+		// See comment in htmlEscaper.
+		r, w = utf8.DecodeRuneInString(s[i:])
+		var repl string
+		switch {
+		case int(r) < len(lowUnicodeReplacementTable):
+			repl = lowUnicodeReplacementTable[r]
+		case int(r) < len(replacementTable) && replacementTable[r] != "":
+			repl = replacementTable[r]
+		case r == '\u2028':
+			repl = `\u2028`
+		case r == '\u2029':
+			repl = `\u2029`
+		default:
+			continue
+		}
+		if written == 0 {
+			b.Grow(len(s))
+		}
+		b.WriteString(s[written:i])
+		b.WriteString(repl)
+		written = i + w
+	}
+	if written == 0 {
+		return s
+	}
+	b.WriteString(s[written:])
+	return b.String()
+}
+
+var lowUnicodeReplacementTable = []string{
+	0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
+	'\a': `\u0007`,
+	'\b': `\u0008`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	0xe:  `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
+	0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
+	0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
+}
+
+var jsStrReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'+':  `\u002b`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+	'\\': `\\`,
+}
+
+// jsStrNormReplacementTable is like jsStrReplacementTable but does not
+// overencode existing escapes since this table has no entry for `\`.
+var jsStrNormReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'+':  `\u002b`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+}
+var jsRegexpReplacementTable = []string{
+	0:    `\u0000`,
+	'\t': `\t`,
+	'\n': `\n`,
+	'\v': `\u000b`, // "\v" == "v" on IE 6.
+	'\f': `\f`,
+	'\r': `\r`,
+	// Encode HTML specials as hex so the output can be embedded
+	// in HTML attributes without further encoding.
+	'"':  `\u0022`,
+	'$':  `\$`,
+	'&':  `\u0026`,
+	'\'': `\u0027`,
+	'(':  `\(`,
+	')':  `\)`,
+	'*':  `\*`,
+	'+':  `\u002b`,
+	'-':  `\-`,
+	'.':  `\.`,
+	'/':  `\/`,
+	'<':  `\u003c`,
+	'>':  `\u003e`,
+	'?':  `\?`,
+	'[':  `\[`,
+	'\\': `\\`,
+	']':  `\]`,
+	'^':  `\^`,
+	'{':  `\{`,
+	'|':  `\|`,
+	'}':  `\}`,
+}
+
+// isJSIdentPart reports whether the given rune is a JS identifier part.
+// It does not handle all the non-Latin letters, joiners, and combining marks,
+// but it does handle every codepoint that can occur in a numeric literal or
+// a keyword.
+func isJSIdentPart(r rune) bool {
+	switch {
+	case r == '$':
+		return true
+	case '0' <= r && r <= '9':
+		return true
+	case 'A' <= r && r <= 'Z':
+		return true
+	case r == '_':
+		return true
+	case 'a' <= r && r <= 'z':
+		return true
+	}
+	return false
+}
+
+// isJSType reports whether the given MIME type should be considered JavaScript.
+//
+// It is used to determine whether a script tag with a type attribute is a JavaScript container.
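+//
+// For example, "text/javascript" and "application/json;charset=utf-8" are
+// treated as JavaScript, while "text/html" is not.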
+func isJSType(mimeType string) bool {
+	// per
+	//   https://www.w3.org/TR/html5/scripting-1.html#attr-script-type
+	//   https://tools.ietf.org/html/rfc7231#section-3.1.1
+	//   https://tools.ietf.org/html/rfc4329#section-3
+	//   https://www.ietf.org/rfc/rfc4627.txt
+	// discard parameters
+	if i := strings.Index(mimeType, ";"); i >= 0 {
+		mimeType = mimeType[:i]
+	}
+	mimeType = strings.ToLower(mimeType)
+	mimeType = strings.TrimSpace(mimeType)
+	switch mimeType {
+	case
+		"application/ecmascript",
+		"application/javascript",
+		"application/json",
+		"application/ld+json",
+		"application/x-ecmascript",
+		"application/x-javascript",
+		"module",
+		"text/ecmascript",
+		"text/javascript",
+		"text/javascript1.0",
+		"text/javascript1.1",
+		"text/javascript1.2",
+		"text/javascript1.3",
+		"text/javascript1.4",
+		"text/javascript1.5",
+		"text/jscript",
+		"text/livescript",
+		"text/x-ecmascript",
+		"text/x-javascript":
+		return true
+	default:
+		return false
+	}
+}
diff --git a/internal/backport/html/template/js_test.go b/internal/backport/html/template/js_test.go
new file mode 100644
index 0000000..d7ee47b
--- /dev/null
+++ b/internal/backport/html/template/js_test.go
@@ -0,0 +1,423 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"bytes"
+	"math"
+	"strings"
+	"testing"
+)
+
+func TestNextJsCtx(t *testing.T) {
+	tests := []struct {
+		jsCtx jsCtx
+		s     string
+	}{
+		// Statement terminators precede regexps.
+		{jsCtxRegexp, ";"},
+		// This is not airtight.
+		//     ({ valueOf: function () { return 1 } } / 2)
+		// is valid JavaScript but in practice, devs do not do this.
+		// A block followed by a statement starting with a RegExp is
+		// much more common:
+		//     while (x) {...} /foo/.test(x) || panic()
+		{jsCtxRegexp, "}"},
+		// But member, call, grouping, and array expression terminators
+		// precede div ops.
+		{jsCtxDivOp, ")"},
+		{jsCtxDivOp, "]"},
+		// At the start of a primary expression, array, or expression
+		// statement, expect a regexp.
+		{jsCtxRegexp, "("},
+		{jsCtxRegexp, "["},
+		{jsCtxRegexp, "{"},
+		// Assignment operators precede regexps as do all exclusively
+		// prefix and binary operators.
+		{jsCtxRegexp, "="},
+		{jsCtxRegexp, "+="},
+		{jsCtxRegexp, "*="},
+		{jsCtxRegexp, "*"},
+		{jsCtxRegexp, "!"},
+		// Whether the + or - is infix or prefix, it cannot precede a
+		// div op.
+		{jsCtxRegexp, "+"},
+		{jsCtxRegexp, "-"},
+		// An incr/decr op precedes a div operator.
+		// This is not airtight. In (g = ++/h/i) a regexp follows a
+		// pre-increment operator, but in practice devs do not try to
+		// increment or decrement regular expressions.
+		// (g++/h/i) where ++ is a postfix operator on g is much more
+		// common.
+		{jsCtxDivOp, "--"},
+		{jsCtxDivOp, "++"},
+		{jsCtxDivOp, "x--"},
+		// When we have many dashes or pluses, then they are grouped
+		// left to right.
+		{jsCtxRegexp, "x---"}, // A postfix -- then a -.
+		// return followed by a slash returns the regexp literal or the
+		// slash starts a regexp literal in an expression statement that
+		// is dead code.
+		{jsCtxRegexp, "return"},
+		{jsCtxRegexp, "return "},
+		{jsCtxRegexp, "return\t"},
+		{jsCtxRegexp, "return\n"},
+		{jsCtxRegexp, "return\u2028"},
+		// Identifiers can be divided and cannot validly be preceded by
+		// a regular expressions. Semicolon insertion cannot happen
+		// between an identifier and a regular expression on a new line
+		// because the one token lookahead for semicolon insertion has
+		// to conclude that it could be a div binary op and treat it as
+		// such.
+		{jsCtxDivOp, "x"},
+		{jsCtxDivOp, "x "},
+		{jsCtxDivOp, "x\t"},
+		{jsCtxDivOp, "x\n"},
+		{jsCtxDivOp, "x\u2028"},
+		{jsCtxDivOp, "preturn"},
+		// Numbers precede div ops.
+		{jsCtxDivOp, "0"},
+		// Dots that are part of a number are div preceders.
+		{jsCtxDivOp, "0."},
+	}
+
+	for _, test := range tests {
+		if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+			t.Errorf("want %s got %q", test.jsCtx, test.s)
+		}
+		if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+			t.Errorf("want %s got %q", test.jsCtx, test.s)
+		}
+	}
+
+	if nextJSCtx([]byte("   "), jsCtxRegexp) != jsCtxRegexp {
+		t.Error("Blank tokens")
+	}
+
+	if nextJSCtx([]byte("   "), jsCtxDivOp) != jsCtxDivOp {
+		t.Error("Blank tokens")
+	}
+}
+
+func TestJSValEscaper(t *testing.T) {
+	tests := []struct {
+		x  interface{}
+		js string
+	}{
+		{int(42), " 42 "},
+		{uint(42), " 42 "},
+		{int16(42), " 42 "},
+		{uint16(42), " 42 "},
+		{int32(-42), " -42 "},
+		{uint32(42), " 42 "},
+		{int16(-42), " -42 "},
+		{uint16(42), " 42 "},
+		{int64(-42), " -42 "},
+		{uint64(42), " 42 "},
+		{uint64(1) << 53, " 9007199254740992 "},
+		// ulp(1 << 53) > 1 so this loses precision in JS
+		// but it is still a representable integer literal.
+		{uint64(1)<<53 + 1, " 9007199254740993 "},
+		{float32(1.0), " 1 "},
+		{float32(-1.0), " -1 "},
+		{float32(0.5), " 0.5 "},
+		{float32(-0.5), " -0.5 "},
+		{float32(1.0) / float32(256), " 0.00390625 "},
+		{float32(0), " 0 "},
+		{math.Copysign(0, -1), " -0 "},
+		{float64(1.0), " 1 "},
+		{float64(-1.0), " -1 "},
+		{float64(0.5), " 0.5 "},
+		{float64(-0.5), " -0.5 "},
+		{float64(0), " 0 "},
+		{math.Copysign(0, -1), " -0 "},
+		{"", `""`},
+		{"foo", `"foo"`},
+		// Newlines.
+		{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+		// "\v" == "v" on IE 6 so use "\u000b" instead.
+		{"\t\x0b", `"\t\u000b"`},
+		{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+		{[]interface{}{}, "[]"},
+		{[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+		{[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+		{"<!--", `"\u003c!--"`},
+		{"-->", `"--\u003e"`},
+		{"<![CDATA[", `"\u003c![CDATA["`},
+		{"]]>", `"]]\u003e"`},
+		{"</script", `"\u003c/script"`},
+		{"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+		{nil, " null "},
+	}
+
+	for _, test := range tests {
+		if js := jsValEscaper(test.x); js != test.js {
+			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+		}
+		// Make sure that escaping corner cases are not broken
+		// by nesting.
+		a := []interface{}{test.x}
+		want := "[" + strings.TrimSpace(test.js) + "]"
+		if js := jsValEscaper(a); js != want {
+			t.Errorf("%+v: want\n\t%q\ngot\n\t%q", a, want, js)
+		}
+	}
+}
+
+func TestJSStrEscaper(t *testing.T) {
+	tests := []struct {
+		x   interface{}
+		esc string
+	}{
+		{"", ``},
+		{"foo", `foo`},
+		{"\u0000", `\u0000`},
+		{"\t", `\t`},
+		{"\n", `\n`},
+		{"\r", `\r`},
+		{"\u2028", `\u2028`},
+		{"\u2029", `\u2029`},
+		{"\\", `\\`},
+		{"\\n", `\\n`},
+		{"foo\r\nbar", `foo\r\nbar`},
+		// Preserve attribute boundaries.
+		{`"`, `\u0022`},
+		{`'`, `\u0027`},
+		// Allow embedding in HTML without further escaping.
+		{`&amp;`, `\u0026amp;`},
+		// Prevent breaking out of text node and element boundaries.
+		{"</script>", `\u003c\/script\u003e`},
+		{"<![CDATA[", `\u003c![CDATA[`},
+		{"]]>", `]]\u003e`},
+		// https://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
+		//   "The text in style, script, title, and textarea elements
+		//   must not have an escaping text span start that is not
+		//   followed by an escaping text span end."
+		// Furthermore, spoofing an escaping text span end could lead
+		// to different interpretation of a </script> sequence otherwise
+		// masked by the escaping text span, and spoofing a start could
+		// allow regular text content to be interpreted as script
+		// allowing script execution via a combination of a JS string
+		// injection followed by an HTML text injection.
+		{"<!--", `\u003c!--`},
+		{"-->", `--\u003e`},
+		// From https://code.google.com/p/doctype/wiki/ArticleUtf7
+		{"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
+			`\u002bADw-script\u002bAD4-alert(1)\u002bADw-\/script\u002bAD4-`,
+		},
+		// Invalid UTF-8 sequence
+		{"foo\xA0bar", "foo\xA0bar"},
+		// Invalid unicode scalar value.
+		{"foo\xed\xa0\x80bar", "foo\xed\xa0\x80bar"},
+	}
+
+	for _, test := range tests {
+		esc := jsStrEscaper(test.x)
+		if esc != test.esc {
+			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+		}
+	}
+}
+
+func TestJSRegexpEscaper(t *testing.T) {
+	tests := []struct {
+		x   interface{}
+		esc string
+	}{
+		{"", `(?:)`},
+		{"foo", `foo`},
+		{"\u0000", `\u0000`},
+		{"\t", `\t`},
+		{"\n", `\n`},
+		{"\r", `\r`},
+		{"\u2028", `\u2028`},
+		{"\u2029", `\u2029`},
+		{"\\", `\\`},
+		{"\\n", `\\n`},
+		{"foo\r\nbar", `foo\r\nbar`},
+		// Preserve attribute boundaries.
+		{`"`, `\u0022`},
+		{`'`, `\u0027`},
+		// Allow embedding in HTML without further escaping.
+		{`&amp;`, `\u0026amp;`},
+		// Prevent breaking out of text node and element boundaries.
+		{"</script>", `\u003c\/script\u003e`},
+		{"<![CDATA[", `\u003c!\[CDATA\[`},
+		{"]]>", `\]\]\u003e`},
+		// Escaping text spans.
+		{"<!--", `\u003c!\-\-`},
+		{"-->", `\-\-\u003e`},
+		{"*", `\*`},
+		{"+", `\u002b`},
+		{"?", `\?`},
+		{"[](){}", `\[\]\(\)\{\}`},
+		{"$foo|x.y", `\$foo\|x\.y`},
+		{"x^y", `x\^y`},
+	}
+
+	for _, test := range tests {
+		esc := jsRegexpEscaper(test.x)
+		if esc != test.esc {
+			t.Errorf("%q: want %q got %q", test.x, test.esc, esc)
+		}
+	}
+}
+
+func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+	input := ("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f" +
+		"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+		` !"#$%&'()*+,-./` +
+		`0123456789:;<=>?` +
+		`@ABCDEFGHIJKLMNO` +
+		`PQRSTUVWXYZ[\]^_` +
+		"`abcdefghijklmno" +
+		"pqrstuvwxyz{|}~\x7f" +
+		"\u00A0\u0100\u2028\u2029\ufeff\U0001D11E")
+
+	tests := []struct {
+		name    string
+		escaper func(...interface{}) string
+		escaped string
+	}{
+		{
+			"jsStrEscaper",
+			jsStrEscaper,
+			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
+				`\u0008\t\n\u000b\f\r\u000e\u000f` +
+				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
+				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
+				` !\u0022#$%\u0026\u0027()*\u002b,-.\/` +
+				`0123456789:;\u003c=\u003e?` +
+				`@ABCDEFGHIJKLMNO` +
+				`PQRSTUVWXYZ[\\]^_` +
+				"`abcdefghijklmno" +
+				"pqrstuvwxyz{|}~\u007f" +
+				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+		},
+		{
+			"jsRegexpEscaper",
+			jsRegexpEscaper,
+			`\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
+				`\u0008\t\n\u000b\f\r\u000e\u000f` +
+				`\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
+				`\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
+				` !\u0022#\$%\u0026\u0027\(\)\*\u002b,\-\.\/` +
+				`0123456789:;\u003c=\u003e\?` +
+				`@ABCDEFGHIJKLMNO` +
+				`PQRSTUVWXYZ\[\\\]\^_` +
+				"`abcdefghijklmno" +
+				`pqrstuvwxyz\{\|\}~` + "\u007f" +
+				"\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+		},
+	}
+
+	for _, test := range tests {
+		if s := test.escaper(input); s != test.escaped {
+			t.Errorf("%s once: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+			continue
+		}
+
+		// Escape it rune by rune to make sure that any
+		// fast-path checking does not break escaping.
+		var buf bytes.Buffer
+		for _, c := range input {
+			buf.WriteString(test.escaper(string(c)))
+		}
+
+		if s := buf.String(); s != test.escaped {
+			t.Errorf("%s rune-wise: want\n\t%q\ngot\n\t%q", test.name, test.escaped, s)
+			continue
+		}
+	}
+}
+
+func TestIsJsMimeType(t *testing.T) {
+	tests := []struct {
+		in  string
+		out bool
+	}{
+		{"application/javascript;version=1.8", true},
+		{"application/javascript;version=1.8;foo=bar", true},
+		{"application/javascript/version=1.8", false},
+		{"text/javascript", true},
+		{"application/json", true},
+		{"application/ld+json", true},
+		{"module", true},
+	}
+
+	for _, test := range tests {
+		if isJSType(test.in) != test.out {
+			t.Errorf("isJSType(%q) = %v, want %v", test.in, !test.out, test.out)
+		}
+	}
+}
+
+func BenchmarkJSValEscaperWithNum(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(3.141592654)
+	}
+}
+
+func BenchmarkJSValEscaperWithStr(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkJSValEscaperWithStrNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsValEscaper("The quick, brown fox jumps over the lazy dog")
+	}
+}
+
+func BenchmarkJSValEscaperWithObj(b *testing.B) {
+	o := struct {
+		S string
+		N int
+	}{
+		"The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>\u2028",
+		42,
+	}
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(o)
+	}
+}
+
+func BenchmarkJSValEscaperWithObjNoSpecials(b *testing.B) {
+	o := struct {
+		S string
+		N int
+	}{
+		"The quick, brown fox jumps over the lazy dog",
+		42,
+	}
+	for i := 0; i < b.N; i++ {
+		jsValEscaper(o)
+	}
+}
+
+func BenchmarkJSStrEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsStrEscaper("The quick, brown fox jumps over the lazy dog.")
+	}
+}
+
+func BenchmarkJSStrEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsStrEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
+
+func BenchmarkJSRegexpEscaperNoSpecials(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsRegexpEscaper("The quick, brown fox jumps over the lazy dog")
+	}
+}
+
+func BenchmarkJSRegexpEscaper(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		jsRegexpEscaper("The <i>quick</i>,\r\n<span style='color:brown'>brown</span> fox jumps\u2028over the <canine class=\"lazy\">dog</canine>")
+	}
+}
diff --git a/internal/backport/html/template/jsctx_string.go b/internal/backport/html/template/jsctx_string.go
new file mode 100644
index 0000000..dd1d87e
--- /dev/null
+++ b/internal/backport/html/template/jsctx_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type jsCtx"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
+
+var _jsCtx_index = [...]uint8{0, 11, 21, 33}
+
+func (i jsCtx) String() string {
+	if i >= jsCtx(len(_jsCtx_index)-1) {
+		return "jsCtx(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _jsCtx_name[_jsCtx_index[i]:_jsCtx_index[i+1]]
+}
diff --git a/internal/backport/html/template/multi_test.go b/internal/backport/html/template/multi_test.go
new file mode 100644
index 0000000..23d9bbc
--- /dev/null
+++ b/internal/backport/html/template/multi_test.go
@@ -0,0 +1,277 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for multiple-template execution, copied from text/template.
+
+package template
+
+import (
+	"bytes"
+	"testing"
+
+	"golang.org/x/website/internal/backport/osfs"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+var multiExecTests = []execTest{
+	{"empty", "", "", nil, true},
+	{"text", "some text", "some text", nil, true},
+	{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+	{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+	{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+	{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+	{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+	{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+	{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+	// User-defined function: test argument evaluator.
+	{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+	{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const multiText1 = `
+	{{define "x"}}TEXT{{end}}
+	{{define "dotV"}}{{.V}}{{end}}
+`
+
+const multiText2 = `
+	{{define "dot"}}{{.}}{{end}}
+	{{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestMultiExecute(t *testing.T) {
+	// Declare a couple of templates first.
+	template, err := New("root").Parse(multiText1)
+	if err != nil {
+		t.Fatalf("parse error for 1: %s", err)
+	}
+	_, err = template.Parse(multiText2)
+	if err != nil {
+		t.Fatalf("parse error for 2: %s", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFiles(t *testing.T) {
+	_, err := ParseFiles("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	template := New("root")
+	_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseGlob(t *testing.T) {
+	_, err := ParseGlob("DOES NOT EXIST")
+	if err == nil {
+		t.Error("expected error for non-existent file; got none")
+	}
+	_, err = New("error").ParseGlob("[x")
+	if err == nil {
+		t.Error("expected error for bad pattern; got none")
+	}
+	template := New("root")
+	_, err = template.ParseGlob("testdata/file*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(multiExecTests, template, t)
+}
+
+func TestParseFS(t *testing.T) {
+	fs := osfs.DirFS("testdata")
+
+	{
+		_, err := ParseFS(fs, "DOES NOT EXIST")
+		if err == nil {
+			t.Error("expected error for non-existent file; got none")
+		}
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+
+	{
+		template := New("root")
+		_, err := template.ParseFS(fs, "file*.tmpl")
+		if err != nil {
+			t.Fatalf("error parsing files: %v", err)
+		}
+		testExecute(multiExecTests, template, t)
+	}
+}
+
+// In these tests, actual content (not just template definitions) comes from the parsed files.
+
+var templateFileExecTests = []execTest{
+	{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
+}
+
+func TestParseFilesWithData(t *testing.T) {
+	template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+func TestParseGlobWithData(t *testing.T) {
+	template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
+	if err != nil {
+		t.Fatalf("error parsing files: %v", err)
+	}
+	testExecute(templateFileExecTests, template, t)
+}
+
+const (
+	cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
+	cloneText2 = `{{define "b"}}b{{end}}`
+	cloneText3 = `{{define "c"}}root{{end}}`
+	cloneText4 = `{{define "c"}}clone{{end}}`
+)
+
+// Issue 7032
+func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
+	master := "{{define \"master\"}}{{end}}"
+	tmpl := New("master")
+	tree, err := parse.Parse("master", master, "", "", nil)
+	if err != nil {
+		t.Fatalf("unexpected parse err: %v", err)
+	}
+	masterTree := tree["master"]
+	tmpl.AddParseTree("master", masterTree) // used to panic
+}
+
+func TestRedefinition(t *testing.T) {
+	var tmpl *Template
+	var err error
+	if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parse 1: %v", err)
+	}
+	if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+	if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
+		t.Fatalf("got error %v, expected nil", err)
+	}
+}
+
+// Issue 10879
+func TestEmptyTemplateCloneCrash(t *testing.T) {
+	t1 := New("base")
+	t1.Clone() // used to panic
+}
+
+// Issue 10910, 10926
+func TestTemplateLookUp(t *testing.T) {
+	t.Skip("broken on html/template") // TODO
+	t1 := New("foo")
+	if t1.Lookup("foo") != nil {
+		t.Error("Lookup returned non-nil value for undefined template foo")
+	}
+	t1.New("bar")
+	if t1.Lookup("bar") != nil {
+		t.Error("Lookup returned non-nil value for undefined template bar")
+	}
+	t1.Parse(`{{define "foo"}}test{{end}}`)
+	if t1.Lookup("foo") == nil {
+		t.Error("Lookup returned nil value for defined template")
+	}
+}
+
+func TestParse(t *testing.T) {
+	// In multiple calls to Parse with the same receiver template, only one call
+	// can contain text other than space, comments, and template definitions
+	t1 := New("test")
+	if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+	if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
+		t.Fatalf("parsing test: %s", err)
+	}
+}
+
+func TestEmptyTemplate(t *testing.T) {
+	cases := []struct {
+		defn []string
+		in   string
+		want string
+	}{
+		{[]string{"x", "y"}, "", "y"},
+		{[]string{""}, "once", ""},
+		{[]string{"", ""}, "twice", ""},
+		{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
+		{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
+		{[]string{"{{.}}", ""}, "twice", "twice"}, // TODO: should want "" not "twice"
+	}
+
+	for i, c := range cases {
+		root := New("root")
+
+		var (
+			m   *Template
+			err error
+		)
+		for _, d := range c.defn {
+			m, err = root.New(c.in).Parse(d)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		buf := &bytes.Buffer{}
+		if err := m.Execute(buf, c.in); err != nil {
+			t.Error(i, err)
+			continue
+		}
+		if buf.String() != c.want {
+			t.Errorf("expected string %q: got %q", c.want, buf.String())
+		}
+	}
+}
+
+// Issue 19249 was a regression in 1.8 caused by the handling of empty
+// templates added in that release, which got different answers depending
+// on the order in which templates appeared in the internal map.
+func TestIssue19294(t *testing.T) {
+	// The empty block in "xhtml" should be replaced during execution
+	// by the contents of "stylesheet", but if the internal map associating
+	// names with templates is built in the wrong order, the empty block
+	// looks non-empty and this doesn't happen.
+	var inlined = map[string]string{
+		"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
+		"xhtml":      `{{block "stylesheet" .}}{{end}}`,
+	}
+	all := []string{"stylesheet", "xhtml"}
+	for i := 0; i < 100; i++ {
+		res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, name := range all {
+			_, err := res.New(name).Parse(inlined[name])
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+		var buf bytes.Buffer
+		res.Execute(&buf, 0)
+		if buf.String() != "stylesheet" {
+			t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
+		}
+	}
+}
diff --git a/internal/backport/html/template/state_string.go b/internal/backport/html/template/state_string.go
new file mode 100644
index 0000000..05104be
--- /dev/null
+++ b/internal/backport/html/template/state_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type state"; DO NOT EDIT.
+
+package template
+
+import "strconv"
+
+const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
+
+var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
+
+func (i state) String() string {
+	if i >= state(len(_state_index)-1) {
+		return "state(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _state_name[_state_index[i]:_state_index[i+1]]
+}
diff --git a/internal/backport/html/template/template.go b/internal/backport/html/template/template.go
new file mode 100644
index 0000000..4156ed4
--- /dev/null
+++ b/internal/backport/html/template/template.go
@@ -0,0 +1,538 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"sync"
+
+	"golang.org/x/website/internal/backport/path"
+
+	"golang.org/x/website/internal/backport/io/fs"
+	"golang.org/x/website/internal/backport/text/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+// Template is a specialized Template from "golang.org/x/website/internal/backport/text/template" that produces a safe
+// HTML document fragment.
+type Template struct {
+	// Sticky error if escaping fails, or escapeOK if succeeded.
+	escapeErr error
+	// We could embed the text/template field, but it's safer not to because
+	// we need to keep our version of the name space and the underlying
+	// template's in sync.
+	text *template.Template
+	// The underlying template's parse tree, updated to be HTML-safe.
+	Tree       *parse.Tree
+	*nameSpace // common to all associated templates
+}
+
+// escapeOK is a sentinel value used to indicate valid escaping.
+var escapeOK = fmt.Errorf("template escaped correctly")
+
+// nameSpace is the data structure shared by all templates in an association.
+type nameSpace struct {
+	mu      sync.Mutex
+	set     map[string]*Template
+	escaped bool
+	esc     escaper
+}
+
+// Templates returns a slice of the templates associated with t, including t
+// itself.
+func (t *Template) Templates() []*Template {
+	ns := t.nameSpace
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	// Return a slice so we don't expose the map.
+	m := make([]*Template, 0, len(ns.set))
+	for _, v := range ns.set {
+		m = append(m, v)
+	}
+	return m
+}
+
+// Option sets options for the template. Options are described by
+// strings, either a simple string or "key=value". There can be at
+// most one equals sign in an option string. If the option string
+// is unrecognized or otherwise invalid, Option panics.
+//
+// Known options:
+//
+// missingkey: Control the behavior during execution if a map is
+// indexed with a key that is not present in the map.
+//	"missingkey=default" or "missingkey=invalid"
+//		The default behavior: Do nothing and continue execution.
+//		If printed, the result of the index operation is the string
+//		"<no value>".
+//	"missingkey=zero"
+//		The operation returns the zero value for the map type's element.
+//	"missingkey=error"
+//		Execution stops immediately with an error.
+//
+func (t *Template) Option(opt ...string) *Template {
+	t.text.Option(opt...)
+	return t
+}
+
+// checkCanParse checks whether it is OK to parse templates.
+// If not, it returns an error.
+func (t *Template) checkCanParse() error {
+	if t == nil {
+		return nil
+	}
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	if t.nameSpace.escaped {
+		return fmt.Errorf("html/template: cannot Parse after Execute")
+	}
+	return nil
+}
+
+// escape escapes all associated templates.
+func (t *Template) escape() error {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	t.nameSpace.escaped = true
+	if t.escapeErr == nil {
+		if t.Tree == nil {
+			return fmt.Errorf("template: %q is an incomplete or empty template", t.Name())
+		}
+		if err := escapeTemplate(t, t.text.Root, t.Name()); err != nil {
+			return err
+		}
+	} else if t.escapeErr != escapeOK {
+		return t.escapeErr
+	}
+	return nil
+}
+
+// Execute applies a parsed template to the specified data object,
+// writing the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) Execute(wr io.Writer, data interface{}) error {
+	if err := t.escape(); err != nil {
+		return err
+	}
+	return t.text.Execute(wr, data)
+}
+
+// ExecuteTemplate applies the template associated with t that has the given
+// name to the specified data object and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
+	tmpl, err := t.lookupAndEscapeTemplate(name)
+	if err != nil {
+		return err
+	}
+	return tmpl.text.Execute(wr, data)
+}
+
+// lookupAndEscapeTemplate guarantees that the template with the given name
+// is escaped, or returns an error if it cannot be. It returns the named
+// template.
+func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err error) {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	t.nameSpace.escaped = true
+	tmpl = t.set[name]
+	if tmpl == nil {
+		return nil, fmt.Errorf("html/template: %q is undefined", name)
+	}
+	if tmpl.escapeErr != nil && tmpl.escapeErr != escapeOK {
+		return nil, tmpl.escapeErr
+	}
+	if tmpl.text.Tree == nil || tmpl.text.Root == nil {
+		return nil, fmt.Errorf("html/template: %q is an incomplete template", name)
+	}
+	if t.text.Lookup(name) == nil {
+		panic("html/template internal error: template escaping out of sync")
+	}
+	if tmpl.escapeErr == nil {
+		err = escapeTemplate(tmpl, tmpl.text.Root, name)
+	}
+	return tmpl, err
+}
+
+// DefinedTemplates returns a string listing the defined templates,
+// prefixed by the string "; defined templates are: ". If there are none,
+// it returns the empty string. Used to generate an error message.
+func (t *Template) DefinedTemplates() string {
+	return t.text.DefinedTemplates()
+}
+
+// Parse parses text as a template body for t.
+// Named template definitions ({{define ...}} or {{block ...}} statements) in text
+// define additional templates associated with t and are removed from the
+// definition of t itself.
+//
+// Templates can be redefined in successive calls to Parse,
+// before the first use of Execute on t or any associated template.
+// A template definition with a body containing only white space and comments
+// is considered empty and will not replace an existing template's body.
+// This allows using Parse to add new named template definitions without
+// overwriting the main template body.
+func (t *Template) Parse(text string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	ret, err := t.text.Parse(text)
+	if err != nil {
+		return nil, err
+	}
+
+	// In general, all the named templates might have changed underfoot.
+	// Regardless, some new ones may have been defined.
+	// The template.Template set has been updated; update ours.
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	for _, v := range ret.Templates() {
+		name := v.Name()
+		tmpl := t.set[name]
+		if tmpl == nil {
+			tmpl = t.new(name)
+		}
+		tmpl.text = v
+		tmpl.Tree = v.Tree
+	}
+	return t, nil
+}
+
+// AddParseTree creates a new template with the name and parse tree
+// and associates it with t.
+//
+// It returns an error if t or any associated template has already been executed.
+func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	text, err := t.text.AddParseTree(name, tree)
+	if err != nil {
+		return nil, err
+	}
+	ret := &Template{
+		nil,
+		text,
+		text.Tree,
+		t.nameSpace,
+	}
+	t.set[name] = ret
+	return ret, nil
+}
+
+// Clone returns a duplicate of the template, including all associated
+// templates. The actual representation is not copied, but the name space of
+// associated templates is, so further calls to Parse in the copy will add
+// templates to the copy but not to the original. Clone can be used to prepare
+// common templates and use them with variant definitions for other templates
+// by adding the variants after the clone is made.
+//
+// It returns an error if t has already been executed.
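+//
+// For example, definitions added to a clone do not affect the original:
+//
+//	base := template.Must(template.New("page").Parse(`{{define "T"}}shared{{end}}`))
+//	clone := template.Must(base.Clone())
+//	template.Must(clone.Parse(`{{define "T"}}variant{{end}}`)) // "T" in base still renders "shared"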
+func (t *Template) Clone() (*Template, error) {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	if t.escapeErr != nil {
+		return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
+	}
+	textClone, err := t.text.Clone()
+	if err != nil {
+		return nil, err
+	}
+	ns := &nameSpace{set: make(map[string]*Template)}
+	ns.esc = makeEscaper(ns)
+	ret := &Template{
+		nil,
+		textClone,
+		textClone.Tree,
+		ns,
+	}
+	ret.set[ret.Name()] = ret
+	for _, x := range textClone.Templates() {
+		name := x.Name()
+		src := t.set[name]
+		if src == nil || src.escapeErr != nil {
+			return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
+		}
+		x.Tree = x.Tree.Copy()
+		ret.set[name] = &Template{
+			nil,
+			x,
+			x.Tree,
+			ret.nameSpace,
+		}
+	}
+	// Return the template associated with the name of this template.
+	return ret.set[ret.Name()], nil
+}
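
The variant-definition pattern the Clone comment describes, as a minimal sketch (the layout, block name, and data are invented for illustration):

	package main

	import (
		"log"
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		// A shared layout with a hook that variants may override.
		base := template.Must(template.New("layout").Parse(
			`<title>{{block "title" .}}default{{end}}</title>`))

		// Clone before the base has executed, then specialize the copy.
		home, err := base.Clone()
		if err != nil {
			log.Fatal(err)
		}
		template.Must(home.Parse(`{{define "title"}}home{{end}}`))

		if err := base.Execute(os.Stdout, nil); err != nil { // <title>default</title>
			log.Fatal(err)
		}
		if err := home.Execute(os.Stdout, nil); err != nil { // <title>home</title>
			log.Fatal(err)
		}
	}
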
+
+// New allocates a new HTML template with the given name.
+func New(name string) *Template {
+	ns := &nameSpace{set: make(map[string]*Template)}
+	ns.esc = makeEscaper(ns)
+	tmpl := &Template{
+		nil,
+		template.New(name),
+		nil,
+		ns,
+	}
+	tmpl.set[name] = tmpl
+	return tmpl
+}
+
+// New allocates a new HTML template associated with the given one
+// and with the same delimiters. The association, which is transitive,
+// allows one template to invoke another with a {{template}} action.
+//
+// If a template with the given name already exists, the new HTML template
+// will replace it. The existing template will be reset and disassociated from
+// t.
+func (t *Template) New(name string) *Template {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	return t.new(name)
+}
+
+// new is the implementation of New, without the lock.
+func (t *Template) new(name string) *Template {
+	tmpl := &Template{
+		nil,
+		t.text.New(name),
+		nil,
+		t.nameSpace,
+	}
+	if existing, ok := tmpl.set[name]; ok {
+		emptyTmpl := New(existing.Name())
+		*existing = *emptyTmpl
+	}
+	tmpl.set[name] = tmpl
+	return tmpl
+}
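
A small sketch of the association New establishes (names and data invented for illustration): the root template reaches its associated "footer" template through a {{template}} action.

	package main

	import (
		"log"
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		root := template.New("root")
		// "footer" is associated with root, so root can invoke it by name.
		template.Must(root.New("footer").Parse(`<footer>{{.Year}}</footer>`))
		template.Must(root.Parse(`<main>body</main>{{template "footer" .}}`))

		if err := root.Execute(os.Stdout, struct{ Year int }{2021}); err != nil {
			log.Fatal(err)
		}
		// Prints: <main>body</main><footer>2021</footer>
	}
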
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+	return t.text.Name()
+}
+
+// FuncMap is the type of the map defining the mapping from names to
+// functions. Each function must have either a single return value, or two
+// return values of which the second has type error. In that case, if the
+// second (error) argument evaluates to non-nil during execution, execution
+// terminates and Execute returns that error. FuncMap has the same base type
+// as FuncMap in "golang.org/x/website/internal/backport/text/template", copied here so clients need not import
+// "golang.org/x/website/internal/backport/text/template".
+type FuncMap map[string]interface{}
+
+// Funcs adds the elements of the argument map to the template's function map.
+// It must be called before the template is parsed.
+// It panics if a value in the map is not a function with appropriate return
+// type. However, it is legal to overwrite elements of the map. The return
+// value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+	t.text.Funcs(template.FuncMap(funcMap))
+	return t
+}
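
A minimal sketch of installing a function map before parsing (the "upper" name is invented for illustration; Funcs must run before Parse so the parser can resolve the identifier):

	package main

	import (
		"log"
		"os"
		"strings"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t := template.New("greet").Funcs(template.FuncMap{
			"upper": strings.ToUpper, // one return value, so it satisfies FuncMap
		})
		t = template.Must(t.Parse(`<p>{{upper .}}</p>`))
		if err := t.Execute(os.Stdout, "hello & goodbye"); err != nil {
			log.Fatal(err)
		}
		// Prints: <p>HELLO &amp; GOODBYE</p>
	}
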
+
+// Delims sets the action delimiters to the specified strings, to be used in
+// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
+// definitions will inherit the settings. An empty delimiter stands for the
+// corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+	t.text.Delims(left, right)
+	return t
+}
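
A sketch of why Delims is useful (the [[ ]] delimiters and page content are invented for illustration): switching delimiters lets literal {{ }} text, such as a client-side template, pass through untouched.

	package main

	import (
		"log"
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t := template.Must(template.New("page").Delims("[[", "]]").Parse(
			`<p>{{clientSide}}</p><p>[[.]]</p>`))
		if err := t.Execute(os.Stdout, "server value"); err != nil {
			log.Fatal(err)
		}
		// Prints: <p>{{clientSide}}</p><p>server value</p>
	}
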
+
+// Lookup returns the template with the given name that is associated with t,
+// or nil if there is no such template.
+func (t *Template) Lookup(name string) *Template {
+	t.nameSpace.mu.Lock()
+	defer t.nameSpace.mu.Unlock()
+	return t.set[name]
+}
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+//	var t = template.Must(template.New("name").Parse("html"))
+func Must(t *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template will have the (base) name and
+// (parsed) contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
+// named "foo", while "a/foo" is unavailable.
+func ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(nil, readFileOS, filenames...)
+}
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+//
+// ParseFiles returns an error if t or any associated template has already been executed.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(t, readFileOS, filenames...)
+}
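
A sketch of ParseFiles against the testdata files added later in this CL; file1.tmpl and file2.tmpl contain only {{define}} blocks, so the named definitions are rendered with ExecuteTemplate.

	package main

	import (
		"log"
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t, err := template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
		if err != nil {
			log.Fatal(err)
		}
		// The returned template is named "file1.tmpl"; render one of the
		// definitions it picked up by name.
		if err := t.ExecuteTemplate(os.Stdout, "dotV", struct{ V string }{"v"}); err != nil {
			log.Fatal(err)
		}
		// Prints: v
	}
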
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+
+	if len(filenames) == 0 {
+		// Not really a problem, but be consistent.
+		return nil, fmt.Errorf("html/template: no files named in call to ParseFiles")
+	}
+	for _, filename := range filenames {
+		name, b, err := readFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		s := string(b)
+		// First template becomes return value if not already defined,
+		// and we use that one for subsequent New calls to associate
+		// all the templates together. Also, if this file has the same name
+		// as t, this file becomes the contents of t, so
+		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
+		// works. Otherwise we create a new template associated with t.
+		var tmpl *Template
+		if t == nil {
+			t = New(name)
+		}
+		if name == t.Name() {
+			tmpl = t
+		} else {
+			tmpl = t.New(name)
+		}
+		_, err = tmpl.Parse(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from
+// the files identified by the pattern. The files are matched according to the
+// semantics of filepath.Match, and the pattern must match at least one file.
+// The returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(nil, pattern)
+}
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The files are matched
+// according to the semantics of filepath.Match, and the pattern must match at
+// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
+// list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+//
+// ParseGlob returns an error if t or any associated template has already been executed.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(t, pattern)
+}
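
A sketch of ParseGlob over the tmpl*.tmpl testdata files added later in this CL; tmpl1.tmpl invokes the "y" definition that tmpl2.tmpl supplies, which is why the files need to be parsed together.

	package main

	import (
		"log"
		"os"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		t, err := template.ParseGlob("testdata/tmpl*.tmpl")
		if err != nil {
			log.Fatal(err)
		}
		// t is named "tmpl1.tmpl", the first file matched by the pattern.
		if err := t.Execute(os.Stdout, nil); err != nil {
			log.Fatal(err)
		}
		// Prints "template1" and then "y", with the files' surrounding newlines.
	}
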
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+	if err := t.checkCanParse(); err != nil {
+		return nil, err
+	}
+	filenames, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, err
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("html/template: pattern matches no files: %#q", pattern)
+	}
+	return parseFiles(t, readFileOS, filenames...)
+}
+
+// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value. This is the definition of
+// truth used by if and other such actions.
+func IsTrue(val interface{}) (truth, ok bool) {
+	return template.IsTrue(val)
+}
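
A short sketch of the truth rules IsTrue implements (the sample values are invented for illustration):

	package main

	import (
		"fmt"

		"golang.org/x/website/internal/backport/html/template"
	)

	func main() {
		for _, v := range []interface{}{0, 3, "", "go", []int(nil), []int{1}} {
			truth, ok := template.IsTrue(v)
			fmt.Printf("%#v -> truth=%v ok=%v\n", v, truth, ok)
		}
		// The zero values (0, "", nil slice) report truth=false; the others true.
		// ok is true for all of these, since each has a meaningful truth value.
	}
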
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
+	return parseFS(nil, fs, patterns)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fs
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func (t *Template) ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
+	return parseFS(t, fs, patterns)
+}
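
A sketch of the ParseFS call shape using the upstream Go 1.16+ standard library, whose API this backport mirrors; with the backport, the html/template import would be golang.org/x/website/internal/backport/html/template and the fs value would come from the backported fs package rather than os.DirFS (which does not exist before Go 1.16).

	package main

	import (
		"html/template" // the backport mirrors this API
		"log"
		"os"
	)

	func main() {
		// os.DirFS (Go 1.16+) turns a directory into an fs.FS; each pattern
		// is a glob evaluated relative to that root.
		fsys := os.DirFS("testdata")
		t, err := template.ParseFS(fsys, "file*.tmpl")
		if err != nil {
			log.Fatal(err)
		}
		if err := t.ExecuteTemplate(os.Stdout, "dot", "hello"); err != nil {
			log.Fatal(err)
		}
		// Prints: hello
	}
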
+
+func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
+	var filenames []string
+	for _, pattern := range patterns {
+		list, err := fs.Glob(fsys, pattern)
+		if err != nil {
+			return nil, err
+		}
+		if len(list) == 0 {
+			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+		}
+		filenames = append(filenames, list...)
+	}
+	return parseFiles(t, readFileFS(fsys), filenames...)
+}
+
+func readFileOS(file string) (name string, b []byte, err error) {
+	name = filepath.Base(file)
+	b, err = ioutil.ReadFile(file)
+	return
+}
+
+func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
+	return func(file string) (name string, b []byte, err error) {
+		name = path.Base(file)
+		b, err = fs.ReadFile(fsys, file)
+		return
+	}
+}
diff --git a/internal/backport/html/template/template_test.go b/internal/backport/html/template/template_test.go
new file mode 100644
index 0000000..0f68e72
--- /dev/null
+++ b/internal/backport/html/template/template_test.go
@@ -0,0 +1,219 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"testing"
+
+	. "golang.org/x/website/internal/backport/html/template"
+	"golang.org/x/website/internal/backport/text/template/parse"
+)
+
+func TestTemplateClone(t *testing.T) {
+	// https://golang.org/issue/12996
+	orig := New("name")
+	clone, err := orig.Clone()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(clone.Templates()) != len(orig.Templates()) {
+		t.Fatalf("Invalid length of t.Clone().Templates()")
+	}
+
+	const want = "stuff"
+	parsed := Must(clone.Parse(want))
+	var buf bytes.Buffer
+	err = parsed.Execute(&buf, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got := buf.String(); got != want {
+		t.Fatalf("got %q; want %q", got, want)
+	}
+}
+
+func TestRedefineNonEmptyAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `foo`)
+	c.mustExecute(c.root, nil, "foo")
+	c.mustNotParse(c.root, `bar`)
+}
+
+func TestRedefineEmptyAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, ``)
+	c.mustExecute(c.root, nil, "")
+	c.mustNotParse(c.root, `foo`)
+	c.mustExecute(c.root, nil, "")
+}
+
+func TestRedefineAfterNonExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{if .}}<{{template "X"}}>{{end}}{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.root, 0, "")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.root, 1, "&lt;foo>")
+}
+
+func TestRedefineAfterNamedExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `<{{template "X" .}}>{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.root, nil, "&lt;foo>")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.root, nil, "&lt;foo>")
+}
+
+func TestRedefineNestedByNameAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+	c.mustNotParse(c.root, `{{define "X"}}bar{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+}
+
+func TestRedefineNestedByTemplateAfterExecution(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{define "X"}}foo{{end}}`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+	c.mustNotParse(c.lookup("X"), `bar`)
+	c.mustExecute(c.lookup("X"), nil, "foo")
+}
+
+func TestRedefineSafety(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `<html><a href="{{template "X"}}">{{define "X"}}{{end}}`)
+	c.mustExecute(c.root, nil, `<html><a href="">`)
+	// Note: Every version of Go prior to Go 1.8 accepted the redefinition of "X"
+	// on the next line, but luckily kept it from being used in the outer template.
+	// Now we reject it, which makes clearer that we're not going to use it.
+	c.mustNotParse(c.root, `{{define "X"}}" bar="baz{{end}}`)
+	c.mustExecute(c.root, nil, `<html><a href="">`)
+}
+
+func TestRedefineTopUse(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{template "X"}}{{.}}{{define "X"}}{{end}}`)
+	c.mustExecute(c.root, 42, `42`)
+	c.mustNotParse(c.root, `{{define "X"}}<script>{{end}}`)
+	c.mustExecute(c.root, 42, `42`)
+}
+
+func TestRedefineOtherParsers(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, ``)
+	c.mustExecute(c.root, nil, ``)
+	if _, err := c.root.ParseFiles("no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("ParseFiles: %v\nwanted error about already having Executed", err)
+	}
+	if _, err := c.root.ParseGlob("*.no.template"); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("ParseGlob: %v\nwanted error about already having Executed", err)
+	}
+	if _, err := c.root.AddParseTree("t1", c.root.Tree); err == nil || !strings.Contains(err.Error(), "Execute") {
+		t.Errorf("AddParseTree: %v\nwanted error about already having Executed", err)
+	}
+}
+
+func TestNumbers(t *testing.T) {
+	c := newTestCase(t)
+	c.mustParse(c.root, `{{print 1_2.3_4}} {{print 0x0_1.e_0p+02}}`)
+	c.mustExecute(c.root, nil, "12.34 7.5")
+}
+
+func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
+	// See #33671 and #37634 for more context on this.
+	tests := []struct{ name, in string }{
+		{"empty", ""},
+		{"invalid", string(rune(-1))},
+		{"null", "\u0000"},
+		{"unit separator", "\u001F"},
+		{"tab", "\t"},
+		{"gt and lt", "<>"},
+		{"quotes", `'"`},
+		{"ASCII letters", "ASCII letters"},
+		{"Unicode", "ʕ⊙ϖ⊙ʔ"},
+		{"Pizza", "🍕"},
+	}
+	const (
+		prefix = `<script type="application/ld+json">`
+		suffix = `</script>`
+		templ  = prefix + `"{{.}}"` + suffix
+	)
+	tpl := Must(New("JS string is JSON string").Parse(templ))
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var buf bytes.Buffer
+			if err := tpl.Execute(&buf, tt.in); err != nil {
+				t.Fatalf("Cannot render template: %v", err)
+			}
+			trimmed := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
+			var got string
+			if err := json.Unmarshal(trimmed, &got); err != nil {
+				t.Fatalf("Cannot parse JS string %q as JSON: %v", trimmed[1:len(trimmed)-1], err)
+			}
+			if got != tt.in {
+				t.Errorf("Serialization changed the string value: got %q want %q", got, tt.in)
+			}
+		})
+	}
+}
+
+func TestSkipEscapeComments(t *testing.T) {
+	c := newTestCase(t)
+	tr := parse.New("root")
+	tr.Mode = parse.ParseComments
+	newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree))
+	if err != nil {
+		t.Fatalf("Cannot parse template text: %v", err)
+	}
+	c.root, err = c.root.AddParseTree("root", newT)
+	if err != nil {
+		t.Fatalf("Cannot add parse tree to template: %v", err)
+	}
+	c.mustExecute(c.root, nil, "1")
+}
+
+type testCase struct {
+	t    *testing.T
+	root *Template
+}
+
+func newTestCase(t *testing.T) *testCase {
+	return &testCase{
+		t:    t,
+		root: New("root"),
+	}
+}
+
+func (c *testCase) lookup(name string) *Template {
+	return c.root.Lookup(name)
+}
+
+func (c *testCase) mustParse(t *Template, text string) {
+	_, err := t.Parse(text)
+	if err != nil {
+		c.t.Fatalf("parse: %v", err)
+	}
+}
+
+func (c *testCase) mustNotParse(t *Template, text string) {
+	_, err := t.Parse(text)
+	if err == nil {
+		c.t.Fatalf("parse: unexpected success")
+	}
+}
+
+func (c *testCase) mustExecute(t *Template, val interface{}, want string) {
+	var buf bytes.Buffer
+	err := t.Execute(&buf, val)
+	if err != nil {
+		c.t.Fatalf("execute: %v", err)
+	}
+	if buf.String() != want {
+		c.t.Fatalf("template output:\n%s\nwant:\n%s", buf.String(), want)
+	}
+}
diff --git a/internal/backport/html/template/testdata/file1.tmpl b/internal/backport/html/template/testdata/file1.tmpl
new file mode 100644
index 0000000..febf9d9
--- /dev/null
+++ b/internal/backport/html/template/testdata/file1.tmpl
@@ -0,0 +1,2 @@
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
diff --git a/internal/backport/html/template/testdata/file2.tmpl b/internal/backport/html/template/testdata/file2.tmpl
new file mode 100644
index 0000000..39bf6fb
--- /dev/null
+++ b/internal/backport/html/template/testdata/file2.tmpl
@@ -0,0 +1,2 @@
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/internal/backport/html/template/testdata/fs.zip b/internal/backport/html/template/testdata/fs.zip
new file mode 100644
index 0000000..8581313
--- /dev/null
+++ b/internal/backport/html/template/testdata/fs.zip
Binary files differ
diff --git a/internal/backport/html/template/testdata/tmpl1.tmpl b/internal/backport/html/template/testdata/tmpl1.tmpl
new file mode 100644
index 0000000..b72b3a3
--- /dev/null
+++ b/internal/backport/html/template/testdata/tmpl1.tmpl
@@ -0,0 +1,3 @@
+template1
+{{define "x"}}x{{end}}
+{{template "y"}}
diff --git a/internal/backport/html/template/testdata/tmpl2.tmpl b/internal/backport/html/template/testdata/tmpl2.tmpl
new file mode 100644
index 0000000..16beba6
--- /dev/null
+++ b/internal/backport/html/template/testdata/tmpl2.tmpl
@@ -0,0 +1,3 @@
+template2
+{{define "y"}}y{{end}}
+{{template "x"}}
diff --git a/internal/backport/html/template/transition.go b/internal/backport/html/template/transition.go
new file mode 100644
index 0000000..06df679
--- /dev/null
+++ b/internal/backport/html/template/transition.go
@@ -0,0 +1,592 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LIC