go.tools/cmd/godoc: copy godoc from core repository

The plan for godoc:
- Copy godoc source from the core repo to go.tools (this CL).
- Break godoc into several packages inside go.tools, leaving a package
  main that merely sets up a local file system, interprets the command
  line, and otherwise delegates the heavy lifting to the new packages.
- Remove godoc from the core repo.
- Update cmd/go to install this godoc binary in $GOROOT/bin.
- Update misc/dist to include godoc when building binary distributions.

R=bradfitz
CC=golang-dev
https://golang.org/cl/11408043
diff --git a/cmd/godoc/README.godoc-app b/cmd/godoc/README.godoc-app
new file mode 100644
index 0000000..cff7d38
--- /dev/null
+++ b/cmd/godoc/README.godoc-app
@@ -0,0 +1,61 @@
+Copyright 2011 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file.
+
+godoc on appengine
+------------------
+
+Prerequisites
+-------------
+
+* Go appengine SDK
+  https://developers.google.com/appengine/downloads#Google_App_Engine_SDK_for_Go
+
+* Go sources at tip under $GOROOT
+
+
+Directory structure
+-------------------
+
+* Let $APPDIR be the directory containing the app engine files.
+  (e.g., $APPDIR=$HOME/godoc-app)
+
+* $APPDIR contains the following entries (this may change depending on
+  app-engine release and version of godoc):
+
+	app.yaml
+	godoc.zip
+	godoc/
+	index.split.*
+
+* The app.yaml file is set up per app engine documentation.
+  For instance:
+
+	application: godoc-app
+	version: 1
+	runtime: go
+	api_version: go1
+
+	handlers:
+	- url: /.*
+	  script: _go_app
+
+* The godoc/ directory contains a copy of the files under $GOROOT/src/cmd/godoc
+  with doc.go excluded (it belongs to pseudo-package "documentation").
+
+
+Configuring and running godoc
+-----------------------------
+
+To configure godoc, run
+
+	bash setup-godoc-app.bash
+
+to create the godoc.zip, index.split.*, and godoc/appconfig.go files
+based on $GOROOT and $APPDIR. See the script for details on usage.
+
+To run godoc locally, using the app-engine emulator, run
+
+	<path to google_appengine>/dev_appserver.py $APPDIR
+
+godoc should come up at http://localhost:8080 .
diff --git a/cmd/godoc/appinit.go b/cmd/godoc/appinit.go
new file mode 100644
index 0000000..996b2b8
--- /dev/null
+++ b/cmd/godoc/appinit.go
@@ -0,0 +1,69 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package main
+
+// This file replaces main.go when running godoc under app-engine.
+// See README.godoc-app for details.
+
+import (
+	"archive/zip"
+	"log"
+	"net/http"
+	"path"
+)
+
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
+	w.WriteHeader(http.StatusNotFound)
+	servePage(w, Page{
+		Title:    "File " + relpath,
+		Subtitle: relpath,
+		Body:     applyTemplate(errorHTML, "errorHTML", err), // err may contain an absolute path!
+	})
+}
+
+func init() {
+	log.Println("initializing godoc ...")
+	log.Printf(".zip file   = %s", zipFilename)
+	log.Printf(".zip GOROOT = %s", zipGoroot)
+	log.Printf("index files = %s", indexFilenames)
+
+	// initialize flags for app engine
+	*goroot = path.Join("/", zipGoroot) // fsHttp paths are relative to '/'
+	*indexEnabled = true
+	*indexFiles = indexFilenames
+	*maxResults = 100    // reduce latency by limiting the number of fulltext search results
+	*indexThrottle = 0.3 // in case *indexFiles is empty (and thus the indexer is run)
+	*showPlayground = true
+
+	// read .zip file and set up file systems
+	const zipfile = zipFilename
+	rc, err := zip.OpenReader(zipfile)
+	if err != nil {
+		log.Fatalf("%s: %s\n", zipfile, err)
+	}
+	// rc is never closed (app running forever)
+	fs.Bind("/", NewZipFS(rc, zipFilename), *goroot, bindReplace)
+
+	// initialize http handlers
+	readTemplates()
+	initHandlers()
+	registerPublicHandlers(http.DefaultServeMux)
+	registerPlaygroundHandlers(http.DefaultServeMux)
+
+	// initialize default directory tree with corresponding timestamp.
+	initFSTree()
+
+	// Immediately update metadata.
+	updateMetadata()
+
+	// initialize search index
+	if *indexEnabled {
+		go indexer()
+	}
+
+	log.Println("godoc initialization complete")
+}
diff --git a/cmd/godoc/codewalk.go b/cmd/godoc/codewalk.go
new file mode 100644
index 0000000..e68c0fa
--- /dev/null
+++ b/cmd/godoc/codewalk.go
@@ -0,0 +1,494 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The /doc/codewalk/ tree is synthesized from codewalk descriptions,
+// files named $GOROOT/doc/codewalk/*.xml.
+// For an example and a description of the format, see
+// http://golang.org/doc/codewalk/codewalk or run godoc -http=:6060
+// and see http://localhost:6060/doc/codewalk/codewalk .
+// That page is itself a codewalk; the source code for it is
+// $GOROOT/doc/codewalk/codewalk.xml.
+
+package main
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"text/template"
+	"unicode/utf8"
+)
+
+// Handler for /doc/codewalk/ and below.
+func codewalk(w http.ResponseWriter, r *http.Request) {
+	relpath := r.URL.Path[len("/doc/codewalk/"):]
+	abspath := r.URL.Path
+
+	r.ParseForm()
+	if f := r.FormValue("fileprint"); f != "" {
+		codewalkFileprint(w, r, f)
+		return
+	}
+
+	// If directory exists, serve list of code walks.
+	dir, err := fs.Lstat(abspath)
+	if err == nil && dir.IsDir() {
+		codewalkDir(w, r, relpath, abspath)
+		return
+	}
+
+	// If file exists, serve using standard file server.
+	if err == nil {
+		serveFile(w, r)
+		return
+	}
+
+	// Otherwise, append .xml and hope to find
+	// a codewalk description, but first trim
+	// the trailing /.
+	abspath = strings.TrimRight(abspath, "/")
+	cw, err := loadCodewalk(abspath + ".xml")
+	if err != nil {
+		log.Print(err)
+		serveError(w, r, relpath, err)
+		return
+	}
+
+	// Canonicalize the path and redirect if changed
+	if redirect(w, r) {
+		return
+	}
+
+	servePage(w, Page{
+		Title:    "Codewalk: " + cw.Title,
+		Tabtitle: cw.Title,
+		Body:     applyTemplate(codewalkHTML, "codewalk", cw),
+	})
+}
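+
+// The handler above is registered for the /doc/codewalk/ prefix; a minimal
+// sketch of the wiring (done elsewhere in godoc's handler setup):
+//
+//	http.HandleFunc("/doc/codewalk/", codewalk)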
+
+// A Codewalk represents a single codewalk read from an XML file.
+type Codewalk struct {
+	Title string      `xml:"title,attr"`
+	File  []string    `xml:"file"`
+	Step  []*Codestep `xml:"step"`
+}
+
+// A Codestep is a single step in a codewalk.
+type Codestep struct {
+	// Filled in from XML
+	Src   string `xml:"src,attr"`
+	Title string `xml:"title,attr"`
+	XML   string `xml:",innerxml"`
+
+	// Derived from Src; not in XML.
+	Err    error
+	File   string
+	Lo     int
+	LoByte int
+	Hi     int
+	HiByte int
+	Data   []byte
+}
+
+// String method for printing in template.
+// Formats file address nicely.
+func (st *Codestep) String() string {
+	s := st.File
+	if st.Lo != 0 || st.Hi != 0 {
+		s += fmt.Sprintf(":%d", st.Lo)
+		if st.Lo != st.Hi {
+			s += fmt.Sprintf(",%d", st.Hi)
+		}
+	}
+	return s
+}
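+
+// For example (hypothetical values): a step with File "doc/urlpoll.go",
+// Lo 5, and Hi 7 prints as "doc/urlpoll.go:5,7"; with Lo == Hi == 5 it
+// prints as "doc/urlpoll.go:5".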
+
+// loadCodewalk reads a codewalk from the named XML file.
+func loadCodewalk(filename string) (*Codewalk, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	cw := new(Codewalk)
+	d := xml.NewDecoder(f)
+	d.Entity = xml.HTMLEntity
+	err = d.Decode(cw)
+	if err != nil {
+		return nil, &os.PathError{Op: "parsing", Path: filename, Err: err}
+	}
+
+	// Compute file list, evaluate line numbers for addresses.
+	m := make(map[string]bool)
+	for _, st := range cw.Step {
+		i := strings.Index(st.Src, ":")
+		if i < 0 {
+			i = len(st.Src)
+		}
+		filename := st.Src[0:i]
+		data, err := ReadFile(fs, filename)
+		if err != nil {
+			st.Err = err
+			continue
+		}
+		if i < len(st.Src) {
+			lo, hi, err := addrToByteRange(st.Src[i+1:], 0, data)
+			if err != nil {
+				st.Err = err
+				continue
+			}
+			// Expand match to line boundaries.
+			for lo > 0 && data[lo-1] != '\n' {
+				lo--
+			}
+			for hi < len(data) && (hi == 0 || data[hi-1] != '\n') {
+				hi++
+			}
+			st.Lo = byteToLine(data, lo)
+			st.Hi = byteToLine(data, hi-1)
+		}
+		st.Data = data
+		st.File = filename
+		m[filename] = true
+	}
+
+	// Make list of files
+	cw.File = make([]string, len(m))
+	i := 0
+	for f := range m {
+		cw.File[i] = f
+		i++
+	}
+	sort.Strings(cw.File)
+
+	return cw, nil
+}
+
+// codewalkDir serves the codewalk directory listing.
+// It scans the directory for subdirectories or files named *.xml
+// and prepares a table.
+func codewalkDir(w http.ResponseWriter, r *http.Request, relpath, abspath string) {
+	type elem struct {
+		Name  string
+		Title string
+	}
+
+	dir, err := fs.ReadDir(abspath)
+	if err != nil {
+		log.Print(err)
+		serveError(w, r, relpath, err)
+		return
+	}
+	var v []interface{}
+	for _, fi := range dir {
+		name := fi.Name()
+		if fi.IsDir() {
+			v = append(v, &elem{name + "/", ""})
+		} else if strings.HasSuffix(name, ".xml") {
+			cw, err := loadCodewalk(abspath + "/" + name)
+			if err != nil {
+				continue
+			}
+			v = append(v, &elem{name[0 : len(name)-len(".xml")], cw.Title})
+		}
+	}
+
+	servePage(w, Page{
+		Title: "Codewalks",
+		Body:  applyTemplate(codewalkdirHTML, "codewalkdir", v),
+	})
+}
+
+// codewalkFileprint serves requests with ?fileprint=f&lo=lo&hi=hi.
+// The filename f has already been retrieved and is passed as an argument.
+// Lo and hi are the numbers of the first and last line to highlight
+// in the response.  This format is used for the middle window pane
+// of the codewalk pages.  It is a separate iframe and does not get
+// the usual godoc HTML wrapper.
+func codewalkFileprint(w http.ResponseWriter, r *http.Request, f string) {
+	abspath := f
+	data, err := ReadFile(fs, abspath)
+	if err != nil {
+		log.Print(err)
+		serveError(w, r, f, err)
+		return
+	}
+	lo, _ := strconv.Atoi(r.FormValue("lo"))
+	hi, _ := strconv.Atoi(r.FormValue("hi"))
+	if hi < lo {
+		hi = lo
+	}
+	lo = lineToByte(data, lo)
+	hi = lineToByte(data, hi+1)
+
+	// Put the mark 4 lines before lo, so that the iframe
+	// shows a few lines of context before the highlighted
+	// section.
+	n := 4
+	mark := lo
+	for ; mark > 0 && n > 0; mark-- {
+		if data[mark-1] == '\n' {
+			if n--; n == 0 {
+				break
+			}
+		}
+	}
+
+	io.WriteString(w, `<style type="text/css">@import "/doc/codewalk/codewalk.css";</style><pre>`)
+	template.HTMLEscape(w, data[0:mark])
+	io.WriteString(w, "<a name='mark'></a>")
+	template.HTMLEscape(w, data[mark:lo])
+	if lo < hi {
+		io.WriteString(w, "<div class='codewalkhighlight'>")
+		template.HTMLEscape(w, data[lo:hi])
+		io.WriteString(w, "</div>")
+	}
+	template.HTMLEscape(w, data[hi:])
+	io.WriteString(w, "</pre>")
+}
+
+// addrToByteRange evaluates the given address starting at offset start in data.
+// It returns the lo and hi byte offset of the matched region within data.
+// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
+// for details on the syntax.
+func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
+	var (
+		dir        byte
+		prevc      byte
+		charOffset bool
+	)
+	lo = start
+	hi = start
+	for addr != "" && err == nil {
+		c := addr[0]
+		switch c {
+		default:
+			err = errors.New("invalid address syntax near " + string(c))
+		case ',':
+			if len(addr) == 1 {
+				hi = len(data)
+			} else {
+				_, hi, err = addrToByteRange(addr[1:], hi, data)
+			}
+			return
+
+		case '+', '-':
+			if prevc == '+' || prevc == '-' {
+				lo, hi, err = addrNumber(data, lo, hi, prevc, 1, charOffset)
+			}
+			dir = c
+
+		case '$':
+			lo = len(data)
+			hi = len(data)
+			if len(addr) > 1 {
+				dir = '+'
+			}
+
+		case '#':
+			charOffset = true
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			var i int
+			for i = 1; i < len(addr); i++ {
+				if addr[i] < '0' || addr[i] > '9' {
+					break
+				}
+			}
+			var n int
+			n, err = strconv.Atoi(addr[0:i])
+			if err != nil {
+				break
+			}
+			lo, hi, err = addrNumber(data, lo, hi, dir, n, charOffset)
+			dir = 0
+			charOffset = false
+			prevc = c
+			addr = addr[i:]
+			continue
+
+		case '/':
+			var i, j int
+		Regexp:
+			for i = 1; i < len(addr); i++ {
+				switch addr[i] {
+				case '\\':
+					i++
+				case '/':
+					j = i + 1
+					break Regexp
+				}
+			}
+			if j == 0 {
+				j = i
+			}
+			pattern := addr[1:i]
+			lo, hi, err = addrRegexp(data, lo, hi, dir, pattern)
+			prevc = c
+			addr = addr[j:]
+			continue
+		}
+		prevc = c
+		addr = addr[1:]
+	}
+
+	if err == nil && dir != 0 {
+		lo, hi, err = addrNumber(data, lo, hi, dir, 1, charOffset)
+	}
+	if err != nil {
+		return 0, 0, err
+	}
+	return lo, hi, nil
+}
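+
+// Illustrative addresses (a sketch; data is assumed to hold the file contents):
+//
+//	addrToByteRange("10", 0, data)           // all of line 10
+//	addrToByteRange("10,15", 0, data)        // lines 10 through 15
+//	addrToByteRange("/^func main/", 0, data) // first match of the regexp
+//	addrToByteRange("$", 0, data)            // the end of data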
+
+// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
+// dir is '+' or '-', n is the count, and charOffset is true if the syntax
+// used was #n.  Applying +n (or +#n) means to advance n lines
+// (or characters) after hi.  Applying -n (or -#n) means to back up n lines
+// (or characters) before lo.
+// The return value is the new lo, hi.
+func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, error) {
+	switch dir {
+	case 0:
+		lo = 0
+		hi = 0
+		fallthrough
+
+	case '+':
+		if charOffset {
+			pos := hi
+			for ; n > 0 && pos < len(data); n-- {
+				_, size := utf8.DecodeRune(data[pos:])
+				pos += size
+			}
+			if n == 0 {
+				return pos, pos, nil
+			}
+			break
+		}
+		// find next beginning of line
+		if hi > 0 {
+			for hi < len(data) && data[hi-1] != '\n' {
+				hi++
+			}
+		}
+		lo = hi
+		if n == 0 {
+			return lo, hi, nil
+		}
+		for ; hi < len(data); hi++ {
+			if data[hi] != '\n' {
+				continue
+			}
+			switch n--; n {
+			case 1:
+				lo = hi + 1
+			case 0:
+				return lo, hi + 1, nil
+			}
+		}
+
+	case '-':
+		if charOffset {
+			// Scan backward for bytes that are not UTF-8 continuation bytes.
+			pos := lo
+			for ; pos > 0 && n > 0; pos-- {
+				if data[pos]&0xc0 != 0x80 {
+					n--
+				}
+			}
+			if n == 0 {
+				return pos, pos, nil
+			}
+			break
+		}
+		// find earlier beginning of line
+		for lo > 0 && data[lo-1] != '\n' {
+			lo--
+		}
+		hi = lo
+		if n == 0 {
+			return lo, hi, nil
+		}
+		for ; lo >= 0; lo-- {
+			if lo > 0 && data[lo-1] != '\n' {
+				continue
+			}
+			switch n--; n {
+			case 1:
+				hi = lo
+			case 0:
+				return lo, hi, nil
+			}
+		}
+	}
+
+	return 0, 0, errors.New("address out of range")
+}
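+
+// For instance (hypothetical values): addrNumber(data, lo, hi, '+', 2, false)
+// yields the range of the second line after hi, while
+// addrNumber(data, lo, hi, '-', 3, true) backs up three characters before lo.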
+
+// addrRegexp searches for pattern in the given direction starting at lo, hi.
+// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
+// Backward searches are unimplemented.
+func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return 0, 0, err
+	}
+	if dir == '-' {
+		// Could implement reverse search using binary search
+		// through file, but that seems like overkill.
+		return 0, 0, errors.New("reverse search not implemented")
+	}
+	m := re.FindIndex(data[hi:])
+	if len(m) > 0 {
+		m[0] += hi
+		m[1] += hi
+	} else if hi > 0 {
+		// No match.  Wrap to beginning of data.
+		m = re.FindIndex(data)
+	}
+	if len(m) == 0 {
+		return 0, 0, errors.New("no match for " + pattern)
+	}
+	return m[0], m[1], nil
+}
+
+// lineToByte returns the byte index of the first byte of line n.
+// Line numbers begin at 1.
+func lineToByte(data []byte, n int) int {
+	if n <= 1 {
+		return 0
+	}
+	n--
+	for i, c := range data {
+		if c == '\n' {
+			if n--; n == 0 {
+				return i + 1
+			}
+		}
+	}
+	return len(data)
+}
+
+// byteToLine returns the number of the line containing the byte at index i.
+func byteToLine(data []byte, i int) int {
+	l := 1
+	for j, c := range data {
+		if j == i {
+			return l
+		}
+		if c == '\n' {
+			l++
+		}
+	}
+	return l
+}
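+
+// A quick sanity check of the two conversions (a sketch):
+//
+//	data := []byte("a\nb\nc\n")
+//	lineToByte(data, 2) // == 2, the byte offset of 'b'
+//	byteToLine(data, 2) // == 2, the line containing 'b'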
diff --git a/cmd/godoc/dirtrees.go b/cmd/godoc/dirtrees.go
new file mode 100644
index 0000000..fda7adc
--- /dev/null
+++ b/cmd/godoc/dirtrees.go
@@ -0,0 +1,320 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code dealing with package directory trees.
+
+package main
+
+import (
+	"bytes"
+	"go/doc"
+	"go/parser"
+	"go/token"
+	"log"
+	"os"
+	pathpkg "path"
+	"strings"
+)
+
+// Conventional name for directories containing test data.
+// Excluded from directory trees.
+//
+const testdataDirName = "testdata"
+
+type Directory struct {
+	Depth    int
+	Path     string       // directory path; includes Name
+	Name     string       // directory name
+	HasPkg   bool         // true if the directory contains at least one package
+	Synopsis string       // package documentation, if any
+	Dirs     []*Directory // subdirectories
+}
+
+func isGoFile(fi os.FileInfo) bool {
+	name := fi.Name()
+	return !fi.IsDir() &&
+		len(name) > 0 && name[0] != '.' && // ignore .files
+		pathpkg.Ext(name) == ".go"
+}
+
+func isPkgFile(fi os.FileInfo) bool {
+	return isGoFile(fi) &&
+		!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
+}
+
+func isPkgDir(fi os.FileInfo) bool {
+	name := fi.Name()
+	return fi.IsDir() && len(name) > 0 &&
+		name[0] != '_' && name[0] != '.' // ignore _files and .files
+}
+
+type treeBuilder struct {
+	maxDepth int
+}
+
+func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
+	if name == testdataDirName {
+		return nil
+	}
+
+	if depth >= b.maxDepth {
+		// return a dummy directory so that the parent directory
+		// doesn't get discarded just because we reached the max
+		// directory depth
+		return &Directory{
+			Depth: depth,
+			Path:  path,
+			Name:  name,
+		}
+	}
+
+	list, _ := fs.ReadDir(path)
+
+	// determine number of subdirectories and if there are package files
+	ndirs := 0
+	hasPkgFiles := false
+	var synopses [3]string // prioritized package documentation (0 == highest priority)
+	for _, d := range list {
+		switch {
+		case isPkgDir(d):
+			ndirs++
+		case isPkgFile(d):
+			// looks like a package file, but may just be a file ending in ".go";
+			// don't just count it yet (otherwise we may end up with hasPkgFiles even
+			// though the directory doesn't contain any real package files - this was a bug)
+			if synopses[0] == "" {
+				// no "optimal" package synopsis yet; continue to collect synopses
+				file, err := parseFile(fset, pathpkg.Join(path, d.Name()),
+					parser.ParseComments|parser.PackageClauseOnly)
+				if err == nil {
+					hasPkgFiles = true
+					if file.Doc != nil {
+						// prioritize documentation
+						i := -1
+						switch file.Name.Name {
+						case name:
+							i = 0 // normal case: directory name matches package name
+						case "main":
+							i = 1 // directory contains a main package
+						default:
+							i = 2 // none of the above
+						}
+						if 0 <= i && i < len(synopses) && synopses[i] == "" {
+							synopses[i] = doc.Synopsis(file.Doc.Text())
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// create subdirectory tree
+	var dirs []*Directory
+	if ndirs > 0 {
+		dirs = make([]*Directory, ndirs)
+		i := 0
+		for _, d := range list {
+			if isPkgDir(d) {
+				name := d.Name()
+				dd := b.newDirTree(fset, pathpkg.Join(path, name), name, depth+1)
+				if dd != nil {
+					dirs[i] = dd
+					i++
+				}
+			}
+		}
+		dirs = dirs[0:i]
+	}
+
+	// if there are no package files and no subdirectories
+	// containing package files, ignore the directory
+	if !hasPkgFiles && len(dirs) == 0 {
+		return nil
+	}
+
+	// select the highest-priority synopsis for the directory entry, if any
+	synopsis := ""
+	for _, synopsis = range synopses {
+		if synopsis != "" {
+			break
+		}
+	}
+
+	return &Directory{
+		Depth:    depth,
+		Path:     path,
+		Name:     name,
+		HasPkg:   hasPkgFiles,
+		Synopsis: synopsis,
+		Dirs:     dirs,
+	}
+}
+
+// newDirectory creates a new package directory tree with at most maxDepth
+// levels, anchored at root. The result tree is pruned such that it only
+// contains directories that contain package files or that contain
+// subdirectories containing package files (transitively). If a value >= 0 is
+// provided for maxDepth, nodes at larger depths are pruned as well; they
+// are assumed to contain package files even if their contents are not known
+// (i.e., in this case the tree may contain directories w/o any package files).
+//
+func newDirectory(root string, maxDepth int) *Directory {
+	// The root could be a symbolic link so use Stat not Lstat.
+	d, err := fs.Stat(root)
+	// If we fail here, report detailed error messages; otherwise
+	// it is hard to see why a directory tree was not built.
+	switch {
+	case err != nil:
+		log.Printf("newDirectory(%s): %s", root, err)
+		return nil
+	case !isPkgDir(d):
+		log.Printf("newDirectory(%s): not a package directory", root)
+		return nil
+	}
+	if maxDepth < 0 {
+		maxDepth = 1e6 // "infinity"
+	}
+	b := treeBuilder{maxDepth}
+	// the file set provided is only used for local parsing; no position
+	// information escapes, so we don't need to save the set
+}
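+
+// Typical use (a sketch; a negative maxDepth means no depth limit):
+//
+//	tree := newDirectory("/src/pkg", -1)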
+
+func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
+	if dir != nil {
+		if len(dir.Dirs) == 0 {
+			buf.WriteString(dir.Path)
+			buf.WriteByte('\n')
+			return
+		}
+
+		for _, d := range dir.Dirs {
+			d.writeLeafs(buf)
+		}
+	}
+}
+
+func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
+	if dir != nil {
+		if !skipRoot {
+			c <- dir
+		}
+		for _, d := range dir.Dirs {
+			d.walk(c, false)
+		}
+	}
+}
+
+func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
+	c := make(chan *Directory)
+	go func() {
+		dir.walk(c, skipRoot)
+		close(c)
+	}()
+	return c
+}
+
+func (dir *Directory) lookupLocal(name string) *Directory {
+	for _, d := range dir.Dirs {
+		if d.Name == name {
+			return d
+		}
+	}
+	return nil
+}
+
+func splitPath(p string) []string {
+	p = strings.TrimPrefix(p, "/")
+	if p == "" {
+		return nil
+	}
+	return strings.Split(p, "/")
+}
+
+// lookup looks for the *Directory for a given path, relative to dir.
+func (dir *Directory) lookup(path string) *Directory {
+	d := splitPath(dir.Path)
+	p := splitPath(path)
+	i := 0
+	for i < len(d) {
+		if i >= len(p) || d[i] != p[i] {
+			return nil
+		}
+		i++
+	}
+	for dir != nil && i < len(p) {
+		dir = dir.lookupLocal(p[i])
+		i++
+	}
+	return dir
+}
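+
+// For example, with dir rooted at "/src/pkg" (a hypothetical tree):
+//
+//	dir.lookup("/src/pkg/go/ast") // the go/ast node, or nil if absent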
+
+// DirEntry describes a directory entry. The Depth and Height values
+// are useful for presenting an entry in an indented fashion.
+//
+type DirEntry struct {
+	Depth    int    // >= 0
+	Height   int    // = DirList.MaxHeight - Depth, > 0
+	Path     string // directory path; includes Name, relative to DirList root
+	Name     string // directory name
+	HasPkg   bool   // true if the directory contains at least one package
+	Synopsis string // package documentation, if any
+}
+
+type DirList struct {
+	MaxHeight int // directory tree height, > 0
+	List      []DirEntry
+}
+
+// listing creates a (linear) directory listing from a directory tree.
+// If skipRoot is set, the root directory itself is excluded from the list.
+//
+func (root *Directory) listing(skipRoot bool) *DirList {
+	if root == nil {
+		return nil
+	}
+
+	// determine number of entries n and maximum height
+	n := 0
+	minDepth := 1 << 30 // infinity
+	maxDepth := 0
+	for d := range root.iter(skipRoot) {
+		n++
+		if minDepth > d.Depth {
+			minDepth = d.Depth
+		}
+		if maxDepth < d.Depth {
+			maxDepth = d.Depth
+		}
+	}
+	maxHeight := maxDepth - minDepth + 1
+
+	if n == 0 {
+		return nil
+	}
+
+	// create list
+	list := make([]DirEntry, n)
+	i := 0
+	for d := range root.iter(skipRoot) {
+		p := &list[i]
+		p.Depth = d.Depth - minDepth
+		p.Height = maxHeight - p.Depth
+		// the path is relative to root.Path - remove the root.Path
+		// prefix (the prefix should always be present, but check
+		// anyway to avoid crashes)
+		path := strings.TrimPrefix(d.Path, root.Path)
+		// remove leading separator if any - path must be relative
+		path = strings.TrimPrefix(path, "/")
+		p.Path = path
+		p.Name = d.Name
+		p.HasPkg = d.HasPkg
+		p.Synopsis = d.Synopsis
+		i++
+	}
+
+	return &DirList{maxHeight, list}
+}
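+
+// Sketch of intended use: flatten a tree for rendering, excluding the root.
+//
+//	dl := tree.listing(true)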
diff --git a/cmd/godoc/doc.go b/cmd/godoc/doc.go
new file mode 100644
index 0000000..1fa57a8
--- /dev/null
+++ b/cmd/godoc/doc.go
@@ -0,0 +1,135 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Godoc extracts and generates documentation for Go programs.
+
+It has two modes.
+
+Without the -http flag, it runs in command-line mode and prints plain text
+documentation to standard output and exits. If both a library package and
+a command with the same name exist, using the prefix cmd/ will force
+documentation on the command rather than the library package. If the -src
+flag is specified, godoc prints the exported interface of a package in Go
+source form, or the implementation of a specific exported language entity:
+
+	godoc fmt                # documentation for package fmt
+	godoc fmt Printf         # documentation for fmt.Printf
+	godoc cmd/go             # force documentation for the go command
+	godoc -src fmt           # fmt package interface in Go source form
+	godoc -src fmt Printf    # implementation of fmt.Printf
+
+In command-line mode, the -q flag enables search queries against a godoc running
+as a webserver. If no explicit server address is specified with the -server flag,
+godoc first tries localhost:6060 and then http://golang.org.
+
+	godoc -q Reader
+	godoc -q math.Sin
+	godoc -server=:6060 -q sin
+
+With the -http flag, it runs as a web server and presents the documentation as a
+web page.
+
+	godoc -http=:6060
+
+Usage:
+	godoc [flag] package [name ...]
+
+The flags are:
+	-v
+		verbose mode
+	-q
+		arguments are considered search queries: a legal query is a
+		single identifier (such as ToLower) or a qualified identifier
+		(such as math.Sin).
+	-src
+		print (exported) source in command-line mode
+	-tabwidth=4
+		width of tabs in units of spaces
+	-timestamps=true
+		show timestamps with directory listings
+	-index
+		enable identifier and full text search index
+		(no search box is shown if -index is not set)
+	-index_files=""
+		glob pattern specifying index files; if not empty,
+		the index is read from these files in sorted order
+	-index_throttle=0.75
+		index throttle value; a value of 0 means no time is allocated
+		to the indexer (the indexer will never finish), a value of 1.0
+		means that index creation is running at full throttle (other
+		goroutines may get no time while the index is built)
+	-links=true
+		link identifiers to their declarations
+	-write_index=false
+		write index to a file; the file name must be specified with
+		-index_files
+	-maxresults=10000
+		maximum number of full text search results shown
+		(no full text index is built if maxresults <= 0)
+	-notes="BUG"
+		regular expression matching note markers to show
+		(e.g., "BUG|TODO", ".*")
+	-html
+		print HTML in command-line mode
+	-goroot=$GOROOT
+		Go root directory
+	-http=addr
+		HTTP service address (e.g., '127.0.0.1:6060' or just ':6060')
+	-server=addr
+		webserver address for command line searches
+	-templates=""
+		directory containing alternate template files; if set,
+		the directory may provide alternative template files
+		for the files in $GOROOT/lib/godoc
+	-url=path
+		print to standard output the data that would be served by
+		an HTTP request for path
+	-zip=""
+		zip file providing the file system to serve; disabled if empty
+
+By default, godoc looks at the packages it finds via $GOROOT and $GOPATH (if set).
+This behavior can be altered by providing an alternative $GOROOT with the -goroot
+flag.
+
+When godoc runs as a web server and -index is set, a search index is maintained.
+The index is created at startup.
+
+The index contains both identifier and full text search information (searchable
+via regular expressions). The maximum number of full text search results shown
+can be set with the -maxresults flag; if set to 0, no full text results are
+shown, and only an identifier index (but no full text search index) is created.
+
+The presentation mode of web pages served by godoc can be controlled with the
+"m" URL parameter; it accepts a comma-separated list of flag names as value:
+
+	all	show documentation for all declarations, not just the exported ones
+	methods	show all embedded methods, not just those of unexported anonymous fields
+	src	show the original source code rather than the extracted documentation
+	text	present the page in textual (command-line) form rather than HTML
+	flat	present flat (not indented) directory listings using full paths
+
+For instance, http://golang.org/pkg/math/big/?m=all,text shows the documentation
+for all (not just the exported) declarations of package big, in textual form (as
+it would appear when using godoc from the command line: "godoc -src math/big .*").
+
+By default, godoc serves files from the file system of the underlying OS.
+Instead, a .zip file may be provided via the -zip flag, which contains
+the file system to serve. The file paths stored in the .zip file must use
+slash ('/') as path separator, and they must be unrooted. $GOROOT (or -goroot)
+must be set to the .zip file directory path containing the Go root directory.
+For instance, for a .zip file created by the command:
+
+	zip -r go.zip $HOME/go
+
+one may run godoc as follows:
+
+	godoc -http=:6060 -zip=go.zip -goroot=$HOME/go
+
+See "Godoc: documenting Go code" for how to write good comments for godoc:
+http://golang.org/doc/articles/godoc_documenting_go_code.html
+
+*/
+package main
diff --git a/cmd/godoc/filesystem.go b/cmd/godoc/filesystem.go
new file mode 100644
index 0000000..0309d7c
--- /dev/null
+++ b/cmd/godoc/filesystem.go
@@ -0,0 +1,562 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines types for abstract file system access and
+// provides an implementation accessing the file system of the
+// underlying OS.
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	pathpkg "path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+)
+
+// fs is the file system that godoc reads from and serves.
+// It is a virtual file system that operates on slash-separated paths,
+// and its root corresponds to the Go distribution root: /src/pkg
+// holds the source tree, and so on.  This means that the URLs served by
+// the godoc server are the same as the paths in the virtual file
+// system, which helps keep things simple.
+//
+// New file trees - implementations of FileSystem - can be added to
+// the virtual file system using nameSpace's Bind method.
+// The usual setup is to bind OS(runtime.GOROOT()) to the root
+// of the name space and then bind any GOPATH/src directories
+// on top of /src/pkg, so that all sources are in /src/pkg.
+//
+// For more about name spaces, see the nameSpace type's
+// documentation below.
+//
+// The use of this virtual file system means that most code processing
+// paths can assume they are slash-separated and should be using
+// package path (often imported as pathpkg) to manipulate them,
+// even on Windows.
+//
+var fs = nameSpace{} // the underlying file system for godoc
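+
+// A sketch of the usual setup described above, as performed by godoc's
+// startup code (paths are hypothetical):
+//
+//	fs.Bind("/", OS(runtime.GOROOT()), "/", bindReplace)
+//	fs.Bind("/src/pkg", OS("/home/user/gopath"), "/src", bindAfter)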
+
+// Setting debugNS = true will enable debugging prints about
+// name space translations.
+const debugNS = false
+
+// The FileSystem interface specifies the methods godoc uses
+// to access the file system for which it serves documentation.
+type FileSystem interface {
+	Open(path string) (readSeekCloser, error)
+	Lstat(path string) (os.FileInfo, error)
+	Stat(path string) (os.FileInfo, error)
+	ReadDir(path string) ([]os.FileInfo, error)
+	String() string
+}
+
+type readSeekCloser interface {
+	io.Reader
+	io.Seeker
+	io.Closer
+}
+
+// ReadFile reads the file named by path from fs and returns the contents.
+func ReadFile(fs FileSystem, path string) ([]byte, error) {
+	rc, err := fs.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// OS returns an implementation of FileSystem reading from the
+// tree rooted at root.  Recording a root is convenient everywhere
+// but necessary on Windows, because the slash-separated path
+// passed to Open has no way to specify a drive letter.  Using a root
+// lets code refer to OS(`c:\`), OS(`d:\`) and so on.
+func OS(root string) FileSystem {
+	return osFS(root)
+}
+
+type osFS string
+
+func (root osFS) String() string { return "os(" + string(root) + ")" }
+
+func (root osFS) resolve(path string) string {
+	// Clean the path so that it cannot possibly begin with ../.
+	// If it did, the result of filepath.Join would be outside the
+	// tree rooted at root.  We probably won't ever see a path
+	// with .. in it, but be safe anyway.
+	path = pathpkg.Clean("/" + path)
+
+	return filepath.Join(string(root), path)
+}
+
+func (root osFS) Open(path string) (readSeekCloser, error) {
+	f, err := os.Open(root.resolve(path))
+	if err != nil {
+		return nil, err
+	}
+	fi, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, fmt.Errorf("Open: %s is a directory", path)
+	}
+	return f, nil
+}
+
+func (root osFS) Lstat(path string) (os.FileInfo, error) {
+	return os.Lstat(root.resolve(path))
+}
+
+func (root osFS) Stat(path string) (os.FileInfo, error) {
+	return os.Stat(root.resolve(path))
+}
+
+func (root osFS) ReadDir(path string) ([]os.FileInfo, error) {
+	return ioutil.ReadDir(root.resolve(path)) // is sorted
+}
+
+// hasPathPrefix returns true if x == y or x == y + "/" + more
+func hasPathPrefix(x, y string) bool {
+	return x == y || strings.HasPrefix(x, y) && (strings.HasSuffix(y, "/") || strings.HasPrefix(x[len(y):], "/"))
+}
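+
+// For example (a sketch):
+//
+//	hasPathPrefix("/src/pkg", "/src")     // true
+//	hasPathPrefix("/src/pkg", "/src/pkg") // true
+//	hasPathPrefix("/srcfoo", "/src")      // false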
+
+// A nameSpace is a file system made up of other file systems
+// mounted at specific locations in the name space.
+//
+// The representation is a map from mount point locations
+// to the list of file systems mounted at that location.  A traditional
+// Unix mount table would use a single file system per mount point,
+// but we want to be able to mount multiple file systems on a single
+// mount point and have the system behave as if the union of those
+// file systems were present at the mount point.
+// For example, if the OS file system has a Go installation in
+// c:\Go and additional Go path trees in d:\Work1 and d:\Work2, then
+// this name space creates the view we want for the godoc server:
+//
+//	nameSpace{
+//		"/": {
+//			{old: "/", fs: OS(`c:\Go`), new: "/"},
+//		},
+//		"/src/pkg": {
+//			{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
+//			{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
+//			{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
+//		},
+//	}
+//
+// This is created by executing:
+//
+//	ns := nameSpace{}
+//	ns.Bind("/", OS(`c:\Go`), "/", bindReplace)
+//	ns.Bind("/src/pkg", OS(`d:\Work1`), "/src", bindAfter)
+//	ns.Bind("/src/pkg", OS(`d:\Work2`), "/src", bindAfter)
+//
+// A particular mount point entry is a triple (old, fs, new), meaning that to
+// operate on a path beginning with old, replace that prefix (old) with new
+// and then pass that path to the FileSystem implementation fs.
+//
+// Given this name space, a ReadDir of /src/pkg/code will check each prefix
+// of the path for a mount point (first /src/pkg/code, then /src/pkg, then /src,
+// then /), stopping when it finds one.  For the above example, /src/pkg/code
+// will find the mount point at /src/pkg:
+//
+//	{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
+//	{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
+//	{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
+//
+// ReadDir will then execute these three calls and merge the results:
+//
+//	OS(`c:\Go`).ReadDir("/src/pkg/code")
+//	OS(`d:\Work1`).ReadDir("/src/code")
+//	OS(`d:\Work2`).ReadDir("/src/code")
+//
+// Note that the "/src/pkg" in "/src/pkg/code" has been replaced by
+// just "/src" in the final two calls.
+//
+// OS is itself an implementation of a file system: it implements
+// OS(`c:\Go`).ReadDir("/src/pkg/code") as ioutil.ReadDir(`c:\Go\src\pkg\code`).
+//
+// Because the new path is evaluated by fs (here OS(root)), another way
+// to read the mount table is to mentally combine fs+new, so that this table:
+//
+//	{old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"},
+//	{old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"},
+//	{old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"},
+//
+// reads as:
+//
+//	"/src/pkg" -> c:\Go\src\pkg
+//	"/src/pkg" -> d:\Work1\src
+//	"/src/pkg" -> d:\Work2\src
+//
+// An invariant (a redundancy) of the name space representation is that
+// ns[mtpt][i].old is always equal to mtpt (in the example, ns["/src/pkg"]'s
+// mount table entries always have old == "/src/pkg").  The 'old' field is
+// useful to callers, because they receive just a []mountedFS and not any
+// other indication of which mount point was found.
+//
+type nameSpace map[string][]mountedFS
+
+// A mountedFS handles requests for path by replacing
+// a prefix 'old' with 'new' and then calling the fs methods.
+type mountedFS struct {
+	old string
+	fs  FileSystem
+	new string
+}
+
+// translate translates path for use in m, replacing old with new.
+//
+// mountedFS{"/src/pkg", fs, "/src"}.translate("/src/pkg/code") == "/src/code".
+func (m mountedFS) translate(path string) string {
+	path = pathpkg.Clean("/" + path)
+	if !hasPathPrefix(path, m.old) {
+		panic("translate " + path + " but old=" + m.old)
+	}
+	return pathpkg.Join(m.new, path[len(m.old):])
+}
+
+func (nameSpace) String() string {
+	return "ns"
+}
+
+// Fprint writes a text representation of the name space to w.
+func (ns nameSpace) Fprint(w io.Writer) {
+	fmt.Fprint(w, "name space {\n")
+	var all []string
+	for mtpt := range ns {
+		all = append(all, mtpt)
+	}
+	sort.Strings(all)
+	for _, mtpt := range all {
+		fmt.Fprintf(w, "\t%s:\n", mtpt)
+		for _, m := range ns[mtpt] {
+			fmt.Fprintf(w, "\t\t%s %s\n", m.fs, m.new)
+		}
+	}
+	fmt.Fprint(w, "}\n")
+}
+
+// clean returns a cleaned, rooted path for evaluation.
+// It canonicalizes the path so that we can use string operations
+// to analyze it.
+func (nameSpace) clean(path string) string {
+	return pathpkg.Clean("/" + path)
+}
+
+// Bind causes references to old to redirect to the path new in newfs.
+// If mode is bindReplace, old redirections are discarded.
+// If mode is bindBefore, this redirection takes priority over existing ones,
+// but earlier ones are still consulted for paths that do not exist in newfs.
+// If mode is bindAfter, this redirection happens only after existing ones
+// have been tried and failed.
+
+const (
+	bindReplace = iota
+	bindBefore
+	bindAfter
+)
+
+func (ns nameSpace) Bind(old string, newfs FileSystem, new string, mode int) {
+	old = ns.clean(old)
+	new = ns.clean(new)
+	m := mountedFS{old, newfs, new}
+	var mtpt []mountedFS
+	switch mode {
+	case bindReplace:
+		mtpt = append(mtpt, m)
+	case bindAfter:
+		mtpt = append(mtpt, ns.resolve(old)...)
+		mtpt = append(mtpt, m)
+	case bindBefore:
+		mtpt = append(mtpt, m)
+		mtpt = append(mtpt, ns.resolve(old)...)
+	}
+
+	// Extend m.old, m.new in inherited mount point entries.
+	for i := range mtpt {
+		m := &mtpt[i]
+		if m.old != old {
+			if !hasPathPrefix(old, m.old) {
+				// This should not happen.  If it does, panic so
+				// that we can see the call trace that led to it.
+				panic(fmt.Sprintf("invalid Bind: old=%q m={%q, %s, %q}", old, m.old, m.fs.String(), m.new))
+			}
+			suffix := old[len(m.old):]
+			m.old = pathpkg.Join(m.old, suffix)
+			m.new = pathpkg.Join(m.new, suffix)
+		}
+	}
+
+	ns[old] = mtpt
+}
+
+// resolve resolves a path to the list of mountedFS to use for path.
+func (ns nameSpace) resolve(path string) []mountedFS {
+	path = ns.clean(path)
+	for {
+		if m := ns[path]; m != nil {
+			if debugNS {
+				fmt.Printf("resolve %s: %v\n", path, m)
+			}
+			return m
+		}
+		if path == "/" {
+			break
+		}
+		path = pathpkg.Dir(path)
+	}
+	return nil
+}
+
+// Open implements the FileSystem Open method.
+func (ns nameSpace) Open(path string) (readSeekCloser, error) {
+	var err error
+	for _, m := range ns.resolve(path) {
+		if debugNS {
+			fmt.Printf("tx %s: %v\n", path, m.translate(path))
+		}
+		r, err1 := m.fs.Open(m.translate(path))
+		if err1 == nil {
+			return r, nil
+		}
+		if err == nil {
+			err = err1
+		}
+	}
+	if err == nil {
+		err = &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
+	}
+	return nil, err
+}
+
+// stat implements the FileSystem Stat and Lstat methods.
+func (ns nameSpace) stat(path string, f func(FileSystem, string) (os.FileInfo, error)) (os.FileInfo, error) {
+	var err error
+	for _, m := range ns.resolve(path) {
+		fi, err1 := f(m.fs, m.translate(path))
+		if err1 == nil {
+			return fi, nil
+		}
+		if err == nil {
+			err = err1
+		}
+	}
+	if err == nil {
+		err = &os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist}
+	}
+	return nil, err
+}
+
+func (ns nameSpace) Stat(path string) (os.FileInfo, error) {
+	return ns.stat(path, FileSystem.Stat)
+}
+
+func (ns nameSpace) Lstat(path string) (os.FileInfo, error) {
+	return ns.stat(path, FileSystem.Lstat)
+}
+
+// dirInfo is a trivial implementation of os.FileInfo for a directory.
+type dirInfo string
+
+func (d dirInfo) Name() string       { return string(d) }
+func (d dirInfo) Size() int64        { return 0 }
+func (d dirInfo) Mode() os.FileMode  { return os.ModeDir | 0555 }
+func (d dirInfo) ModTime() time.Time { return startTime }
+func (d dirInfo) IsDir() bool        { return true }
+func (d dirInfo) Sys() interface{}   { return nil }
+
+var startTime = time.Now()
+
+// ReadDir implements the FileSystem ReadDir method.  It's where most of the magic is.
+// (The rest is in resolve.)
+//
+// Logically, ReadDir must return the union of all the directories that are named
+// by path.  To avoid misinterpreting Go packages, of all the directories
+// that contain Go source code we include the files from only the first,
+// but we include subdirectories from all of them.
+//
+// ReadDir must also return directory entries needed to reach mount points.
+// If the name space looks like the example in the type nameSpace comment,
+// but c:\Go does not have a src/pkg subdirectory, we still want to be able
+// to find that subdirectory, because we've mounted d:\Work1 and d:\Work2
+// there.  So if we don't see "src" in the directory listing for c:\Go, we add an
+// entry for it before returning.
+//
+func (ns nameSpace) ReadDir(path string) ([]os.FileInfo, error) {
+	path = ns.clean(path)
+
+	var (
+		haveGo   = false
+		haveName = map[string]bool{}
+		all      []os.FileInfo
+		err      error
+		first    []os.FileInfo
+	)
+
+	for _, m := range ns.resolve(path) {
+		dir, err1 := m.fs.ReadDir(m.translate(path))
+		if err1 != nil {
+			if err == nil {
+				err = err1
+			}
+			continue
+		}
+
+		if dir == nil {
+			dir = []os.FileInfo{}
+		}
+
+		if first == nil {
+			first = dir
+		}
+
+		// If we don't yet have Go files in 'all' and this directory
+		// has some, add all the files from this directory.
+		// Otherwise, only add subdirectories.
+		useFiles := false
+		if !haveGo {
+			for _, d := range dir {
+				if strings.HasSuffix(d.Name(), ".go") {
+					useFiles = true
+					haveGo = true
+					break
+				}
+			}
+		}
+
+		for _, d := range dir {
+			name := d.Name()
+			if (d.IsDir() || useFiles) && !haveName[name] {
+				haveName[name] = true
+				all = append(all, d)
+			}
+		}
+	}
+
+	// We didn't find any directories containing Go files.
+	// If some directory returned successfully, use that.
+	if !haveGo {
+		for _, d := range first {
+			if !haveName[d.Name()] {
+				haveName[d.Name()] = true
+				all = append(all, d)
+			}
+		}
+	}
+
+	// Having built the union, add any missing directories needed to reach mount points.
+	for old := range ns {
+		if hasPathPrefix(old, path) && old != path {
+			// Find next element after path in old.
+			elem := old[len(path):]
+			elem = strings.TrimPrefix(elem, "/")
+			if i := strings.Index(elem, "/"); i >= 0 {
+				elem = elem[:i]
+			}
+			if !haveName[elem] {
+				haveName[elem] = true
+				all = append(all, dirInfo(elem))
+			}
+		}
+	}
+
+	if len(all) == 0 {
+		return nil, err
+	}
+
+	sort.Sort(byName(all))
+	return all, nil
+}
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int           { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
+
+// An httpFS implements http.FileSystem using a FileSystem.
+type httpFS struct {
+	fs FileSystem
+}
+
+func (h *httpFS) Open(name string) (http.File, error) {
+	fi, err := h.fs.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return &httpDir{h.fs, name, nil}, nil
+	}
+	f, err := h.fs.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return &httpFile{h.fs, f, name}, nil
+}
+
+// httpDir implements http.File for a directory in a FileSystem.
+type httpDir struct {
+	fs      FileSystem
+	name    string
+	pending []os.FileInfo
+}
+
+func (h *httpDir) Close() error               { return nil }
+func (h *httpDir) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
+func (h *httpDir) Read([]byte) (int, error) {
+	return 0, fmt.Errorf("cannot Read from directory %s", h.name)
+}
+
+func (h *httpDir) Seek(offset int64, whence int) (int64, error) {
+	if offset == 0 && whence == 0 {
+		h.pending = nil
+		return 0, nil
+	}
+	return 0, fmt.Errorf("unsupported Seek in directory %s", h.name)
+}
+
+func (h *httpDir) Readdir(count int) ([]os.FileInfo, error) {
+	if h.pending == nil {
+		d, err := h.fs.ReadDir(h.name)
+		if err != nil {
+			return nil, err
+		}
+		if d == nil {
+			d = []os.FileInfo{} // not nil
+		}
+		h.pending = d
+	}
+
+	if len(h.pending) == 0 && count > 0 {
+		return nil, io.EOF
+	}
+	if count <= 0 || count > len(h.pending) {
+		count = len(h.pending)
+	}
+	d := h.pending[:count]
+	h.pending = h.pending[count:]
+	return d, nil
+}
+
+// httpFile implements http.File for a file (not directory) in a FileSystem.
+type httpFile struct {
+	fs FileSystem
+	readSeekCloser
+	name string
+}
+
+func (h *httpFile) Stat() (os.FileInfo, error) { return h.fs.Stat(h.name) }
+func (h *httpFile) Readdir(int) ([]os.FileInfo, error) {
+	return nil, fmt.Errorf("cannot Readdir from file %s", h.name)
+}
diff --git a/cmd/godoc/format.go b/cmd/godoc/format.go
new file mode 100644
index 0000000..59a89c5
--- /dev/null
+++ b/cmd/godoc/format.go
@@ -0,0 +1,372 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FormatSelections and FormatText.
+// FormatText is used to HTML-format Go and non-Go source
+// text with line numbers and highlighted sections. It is
+// built on top of FormatSelections, a generic formatter
+// for "selected" text.
+
+package main
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+	"io"
+	"regexp"
+	"strconv"
+	"text/template"
+)
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatSelections
+
+// A Segment describes a text segment [start, end).
+// The zero value of a Segment is a ready-to-use empty segment.
+//
+type Segment struct {
+	start, end int
+}
+
+func (seg *Segment) isEmpty() bool { return seg.start >= seg.end }
+
+// A Selection is an "iterator" function returning a text segment.
+// Repeated calls to a selection return consecutive, non-overlapping,
+// non-empty segments, followed by an infinite sequence of empty
+// segments. The first empty segment marks the end of the selection.
+//
+type Selection func() Segment
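+
+// Draining a Selection (a sketch; process is a hypothetical consumer):
+//
+//	for seg := sel(); !seg.isEmpty(); seg = sel() {
+//		process(text[seg.start:seg.end])
+//	}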
+
+// A LinkWriter writes some start or end "tag" to w for the text offset offs.
+// It is called by FormatSelections at the start or end of each link segment.
+//
+type LinkWriter func(w io.Writer, offs int, start bool)
+
+// A SegmentWriter formats a text according to selections and writes it to w.
+// The selections parameter is a bit set indicating which selections provided
+// to FormatSelections overlap with the text segment: If the n'th bit is set
+// in selections, the n'th selection provided to FormatSelections is overlapping
+// with the text.
+//
+type SegmentWriter func(w io.Writer, text []byte, selections int)
+
+// FormatSelections takes a text and writes it to w using link and segment
+// writers lw and sw as follows: lw is invoked for consecutive segment starts
+// and ends as specified through the links selection, and sw is invoked for
+// consecutive segments of text overlapped by the same selections as specified
+// by selections. The link writer lw may be nil, in which case the links
+// Selection is ignored.
+//
+func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
+	// If we have a link writer, make the links
+	// selection the last entry in selections
+	if lw != nil {
+		selections = append(selections, links)
+	}
+
+	// compute the sequence of consecutive segment changes
+	changes := newMerger(selections)
+
+	// The i'th bit in bitset indicates that the text
+	// at the current offset is covered by selections[i].
+	bitset := 0
+	lastOffs := 0
+
+	// Text segments are written in a delayed fashion
+	// such that consecutive segments belonging to the
+	// same selection can be combined (peephole optimization).
+	// last describes the last segment which has not yet been written.
+	var last struct {
+		begin, end int // valid if begin < end
+		bitset     int
+	}
+
+	// flush writes the last delayed text segment
+	flush := func() {
+		if last.begin < last.end {
+			sw(w, text[last.begin:last.end], last.bitset)
+		}
+		last.begin = last.end // invalidate last
+	}
+
+	// segment runs the segment [lastOffs, end) with the selection
+	// indicated by bitset through the segment peephole optimizer.
+	segment := func(end int) {
+		if lastOffs < end { // ignore empty segments
+			if last.end != lastOffs || last.bitset != bitset {
+				// the last segment is not adjacent to or
+				// differs from the new one
+				flush()
+				// start a new segment
+				last.begin = lastOffs
+			}
+			last.end = end
+			last.bitset = bitset
+		}
+	}
+
+	for {
+		// get the next segment change
+		index, offs, start := changes.next()
+		if index < 0 || offs > len(text) {
+			// no more segment changes or the next change
+			// is past the end of the text - we're done
+			break
+		}
+		// determine the kind of segment change
+		if lw != nil && index == len(selections)-1 {
+			// we have a link segment change (see start of this function):
+			// format the previous selection segment, write the
+			// link tag and start a new selection segment
+			segment(offs)
+			flush()
+			lastOffs = offs
+			lw(w, offs, start)
+		} else {
+			// we have a selection change:
+			// format the previous selection segment, determine
+			// the new selection bitset and start a new segment
+			segment(offs)
+			lastOffs = offs
+			mask := 1 << uint(index)
+			if start {
+				bitset |= mask
+			} else {
+				bitset &^= mask
+			}
+		}
+	}
+	segment(len(text))
+	flush()
+}
+
+// A merger merges a slice of Selections and produces a sequence of
+// consecutive segment change events through repeated next() calls.
+//
+type merger struct {
+	selections []Selection
+	segments   []Segment // segments[i] is the next segment of selections[i]
+}
+
+const infinity int = 2e9
+
+func newMerger(selections []Selection) *merger {
+	segments := make([]Segment, len(selections))
+	for i, sel := range selections {
+		segments[i] = Segment{infinity, infinity}
+		if sel != nil {
+			if seg := sel(); !seg.isEmpty() {
+				segments[i] = seg
+			}
+		}
+	}
+	return &merger{selections, segments}
+}
+
+// next returns the next segment change: index specifies the Selection
+// to which the segment belongs, offs is the segment start or end offset
+// as determined by the start value. If there are no more segment changes,
+// next returns an index value < 0.
+//
+func (m *merger) next() (index, offs int, start bool) {
+	// find the next smallest offset where a segment starts or ends
+	offs = infinity
+	index = -1
+	for i, seg := range m.segments {
+		switch {
+		case seg.start < offs:
+			offs = seg.start
+			index = i
+			start = true
+		case seg.end < offs:
+			offs = seg.end
+			index = i
+			start = false
+		}
+	}
+	if index < 0 {
+		// no offset found => all selections merged
+		return
+	}
+	// offset found - it's either the start or end offset but
+	// either way it is ok to consume the start offset: set it
+	// to infinity so it won't be considered in the following
+	// next call
+	m.segments[index].start = infinity
+	if start {
+		return
+	}
+	// end offset found - consume it
+	m.segments[index].end = infinity
+	// advance to the next segment for that selection
+	seg := m.selections[index]()
+	if !seg.isEmpty() {
+		m.segments[index] = seg
+	}
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatText
+
+// lineSelection returns the line segments for text as a Selection.
+func lineSelection(text []byte) Selection {
+	i, j := 0, 0
+	return func() (seg Segment) {
+		// find next newline, if any
+		for j < len(text) {
+			j++
+			if text[j-1] == '\n' {
+				break
+			}
+		}
+		if i < j {
+			// text[i:j] constitutes a line
+			seg = Segment{i, j}
+			i = j
+		}
+		return
+	}
+}
+
+// tokenSelection returns, as a selection, the sequence of
+// consecutive occurrences of token sel in the Go src text.
+//
+func tokenSelection(src []byte, sel token.Token) Selection {
+	var s scanner.Scanner
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(src))
+	s.Init(file, src, nil, scanner.ScanComments)
+	return func() (seg Segment) {
+		for {
+			pos, tok, lit := s.Scan()
+			if tok == token.EOF {
+				break
+			}
+			offs := file.Offset(pos)
+			if tok == sel {
+				seg = Segment{offs, offs + len(lit)}
+				break
+			}
+		}
+		return
+	}
+}
+
+// makeSelection is a helper function to make a Selection from a slice of pairs.
+// Pairs describing empty segments are ignored.
+//
+func makeSelection(matches [][]int) Selection {
+	i := 0
+	return func() Segment {
+		for i < len(matches) {
+			m := matches[i]
+			i++
+			if m[0] < m[1] {
+				// non-empty segment
+				return Segment{m[0], m[1]}
+			}
+		}
+		return Segment{}
+	}
+}
+
+// regexpSelection computes the Selection for the regular expression expr in text.
+func regexpSelection(text []byte, expr string) Selection {
+	var matches [][]int
+	if rx, err := regexp.Compile(expr); err == nil {
+		matches = rx.FindAllIndex(text, -1)
+	}
+	return makeSelection(matches)
+}
+
+var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)
+
+// rangeSelection computes the Selection for a text range described
+// by the argument str; the range description must match the selRx
+// regular expression.
+//
+func rangeSelection(str string) Selection {
+	m := selRx.FindStringSubmatch(str)
+	if len(m) >= 2 {
+		from, _ := strconv.Atoi(m[1])
+		to, _ := strconv.Atoi(m[2])
+		if from < to {
+			return makeSelection([][]int{{from, to}})
+		}
+	}
+	return nil
+}
+
+// Span tags for all the possible selection combinations that may
+// be generated by FormatText. Selections are indicated by a bitset,
+// and the value of the bitset specifies the tag to be used.
+//
+// bit 0: comments
+// bit 1: highlights
+// bit 2: selections
+//
+var startTags = [][]byte{
+	/* 000 */ []byte(``),
+	/* 001 */ []byte(`<span class="comment">`),
+	/* 010 */ []byte(`<span class="highlight">`),
+	/* 011 */ []byte(`<span class="highlight-comment">`),
+	/* 100 */ []byte(`<span class="selection">`),
+	/* 101 */ []byte(`<span class="selection-comment">`),
+	/* 110 */ []byte(`<span class="selection-highlight">`),
+	/* 111 */ []byte(`<span class="selection-highlight-comment">`),
+}
+
+var endTag = []byte(`</span>`)
+
+func selectionTag(w io.Writer, text []byte, selections int) {
+	if selections < len(startTags) {
+		if tag := startTags[selections]; len(tag) > 0 {
+			w.Write(tag)
+			template.HTMLEscape(w, text)
+			w.Write(endTag)
+			return
+		}
+	}
+	template.HTMLEscape(w, text)
+}
+
+// FormatText HTML-escapes text and writes it to w.
+// Consecutive text segments are wrapped in HTML spans (with tags as
+// defined by startTags and endTag) as follows:
+//
+//	- if line >= 0, line number (ln) spans are inserted before each line,
+//	  starting with the value of line
+//	- if the text is Go source, comments get the "comment" span class
+//	- each occurrence of the regular expression pattern gets the "highlight"
+//	  span class
+//	- text segments covered by selection get the "selection" span class
+//
+// Comments, highlights, and selections may overlap arbitrarily; the respective
+// HTML span classes are specified in the startTags variable.
+//
+func FormatText(w io.Writer, text []byte, line int, goSource bool, pattern string, selection Selection) {
+	var comments, highlights Selection
+	if goSource {
+		comments = tokenSelection(text, token.COMMENT)
+	}
+	if pattern != "" {
+		highlights = regexpSelection(text, pattern)
+	}
+	if line >= 0 || comments != nil || highlights != nil || selection != nil {
+		var lineTag LinkWriter
+		if line >= 0 {
+			lineTag = func(w io.Writer, _ int, start bool) {
+				if start {
+					fmt.Fprintf(w, "<a id=\"L%d\"></a><span class=\"ln\">%6d</span>\t", line, line)
+					line++
+				}
+			}
+		}
+		FormatSelections(w, text, lineTag, lineSelection(text), selectionTag, comments, highlights, selection)
+	} else {
+		template.HTMLEscape(w, text)
+	}
+}
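+
+// A representative call, as made by serveTextFile in godoc.go for a Go
+// source file, numbers lines starting at 1, highlights matches of the URL
+// parameter "h", and marks the byte range given by "s":
+//
+//	FormatText(&buf, src, 1, true, r.FormValue("h"), rangeSelection(r.FormValue("s")))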
diff --git a/cmd/godoc/godoc.go b/cmd/godoc/godoc.go
new file mode 100644
index 0000000..79d485b
--- /dev/null
+++ b/cmd/godoc/godoc.go
@@ -0,0 +1,1586 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/doc"
+	"go/format"
+	"go/printer"
+	"go/token"
+	htmlpkg "html"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	pathpkg "path"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"sort"
+	"strings"
+	"text/template"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Globals
+
+type delayTime struct {
+	RWValue
+}
+
+func (dt *delayTime) backoff(max time.Duration) {
+	dt.mutex.Lock()
+	v := dt.value.(time.Duration) * 2
+	if v > max {
+		v = max
+	}
+	dt.value = v
+	// don't change dt.timestamp - calling backoff indicates an error condition
+	dt.mutex.Unlock()
+}
+
+var (
+	verbose = flag.Bool("v", false, "verbose mode")
+
+	// file system roots
+	// TODO(gri) consider the invariant that goroot always ends in '/'
+	goroot  = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+	testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+
+	// layout control
+	tabwidth       = flag.Int("tabwidth", 4, "tab width")
+	showTimestamps = flag.Bool("timestamps", false, "show timestamps with directory listings")
+	templateDir    = flag.String("templates", "", "directory containing alternate template files")
+	showPlayground = flag.Bool("play", false, "enable playground in web interface")
+	showExamples   = flag.Bool("ex", false, "show examples in command line mode")
+	declLinks      = flag.Bool("links", true, "link identifiers to their declarations")
+
+	// search index
+	indexEnabled = flag.Bool("index", false, "enable search index")
+	indexFiles   = flag.String("index_files", "", "glob pattern specifying index files; "+
+		"if not empty, the index is read from these files in sorted order")
+	maxResults    = flag.Int("maxresults", 10000, "maximum number of full text search results shown")
+	indexThrottle = flag.Float64("index_throttle", 0.75, "index throttle value; 0.0 = no time allocated, 1.0 = full throttle")
+
+	// file system information
+	fsTree      RWValue // *Directory tree of packages, updated with each sync (but sync code is removed now)
+	fsModified  RWValue // timestamp of last call to invalidateIndex
+	docMetadata RWValue // mapping from paths to *Metadata
+
+	// http handlers
+	fileServer http.Handler // default file server
+	cmdHandler docServer
+	pkgHandler docServer
+
+	// source code notes
+	notes = flag.String("notes", "BUG", "regular expression matching note markers to show")
+)
+
+func initHandlers() {
+	fileServer = http.FileServer(&httpFS{fs})
+	cmdHandler = docServer{"/cmd/", "/src/cmd"}
+	pkgHandler = docServer{"/pkg/", "/src/pkg"}
+}
+
+func registerPublicHandlers(mux *http.ServeMux) {
+	mux.Handle(cmdHandler.pattern, &cmdHandler)
+	mux.Handle(pkgHandler.pattern, &pkgHandler)
+	mux.HandleFunc("/doc/codewalk/", codewalk)
+	mux.Handle("/doc/play/", fileServer)
+	mux.HandleFunc("/search", search)
+	mux.Handle("/robots.txt", fileServer)
+	mux.HandleFunc("/opensearch.xml", serveSearchDesc)
+	mux.HandleFunc("/", serveFile)
+}
+
+func initFSTree() {
+	dir := newDirectory(pathpkg.Join("/", *testDir), -1)
+	if dir == nil {
+		log.Println("Warning: FSTree is nil")
+		return
+	}
+	fsTree.set(dir)
+	invalidateIndex()
+}
+
+// ----------------------------------------------------------------------------
+// Tab conversion
+
+var spaces = []byte("                                ") // 32 spaces seems like a good number
+
+const (
+	indenting = iota
+	collecting
+)
+
+// A tconv is an io.Writer filter for converting leading tabs into spaces.
+type tconv struct {
+	output io.Writer
+	state  int // indenting or collecting
+	indent int // valid if state == indenting
+}
+
+func (p *tconv) writeIndent() (err error) {
+	i := p.indent
+	for i >= len(spaces) {
+		i -= len(spaces)
+		if _, err = p.output.Write(spaces); err != nil {
+			return
+		}
+	}
+	// i < len(spaces)
+	if i > 0 {
+		_, err = p.output.Write(spaces[0:i])
+	}
+	return
+}
+
+func (p *tconv) Write(data []byte) (n int, err error) {
+	if len(data) == 0 {
+		return
+	}
+	pos := 0 // valid if p.state == collecting
+	var b byte
+	for n, b = range data {
+		switch p.state {
+		case indenting:
+			switch b {
+			case '\t':
+				p.indent += *tabwidth
+			case '\n':
+				p.indent = 0
+				if _, err = p.output.Write(data[n : n+1]); err != nil {
+					return
+				}
+			case ' ':
+				p.indent++
+			default:
+				p.state = collecting
+				pos = n
+				if err = p.writeIndent(); err != nil {
+					return
+				}
+			}
+		case collecting:
+			if b == '\n' {
+				p.state = indenting
+				p.indent = 0
+				if _, err = p.output.Write(data[pos : n+1]); err != nil {
+					return
+				}
+			}
+		}
+	}
+	n = len(data)
+	if pos < n && p.state == collecting {
+		_, err = p.output.Write(data[pos:])
+	}
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Templates
+
+// Write an AST node to w.
+func writeNode(w io.Writer, fset *token.FileSet, x interface{}) {
+	// convert leading tabs into spaces using a tconv filter
+	// to ensure a good outcome in most browsers (there may still
+	// be tabs in comments and strings, but converting those into
+	// the right number of spaces is much harder)
+	//
+	// TODO(gri) rethink printer flags - perhaps tconv can be eliminated
+	//           with an another printer mode (which is more efficiently
+	//           implemented in the printer than here with another layer)
+	mode := printer.TabIndent | printer.UseSpaces
+	err := (&printer.Config{Mode: mode, Tabwidth: *tabwidth}).Fprint(&tconv{output: w}, fset, x)
+	if err != nil {
+		log.Print(err)
+	}
+}
+
+func filenameFunc(path string) string {
+	_, localname := pathpkg.Split(path)
+	return localname
+}
+
+func fileInfoNameFunc(fi os.FileInfo) string {
+	name := fi.Name()
+	if fi.IsDir() {
+		name += "/"
+	}
+	return name
+}
+
+func fileInfoTimeFunc(fi os.FileInfo) string {
+	if t := fi.ModTime(); t.Unix() != 0 {
+		return t.Local().String()
+	}
+	return "" // don't return epoch if time is obviously not set
+}
+
+// The strings in infoKinds must be properly html-escaped.
+var infoKinds = [nKinds]string{
+	PackageClause: "package&nbsp;clause",
+	ImportDecl:    "import&nbsp;decl",
+	ConstDecl:     "const&nbsp;decl",
+	TypeDecl:      "type&nbsp;decl",
+	VarDecl:       "var&nbsp;decl",
+	FuncDecl:      "func&nbsp;decl",
+	MethodDecl:    "method&nbsp;decl",
+	Use:           "use",
+}
+
+func infoKind_htmlFunc(info SpotInfo) string {
+	return infoKinds[info.Kind()] // infoKind entries are html-escaped
+}
+
+func infoLineFunc(info SpotInfo) int {
+	line := info.Lori()
+	if info.IsIndex() {
+		index, _ := searchIndex.get()
+		if index != nil {
+			line = index.(*Index).Snippet(line).Line
+		} else {
+			// no line information available because
+			// we don't have an index - this should
+			// never happen; be conservative and don't
+			// crash
+			line = 0
+		}
+	}
+	return line
+}
+
+func infoSnippet_htmlFunc(info SpotInfo) string {
+	if info.IsIndex() {
+		index, _ := searchIndex.get()
+		// Snippet.Text was HTML-escaped when it was generated
+		return index.(*Index).Snippet(info.Lori()).Text
+	}
+	return `<span class="alert">no snippet text available</span>`
+}
+
+func nodeFunc(info *PageInfo, node interface{}) string {
+	var buf bytes.Buffer
+	writeNode(&buf, info.FSet, node)
+	return buf.String()
+}
+
+func node_htmlFunc(info *PageInfo, node interface{}, linkify bool) string {
+	var buf1 bytes.Buffer
+	writeNode(&buf1, info.FSet, node)
+
+	var buf2 bytes.Buffer
+	if n, _ := node.(ast.Node); n != nil && linkify && *declLinks {
+		LinkifyText(&buf2, buf1.Bytes(), n)
+	} else {
+		FormatText(&buf2, buf1.Bytes(), -1, true, "", nil)
+	}
+
+	return buf2.String()
+}
+
+func comment_htmlFunc(comment string) string {
+	var buf bytes.Buffer
+	// TODO(gri) Provide list of words (e.g. function parameters)
+	//           to be emphasized by ToHTML.
+	doc.ToHTML(&buf, comment, nil) // does html-escaping
+	return buf.String()
+}
+
+// punchCardWidth is the number of columns of fixed-width
+// characters to assume when wrapping text.  Very few people
+// use terminals or cards smaller than 80 characters, so 80 it is.
+// We do not try to sniff the environment or the tty to adapt to
+// the situation; instead, by using a constant we make sure that
+// godoc always produces the same output regardless of context,
+// a consistency that is lost otherwise.  For example, if we sniffed
+// the environment or tty, then http://golang.org/pkg/math/?m=text
+// would depend on the width of the terminal where godoc started,
+// which is clearly bogus.  More generally, the Unix tools that behave
+// differently when writing to a tty than when writing to a file have
+// a history of causing confusion (compare `ls` and `ls | cat`), and we
+// want to avoid that mistake here.
+const punchCardWidth = 80
+
+func comment_textFunc(comment, indent, preIndent string) string {
+	var buf bytes.Buffer
+	doc.ToText(&buf, comment, indent, preIndent, punchCardWidth-2*len(indent))
+	return buf.String()
+}
+
+func startsWithUppercase(s string) bool {
+	r, _ := utf8.DecodeRuneInString(s)
+	return unicode.IsUpper(r)
+}
+
+var exampleOutputRx = regexp.MustCompile(`(?i)//[[:space:]]*output:`)
+
+// stripExampleSuffix strips lowercase braz in Foo_braz or Foo_Bar_braz from name
+// while keeping uppercase Braz in Foo_Braz.
+func stripExampleSuffix(name string) string {
+	if i := strings.LastIndex(name, "_"); i != -1 {
+		if i < len(name)-1 && !startsWithUppercase(name[i+1:]) {
+			name = name[:i]
+		}
+	}
+	return name
+}
+
+func example_textFunc(info *PageInfo, funcName, indent string) string {
+	if !*showExamples {
+		return ""
+	}
+
+	var buf bytes.Buffer
+	first := true
+	for _, eg := range info.Examples {
+		name := stripExampleSuffix(eg.Name)
+		if name != funcName {
+			continue
+		}
+
+		if !first {
+			buf.WriteString("\n")
+		}
+		first = false
+
+		// print code
+		cnode := &printer.CommentedNode{Node: eg.Code, Comments: eg.Comments}
+		var buf1 bytes.Buffer
+		writeNode(&buf1, info.FSet, cnode)
+		code := buf1.String()
+		// Additional formatting if this is a function body.
+		if n := len(code); n >= 2 && code[0] == '{' && code[n-1] == '}' {
+			// remove surrounding braces
+			code = code[1 : n-1]
+			// unindent
+			code = strings.Replace(code, "\n    ", "\n", -1)
+		}
+		code = strings.Trim(code, "\n")
+		code = strings.Replace(code, "\n", "\n\t", -1)
+
+		buf.WriteString(indent)
+		buf.WriteString("Example:\n\t")
+		buf.WriteString(code)
+		buf.WriteString("\n")
+	}
+	return buf.String()
+}
+
+func example_htmlFunc(info *PageInfo, funcName string) string {
+	var buf bytes.Buffer
+	for _, eg := range info.Examples {
+		name := stripExampleSuffix(eg.Name)
+
+		if name != funcName {
+			continue
+		}
+
+		// print code
+		cnode := &printer.CommentedNode{Node: eg.Code, Comments: eg.Comments}
+		code := node_htmlFunc(info, cnode, true)
+		out := eg.Output
+		wholeFile := true
+
+		// Additional formatting if this is a function body.
+		if n := len(code); n >= 2 && code[0] == '{' && code[n-1] == '}' {
+			wholeFile = false
+			// remove surrounding braces
+			code = code[1 : n-1]
+			// unindent
+			code = strings.Replace(code, "\n    ", "\n", -1)
+			// remove output comment
+			if loc := exampleOutputRx.FindStringIndex(code); loc != nil {
+				code = strings.TrimSpace(code[:loc[0]])
+			}
+		}
+
+		// Write out the playground code in standard Go style
+		// (use tabs, no comment highlight, etc).
+		play := ""
+		if eg.Play != nil && *showPlayground {
+			var buf bytes.Buffer
+			if err := format.Node(&buf, info.FSet, eg.Play); err != nil {
+				log.Print(err)
+			} else {
+				play = buf.String()
+			}
+		}
+
+		// Drop output, as the output comment will appear in the code.
+		if wholeFile && play == "" {
+			out = ""
+		}
+
+		err := exampleHTML.Execute(&buf, struct {
+			Name, Doc, Code, Play, Output string
+		}{eg.Name, eg.Doc, code, play, out})
+		if err != nil {
+			log.Print(err)
+		}
+	}
+	return buf.String()
+}
+
+// example_nameFunc takes an example function name and returns its display
+// name. For example, "Foo_Bar_quux" becomes "Foo.Bar (Quux)".
+func example_nameFunc(s string) string {
+	name, suffix := splitExampleName(s)
+	// replace _ with . for method names
+	name = strings.Replace(name, "_", ".", 1)
+	// use "Package" if no name provided
+	if name == "" {
+		name = "Package"
+	}
+	return name + suffix
+}
+
+// example_suffixFunc takes an example function name and returns its suffix in
+// parenthesized form. For example, "Foo_Bar_quux" becomes " (Quux)".
+func example_suffixFunc(name string) string {
+	_, suffix := splitExampleName(name)
+	return suffix
+}
+
+func noteTitle(note string) string {
+	return strings.Title(strings.ToLower(note))
+}
+
+func splitExampleName(s string) (name, suffix string) {
+	i := strings.LastIndex(s, "_")
+	if 0 <= i && i < len(s)-1 && !startsWithUppercase(s[i+1:]) {
+		name = s[:i]
+		suffix = " (" + strings.Title(s[i+1:]) + ")"
+		return
+	}
+	name = s
+	return
+}
+
+func pkgLinkFunc(path string) string {
+	relpath := path[1:]
+	// because of the irregular mapping under goroot
+	// we need to correct certain relative paths
+	relpath = strings.TrimPrefix(relpath, "src/pkg/")
+	return pkgHandler.pattern[1:] + relpath // remove leading '/' for relative URL
+}
+
+// n must be an ast.Node or a *doc.Note
+func posLink_urlFunc(info *PageInfo, n interface{}) string {
+	var pos, end token.Pos
+
+	switch n := n.(type) {
+	case ast.Node:
+		pos = n.Pos()
+		end = n.End()
+	case *doc.Note:
+		pos = n.Pos
+		end = n.End
+	default:
+		panic(fmt.Sprintf("wrong type for posLink_url template formatter: %T", n))
+	}
+
+	var relpath string
+	var line int
+	var low, high int // selection offset range
+
+	if pos.IsValid() {
+		p := info.FSet.Position(pos)
+		relpath = p.Filename
+		line = p.Line
+		low = p.Offset
+	}
+	if end.IsValid() {
+		high = info.FSet.Position(end).Offset
+	}
+
+	var buf bytes.Buffer
+	template.HTMLEscape(&buf, []byte(relpath))
+	// selection ranges are of form "s=low:high"
+	if low < high {
+		fmt.Fprintf(&buf, "?s=%d:%d", low, high) // no need for URL escaping
+		// if we have a selection, position the page
+		// such that the selection is a bit below the top
+		line -= 10
+		if line < 1 {
+			line = 1
+		}
+	}
+	// line id's in html-printed source are of the
+	// form "L%d" where %d stands for the line number
+	if line > 0 {
+		fmt.Fprintf(&buf, "#L%d", line) // no need for URL escaping
+	}
+
+	return buf.String()
+}
+
+func srcLinkFunc(s string) string {
+	return pathpkg.Clean("/" + s)
+}
+
+// fmap describes the template functions installed with all godoc templates.
+// Convention: template function names ending in "_html" or "_url" produce
+//             HTML- or URL-escaped strings; all other function results may
+//             require explicit escaping in the template.
+var fmap = template.FuncMap{
+	// various helpers
+	"filename": filenameFunc,
+	"repeat":   strings.Repeat,
+
+	// access to FileInfos (directory listings)
+	"fileInfoName": fileInfoNameFunc,
+	"fileInfoTime": fileInfoTimeFunc,
+
+	// access to search result information
+	"infoKind_html":    infoKind_htmlFunc,
+	"infoLine":         infoLineFunc,
+	"infoSnippet_html": infoSnippet_htmlFunc,
+
+	// formatting of AST nodes
+	"node":         nodeFunc,
+	"node_html":    node_htmlFunc,
+	"comment_html": comment_htmlFunc,
+	"comment_text": comment_textFunc,
+
+	// support for URL attributes
+	"pkgLink":     pkgLinkFunc,
+	"srcLink":     srcLinkFunc,
+	"posLink_url": posLink_urlFunc,
+
+	// formatting of Examples
+	"example_html":   example_htmlFunc,
+	"example_text":   example_textFunc,
+	"example_name":   example_nameFunc,
+	"example_suffix": example_suffixFunc,
+
+	// formatting of Notes
+	"noteTitle": noteTitle,
+}
+
+func readTemplate(name string) *template.Template {
+	path := "lib/godoc/" + name
+
+	// use underlying file system fs to read the template file
+	// (cannot use template ParseFile functions directly)
+	data, err := ReadFile(fs, path)
+	if err != nil {
+		log.Fatal("readTemplate: ", err)
+	}
+	// be explicit with errors (for app engine use)
+	t, err := template.New(name).Funcs(fmap).Parse(string(data))
+	if err != nil {
+		log.Fatal("readTemplate: ", err)
+	}
+	return t
+}
+
+var (
+	codewalkHTML,
+	codewalkdirHTML,
+	dirlistHTML,
+	errorHTML,
+	exampleHTML,
+	godocHTML,
+	packageHTML,
+	packageText,
+	searchHTML,
+	searchText,
+	searchDescXML *template.Template
+)
+
+func readTemplates() {
+	// have to delay until after flags processing since paths depend on goroot
+	codewalkHTML = readTemplate("codewalk.html")
+	codewalkdirHTML = readTemplate("codewalkdir.html")
+	dirlistHTML = readTemplate("dirlist.html")
+	errorHTML = readTemplate("error.html")
+	exampleHTML = readTemplate("example.html")
+	godocHTML = readTemplate("godoc.html")
+	packageHTML = readTemplate("package.html")
+	packageText = readTemplate("package.txt")
+	searchHTML = readTemplate("search.html")
+	searchText = readTemplate("search.txt")
+	searchDescXML = readTemplate("opensearch.xml")
+}
+
+// ----------------------------------------------------------------------------
+// Generic HTML wrapper
+
+// Page describes the contents of the top-level godoc webpage.
+type Page struct {
+	Title    string
+	Tabtitle string
+	Subtitle string
+	Query    string
+	Body     []byte
+
+	// filled in by servePage
+	SearchBox  bool
+	Playground bool
+	Version    string
+}
+
+func servePage(w http.ResponseWriter, page Page) {
+	if page.Tabtitle == "" {
+		page.Tabtitle = page.Title
+	}
+	page.SearchBox = *indexEnabled
+	page.Playground = *showPlayground
+	page.Version = runtime.Version()
+	if err := godocHTML.Execute(w, page); err != nil && err != http.ErrBodyNotAllowed {
+		// Only log if there's an error that's not about writing on HEAD requests.
+		// See Issues 5451 and 5454.
+		log.Printf("godocHTML.Execute: %s", err)
+	}
+}
+
+func serveText(w http.ResponseWriter, text []byte) {
+	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	w.Write(text)
+}
+
+// ----------------------------------------------------------------------------
+// Files
+
+var (
+	doctype   = []byte("<!DOCTYPE ")
+	jsonStart = []byte("<!--{")
+	jsonEnd   = []byte("}-->")
+)
+
+func serveHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
+	// get HTML body contents
+	src, err := ReadFile(fs, abspath)
+	if err != nil {
+		log.Printf("ReadFile: %s", err)
+		serveError(w, r, relpath, err)
+		return
+	}
+
+	// if it begins with "<!DOCTYPE " assume it is standalone
+	// html that doesn't need the template wrapping.
+	if bytes.HasPrefix(src, doctype) {
+		w.Write(src)
+		return
+	}
+
+	// if it begins with a JSON blob, read in the metadata.
+	meta, src, err := extractMetadata(src)
+	if err != nil {
+		log.Printf("decoding metadata %s: %v", relpath, err)
+	}
+
+	// evaluate as template if indicated
+	if meta.Template {
+		tmpl, err := template.New("main").Funcs(templateFuncs).Parse(string(src))
+		if err != nil {
+			log.Printf("parsing template %s: %v", relpath, err)
+			serveError(w, r, relpath, err)
+			return
+		}
+		var buf bytes.Buffer
+		if err := tmpl.Execute(&buf, nil); err != nil {
+			log.Printf("executing template %s: %v", relpath, err)
+			serveError(w, r, relpath, err)
+			return
+		}
+		src = buf.Bytes()
+	}
+
+	// if it's the language spec, add tags to EBNF productions
+	if strings.HasSuffix(abspath, "go_spec.html") {
+		var buf bytes.Buffer
+		Linkify(&buf, src)
+		src = buf.Bytes()
+	}
+
+	servePage(w, Page{
+		Title:    meta.Title,
+		Subtitle: meta.Subtitle,
+		Body:     src,
+	})
+}
+
+func applyTemplate(t *template.Template, name string, data interface{}) []byte {
+	var buf bytes.Buffer
+	if err := t.Execute(&buf, data); err != nil {
+		log.Printf("%s.Execute: %s", name, err)
+	}
+	return buf.Bytes()
+}
+
+func redirect(w http.ResponseWriter, r *http.Request) (redirected bool) {
+	canonical := pathpkg.Clean(r.URL.Path)
+	if !strings.HasSuffix(canonical, "/") {
+		canonical += "/"
+	}
+	if r.URL.Path != canonical {
+		url := *r.URL
+		url.Path = canonical
+		http.Redirect(w, r, url.String(), http.StatusMovedPermanently)
+		redirected = true
+	}
+	return
+}
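+
+// For example, a request for "/pkg" is permanently redirected to the
+// canonical "/pkg/", while a request already ending in '/' is served
+// directly (redirect reports false).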
+
+func redirectFile(w http.ResponseWriter, r *http.Request) (redirected bool) {
+	c := pathpkg.Clean(r.URL.Path)
+	c = strings.TrimRight(c, "/")
+	if r.URL.Path != c {
+		url := *r.URL
+		url.Path = c
+		http.Redirect(w, r, url.String(), http.StatusMovedPermanently)
+		redirected = true
+	}
+	return
+}
+
+func serveTextFile(w http.ResponseWriter, r *http.Request, abspath, relpath, title string) {
+	src, err := ReadFile(fs, abspath)
+	if err != nil {
+		log.Printf("ReadFile: %s", err)
+		serveError(w, r, relpath, err)
+		return
+	}
+
+	if r.FormValue("m") == "text" {
+		serveText(w, src)
+		return
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("<pre>")
+	FormatText(&buf, src, 1, pathpkg.Ext(abspath) == ".go", r.FormValue("h"), rangeSelection(r.FormValue("s")))
+	buf.WriteString("</pre>")
+	fmt.Fprintf(&buf, `<p><a href="/%s?m=text">View as plain text</a></p>`, htmlpkg.EscapeString(relpath))
+
+	servePage(w, Page{
+		Title:    title + " " + relpath,
+		Tabtitle: relpath,
+		Body:     buf.Bytes(),
+	})
+}
+
+func serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
+	if redirect(w, r) {
+		return
+	}
+
+	list, err := fs.ReadDir(abspath)
+	if err != nil {
+		serveError(w, r, relpath, err)
+		return
+	}
+
+	servePage(w, Page{
+		Title:    "Directory " + relpath,
+		Tabtitle: relpath,
+		Body:     applyTemplate(dirlistHTML, "dirlistHTML", list),
+	})
+}
+
+func serveFile(w http.ResponseWriter, r *http.Request) {
+	relpath := r.URL.Path
+
+	// Check to see if we need to redirect or serve another file.
+	if m := metadataFor(relpath); m != nil {
+		if m.Path != relpath {
+			// Redirect to canonical path.
+			http.Redirect(w, r, m.Path, http.StatusMovedPermanently)
+			return
+		}
+		// Serve from the actual filesystem path.
+		relpath = m.filePath
+	}
+
+	abspath := relpath
+	relpath = relpath[1:] // strip leading slash
+
+	switch pathpkg.Ext(relpath) {
+	case ".html":
+		if strings.HasSuffix(relpath, "/index.html") {
+			// We'll show index.html for the directory.
+			// Use the dir/ version as canonical instead of dir/index.html.
+			http.Redirect(w, r, r.URL.Path[0:len(r.URL.Path)-len("index.html")], http.StatusMovedPermanently)
+			return
+		}
+		serveHTMLDoc(w, r, abspath, relpath)
+		return
+
+	case ".go":
+		serveTextFile(w, r, abspath, relpath, "Source file")
+		return
+	}
+
+	dir, err := fs.Lstat(abspath)
+	if err != nil {
+		log.Print(err)
+		serveError(w, r, relpath, err)
+		return
+	}
+
+	if dir != nil && dir.IsDir() {
+		if redirect(w, r) {
+			return
+		}
+		if index := pathpkg.Join(abspath, "index.html"); isTextFile(index) {
+			serveHTMLDoc(w, r, index, index)
+			return
+		}
+		serveDirectory(w, r, abspath, relpath)
+		return
+	}
+
+	if isTextFile(abspath) {
+		if redirectFile(w, r) {
+			return
+		}
+		serveTextFile(w, r, abspath, relpath, "Text file")
+		return
+	}
+
+	fileServer.ServeHTTP(w, r)
+}
+
+func serveSearchDesc(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/opensearchdescription+xml")
+	data := map[string]interface{}{
+		"BaseURL": fmt.Sprintf("http://%s", r.Host),
+	}
+	if err := searchDescXML.Execute(w, &data); err != nil && err != http.ErrBodyNotAllowed {
+		// Only log if there's an error that's not about writing on HEAD requests.
+		// See Issues 5451 and 5454.
+		log.Printf("searchDescXML.Execute: %s", err)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Packages
+
+// Fake relative package path for built-ins. Documentation for all globals
+// (not just exported ones) will be shown for packages in this directory.
+const builtinPkgPath = "builtin"
+
+type PageInfoMode uint
+
+const (
+	noFiltering PageInfoMode = 1 << iota // do not filter exports
+	allMethods                           // show all embedded methods
+	showSource                           // show source code, do not extract documentation
+	noHtml                               // show result in textual form, do not generate HTML
+	flatDir                              // show directory in a flat (non-indented) manner
+)
+
+// modeNames defines names for each PageInfoMode flag.
+var modeNames = map[string]PageInfoMode{
+	"all":     noFiltering,
+	"methods": allMethods,
+	"src":     showSource,
+	"text":    noHtml,
+	"flat":    flatDir,
+}
+
+// getPageInfoMode computes the PageInfoMode flags by analyzing the request
+// URL form value "m". Its value is a comma-separated list of mode names
+// as defined by modeNames (e.g.: m=src,text).
+func getPageInfoMode(r *http.Request) PageInfoMode {
+	var mode PageInfoMode
+	for _, k := range strings.Split(r.FormValue("m"), ",") {
+		if m, found := modeNames[strings.TrimSpace(k)]; found {
+			mode |= m
+		}
+	}
+	return adjustPageInfoMode(r, mode)
+}
+
+// Specialized versions of godoc may adjust the PageInfoMode by overriding
+// this variable.
+var adjustPageInfoMode = func(_ *http.Request, mode PageInfoMode) PageInfoMode {
+	return mode
+}
+
+// remoteSearchURL returns the search URL for a given query as needed by
+// remoteSearch. If html is set, an html result is requested; otherwise
+// the result is in textual form.
+// Adjust this function as necessary if modeNames or FormValue parameters
+// change.
+func remoteSearchURL(query string, html bool) string {
+	s := "/search?m=text&q="
+	if html {
+		s = "/search?q="
+	}
+	return s + url.QueryEscape(query)
+}
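+
+// For example, remoteSearchURL("foo bar", false) yields
+// "/search?m=text&q=foo+bar"; with html set, "/search?q=foo+bar".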
+
+type PageInfo struct {
+	Dirname string // directory containing the package
+	Err     error  // error or nil
+
+	// package info
+	FSet     *token.FileSet         // nil if no package documentation
+	PDoc     *doc.Package           // nil if no package documentation
+	Examples []*doc.Example         // nil if no example code
+	Notes    map[string][]*doc.Note // nil if no package Notes
+	PAst     *ast.File              // nil if no AST with package exports
+	IsMain   bool                   // true for package main
+
+	// directory info
+	Dirs    *DirList  // nil if no directory information
+	DirTime time.Time // directory time stamp
+	DirFlat bool      // if set, show directory in a flat (non-indented) manner
+}
+
+func (info *PageInfo) IsEmpty() bool {
+	return info.Err != nil || info.PAst == nil && info.PDoc == nil && info.Dirs == nil
+}
+
+type docServer struct {
+	pattern string // url pattern; e.g. "/pkg/"
+	fsRoot  string // file system root to which the pattern is mapped
+}
+
+// fsReadDir implements ReadDir for the go/build package.
+func fsReadDir(dir string) ([]os.FileInfo, error) {
+	return fs.ReadDir(filepath.ToSlash(dir))
+}
+
+// fsOpenFile implements OpenFile for the go/build package.
+func fsOpenFile(name string) (r io.ReadCloser, err error) {
+	data, err := ReadFile(fs, filepath.ToSlash(name))
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(bytes.NewReader(data)), nil
+}
+
+// packageExports is a local implementation of ast.PackageExports
+// which correctly updates each package file's comment list.
+// (The ast.PackageExports signature is frozen, hence the local
+// implementation).
+//
+func packageExports(fset *token.FileSet, pkg *ast.Package) {
+	for _, src := range pkg.Files {
+		cmap := ast.NewCommentMap(fset, src, src.Comments)
+		ast.FileExports(src)
+		src.Comments = cmap.Filter(src).Comments()
+	}
+}
+
+// addNames adds the names declared by decl to the names set.
+// Method names are added in the form ReceiverTypeName_Method.
+func addNames(names map[string]bool, decl ast.Decl) {
+	switch d := decl.(type) {
+	case *ast.FuncDecl:
+		name := d.Name.Name
+		if d.Recv != nil {
+			var typeName string
+			switch r := d.Recv.List[0].Type.(type) {
+			case *ast.StarExpr:
+				typeName = r.X.(*ast.Ident).Name
+			case *ast.Ident:
+				typeName = r.Name
+			}
+			name = typeName + "_" + name
+		}
+		names[name] = true
+	case *ast.GenDecl:
+		for _, spec := range d.Specs {
+			switch s := spec.(type) {
+			case *ast.TypeSpec:
+				names[s.Name.Name] = true
+			case *ast.ValueSpec:
+				for _, id := range s.Names {
+					names[id.Name] = true
+				}
+			}
+		}
+	}
+}
+
+// globalNames returns a set of the names declared by all package-level
+// declarations. Method names are returned in the form Receiver_Method.
+func globalNames(pkg *ast.Package) map[string]bool {
+	names := make(map[string]bool)
+	for _, file := range pkg.Files {
+		for _, decl := range file.Decls {
+			addNames(names, decl)
+		}
+	}
+	return names
+}
+
+// collectExamples collects examples for pkg from testfiles.
+func collectExamples(pkg *ast.Package, testfiles map[string]*ast.File) []*doc.Example {
+	var files []*ast.File
+	for _, f := range testfiles {
+		files = append(files, f)
+	}
+
+	var examples []*doc.Example
+	globals := globalNames(pkg)
+	for _, e := range doc.Examples(files...) {
+		name := stripExampleSuffix(e.Name)
+		if name == "" || globals[name] {
+			examples = append(examples, e)
+		} else {
+			log.Printf("skipping example 'Example%s' because '%s' is not a known function or type", e.Name, name)
+		}
+	}
+
+	return examples
+}
+
+// poorMansImporter returns a (dummy) package object named
+// by the last path component of the provided package path
+// (as is the convention for packages). This is sufficient
+// to resolve package identifiers without doing an actual
+// import. It never returns an error.
+//
+func poorMansImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
+	pkg := imports[path]
+	if pkg == nil {
+		// note that strings.LastIndex returns -1 if there is no "/"
+		pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:])
+		pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import
+		imports[path] = pkg
+	}
+	return pkg, nil
+}
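+
+// For example, an import of "net/http" resolves to a dummy package object
+// named "http"; a path without a slash, such as "fmt", maps to "fmt".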
+
+// getPageInfo returns the PageInfo for a package directory abspath. If the
+// showSource mode bit is set, an AST containing only the package exports is
+// computed (PageInfo.PAst); otherwise package documentation (PageInfo.PDoc)
+// is extracted from the AST. If there is no corresponding package in the
+// directory, PageInfo.PAst and PageInfo.PDoc are nil. If there are no sub-
+// directories, PageInfo.Dirs is nil. If an error occurred, PageInfo.Err is
+// set to the respective error but the error is not logged.
+//
+func (h *docServer) getPageInfo(abspath, relpath string, mode PageInfoMode) *PageInfo {
+	info := &PageInfo{Dirname: abspath}
+
+	// Restrict to the package files that would be used when building
+	// the package on this system.  This makes sure that if there are
+	// separate implementations for, say, Windows vs Unix, we don't
+	// jumble them all together.
+	// Note: Uses current binary's GOOS/GOARCH.
+	// To use different pair, such as if we allowed the user to choose,
+	// set ctxt.GOOS and ctxt.GOARCH before calling ctxt.ImportDir.
+	ctxt := build.Default
+	ctxt.IsAbsPath = pathpkg.IsAbs
+	ctxt.ReadDir = fsReadDir
+	ctxt.OpenFile = fsOpenFile
+	pkginfo, err := ctxt.ImportDir(abspath, 0)
+	// continue if there are no Go source files; we still want the directory info
+	if _, nogo := err.(*build.NoGoError); err != nil && !nogo {
+		info.Err = err
+		return info
+	}
+
+	// collect package files
+	pkgname := pkginfo.Name
+	pkgfiles := append(pkginfo.GoFiles, pkginfo.CgoFiles...)
+	if len(pkgfiles) == 0 {
+		// Commands written in C have no .go files in the build.
+		// Instead, documentation may be found in an ignored file.
+		// The file may be ignored via an explicit +build ignore
+		// constraint (recommended), or by defining the package
+		// documentation (historic).
+		pkgname = "main" // assume package main since pkginfo.Name == ""
+		pkgfiles = pkginfo.IgnoredGoFiles
+	}
+
+	// get package information, if any
+	if len(pkgfiles) > 0 {
+		// build package AST
+		fset := token.NewFileSet()
+		files, err := parseFiles(fset, abspath, pkgfiles)
+		if err != nil {
+			info.Err = err
+			return info
+		}
+
+		// ignore any errors - they are due to unresolved identifiers
+		pkg, _ := ast.NewPackage(fset, files, poorMansImporter, nil)
+
+		// extract package documentation
+		info.FSet = fset
+		if mode&showSource == 0 {
+			// show extracted documentation
+			var m doc.Mode
+			if mode&noFiltering != 0 {
+				m = doc.AllDecls
+			}
+			if mode&allMethods != 0 {
+				m |= doc.AllMethods
+			}
+			info.PDoc = doc.New(pkg, pathpkg.Clean(relpath), m) // no trailing '/' in importpath
+
+			// collect examples
+			testfiles := append(pkginfo.TestGoFiles, pkginfo.XTestGoFiles...)
+			files, err = parseFiles(fset, abspath, testfiles)
+			if err != nil {
+				log.Println("parsing examples:", err)
+			}
+			info.Examples = collectExamples(pkg, files)
+
+			// collect any notes that we want to show
+			if info.PDoc.Notes != nil {
+				// could regexp.Compile only once per godoc, but probably not worth it
+				if rx, err := regexp.Compile(*notes); err == nil {
+					for m, n := range info.PDoc.Notes {
+						if rx.MatchString(m) {
+							if info.Notes == nil {
+								info.Notes = make(map[string][]*doc.Note)
+							}
+							info.Notes[m] = n
+						}
+					}
+				}
+			}
+
+		} else {
+			// show source code
+			// TODO(gri) Consider eliminating export filtering in this mode,
+			//           or perhaps eliminating the mode altogether.
+			if mode&noFiltering == 0 {
+				packageExports(fset, pkg)
+			}
+			info.PAst = ast.MergePackageFiles(pkg, 0)
+		}
+		info.IsMain = pkgname == "main"
+	}
+
+	// get directory information, if any
+	var dir *Directory
+	var timestamp time.Time
+	if tree, ts := fsTree.get(); tree != nil && tree.(*Directory) != nil {
+		// directory tree is present; lookup respective directory
+		// (may still fail if the file system was updated and the
+		// new directory tree has not yet been computed)
+		dir = tree.(*Directory).lookup(abspath)
+		timestamp = ts
+	}
+	if dir == nil {
+		// no directory tree present (too early after startup or
+		// command-line mode); compute one level for this page
+		// note: cannot use path filter here because in general
+		//       it doesn't contain the fsTree path
+		dir = newDirectory(abspath, 1)
+		timestamp = time.Now()
+	}
+	info.Dirs = dir.listing(true)
+	info.DirTime = timestamp
+	info.DirFlat = mode&flatDir != 0
+
+	return info
+}
+
+func (h *docServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if redirect(w, r) {
+		return
+	}
+
+	relpath := pathpkg.Clean(r.URL.Path[len(h.pattern):])
+	abspath := pathpkg.Join(h.fsRoot, relpath)
+	mode := getPageInfoMode(r)
+	if relpath == builtinPkgPath {
+		mode = noFiltering
+	}
+	info := h.getPageInfo(abspath, relpath, mode)
+	if info.Err != nil {
+		log.Print(info.Err)
+		serveError(w, r, relpath, info.Err)
+		return
+	}
+
+	if mode&noHtml != 0 {
+		serveText(w, applyTemplate(packageText, "packageText", info))
+		return
+	}
+
+	var tabtitle, title, subtitle string
+	switch {
+	case info.PAst != nil:
+		tabtitle = info.PAst.Name.Name
+	case info.PDoc != nil:
+		tabtitle = info.PDoc.Name
+	default:
+		tabtitle = info.Dirname
+		title = "Directory "
+		if *showTimestamps {
+			subtitle = "Last update: " + info.DirTime.String()
+		}
+	}
+	if title == "" {
+		if info.IsMain {
+			// assume that the directory name is the command name
+			_, tabtitle = pathpkg.Split(relpath)
+			title = "Command "
+		} else {
+			title = "Package "
+		}
+	}
+	title += tabtitle
+
+	// special cases for top-level package/command directories
+	switch tabtitle {
+	case "/src/pkg":
+		tabtitle = "Packages"
+	case "/src/cmd":
+		tabtitle = "Commands"
+	}
+
+	servePage(w, Page{
+		Title:    title,
+		Tabtitle: tabtitle,
+		Subtitle: subtitle,
+		Body:     applyTemplate(packageHTML, "packageHTML", info),
+	})
+}
+
+// ----------------------------------------------------------------------------
+// Search
+
+var searchIndex RWValue
+
+type SearchResult struct {
+	Query string
+	Alert string // error or warning message
+
+	// identifier matches
+	Pak HitList       // packages matching Query
+	Hit *LookupResult // identifier matches of Query
+	Alt *AltWords     // alternative identifiers to look for
+
+	// textual matches
+	Found    int         // number of textual occurrences found
+	Textual  []FileLines // textual matches of Query
+	Complete bool        // true if all textual occurrences of Query are reported
+}
+
+func lookup(query string) (result SearchResult) {
+	result.Query = query
+
+	index, timestamp := searchIndex.get()
+	if index != nil {
+		index := index.(*Index)
+
+		// identifier search
+		var err error
+		result.Pak, result.Hit, result.Alt, err = index.Lookup(query)
+		if err != nil && *maxResults <= 0 {
+			// ignore the error if full text search is enabled
+			// since the query may be a valid regular expression
+			result.Alert = "Error in query string: " + err.Error()
+			return
+		}
+
+		// full text search
+		if *maxResults > 0 && query != "" {
+			rx, err := regexp.Compile(query)
+			if err != nil {
+				result.Alert = "Error in query regular expression: " + err.Error()
+				return
+			}
+			// If we get maxResults+1 results we know that there are more than
+			// maxResults results and thus the result may be incomplete (to be
+			// precise, we should remove one result from the result set, but
+			// nobody is going to count the results on the result page).
+			result.Found, result.Textual = index.LookupRegexp(rx, *maxResults+1)
+			result.Complete = result.Found <= *maxResults
+			if !result.Complete {
+				result.Found-- // since we looked for maxResults+1
+			}
+		}
+	}
+
+	// is the result accurate?
+	if *indexEnabled {
+		if _, ts := fsModified.get(); timestamp.Before(ts) {
+			// The index is older than the latest file system change under godoc's observation.
+			result.Alert = "Indexing in progress: result may be inaccurate"
+		}
+	} else {
+		result.Alert = "Search index disabled: no results available"
+	}
+
+	return
+}
+
+func search(w http.ResponseWriter, r *http.Request) {
+	query := strings.TrimSpace(r.FormValue("q"))
+	result := lookup(query)
+
+	if getPageInfoMode(r)&noHtml != 0 {
+		serveText(w, applyTemplate(searchText, "searchText", result))
+		return
+	}
+
+	var title string
+	if result.Hit != nil || len(result.Textual) > 0 {
+		title = fmt.Sprintf(`Results for query %q`, query)
+	} else {
+		title = fmt.Sprintf(`No results found for query %q`, query)
+	}
+
+	servePage(w, Page{
+		Title:    title,
+		Tabtitle: query,
+		Query:    query,
+		Body:     applyTemplate(searchHTML, "searchHTML", result),
+	})
+}
+
+// ----------------------------------------------------------------------------
+// Documentation Metadata
+
+type Metadata struct {
+	Title    string
+	Subtitle string
+	Template bool   // execute as template
+	Path     string // canonical path for this page
+	filePath string // filesystem path relative to goroot
+}
+
+// extractMetadata extracts the Metadata from a byte slice.
+// It returns the Metadata value and the remaining data.
+// If no metadata is present the original byte slice is returned.
+//
+func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) {
+	tail = b
+	if !bytes.HasPrefix(b, jsonStart) {
+		return
+	}
+	end := bytes.Index(b, jsonEnd)
+	if end < 0 {
+		return
+	}
+	b = b[len(jsonStart)-1 : end+1] // drop leading <!-- and include trailing }
+	if err = json.Unmarshal(b, &meta); err != nil {
+		return
+	}
+	tail = tail[end+len(jsonEnd):]
+	return
+}
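+
+// As an illustration, a page under /doc might begin with
+//
+//	<!--{
+//		"Title": "The Go Memory Model",
+//		"Path": "/ref/mem"
+//	}-->
+//
+// in which case extractMetadata returns the decoded Metadata (Title and
+// canonical Path set) and the bytes following "}-->".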
+
+// updateMetadata scans $GOROOT/doc for HTML files, reads their metadata,
+// and updates the docMetadata map.
+//
+func updateMetadata() {
+	metadata := make(map[string]*Metadata)
+	var scan func(string) // scan is recursive
+	scan = func(dir string) {
+		fis, err := fs.ReadDir(dir)
+		if err != nil {
+			log.Println("updateMetadata:", err)
+			return
+		}
+		for _, fi := range fis {
+			name := pathpkg.Join(dir, fi.Name())
+			if fi.IsDir() {
+				scan(name) // recurse
+				continue
+			}
+			if !strings.HasSuffix(name, ".html") {
+				continue
+			}
+			// Extract metadata from the file.
+			b, err := ReadFile(fs, name)
+			if err != nil {
+				log.Printf("updateMetadata %s: %v", name, err)
+				continue
+			}
+			meta, _, err := extractMetadata(b)
+			if err != nil {
+				log.Printf("updateMetadata: %s: %v", name, err)
+				continue
+			}
+			// Store relative filesystem path in Metadata.
+			meta.filePath = name
+			if meta.Path == "" {
+				// If no Path, canonical path is actual path.
+				meta.Path = meta.filePath
+			}
+			// Store under both paths.
+			metadata[meta.Path] = &meta
+			metadata[meta.filePath] = &meta
+		}
+	}
+	scan("/doc")
+	docMetadata.set(metadata)
+}
+
+// Send a value on this channel to trigger a metadata refresh.
+// It is buffered so that a signal is not lost if it is sent during a refresh.
+//
+var refreshMetadataSignal = make(chan bool, 1)
+
+// refreshMetadata sends a signal to update docMetadata. If a refresh is in
+// progress the metadata will be refreshed again afterward.
+//
+func refreshMetadata() {
+	select {
+	case refreshMetadataSignal <- true:
+	default:
+	}
+}
+
+// refreshMetadataLoop runs forever, updating docMetadata when the underlying
+// file system changes. It should be launched in a goroutine by main.
+//
+func refreshMetadataLoop() {
+	for {
+		<-refreshMetadataSignal
+		updateMetadata()
+		time.Sleep(10 * time.Second) // at most once every 10 seconds
+	}
+}
+
+// metadataFor returns the *Metadata for a given relative path or nil if none
+// exists.
+//
+func metadataFor(relpath string) *Metadata {
+	if m, _ := docMetadata.get(); m != nil {
+		meta := m.(map[string]*Metadata)
+		// If metadata for this relpath exists, return it.
+		if p := meta[relpath]; p != nil {
+			return p
+		}
+		// Try with or without trailing slash.
+		if strings.HasSuffix(relpath, "/") {
+			relpath = relpath[:len(relpath)-1]
+		} else {
+			relpath = relpath + "/"
+		}
+		return meta[relpath]
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Indexer
+
+// invalidateIndex should be called whenever any of the file systems
+// under godoc's observation change so that the indexer is kicked on.
+//
+func invalidateIndex() {
+	fsModified.set(nil)
+	refreshMetadata()
+}
+
+// indexUpToDate() returns true if the search index is not older
+// than any of the file systems under godoc's observation.
+//
+func indexUpToDate() bool {
+	_, fsTime := fsModified.get()
+	_, siTime := searchIndex.get()
+	return !fsTime.After(siTime)
+}
+
+// feedDirnames feeds the directory names of all directories
+// under the file system given by root to channel c.
+//
+func feedDirnames(root *RWValue, c chan<- string) {
+	if dir, _ := root.get(); dir != nil {
+		for d := range dir.(*Directory).iter(false) {
+			c <- d.Path
+		}
+	}
+}
+
+// fsDirnames() returns a channel sending all directory names
+// of all the file systems under godoc's observation.
+//
+func fsDirnames() <-chan string {
+	c := make(chan string, 256) // buffered for fewer context switches
+	go func() {
+		feedDirnames(&fsTree, c)
+		close(c)
+	}()
+	return c
+}
+
+func readIndex(filenames string) error {
+	matches, err := filepath.Glob(filenames)
+	if err != nil {
+		return err
+	} else if matches == nil {
+		return fmt.Errorf("no index files match %q", filenames)
+	}
+	sort.Strings(matches) // make sure files are in the right order
+	files := make([]io.Reader, 0, len(matches))
+	for _, filename := range matches {
+		f, err := os.Open(filename)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		files = append(files, f)
+	}
+	x := new(Index)
+	if err := x.Read(io.MultiReader(files...)); err != nil {
+		return err
+	}
+	searchIndex.set(x)
+	return nil
+}
+
+func updateIndex() {
+	if *verbose {
+		log.Printf("updating index...")
+	}
+	start := time.Now()
+	index := NewIndex(fsDirnames(), *maxResults > 0, *indexThrottle)
+	stop := time.Now()
+	searchIndex.set(index)
+	if *verbose {
+		secs := stop.Sub(start).Seconds()
+		stats := index.Stats()
+		log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
+			secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
+	}
+	memstats := new(runtime.MemStats)
+	runtime.ReadMemStats(memstats)
+	log.Printf("before GC: bytes = %d footprint = %d", memstats.HeapAlloc, memstats.Sys)
+	runtime.GC()
+	runtime.ReadMemStats(memstats)
+	log.Printf("after  GC: bytes = %d footprint = %d", memstats.HeapAlloc, memstats.Sys)
+}
+
+func indexer() {
+	// initialize the index from disk if possible
+	if *indexFiles != "" {
+		if err := readIndex(*indexFiles); err != nil {
+			log.Printf("error reading index: %s", err)
+		}
+	}
+
+	// repeatedly update the index when it goes out of date
+	for {
+		if !indexUpToDate() {
+			// index possibly out of date - make a new one
+			updateIndex()
+		}
+		delay := 60 * time.Second // by default, try every 60s
+		if *testDir != "" {
+			// in test mode, try once a second for fast startup
+			delay = 1 * time.Second
+		}
+		time.Sleep(delay)
+	}
+}
diff --git a/cmd/godoc/index.go b/cmd/godoc/index.go
new file mode 100644
index 0000000..d1292d5
--- /dev/null
+++ b/cmd/godoc/index.go
@@ -0,0 +1,1079 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the infrastructure to create an
+// identifier and full-text index for a set of Go files.
+//
+// Algorithm for identifier index:
+// - traverse all .go files of the file tree specified by root
+// - for each identifier (word) encountered, collect all occurrences (spots)
+//   into a list; this produces a list of spots for each word
+// - reduce the lists: from a list of spots to a list of FileRuns,
+//   and from a list of FileRuns into a list of PakRuns
+// - make a HitList from the PakRuns
+//
+// Details:
+// - keep two lists per word: one containing package-level declarations
+//   that have snippets, and one containing all other spots
+// - keep the snippets in a separate table indexed by snippet index
+//   and store the snippet index in place of the line number in a SpotInfo
+//   (the line number for spots with snippets is stored in the snippet)
+// - at the end, create lists of alternative spellings for a given
+//   word
+//
+// Algorithm for full text index:
+// - concatenate all source code in a byte buffer (in memory)
+// - add the files to a file set in lockstep as they are added to the byte
+//   buffer such that a byte buffer offset corresponds to the Pos value for
+//   that file location
+// - create a suffix array from the concatenated sources
+//
+// String lookup in full text index:
+// - use the suffix array to lookup a string's offsets - the offsets
+//   correspond to the Pos values relative to the file set
+// - translate the Pos values back into file and line information and
+//   sort the result
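+//
+// For example, if a 10-byte file A is added to the buffer first and file B
+// second, then buffer offset 12 lies within B at file-local offset 2, and
+// the file set maps the corresponding Pos value back to that location.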
+
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"index/suffixarray"
+	"io"
+	"os"
+	pathpkg "path"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+	"unicode"
+)
+
+// ----------------------------------------------------------------------------
+// InterfaceSlice is a helper type for sorting interface
+// slices according to some slice-specific sort criteria.
+
+type Comparer func(x, y interface{}) bool
+
+type InterfaceSlice struct {
+	slice []interface{}
+	less  Comparer
+}
+
+func (p *InterfaceSlice) Len() int           { return len(p.slice) }
+func (p *InterfaceSlice) Less(i, j int) bool { return p.less(p.slice[i], p.slice[j]) }
+func (p *InterfaceSlice) Swap(i, j int)      { p.slice[i], p.slice[j] = p.slice[j], p.slice[i] }
+
+// ----------------------------------------------------------------------------
+// RunList
+
+// A RunList is a list of entries that can be sorted according to some
+// criteria. A RunList may be compressed by grouping "runs" of entries
+// which are equal (according to the sort criteria) into a new RunList of
+// runs. For instance, a RunList containing pairs (x, y) may be compressed
+// into a RunList containing pair runs (x, {y}) where each run consists of
+// a list of y's with the same x.
+type RunList []interface{}
+
+func (h RunList) sort(less Comparer) {
+	sort.Sort(&InterfaceSlice{h, less})
+}
+
+// Compress entries which are the same according to the sort criteria
+// (specified by less) into "runs".
+func (h RunList) reduce(less Comparer, newRun func(h RunList) interface{}) RunList {
+	if len(h) == 0 {
+		return nil
+	}
+	// len(h) > 0
+
+	// create runs of entries with equal values
+	h.sort(less)
+
+	// for each run, make a new run object and collect them in a new RunList
+	var hh RunList
+	i, x := 0, h[0]
+	for j, y := range h {
+		if less(x, y) {
+			hh = append(hh, newRun(h[i:j]))
+			i, x = j, h[j] // start a new run
+		}
+	}
+	// add final run, if any
+	if i < len(h) {
+		hh = append(hh, newRun(h[i:]))
+	}
+
+	return hh
+}
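+
+// As a sketch: reducing the pair list {(a,1), (b,2), (a,3)} with a less
+// function comparing first components sorts the (a,·) pairs next to each
+// other and produces two runs, newRun of the (a,·) entries and newRun
+// of {(b,2)}.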
+
+// ----------------------------------------------------------------------------
+// SpotInfo
+
+// A SpotInfo value describes a particular identifier spot in a given file;
+// it encodes three values: the SpotKind (declaration or use), a line or
+// snippet index "lori", and whether it's a line or index.
+//
+// The following encoding is used:
+//
+//   bits    32   4    1       0
+//   value    [lori|kind|isIndex]
+//
+type SpotInfo uint32
+
+// SpotKind describes whether an identifier is declared (and what kind of
+// declaration) or used.
+type SpotKind uint32
+
+const (
+	PackageClause SpotKind = iota
+	ImportDecl
+	ConstDecl
+	TypeDecl
+	VarDecl
+	FuncDecl
+	MethodDecl
+	Use
+	nKinds
+)
+
+func init() {
+	// sanity check: if nKinds is too large, the SpotInfo
+	// accessor functions may need to be updated
+	if nKinds > 8 {
+		panic("internal error: nKinds > 8")
+	}
+}
+
+// makeSpotInfo makes a SpotInfo.
+func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
+	// encode lori: bits [4..32)
+	x := SpotInfo(lori) << 4
+	if int(x>>4) != lori {
+		// lori value doesn't fit - since snippet indices are
+		// most certainly always smaller than 1<<28, this can
+		// only happen for line numbers; give it no line number (= 0)
+		x = 0
+	}
+	// encode kind: bits [1..4)
+	x |= SpotInfo(kind) << 1
+	// encode isIndex: bit 0
+	if isIndex {
+		x |= 1
+	}
+	return x
+}
+
+func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
+func (x SpotInfo) Lori() int      { return int(x >> 4) }
+func (x SpotInfo) IsIndex() bool  { return x&1 != 0 }
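+
+// For example, makeSpotInfo(FuncDecl, 42, false) encodes to
+// 42<<4 | 5<<1 | 0 = 682; Kind, Lori, and IsIndex then recover
+// FuncDecl, 42, and false, respectively.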
+
+// ----------------------------------------------------------------------------
+// KindRun
+
+// Debugging support. Disable to see multiple entries per line.
+const removeDuplicates = true
+
+// A KindRun is a run of SpotInfos of the same kind in a given file.
+// The kind (3 bits) is stored in each SpotInfo element; to find the
+// kind of a KindRun, look at any of its elements.
+type KindRun []SpotInfo
+
+// KindRuns are sorted by line number or index. Since the isIndex bit
+// is always the same for all infos in one list we can compare lori's.
+func (k KindRun) Len() int           { return len(k) }
+func (k KindRun) Less(i, j int) bool { return k[i].Lori() < k[j].Lori() }
+func (k KindRun) Swap(i, j int)      { k[i], k[j] = k[j], k[i] }
+
+// FileRun contents are sorted by Kind for the reduction into KindRuns.
+func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
+
+// newKindRun allocates a new KindRun from the SpotInfo run h.
+func newKindRun(h RunList) interface{} {
+	run := make(KindRun, len(h))
+	for i, x := range h {
+		run[i] = x.(SpotInfo)
+	}
+
+	// Spots were sorted by file and kind to create this run.
+	// Within this run, sort them by line number or index.
+	sort.Sort(run)
+
+	if removeDuplicates {
+		// Since both the lori and kind fields must be
+		// the same for duplicates, and since the isIndex
+		// bit is always the same for all infos in one
+		// list we can simply compare the entire info.
+		k := 0
+		prev := SpotInfo(1<<32 - 1) // an unlikely value
+		for _, x := range run {
+			if x != prev {
+				run[k] = x
+				k++
+				prev = x
+			}
+		}
+		run = run[0:k]
+	}
+
+	return run
+}
+
+// ----------------------------------------------------------------------------
+// FileRun
+
+// A Pak describes a Go package.
+type Pak struct {
+	Path string // path of directory containing the package
+	Name string // package name as declared by package clause
+}
+
+// Paks are sorted by name (primary key) and by import path (secondary key).
+func (p *Pak) less(q *Pak) bool {
+	return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
+}
+
+// A File describes a Go file.
+type File struct {
+	Name string // directory-local file name
+	Pak  *Pak   // the package to which the file belongs
+}
+
+// Path returns the file path of f.
+func (f *File) Path() string {
+	return pathpkg.Join(f.Pak.Path, f.Name)
+}
+
+// A Spot describes a single occurrence of a word.
+type Spot struct {
+	File *File
+	Info SpotInfo
+}
+
+// A FileRun is a list of KindRuns belonging to the same file.
+type FileRun struct {
+	File   *File
+	Groups []KindRun
+}
+
+// Spots are sorted by file path for the reduction into FileRuns.
+func lessSpot(x, y interface{}) bool {
+	fx := x.(Spot).File
+	fy := y.(Spot).File
+	// same as "return fx.Path() < fy.Path()" but w/o computing the file path first
+	px := fx.Pak.Path
+	py := fy.Pak.Path
+	return px < py || px == py && fx.Name < fy.Name
+}
+
+// newFileRun allocates a new FileRun from the Spot run h.
+func newFileRun(h RunList) interface{} {
+	file := h[0].(Spot).File
+
+	// reduce the list of Spots into a list of KindRuns
+	h1 := make(RunList, len(h))
+	for i, x := range h {
+		h1[i] = x.(Spot).Info
+	}
+	h2 := h1.reduce(lessKind, newKindRun)
+
+	// create the FileRun
+	groups := make([]KindRun, len(h2))
+	for i, x := range h2 {
+		groups[i] = x.(KindRun)
+	}
+	return &FileRun{file, groups}
+}
+
+// ----------------------------------------------------------------------------
+// PakRun
+
+// A PakRun describes a run of *FileRuns of a package.
+type PakRun struct {
+	Pak   *Pak
+	Files []*FileRun
+}
+
+// Sorting support for files within a PakRun.
+func (p *PakRun) Len() int           { return len(p.Files) }
+func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Name < p.Files[j].File.Name }
+func (p *PakRun) Swap(i, j int)      { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
+
+// FileRuns are sorted by package for the reduction into PakRuns.
+func lessFileRun(x, y interface{}) bool {
+	return x.(*FileRun).File.Pak.less(y.(*FileRun).File.Pak)
+}
+
+// newPakRun allocates a new PakRun from the *FileRun run h.
+func newPakRun(h RunList) interface{} {
+	pak := h[0].(*FileRun).File.Pak
+	files := make([]*FileRun, len(h))
+	for i, x := range h {
+		files[i] = x.(*FileRun)
+	}
+	run := &PakRun{pak, files}
+	sort.Sort(run) // files were sorted by package; sort them by file now
+	return run
+}
+
+// ----------------------------------------------------------------------------
+// HitList
+
+// A HitList describes a list of PakRuns.
+type HitList []*PakRun
+
+// PakRuns are sorted by package.
+func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(y.(*PakRun).Pak) }
+
+func reduce(h0 RunList) HitList {
+	// reduce a list of Spots into a list of FileRuns
+	h1 := h0.reduce(lessSpot, newFileRun)
+	// reduce a list of FileRuns into a list of PakRuns
+	h2 := h1.reduce(lessFileRun, newPakRun)
+	// sort the list of PakRuns by package
+	h2.sort(lessPakRun)
+	// create a HitList
+	h := make(HitList, len(h2))
+	for i, p := range h2 {
+		h[i] = p.(*PakRun)
+	}
+	return h
+}
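+
+// To illustrate (schematic only), Spots for a word occurring in files
+// a.go and b.go of package p and in c.go of package q reduce to
+//	HitList{PakRun{p, [FileRun{a.go}, FileRun{b.go}]}, PakRun{q, [FileRun{c.go}]}}
+// with the PakRuns sorted by package and the files within each PakRun
+// sorted by name.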
+
+// filter returns a new HitList created by filtering
+// all PakRuns from h that have a matching pakname.
+func (h HitList) filter(pakname string) HitList {
+	var hh HitList
+	for _, p := range h {
+		if p.Pak.Name == pakname {
+			hh = append(hh, p)
+		}
+	}
+	return hh
+}
+
+// ----------------------------------------------------------------------------
+// AltWords
+
+type wordPair struct {
+	canon string // canonical word spelling (all lowercase)
+	alt   string // alternative spelling
+}
+
+// An AltWords describes a list of alternative spellings for a
+// canonical (all lowercase) spelling of a word.
+type AltWords struct {
+	Canon string   // canonical word spelling (all lowercase)
+	Alts  []string // alternative spellings for the same word
+}
+
+// wordPairs are sorted by their canonical spelling.
+func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
+
+// newAltWords allocates a new AltWords from the *wordPair run h.
+func newAltWords(h RunList) interface{} {
+	canon := h[0].(*wordPair).canon
+	alts := make([]string, len(h))
+	for i, x := range h {
+		alts[i] = x.(*wordPair).alt
+	}
+	return &AltWords{canon, alts}
+}
+
+func (a *AltWords) filter(s string) *AltWords {
+	var alts []string
+	for _, w := range a.Alts {
+		if w != s {
+			alts = append(alts, w)
+		}
+	}
+	if len(alts) > 0 {
+		return &AltWords{a.Canon, alts}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Indexer
+
+// Adjust these flags as seems best.
+const includeMainPackages = true
+const includeTestFiles = true
+
+type IndexResult struct {
+	Decls  RunList // package-level declarations (with snippets)
+	Others RunList // all other occurrences
+}
+
+// Statistics provides statistics information for an index.
+type Statistics struct {
+	Bytes int // total size of indexed source files
+	Files int // number of indexed source files
+	Lines int // number of lines (all files)
+	Words int // number of different identifiers
+	Spots int // number of identifier occurrences
+}
+
+// An Indexer maintains the data structures and provides the machinery
+// for indexing .go files under a file tree. It implements the path.Visitor
+// interface for walking file trees, and the ast.Visitor interface for
+// walking Go ASTs.
+type Indexer struct {
+	fset     *token.FileSet          // file set for all indexed files
+	sources  bytes.Buffer            // concatenated sources
+	packages map[string]*Pak         // map of canonicalized *Paks
+	words    map[string]*IndexResult // RunLists of Spots
+	snippets []*Snippet              // indices are stored in SpotInfos
+	current  *token.File             // last file added to file set
+	file     *File                   // AST for current file
+	decl     ast.Decl                // AST for current decl
+	stats    Statistics
+}
+
+func (x *Indexer) lookupPackage(path, name string) *Pak {
+	// In the source directory tree, more than one package may
+	// live in the same directory. For the packages map, construct
+	// a key that includes both the directory path and the package
+	// name.
+	key := path + ":" + name
+	pak := x.packages[key]
+	if pak == nil {
+		pak = &Pak{path, name}
+		x.packages[key] = pak
+	}
+	return pak
+}
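+
+// For example (illustrative), a directory /src/pkg/foo holding both
+// package foo and package foo_test yields the two keys
+// "/src/pkg/foo:foo" and "/src/pkg/foo:foo_test", and thus two
+// distinct *Pak values for the same directory.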
+
+func (x *Indexer) addSnippet(s *Snippet) int {
+	index := len(x.snippets)
+	x.snippets = append(x.snippets, s)
+	return index
+}
+
+func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
+	if id != nil {
+		lists, found := x.words[id.Name]
+		if !found {
+			lists = new(IndexResult)
+			x.words[id.Name] = lists
+		}
+
+		if kind == Use || x.decl == nil {
+			// not a declaration or no snippet required
+			info := makeSpotInfo(kind, x.current.Line(id.Pos()), false)
+			lists.Others = append(lists.Others, Spot{x.file, info})
+		} else {
+			// a declaration with snippet
+			index := x.addSnippet(NewSnippet(x.fset, x.decl, id))
+			info := makeSpotInfo(kind, index, true)
+			lists.Decls = append(lists.Decls, Spot{x.file, info})
+		}
+
+		x.stats.Spots++
+	}
+}
+
+func (x *Indexer) visitFieldList(kind SpotKind, list *ast.FieldList) {
+	for _, f := range list.List {
+		x.decl = nil // no snippets for fields
+		for _, name := range f.Names {
+			x.visitIdent(kind, name)
+		}
+		ast.Walk(x, f.Type)
+		// ignore tag - not indexed at the moment
+	}
+}
+
+func (x *Indexer) visitSpec(kind SpotKind, spec ast.Spec) {
+	switch n := spec.(type) {
+	case *ast.ImportSpec:
+		x.visitIdent(ImportDecl, n.Name)
+		// ignore path - not indexed at the moment
+
+	case *ast.ValueSpec:
+		for _, n := range n.Names {
+			x.visitIdent(kind, n)
+		}
+		ast.Walk(x, n.Type)
+		for _, v := range n.Values {
+			ast.Walk(x, v)
+		}
+
+	case *ast.TypeSpec:
+		x.visitIdent(TypeDecl, n.Name)
+		ast.Walk(x, n.Type)
+	}
+}
+
+func (x *Indexer) visitGenDecl(decl *ast.GenDecl) {
+	kind := VarDecl
+	if decl.Tok == token.CONST {
+		kind = ConstDecl
+	}
+	x.decl = decl
+	for _, s := range decl.Specs {
+		x.visitSpec(kind, s)
+	}
+}
+
+func (x *Indexer) Visit(node ast.Node) ast.Visitor {
+	switch n := node.(type) {
+	case nil:
+		// nothing to do
+
+	case *ast.Ident:
+		x.visitIdent(Use, n)
+
+	case *ast.FieldList:
+		x.visitFieldList(VarDecl, n)
+
+	case *ast.InterfaceType:
+		x.visitFieldList(MethodDecl, n.Methods)
+
+	case *ast.DeclStmt:
+		// local declarations should only be *ast.GenDecls;
+		// ignore incorrect ASTs
+		if decl, ok := n.Decl.(*ast.GenDecl); ok {
+			x.decl = nil // no snippets for local declarations
+			x.visitGenDecl(decl)
+		}
+
+	case *ast.GenDecl:
+		x.decl = n
+		x.visitGenDecl(n)
+
+	case *ast.FuncDecl:
+		kind := FuncDecl
+		if n.Recv != nil {
+			kind = MethodDecl
+			ast.Walk(x, n.Recv)
+		}
+		x.decl = n
+		x.visitIdent(kind, n.Name)
+		ast.Walk(x, n.Type)
+		if n.Body != nil {
+			ast.Walk(x, n.Body)
+		}
+
+	case *ast.File:
+		x.decl = nil
+		x.visitIdent(PackageClause, n.Name)
+		for _, d := range n.Decls {
+			ast.Walk(x, d)
+		}
+
+	default:
+		return x
+	}
+
+	return nil
+}
+
+func pkgName(filename string) string {
+	// use a new file set each time in order to not pollute the indexer's
+	// file set (which must stay in sync with the concatenated source code)
+	file, err := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
+	if err != nil || file == nil {
+		return ""
+	}
+	return file.Name.Name
+}
+
+// addFile adds a file to the index if possible and returns the file set file
+// and the file's AST if it was successfully parsed as a Go file. If addFile
+// failed (that is, if the file was not added), it returns file == nil.
+func (x *Indexer) addFile(filename string, goFile bool) (file *token.File, ast *ast.File) {
+	// open file
+	f, err := fs.Open(filename)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+
+	// The file set's base offset and x.sources size must be in lock-step;
+	// this permits the direct mapping of suffix array lookup results
+	// to corresponding Pos values.
+	//
+	// When a file is added to the file set, its offset base increases by
+	// the size of the file + 1; and the initial base offset is 1. Add an
+	// extra byte to the sources here.
+	x.sources.WriteByte(0)
+
+	// If the sources length doesn't match the file set base at this point,
+	// the file set implementation changed or we have another error.
+	base := x.fset.Base()
+	if x.sources.Len() != base {
+		panic("internal error: file base incorrect")
+	}
+
+	// append file contents (src) to x.sources
+	if _, err := x.sources.ReadFrom(f); err == nil {
+		src := x.sources.Bytes()[base:]
+
+		if goFile {
+			// parse the file and in the process add it to the file set
+			if ast, err = parser.ParseFile(x.fset, filename, src, parser.ParseComments); err == nil {
+				file = x.fset.File(ast.Pos()) // ast.Pos() is inside the file
+				return
+			}
+			// file has parse errors, and the AST may be incorrect -
+			// set lines information explicitly and index as ordinary
+			// text file (cannot fall through to the text case below
+			// because the file has already been added to the file set
+			// by the parser)
+			file = x.fset.File(token.Pos(base)) // token.Pos(base) is inside the file
+			file.SetLinesForContent(src)
+			ast = nil
+			return
+		}
+
+		if isText(src) {
+			// only add the file to the file set (for the full text index)
+			file = x.fset.AddFile(filename, x.fset.Base(), len(src))
+			file.SetLinesForContent(src)
+			return
+		}
+	}
+
+	// discard possibly added data
+	x.sources.Truncate(base - 1) // -1 to remove added byte 0 since no file was added
+	return
+}
+
+// Design note: Using an explicit white list of permitted files for indexing
+// makes sure that the important files are included and massively reduces the
+// number of files to index. The advantage over a blacklist is that unexpected
+// (non-blacklisted) files won't suddenly explode the index.
+
+// Files are whitelisted if their file name or extension
+// is present as a key in the whitelisted map.
+var whitelisted = map[string]bool{
+	".bash":        true,
+	".c":           true,
+	".cc":          true,
+	".cpp":         true,
+	".cxx":         true,
+	".css":         true,
+	".go":          true,
+	".goc":         true,
+	".h":           true,
+	".hh":          true,
+	".hpp":         true,
+	".hxx":         true,
+	".html":        true,
+	".js":          true,
+	".out":         true,
+	".py":          true,
+	".s":           true,
+	".sh":          true,
+	".txt":         true,
+	".xml":         true,
+	"AUTHORS":      true,
+	"CONTRIBUTORS": true,
+	"LICENSE":      true,
+	"Makefile":     true,
+	"PATENTS":      true,
+	"README":       true,
+}
+
+// isWhitelisted returns true if a file is on the list
+// of "permitted" files for indexing. The filename must
+// be the directory-local name of the file.
+func isWhitelisted(filename string) bool {
+	key := pathpkg.Ext(filename)
+	if key == "" {
+		// file has no extension - use entire filename
+		key = filename
+	}
+	return whitelisted[key]
+}
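+
+// For example (illustrative):
+//	isWhitelisted("main.go")   == true  // key ".go"
+//	isWhitelisted("Makefile")  == true  // no extension - whole name is the key
+//	isWhitelisted("image.gif") == false // key ".gif" is not whitelisted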
+
+func (x *Indexer) visitFile(dirname string, f os.FileInfo, fulltextIndex bool) {
+	if f.IsDir() {
+		return
+	}
+
+	filename := pathpkg.Join(dirname, f.Name())
+	goFile := false
+
+	switch {
+	case isGoFile(f):
+		if !includeTestFiles && (!isPkgFile(f) || strings.HasPrefix(filename, "test/")) {
+			return
+		}
+		if !includeMainPackages && pkgName(filename) == "main" {
+			return
+		}
+		goFile = true
+
+	case !fulltextIndex || !isWhitelisted(f.Name()):
+		return
+	}
+
+	file, fast := x.addFile(filename, goFile)
+	if file == nil {
+		return // addFile failed
+	}
+
+	if fast != nil {
+		// we've got a Go file to index
+		x.current = file
+		pak := x.lookupPackage(dirname, fast.Name.Name)
+		x.file = &File{f.Name(), pak}
+		ast.Walk(x, fast)
+	}
+
+	// update statistics
+	x.stats.Bytes += file.Size()
+	x.stats.Files++
+	x.stats.Lines += file.LineCount()
+}
+
+// ----------------------------------------------------------------------------
+// Index
+
+type LookupResult struct {
+	Decls  HitList // package-level declarations (with snippets)
+	Others HitList // all other occurrences
+}
+
+type Index struct {
+	fset     *token.FileSet           // file set used during indexing; nil if no textindex
+	suffixes *suffixarray.Index       // suffixes for concatenated sources; nil if no textindex
+	words    map[string]*LookupResult // maps words to hit lists
+	alts     map[string]*AltWords     // maps canonical(words) to lists of alternative spellings
+	snippets []*Snippet               // all snippets, indexed by snippet index
+	stats    Statistics
+}
+
+func canonical(w string) string { return strings.ToLower(w) }
+
+// NewIndex creates a new index for the .go files
+// in the directories given by dirnames.
+//
+func NewIndex(dirnames <-chan string, fulltextIndex bool, throttle float64) *Index {
+	var x Indexer
+	th := NewThrottle(throttle, 100*time.Millisecond) // run at least 0.1s at a time
+
+	// initialize Indexer
+	// (use some reasonably sized maps to start)
+	x.fset = token.NewFileSet()
+	x.packages = make(map[string]*Pak, 256)
+	x.words = make(map[string]*IndexResult, 8192)
+
+	// index all files in the directories given by dirnames
+	for dirname := range dirnames {
+		list, err := fs.ReadDir(dirname)
+		if err != nil {
+			continue // ignore this directory
+		}
+		for _, f := range list {
+			if !f.IsDir() {
+				x.visitFile(dirname, f, fulltextIndex)
+			}
+			th.Throttle()
+		}
+	}
+
+	if !fulltextIndex {
+		// the file set, the current file, and the sources are
+		// not needed after indexing if no text index is built -
+		// help GC and clear them
+		x.fset = nil
+		x.sources.Reset()
+		x.current = nil // contains reference to fset!
+	}
+
+	// for each word, reduce the RunLists into a LookupResult;
+	// also collect the word with its canonical spelling in a
+	// word list for later computation of alternative spellings
+	words := make(map[string]*LookupResult)
+	var wlist RunList
+	for w, h := range x.words {
+		decls := reduce(h.Decls)
+		others := reduce(h.Others)
+		words[w] = &LookupResult{
+			Decls:  decls,
+			Others: others,
+		}
+		wlist = append(wlist, &wordPair{canonical(w), w})
+		th.Throttle()
+	}
+	x.stats.Words = len(words)
+
+	// reduce the word list {canonical(w), w} into
+	// a list of AltWords runs {canonical(w), {w}}
+	alist := wlist.reduce(lessWordPair, newAltWords)
+
+	// convert alist into a map of alternative spellings
+	alts := make(map[string]*AltWords)
+	for i := 0; i < len(alist); i++ {
+		a := alist[i].(*AltWords)
+		alts[a.Canon] = a
+	}
+
+	// create text index
+	var suffixes *suffixarray.Index
+	if fulltextIndex {
+		suffixes = suffixarray.New(x.sources.Bytes())
+	}
+
+	return &Index{x.fset, suffixes, words, alts, x.snippets, x.stats}
+}
+
+type fileIndex struct {
+	Words    map[string]*LookupResult
+	Alts     map[string]*AltWords
+	Snippets []*Snippet
+	Fulltext bool
+}
+
+func (x *fileIndex) Write(w io.Writer) error {
+	return gob.NewEncoder(w).Encode(x)
+}
+
+func (x *fileIndex) Read(r io.Reader) error {
+	return gob.NewDecoder(r).Decode(x)
+}
+
+// Write writes the index x to w.
+func (x *Index) Write(w io.Writer) error {
+	fulltext := false
+	if x.suffixes != nil {
+		fulltext = true
+	}
+	fx := fileIndex{
+		x.words,
+		x.alts,
+		x.snippets,
+		fulltext,
+	}
+	if err := fx.Write(w); err != nil {
+		return err
+	}
+	if fulltext {
+		encode := func(x interface{}) error {
+			return gob.NewEncoder(w).Encode(x)
+		}
+		if err := x.fset.Write(encode); err != nil {
+			return err
+		}
+		if err := x.suffixes.Write(w); err != nil {
+			return err
+		}
+	}
+	return nil
+}
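+
+// The resulting index file layout is therefore (schematic):
+//	gob(fileIndex) [gob(file set) suffix-array data]
+// where the bracketed part is written only for full text indexes.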
+
+// Read reads the index from r into x; x must not be nil.
+// If r does not also implement io.ByteReader, it will be wrapped in a bufio.Reader.
+func (x *Index) Read(r io.Reader) error {
+	// We use the ability to read bytes as a plausible surrogate for buffering.
+	if _, ok := r.(io.ByteReader); !ok {
+		r = bufio.NewReader(r)
+	}
+	var fx fileIndex
+	if err := fx.Read(r); err != nil {
+		return err
+	}
+	x.words = fx.Words
+	x.alts = fx.Alts
+	x.snippets = fx.Snippets
+	if fx.Fulltext {
+		x.fset = token.NewFileSet()
+		decode := func(x interface{}) error {
+			return gob.NewDecoder(r).Decode(x)
+		}
+		if err := x.fset.Read(decode); err != nil {
+			return err
+		}
+		x.suffixes = new(suffixarray.Index)
+		if err := x.suffixes.Read(r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stats returns index statistics.
+func (x *Index) Stats() Statistics {
+	return x.stats
+}
+
+func (x *Index) lookupWord(w string) (match *LookupResult, alt *AltWords) {
+	match = x.words[w]
+	alt = x.alts[canonical(w)]
+	// remove current spelling from alternatives
+	// (if there is no match, the alternatives do
+	// not contain the current spelling)
+	if match != nil && alt != nil {
+		alt = alt.filter(w)
+	}
+	return
+}
+
+// isIdentifier reports whether s is a Go identifier.
+func isIdentifier(s string) bool {
+	for i, ch := range s {
+		if unicode.IsLetter(ch) || ch == '_' || i > 0 && unicode.IsDigit(ch) {
+			continue
+		}
+		return false
+	}
+	return len(s) > 0
+}
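+
+// For example, isIdentifier("Foo_1") is true, while isIdentifier("1x"),
+// isIdentifier("a.b"), and isIdentifier("") are false.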
+
+// For a given query, which is either a single identifier or a qualified
+// identifier, Lookup returns a list of packages, a LookupResult, and a
+// list of alternative spellings, if any. Any and all results may be nil.
+// If the query syntax is wrong, an error is reported.
+func (x *Index) Lookup(query string) (paks HitList, match *LookupResult, alt *AltWords, err error) {
+	ss := strings.Split(query, ".")
+
+	// check query syntax
+	for _, s := range ss {
+		if !isIdentifier(s) {
+			err = errors.New("all query parts must be identifiers")
+			return
+		}
+	}
+
+	// handle simple and qualified identifiers
+	switch len(ss) {
+	case 1:
+		ident := ss[0]
+		match, alt = x.lookupWord(ident)
+		if match != nil {
+			// found a match - filter packages with same name
+			// for the list of packages called ident, if any
+			paks = match.Others.filter(ident)
+		}
+
+	case 2:
+		pakname, ident := ss[0], ss[1]
+		match, alt = x.lookupWord(ident)
+		if match != nil {
+			// found a match - filter by package name
+			// (no paks - package names are not qualified)
+			decls := match.Decls.filter(pakname)
+			others := match.Others.filter(pakname)
+			match = &LookupResult{decls, others}
+		}
+
+	default:
+		err = errors.New("query is not a (qualified) identifier")
+	}
+
+	return
+}
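+
+// For example (illustrative), Lookup("fmt.Println") narrows the hits
+// for "Println" to packages named "fmt", while Lookup("Println")
+// returns all hits for "Println" plus the list of packages named
+// "Println", if any.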
+
+func (x *Index) Snippet(i int) *Snippet {
+	// handle illegal snippet indices gracefully
+	if 0 <= i && i < len(x.snippets) {
+		return x.snippets[i]
+	}
+	return nil
+}
+
+type positionList []struct {
+	filename string
+	line     int
+}
+
+func (list positionList) Len() int           { return len(list) }
+func (list positionList) Less(i, j int) bool { return list[i].filename < list[j].filename }
+func (list positionList) Swap(i, j int)      { list[i], list[j] = list[j], list[i] }
+
+// unique returns the list sorted and with duplicate entries removed
+func unique(list []int) []int {
+	sort.Ints(list)
+	var last int
+	i := 0
+	for _, x := range list {
+		if i == 0 || x != last {
+			last = x
+			list[i] = x
+			i++
+		}
+	}
+	return list[0:i]
+}
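+
+// For example, unique([]int{3, 1, 3, 2, 1}) returns []int{1, 2, 3}.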
+
+// A FileLines value specifies a file and line numbers within that file.
+type FileLines struct {
+	Filename string
+	Lines    []int
+}
+
+// LookupRegexp returns the number of matches and the matches where a regular
+// expression r is found in the full text index. At most n matches are
+// returned (thus found <= n).
+//
+func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileLines) {
+	if x.suffixes == nil || n <= 0 {
+		return
+	}
+	// n > 0
+
+	var list positionList
+	// FindAllIndex may return matches that span across file boundaries.
+	// Such matches are unlikely, but after eliminating them we may end up
+	// with fewer than n matches. If we don't have enough at the end, redo
+	// the search with an increased value n1, but only if FindAllIndex
+	// returned all the requested matches in the first place (if it
+	// returned fewer than that there cannot be more).
+	for n1 := n; found < n; n1 += n - found {
+		found = 0
+		matches := x.suffixes.FindAllIndex(r, n1)
+		// compute files, exclude matches that span file boundaries,
+		// and map offsets to file-local offsets
+		list = make(positionList, len(matches))
+		for _, m := range matches {
+			// by construction, an offset corresponds to the Pos value
+			// for the file set - use it to get the file and line
+			p := token.Pos(m[0])
+			if file := x.fset.File(p); file != nil {
+				if base := file.Base(); base <= m[1] && m[1] <= base+file.Size() {
+					// match [m[0], m[1]) is within the file boundaries
+					list[found].filename = file.Name()
+					list[found].line = file.Line(p)
+					found++
+				}
+			}
+		}
+		if found == n || len(matches) < n1 {
+			// found all matches or there's no chance to find more
+			break
+		}
+	}
+	list = list[0:found]
+	sort.Sort(list) // sort by filename
+
+	// collect matches belonging to the same file
+	var last string
+	var lines []int
+	addLines := func() {
+		if len(lines) > 0 {
+			// remove duplicate lines
+			result = append(result, FileLines{last, unique(lines)})
+			lines = nil
+		}
+	}
+	for _, m := range list {
+		if m.filename != last {
+			addLines()
+			last = m.filename
+		}
+		lines = append(lines, m.line)
+	}
+	addLines()
+
+	return
+}
diff --git a/cmd/godoc/linkify.go b/cmd/godoc/linkify.go
new file mode 100644
index 0000000..7213abb
--- /dev/null
+++ b/cmd/godoc/linkify.go
@@ -0,0 +1,234 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements LinkifyText which introduces
+// links for identifiers pointing to their declarations.
+// The approach does not cover all cases because godoc
+// doesn't have complete type information, but it's
+// reasonably good for browsing.
+
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"io"
+	"strconv"
+)
+
+// LinkifyText HTML-escapes source text and writes it to w.
+// Identifiers that are in a "use" position (i.e., that are
+// not being declared) are wrapped with HTML links pointing
+// to the respective declaration, if possible. Comments are
+// formatted the same way as with FormatText.
+//
+func LinkifyText(w io.Writer, text []byte, n ast.Node) {
+	links := linksFor(n)
+
+	i := 0     // links index
+	prev := "" // prev HTML tag
+	linkWriter := func(w io.Writer, _ int, start bool) {
+		// end tag
+		if !start {
+			if prev != "" {
+				fmt.Fprintf(w, `</%s>`, prev)
+				prev = ""
+			}
+			return
+		}
+
+		// start tag
+		prev = ""
+		if i < len(links) {
+			switch info := links[i]; {
+			case info.path != "" && info.name == "":
+				// package path
+				fmt.Fprintf(w, `<a href="/pkg/%s/">`, info.path)
+				prev = "a"
+			case info.path != "" && info.name != "":
+				// qualified identifier
+				fmt.Fprintf(w, `<a href="/pkg/%s/#%s">`, info.path, info.name)
+				prev = "a"
+			case info.path == "" && info.name != "":
+				// local identifier
+				if info.mode == identVal {
+					fmt.Fprintf(w, `<span id="%s">`, info.name)
+					prev = "span"
+				} else if ast.IsExported(info.name) {
+					fmt.Fprintf(w, `<a href="#%s">`, info.name)
+					prev = "a"
+				}
+			}
+			i++
+		}
+	}
+
+	idents := tokenSelection(text, token.IDENT)
+	comments := tokenSelection(text, token.COMMENT)
+	FormatSelections(w, text, linkWriter, idents, selectionTag, comments)
+}
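+
+// For example (illustrative), an exported identifier Foo in a use
+// position is emitted as
+//	<a href="#Foo">Foo</a>
+// while a const or var declaration of foo is emitted as
+//	<span id="foo">foo</span>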
+
+// A link describes the (HTML) link information for an identifier.
+// The zero value of a link represents "no link".
+//
+type link struct {
+	mode       identMode
+	path, name string // package path, identifier name
+}
+
+// linksFor returns the list of links for the identifiers used
+// by node in the same order as they appear in the source.
+//
+func linksFor(node ast.Node) (list []link) {
+	modes := identModesFor(node)
+
+	// NOTE: We are expecting ast.Inspect to call the
+	//       callback function in source text order.
+	ast.Inspect(node, func(node ast.Node) bool {
+		switch n := node.(type) {
+		case *ast.Ident:
+			m := modes[n]
+			info := link{mode: m}
+			switch m {
+			case identUse:
+				if n.Obj == nil && predeclared[n.Name] {
+					info.path = builtinPkgPath
+				}
+				info.name = n.Name
+			case identDef:
+				// any declaration except const or var - empty link
+			case identVal:
+				// const or var declaration
+				info.name = n.Name
+			}
+			list = append(list, info)
+			return false
+		case *ast.SelectorExpr:
+			// Detect qualified identifiers of the form pkg.ident.
+			// If anything fails we return true and collect individual
+			// identifiers instead.
+			if x, _ := n.X.(*ast.Ident); x != nil {
+				// x must be a package for a qualified identifier
+				if obj := x.Obj; obj != nil && obj.Kind == ast.Pkg {
+					if spec, _ := obj.Decl.(*ast.ImportSpec); spec != nil {
+						// spec.Path.Value is the import path
+						if path, err := strconv.Unquote(spec.Path.Value); err == nil {
+							// Register two links, one for the package
+							// and one for the qualified identifier.
+							info := link{path: path}
+							list = append(list, info)
+							info.name = n.Sel.Name
+							list = append(list, info)
+							return false
+						}
+					}
+				}
+			}
+		}
+		return true
+	})
+
+	return
+}
+
+// The identMode describes how an identifier is "used" at its source location.
+type identMode int
+
+const (
+	identUse identMode = iota // identifier is used (must be zero value for identMode)
+	identDef                  // identifier is defined
+	identVal                  // identifier is defined in a const or var declaration
+)
+
+// identModesFor returns a map providing the identMode for each identifier used by node.
+func identModesFor(node ast.Node) map[*ast.Ident]identMode {
+	m := make(map[*ast.Ident]identMode)
+
+	ast.Inspect(node, func(node ast.Node) bool {
+		switch n := node.(type) {
+		case *ast.Field:
+			for _, n := range n.Names {
+				m[n] = identDef
+			}
+		case *ast.ImportSpec:
+			if name := n.Name; name != nil {
+				m[name] = identDef
+			}
+		case *ast.ValueSpec:
+			for _, n := range n.Names {
+				m[n] = identVal
+			}
+		case *ast.TypeSpec:
+			m[n.Name] = identDef
+		case *ast.FuncDecl:
+			m[n.Name] = identDef
+		case *ast.AssignStmt:
+			// Short variable declarations only show up if we apply
+			// this code to all source code (as opposed to exported
+			// declarations only).
+			if n.Tok == token.DEFINE {
+				// Some of the lhs variables may be re-declared,
+				// so technically they are not defs. We don't
+				// care for now.
+				for _, x := range n.Lhs {
+					// Each lhs expression should be an
+					// ident, but we are conservative and check.
+					if n, _ := x.(*ast.Ident); n != nil {
+						m[n] = identVal
+					}
+				}
+			}
+		}
+		return true
+	})
+
+	return m
+}
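+
+// For example (illustrative), in
+//	func f(x int) { y := x }
+// both f and the parameter x map to identDef, y maps to identVal, and
+// the use of x on the right-hand side defaults to identUse.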
+
+// The predeclared map represents the set of all predeclared identifiers.
+// TODO(gri) This information is also encoded in similar maps in go/doc,
+//           but not exported. Consider exporting an accessor and using
+//           it instead.
+var predeclared = map[string]bool{
+	"bool":       true,
+	"byte":       true,
+	"complex64":  true,
+	"complex128": true,
+	"error":      true,
+	"float32":    true,
+	"float64":    true,
+	"int":        true,
+	"int8":       true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"rune":       true,
+	"string":     true,
+	"uint":       true,
+	"uint8":      true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uintptr":    true,
+	"true":       true,
+	"false":      true,
+	"iota":       true,
+	"nil":        true,
+	"append":     true,
+	"cap":        true,
+	"close":      true,
+	"complex":    true,
+	"copy":       true,
+	"delete":     true,
+	"imag":       true,
+	"len":        true,
+	"make":       true,
+	"new":        true,
+	"panic":      true,
+	"print":      true,
+	"println":    true,
+	"real":       true,
+	"recover":    true,
+}
diff --git a/cmd/godoc/main.go b/cmd/godoc/main.go
new file mode 100644
index 0000000..81e739d
--- /dev/null
+++ b/cmd/godoc/main.go
@@ -0,0 +1,470 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// godoc: Go Documentation Server
+
+// Web server tree:
+//
+//	http://godoc/		main landing page
+//	http://godoc/doc/	serve from $GOROOT/doc - spec, mem, etc.
+//	http://godoc/src/	serve files from $GOROOT/src; .go gets pretty-printed
+//	http://godoc/cmd/	serve documentation about commands
+//	http://godoc/pkg/	serve documentation about packages
+//				(idea is if you say import "compress/zlib", you go to
+//				http://godoc/pkg/compress/zlib)
+//
+// Command-line interface:
+//
+//	godoc packagepath [name ...]
+//
+//	godoc compress/zlib
+//		- prints doc for package compress/zlib
+//	godoc crypto/block Cipher NewCMAC
+//		- prints doc for Cipher and NewCMAC in package crypto/block
+
+// +build !appengine
+
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"errors"
+	_ "expvar" // to serve /debug/vars
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/printer"
+	"io"
+	"log"
+	"net/http"
+	_ "net/http/pprof" // to serve /debug/pprof/*
+	"net/url"
+	"os"
+	pathpkg "path"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+const defaultAddr = ":6060" // default webserver address
+
+var (
+	// file system to serve
+	// (with e.g.: zip -r go.zip $GOROOT -i \*.go -i \*.html -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i favicon.ico)
+	zipfile = flag.String("zip", "", "zip file providing the file system to serve; disabled if empty")
+
+	// file-based index
+	writeIndex = flag.Bool("write_index", false, "write index to a file; the file name must be specified with -index_files")
+
+	// network
+	httpAddr   = flag.String("http", "", "HTTP service address (e.g., '"+defaultAddr+"')")
+	serverAddr = flag.String("server", "", "webserver address for command line searches")
+
+	// layout control
+	html    = flag.Bool("html", false, "print HTML in command-line mode")
+	srcMode = flag.Bool("src", false, "print (exported) source in command-line mode")
+	urlFlag = flag.String("url", "", "print HTML for named URL")
+
+	// command-line searches
+	query = flag.Bool("q", false, "arguments are considered search queries")
+)
+
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
+	w.WriteHeader(http.StatusNotFound)
+	servePage(w, Page{
+		Title:    "File " + relpath,
+		Subtitle: relpath,
+		Body:     applyTemplate(errorHTML, "errorHTML", err), // err may contain an absolute path!
+	})
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr,
+		"usage: godoc package [name ...]\n"+
+			"	godoc -http="+defaultAddr+"\n")
+	flag.PrintDefaults()
+	os.Exit(2)
+}
+
+func loggingHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		log.Printf("%s\t%s", req.RemoteAddr, req.URL)
+		h.ServeHTTP(w, req)
+	})
+}
+
+func remoteSearch(query string) (res *http.Response, err error) {
+	// list of addresses to try
+	var addrs []string
+	if *serverAddr != "" {
+		// explicit server address - only try this one
+		addrs = []string{*serverAddr}
+	} else {
+		addrs = []string{
+			defaultAddr,
+			"golang.org",
+		}
+	}
+
+	// remote search
+	search := remoteSearchURL(query, *html)
+	for _, addr := range addrs {
+		url := "http://" + addr + search
+		res, err = http.Get(url)
+		if err == nil && res.StatusCode == http.StatusOK {
+			break
+		}
+	}
+
+	if err == nil && res.StatusCode != http.StatusOK {
+		err = errors.New(res.Status)
+	}
+
+	return
+}
+
+// Does s look like a regular expression?
+func isRegexp(s string) bool {
+	return strings.IndexAny(s, ".(|)*+?^$[]") >= 0
+}
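+
+// For example, isRegexp("zlib") is false, while isRegexp("^New.*$") is true.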
+
+// Make a regular expression of the form
+// names[0]|names[1]|...names[len(names)-1].
+// Returns nil if the regular expression is illegal.
+func makeRx(names []string) (rx *regexp.Regexp) {
+	if len(names) > 0 {
+		s := ""
+		for i, name := range names {
+			if i > 0 {
+				s += "|"
+			}
+			if isRegexp(name) {
+				s += name
+			} else {
+				s += "^" + name + "$" // must match exactly
+			}
+		}
+		rx, _ = regexp.Compile(s) // rx is nil if there's a compilation error
+	}
+	return
+}
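+
+// For example (illustrative), makeRx([]string{"Cipher", "New.*"})
+// compiles the expression ^Cipher$|New.* - the plain name must match
+// exactly, while the regular expression is used as given.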
+
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+
+	// Check usage: either server and no args, command line and args, or index creation mode
+	if (*httpAddr != "" || *urlFlag != "") != (flag.NArg() == 0) && !*writeIndex {
+		usage()
+	}
+
+	if *tabwidth < 0 {
+		log.Fatalf("negative tabwidth %d", *tabwidth)
+	}
+
+	// Determine file system to use.
+	// TODO(gri) - fs and fsHttp should really be the same. Try to unify.
+	//           - fsHttp doesn't need to be set up in command-line mode,
+	//             same is true for the http handlers in initHandlers.
+	if *zipfile == "" {
+		// use file system of underlying OS
+		fs.Bind("/", OS(*goroot), "/", bindReplace)
+		if *templateDir != "" {
+			fs.Bind("/lib/godoc", OS(*templateDir), "/", bindBefore)
+		}
+	} else {
+		// use file system specified via .zip file (path separator must be '/')
+		rc, err := zip.OpenReader(*zipfile)
+		if err != nil {
+			log.Fatalf("%s: %s\n", *zipfile, err)
+		}
+		defer rc.Close() // be nice (e.g., -writeIndex mode)
+		fs.Bind("/", NewZipFS(rc, *zipfile), *goroot, bindReplace)
+	}
+
+	// Bind $GOPATH trees into Go root.
+	for _, p := range filepath.SplitList(build.Default.GOPATH) {
+		fs.Bind("/src/pkg", OS(p), "/src", bindAfter)
+	}
+
+	readTemplates()
+	initHandlers()
+
+	if *writeIndex {
+		// Write search index and exit.
+		if *indexFiles == "" {
+			log.Fatal("no index file specified")
+		}
+
+		log.Println("initialize file systems")
+		*verbose = true // want to see what happens
+		initFSTree()
+
+		*indexThrottle = 1
+		updateIndex()
+
+		log.Println("writing index file", *indexFiles)
+		f, err := os.Create(*indexFiles)
+		if err != nil {
+			log.Fatal(err)
+		}
+		index, _ := searchIndex.get()
+		err = index.(*Index).Write(f)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		log.Println("done")
+		return
+	}
+
+	// Print content that would be served at the URL *urlFlag.
+	if *urlFlag != "" {
+		registerPublicHandlers(http.DefaultServeMux)
+		initFSTree()
+		updateMetadata()
+		// Try up to 10 fetches, following redirects.
+		urlstr := *urlFlag
+		for i := 0; i < 10; i++ {
+			// Prepare request.
+			u, err := url.Parse(urlstr)
+			if err != nil {
+				log.Fatal(err)
+			}
+			req := &http.Request{
+				URL: u,
+			}
+
+			// Invoke default HTTP handler to serve request
+			// to our buffering httpWriter.
+			w := &httpWriter{h: http.Header{}, code: 200}
+			http.DefaultServeMux.ServeHTTP(w, req)
+
+			// Return data, error, or follow redirect.
+			switch w.code {
+			case 200: // ok
+				os.Stdout.Write(w.Bytes())
+				return
+			case 301, 302, 303, 307: // redirect
+				redirect := w.h.Get("Location")
+				if redirect == "" {
+					log.Fatalf("HTTP %d without Location header", w.code)
+				}
+				urlstr = redirect
+			default:
+				log.Fatalf("HTTP error %d", w.code)
+			}
+		}
+		log.Fatalf("too many redirects")
+	}
+
+	if *httpAddr != "" {
+		// HTTP server mode.
+		var handler http.Handler = http.DefaultServeMux
+		if *verbose {
+			log.Printf("Go Documentation Server")
+			log.Printf("version = %s", runtime.Version())
+			log.Printf("address = %s", *httpAddr)
+			log.Printf("goroot = %s", *goroot)
+			log.Printf("tabwidth = %d", *tabwidth)
+			switch {
+			case !*indexEnabled:
+				log.Print("search index disabled")
+			case *maxResults > 0:
+				log.Printf("full text index enabled (maxresults = %d)", *maxResults)
+			default:
+				log.Print("identifier search index enabled")
+			}
+			fs.Fprint(os.Stderr)
+			handler = loggingHandler(handler)
+		}
+
+		registerPublicHandlers(http.DefaultServeMux)
+		registerPlaygroundHandlers(http.DefaultServeMux)
+
+		// Initialize default directory tree with corresponding timestamp.
+		// (Do it in a goroutine so that launch is quick.)
+		go initFSTree()
+
+		// Immediately update metadata.
+		updateMetadata()
+		// Periodically refresh metadata.
+		go refreshMetadataLoop()
+
+		// Initialize search index.
+		if *indexEnabled {
+			go indexer()
+		}
+
+		// Start http server.
+		if err := http.ListenAndServe(*httpAddr, handler); err != nil {
+			log.Fatalf("ListenAndServe %s: %v", *httpAddr, err)
+		}
+
+		return
+	}
+
+	// Command line mode.
+	if *html {
+		packageText = packageHTML
+		searchText = packageHTML
+	}
+
+	if *query {
+		// Command-line queries.
+		for i := 0; i < flag.NArg(); i++ {
+			res, err := remoteSearch(flag.Arg(i))
+			if err != nil {
+				log.Fatalf("remoteSearch: %s", err)
+			}
+			io.Copy(os.Stdout, res.Body)
+		}
+		return
+	}
+
+	// Determine paths.
+	//
+	// If we are passed an operating system path like . or ./foo or /foo/bar or c:\mysrc,
+	// we need to map that path somewhere in the fs name space so that routines
+	// like getPageInfo will see it.  We use the arbitrarily-chosen virtual path "/target"
+	// for this.  That is, if we get passed a directory like the above, we map that
+	// directory so that getPageInfo sees it as /target.
+	const target = "/target"
+	const cmdPrefix = "cmd/"
+	path := flag.Arg(0)
+	var forceCmd bool
+	var abspath, relpath string
+	if filepath.IsAbs(path) {
+		fs.Bind(target, OS(path), "/", bindReplace)
+		abspath = target
+	} else if build.IsLocalImport(path) {
+		cwd, _ := os.Getwd() // ignore errors
+		path = filepath.Join(cwd, path)
+		fs.Bind(target, OS(path), "/", bindReplace)
+		abspath = target
+	} else if strings.HasPrefix(path, cmdPrefix) {
+		path = strings.TrimPrefix(path, cmdPrefix)
+		forceCmd = true
+	} else if bp, _ := build.Import(path, "", build.FindOnly); bp.Dir != "" && bp.ImportPath != "" {
+		fs.Bind(target, OS(bp.Dir), "/", bindReplace)
+		abspath = target
+		relpath = bp.ImportPath
+	} else {
+		abspath = pathpkg.Join(pkgHandler.fsRoot, path)
+	}
+	if relpath == "" {
+		relpath = abspath
+	}
+
+	var mode PageInfoMode
+	if relpath == builtinPkgPath {
+		// the fake built-in package contains unexported identifiers
+		mode = noFiltering
+	}
+	if *srcMode {
+		// only filter exports if we don't have explicit command-line filter arguments
+		if flag.NArg() > 1 {
+			mode |= noFiltering
+		}
+		mode |= showSource
+	}
+
+	// first, try as package unless forced as command
+	var info *PageInfo
+	if !forceCmd {
+		info = pkgHandler.getPageInfo(abspath, relpath, mode)
+	}
+
+	// second, try as command unless the path is absolute
+	// (the go command invokes godoc w/ absolute paths; don't override)
+	var cinfo *PageInfo
+	if !filepath.IsAbs(path) {
+		abspath = pathpkg.Join(cmdHandler.fsRoot, path)
+		cinfo = cmdHandler.getPageInfo(abspath, relpath, mode)
+	}
+
+	// determine what to use
+	if info == nil || info.IsEmpty() {
+		if cinfo != nil && !cinfo.IsEmpty() {
+			// only cinfo exists - switch to cinfo
+			info = cinfo
+		}
+	} else if cinfo != nil && !cinfo.IsEmpty() {
+		// both info and cinfo exist - use cinfo if info
+		// contains only subdirectory information
+		if info.PAst == nil && info.PDoc == nil {
+			info = cinfo
+		} else {
+			fmt.Printf("use 'godoc %s%s' for documentation on the %s command \n\n", cmdPrefix, relpath, relpath)
+		}
+	}
+
+	if info == nil {
+		log.Fatalf("%s: no such directory or package", flag.Arg(0))
+	}
+	if info.Err != nil {
+		log.Fatalf("%v", info.Err)
+	}
+
+	if info.PDoc != nil && info.PDoc.ImportPath == target {
+		// Replace virtual /target with actual argument from command line.
+		info.PDoc.ImportPath = flag.Arg(0)
+	}
+
+	// If we have more than one argument, use the remaining arguments for filtering.
+	if flag.NArg() > 1 {
+		args := flag.Args()[1:]
+		rx := makeRx(args)
+		if rx == nil {
+			log.Fatalf("illegal regular expression from %v", args)
+		}
+
+		filter := func(s string) bool { return rx.MatchString(s) }
+		switch {
+		case info.PAst != nil:
+			cmap := ast.NewCommentMap(info.FSet, info.PAst, info.PAst.Comments)
+			ast.FilterFile(info.PAst, filter)
+			// Special case: Don't use templates for printing
+			// so we only get the filtered declarations without
+			// package clause or extra whitespace.
+			for i, d := range info.PAst.Decls {
+				// determine the comments associated with d only
+				comments := cmap.Filter(d).Comments()
+				cn := &printer.CommentedNode{Node: d, Comments: comments}
+				if i > 0 {
+					fmt.Println()
+				}
+				if *html {
+					var buf bytes.Buffer
+					writeNode(&buf, info.FSet, cn)
+					FormatText(os.Stdout, buf.Bytes(), -1, true, "", nil)
+				} else {
+					writeNode(os.Stdout, info.FSet, cn)
+				}
+				fmt.Println()
+			}
+			return
+
+		case info.PDoc != nil:
+			info.PDoc.Filter(filter)
+		}
+	}
+
+	if err := packageText.Execute(os.Stdout, info); err != nil {
+		log.Printf("packageText.Execute: %s", err)
+	}
+}
+
+// An httpWriter is an http.ResponseWriter writing to a bytes.Buffer.
+type httpWriter struct {
+	bytes.Buffer
+	h    http.Header
+	code int
+}
+
+func (w *httpWriter) Header() http.Header  { return w.h }
+func (w *httpWriter) WriteHeader(code int) { w.code = code }
diff --git a/cmd/godoc/parser.go b/cmd/godoc/parser.go
new file mode 100644
index 0000000..42a5d2d
--- /dev/null
+++ b/cmd/godoc/parser.go
@@ -0,0 +1,37 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains support functions for parsing .go files
+// accessed via godoc's file system fs.
+
+package main
+
+import (
+	"go/ast"
+	"go/parser"
+	"go/token"
+	pathpkg "path"
+)
+
+func parseFile(fset *token.FileSet, filename string, mode parser.Mode) (*ast.File, error) {
+	src, err := ReadFile(fs, filename)
+	if err != nil {
+		return nil, err
+	}
+	return parser.ParseFile(fset, filename, src, mode)
+}
+
+func parseFiles(fset *token.FileSet, abspath string, localnames []string) (map[string]*ast.File, error) {
+	files := make(map[string]*ast.File)
+	for _, f := range localnames {
+		absname := pathpkg.Join(abspath, f)
+		file, err := parseFile(fset, absname, parser.ParseComments)
+		if err != nil {
+			return nil, err
+		}
+		files[absname] = file
+	}
+
+	return files, nil
+}
diff --git a/cmd/godoc/play-appengine.go b/cmd/godoc/play-appengine.go
new file mode 100644
index 0000000..9e351d1
--- /dev/null
+++ b/cmd/godoc/play-appengine.go
@@ -0,0 +1,35 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// App Engine godoc Playground functionality.
+
+// +build appengine
+
+package main
+
+import (
+	"io"
+	"net/http"
+
+	"appengine"
+	"appengine/urlfetch"
+)
+
+func bounceToPlayground(w http.ResponseWriter, req *http.Request) {
+	c := appengine.NewContext(req)
+	client := urlfetch.Client(c)
+	url := playgroundBaseURL + req.URL.Path
+	defer req.Body.Close()
+	resp, err := client.Post(url, req.Header.Get("Content-type"), req.Body)
+	if err != nil {
+		http.Error(w, "Internal Server Error", 500)
+		c.Errorf("making POST request: %v", err)
+		return
+	}
+	defer resp.Body.Close()
+	if _, err := io.Copy(w, resp.Body); err != nil {
+		http.Error(w, "Internal Server Error", 500)
+		c.Errorf("copying response body: %v", err)
+	}
+}
diff --git a/cmd/godoc/play-local.go b/cmd/godoc/play-local.go
new file mode 100644
index 0000000..637ce5e
--- /dev/null
+++ b/cmd/godoc/play-local.go
@@ -0,0 +1,41 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Stand-alone godoc Playground functionality.
+
+// +build !appengine
+
+package main
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+)
+
+var playgroundScheme, playgroundHost string
+
+func init() {
+	u, err := url.Parse(playgroundBaseURL)
+	if err != nil {
+		panic(err)
+	}
+	playgroundScheme = u.Scheme
+	playgroundHost = u.Host
+}
+
+// bounceToPlayground forwards the request to play.golang.org.
+func bounceToPlayground(w http.ResponseWriter, req *http.Request) {
+	defer req.Body.Close()
+	req.URL.Scheme = playgroundScheme
+	req.URL.Host = playgroundHost
+	resp, err := http.Post(req.URL.String(), req.Header.Get("Content-type"), req.Body)
+	if err != nil {
+		http.Error(w, err.Error(), 500)
+		return
+	}
+	w.WriteHeader(resp.StatusCode)
+	io.Copy(w, resp.Body)
+	resp.Body.Close()
+}
diff --git a/cmd/godoc/play.go b/cmd/godoc/play.go
new file mode 100644
index 0000000..47a11f6
--- /dev/null
+++ b/cmd/godoc/play.go
@@ -0,0 +1,52 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Common Playground functionality.
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"go/format"
+	"net/http"
+)
+
+// The server that will service compile and share requests.
+const playgroundBaseURL = "http://play.golang.org"
+
+func registerPlaygroundHandlers(mux *http.ServeMux) {
+	if *showPlayground {
+		mux.HandleFunc("/compile", bounceToPlayground)
+		mux.HandleFunc("/share", bounceToPlayground)
+	} else {
+		mux.HandleFunc("/compile", disabledHandler)
+		mux.HandleFunc("/share", disabledHandler)
+	}
+	http.HandleFunc("/fmt", fmtHandler)
+}
+
+type fmtResponse struct {
+	Body  string
+	Error string
+}
+
+// fmtHandler takes a Go program in its "body" form value, formats it with
+// standard gofmt formatting, and writes a fmtResponse as a JSON object.
+func fmtHandler(w http.ResponseWriter, r *http.Request) {
+	resp := new(fmtResponse)
+	body, err := format.Source([]byte(r.FormValue("body")))
+	if err != nil {
+		resp.Error = err.Error()
+	} else {
+		resp.Body = string(body)
+	}
+	json.NewEncoder(w).Encode(resp)
+}
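+
+// For example (illustrative, assuming a local godoc on :6060):
+//	curl -d 'body=package  main' http://localhost:6060/fmt
+// responds with {"Body":"package main\n","Error":""}.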
+
+// disabledHandler serves a 501 "Not Implemented" response.
+func disabledHandler(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusNotImplemented)
+	fmt.Fprint(w, "This functionality is not available via local godoc.")
+}
diff --git a/cmd/godoc/setup-godoc-app.bash b/cmd/godoc/setup-godoc-app.bash
new file mode 100755
index 0000000..792e0d4
--- /dev/null
+++ b/cmd/godoc/setup-godoc-app.bash
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script creates a complete godoc app in $APPDIR.
+# It copies the cmd/godoc and src/pkg/go/... sources from GOROOT,
+# synthesizes an app.yaml file, and creates the .zip, index, and
+# configuration files.
+#
+# If an argument is provided, it is assumed to be the app-engine godoc directory.
+# Without an argument, $APPDIR is used instead. If GOROOT is not set, "go env"
+# is consulted to find the $GOROOT.
+#
+# The script creates a .zip file representing the $GOROOT file system
+# and computes the corresponding search index files. These files are then
+# copied to $APPDIR. A corresponding godoc configuration file is created
+# in $APPDIR/appconfig.go.
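+#
+# Example usage (illustrative):
+#
+#	GOROOT=$(go env GOROOT) APPDIR=$HOME/godoc-app bash setup-godoc-app.bash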
+
+ZIPFILE=godoc.zip
+INDEXFILE=godoc.index
+SPLITFILES=index.split.
+CONFIGFILE=godoc/appconfig.go
+
+error() {
+	echo "error: $1"
+	exit 2
+}
+
+getArgs() {
+	if [ -z $GOROOT ]; then
+		GOROOT=$(go env GOROOT)
+		echo "GOROOT not set explicitly, using $GOROOT instead"
+	fi
+	if [ -z $APPDIR ]; then
+		if [ $# == 0 ]; then
+			error "APPDIR not set, and no argument provided"
+		fi
+		APPDIR=$1
+		echo "APPDIR not set, using argument instead"
+	fi
+	
+	# safety checks
+	if [ ! -d $GOROOT ]; then
+		error "$GOROOT is not a directory"
+	fi
+	if [ ! -x $GOROOT/bin/godoc ]; then
+		error "$GOROOT/bin/godoc does not exist or is not executable"
+	fi
+	if [ -e $APPDIR ]; then
+		error "$APPDIR exists; check and remove it before trying again"
+	fi
+
+	# reporting
+	echo "GOROOT = $GOROOT"
+	echo "APPDIR = $APPDIR"
+}
+
+copyGodoc() {
+	echo "*** copy $GOROOT/src/cmd/godoc to $APPDIR/godoc"
+	cp -r $GOROOT/src/cmd/godoc $APPDIR/godoc
+}
+
+copyGoPackages() {
+	echo "*** copy $GOROOT/src/pkg/go to $APPDIR/newgo and rewrite imports"
+	cp -r $GOROOT/src/pkg/go $APPDIR/newgo
+	find $APPDIR/newgo -type d -name testdata | xargs rm -r
+	gofiles=$(find $APPDIR -name '*.go')
+	sed -i '' 's_^\(."\)\(go/[a-z]*\)"$_\1new\2"_' $gofiles
+	sed -i '' 's_^\(import "\)\(go/[a-z]*\)"$_\1new\2"_' $gofiles
+}
+
+makeAppYaml() {
+	echo "*** make $APPDIR/app.yaml"
+	cat > $APPDIR/app.yaml <<EOF
+application: godoc
+version: 1
+runtime: go
+api_version: go1
+
+handlers:
+- url: /.*
+  script: _go_app
+EOF
+}
+
+makeZipfile() {
+	echo "*** make $APPDIR/$ZIPFILE"
+	zip -q -r $APPDIR/$ZIPFILE $GOROOT -i \*.go -i \*.html -i \*.xml -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i \*.ico
+}
+
+makeIndexfile() {
+	echo "*** make $APPDIR/$INDEXFILE"
+	OUT=/tmp/godoc.out
+	$GOROOT/bin/godoc -write_index -index_files=$APPDIR/$INDEXFILE -zip=$APPDIR/$ZIPFILE 2> $OUT
+	if [ $? != 0 ]; then
+		error "$GOROOT/bin/godoc failed - see $OUT for details"
+	fi
+}
+
+splitIndexfile() {
+	echo "*** split $APPDIR/$INDEXFILE"
+	split -b8m $APPDIR/$INDEXFILE $APPDIR/$SPLITFILES
+}
+
+makeConfigfile() {
+	echo "*** make $APPDIR/$CONFIGFILE"
+	cat > $APPDIR/$CONFIGFILE <<EOF
+package main
+
+// GENERATED FILE - DO NOT MODIFY BY HAND.
+// (generated by $GOROOT/src/cmd/godoc/setup-godoc-app.bash)
+
+const (
+	// .zip filename
+	zipFilename = "$ZIPFILE"
+
+	// goroot directory in .zip file
+	zipGoroot = "$GOROOT"
+
+	// glob pattern describing search index files
+	// (if empty, the index is built at run-time)
+	indexFilenames = "$SPLITFILES*"
+)
+EOF
+}
+
+getArgs "$@"
+set -e
+mkdir $APPDIR
+copyGodoc
+copyGoPackages
+makeAppYaml
+makeZipfile
+makeIndexfile
+splitIndexfile
+makeConfigfile
+
+echo "*** setup complete"
diff --git a/cmd/godoc/snippet.go b/cmd/godoc/snippet.go
new file mode 100644
index 0000000..b482b74
--- /dev/null
+++ b/cmd/godoc/snippet.go
@@ -0,0 +1,112 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the infrastructure to create a code
+// snippet for search results.
+//
+// Note: At the moment, this only creates HTML snippets.
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/token"
+)
+
+type Snippet struct {
+	Line int
+	Text string // HTML-escaped
+}
+
+func newSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
+	// TODO instead of pretty-printing the node, we should use the original source
+	var buf1 bytes.Buffer
+	writeNode(&buf1, fset, decl)
+	// wrap text with <pre> tag
+	var buf2 bytes.Buffer
+	buf2.WriteString("<pre>")
+	FormatText(&buf2, buf1.Bytes(), -1, true, id.Name, nil)
+	buf2.WriteString("</pre>")
+	return &Snippet{fset.Position(id.Pos()).Line, buf2.String()}
+}
+
+func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec {
+	for _, spec := range list {
+		switch s := spec.(type) {
+		case *ast.ImportSpec:
+			if s.Name == id {
+				return s
+			}
+		case *ast.ValueSpec:
+			for _, n := range s.Names {
+				if n == id {
+					return s
+				}
+			}
+		case *ast.TypeSpec:
+			if s.Name == id {
+				return s
+			}
+		}
+	}
+	return nil
+}
+
+func genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet {
+	s := findSpec(d.Specs, id)
+	if s == nil {
+		return nil //  declaration doesn't contain id - exit gracefully
+	}
+
+	// only use the spec containing the id for the snippet
+	dd := &ast.GenDecl{
+		Doc:    d.Doc,
+		TokPos: d.Pos(),
+		Tok:    d.Tok,
+		Lparen: d.Lparen,
+		Specs:  []ast.Spec{s},
+		Rparen: d.Rparen,
+	}
+
+	return newSnippet(fset, dd, id)
+}
+
+func funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet {
+	if d.Name != id {
+		return nil //  declaration doesn't contain id - exit gracefully
+	}
+
+	// only use the function signature for the snippet
+	dd := &ast.FuncDecl{
+		Doc:  d.Doc,
+		Recv: d.Recv,
+		Name: d.Name,
+		Type: d.Type,
+	}
+
+	return newSnippet(fset, dd, id)
+}
+
+// NewSnippet creates a text snippet from a declaration decl containing an
+// identifier id. Parts of the declaration not containing the identifier
+// may be removed for a more compact snippet.
+//
+func NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) (s *Snippet) {
+	switch d := decl.(type) {
+	case *ast.GenDecl:
+		s = genSnippet(fset, d, id)
+	case *ast.FuncDecl:
+		s = funcSnippet(fset, d, id)
+	}
+
+	// handle failure gracefully
+	if s == nil {
+		var buf bytes.Buffer
+		fmt.Fprintf(&buf, `<span class="alert">could not generate a snippet for <span class="highlight">%s</span></span>`, id.Name)
+		s = &Snippet{fset.Position(id.Pos()).Line, buf.String()}
+	}
+	return
+}
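+
+// For example (illustrative), for the declaration
+//	const Pi = 3.14159
+// and id Pi, NewSnippet yields a Snippet whose Text is the
+// HTML-escaped declaration wrapped in <pre> tags with Pi highlighted.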
diff --git a/cmd/godoc/spec.go b/cmd/godoc/spec.go
new file mode 100644
index 0000000..c11f25d
--- /dev/null
+++ b/cmd/godoc/spec.go
@@ -0,0 +1,179 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains the mechanism to "linkify" html source
+// text containing EBNF sections (as found in go_spec.html).
+// The result is the input source text with the EBNF sections
+// modified such that identifiers are linked to the respective
+// definitions.
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"text/scanner"
+)
+
+type ebnfParser struct {
+	out     io.Writer // parser output
+	src     []byte    // parser input
+	scanner scanner.Scanner
+	prev    int    // offset of previous token
+	pos     int    // offset of current token
+	tok     rune   // one token look-ahead
+	lit     string // token literal
+}
+
+func (p *ebnfParser) flush() {
+	p.out.Write(p.src[p.prev:p.pos])
+	p.prev = p.pos
+}
+
+func (p *ebnfParser) next() {
+	p.tok = p.scanner.Scan()
+	p.pos = p.scanner.Position.Offset
+	p.lit = p.scanner.TokenText()
+}
+
+func (p *ebnfParser) printf(format string, args ...interface{}) {
+	p.flush()
+	fmt.Fprintf(p.out, format, args...)
+}
+
+func (p *ebnfParser) errorExpected(msg string) {
+	p.printf(`<span class="highlight">error: expected %s, found %s</span>`, msg, scanner.TokenString(p.tok))
+}
+
+func (p *ebnfParser) expect(tok rune) {
+	if p.tok != tok {
+		p.errorExpected(scanner.TokenString(tok))
+	}
+	p.next() // make progress in any case
+}
+
+func (p *ebnfParser) parseIdentifier(def bool) {
+	if p.tok == scanner.Ident {
+		name := p.lit
+		if def {
+			p.printf(`<a id="%s">%s</a>`, name, name)
+		} else {
+			p.printf(`<a href="#%s" class="noline">%s</a>`, name, name)
+		}
+		p.prev += len(name) // skip identifier when printing next time
+		p.next()
+	} else {
+		p.expect(scanner.Ident)
+	}
+}
+
+func (p *ebnfParser) parseTerm() bool {
+	switch p.tok {
+	case scanner.Ident:
+		p.parseIdentifier(false)
+
+	case scanner.String:
+		p.next()
+		const ellipsis = '…' // U+2026, the horizontal ellipsis character
+		if p.tok == ellipsis {
+			p.next()
+			p.expect(scanner.String)
+		}
+
+	case '(':
+		p.next()
+		p.parseExpression()
+		p.expect(')')
+
+	case '[':
+		p.next()
+		p.parseExpression()
+		p.expect(']')
+
+	case '{':
+		p.next()
+		p.parseExpression()
+		p.expect('}')
+
+	default:
+		return false // no term found
+	}
+
+	return true
+}
+
+func (p *ebnfParser) parseSequence() {
+	if !p.parseTerm() {
+		p.errorExpected("term")
+	}
+	for p.parseTerm() {
+	}
+}
+
+func (p *ebnfParser) parseExpression() {
+	for {
+		p.parseSequence()
+		if p.tok != '|' {
+			break
+		}
+		p.next()
+	}
+}
+
+func (p *ebnfParser) parseProduction() {
+	p.parseIdentifier(true)
+	p.expect('=')
+	if p.tok != '.' {
+		p.parseExpression()
+	}
+	p.expect('.')
+}
+
+func (p *ebnfParser) parse(out io.Writer, src []byte) {
+	// initialize ebnfParser
+	p.out = out
+	p.src = src
+	p.scanner.Init(bytes.NewBuffer(src))
+	p.next() // initializes pos, tok, lit
+
+	// process source
+	for p.tok != scanner.EOF {
+		p.parseProduction()
+	}
+	p.flush()
+}
+
+// Markers around EBNF sections
+var (
+	openTag  = []byte(`<pre class="ebnf">`)
+	closeTag = []byte(`</pre>`)
+)
+
+func Linkify(out io.Writer, src []byte) {
+	for len(src) > 0 {
+		// i: beginning of EBNF text (or end of source)
+		i := bytes.Index(src, openTag)
+		if i < 0 {
+			i = len(src) - len(openTag)
+		}
+		i += len(openTag)
+
+		// j: end of EBNF text (or end of source)
+		j := bytes.Index(src[i:], closeTag) // close marker
+		if j < 0 {
+			j = len(src) - i
+		}
+		j += i
+
+		// write text before EBNF
+		out.Write(src[0:i])
+		// process EBNF
+		var p ebnfParser
+		p.parse(out, src[i:j])
+
+		// advance
+		src = src[j:]
+	}
+}
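+
+// For example (illustrative), given the input
+//	<pre class="ebnf">Digit = "0" … "9" .</pre>
+// Linkify writes the same text with the production name emitted as
+//	<a id="Digit">Digit</a>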
diff --git a/cmd/godoc/template.go b/cmd/godoc/template.go
new file mode 100644
index 0000000..7b9b9cf
--- /dev/null
+++ b/cmd/godoc/template.go
@@ -0,0 +1,182 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Template support for writing HTML documents.
+// Documents that include Template: true in their
+// metadata are executed as input to text/template.
+//
+// This file defines functions for those templates to invoke.
+
+// The template uses the function "code" to inject program
+// source into the output by extracting code from files and
+// injecting it as HTML-escaped <pre> blocks.
+//
+// The syntax is simple: 1, 2, or 3 space-separated arguments:
+//
+// Whole file:
+//	{{code "foo.go"}}
+// One line (here the signature of main):
+//	{{code "foo.go" `/^func.main/`}}
+// Block of text, determined by start and end (here the body of main):
+//	{{code "foo.go" `/^func.main/` `/^}/`}}
+//
+// Patterns can be `/regular expression/`, a decimal number, or "$"
+// to signify the end of the file. In multi-line matches,
+// lines that end with the four characters
+//	OMIT
+// are omitted from the output, making it easy to provide marker
+// lines in the input that will not appear in the output but are easy
+// to identify by pattern.
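+//
+// As an illustrative sketch (the file name is hypothetical), the
+// invocation
+//
+//	{{code "/doc/progs/hello.go" `/func main/` `/^}/`}}
+//
+// expands to an HTML-escaped <pre> block containing the body of main,
+// prefixed by the invocation itself inside an HTML comment (see the
+// 'code' function below).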
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+	"text/template"
+)
+
+// Functions in this file panic on error, but the panic is recovered
+// to an error by 'code'.
+
+var templateFuncs = template.FuncMap{
+	"code": code,
+}
+
+// contents reads and returns the content of the named file
+// (from the virtual file system, so for example /doc refers to $GOROOT/doc).
+func contents(name string) string {
+	file, err := ReadFile(fs, name)
+	if err != nil {
+		log.Panic(err)
+	}
+	return string(file)
+}
+
+// stringFor returns a textual representation of the arg, formatted according to its nature.
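+// For example (illustrative), stringFor(42) yields "42", stringFor("x")
+// yields `"x"`, and stringFor("/^func/") yields "`/^func/`" (back-quoted,
+// so the pattern reads back the way it was written in the template).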
+func stringFor(arg interface{}) string {
+	switch arg := arg.(type) {
+	case int:
+		return fmt.Sprintf("%d", arg)
+	case string:
+		if len(arg) > 2 && arg[0] == '/' && arg[len(arg)-1] == '/' {
+			return fmt.Sprintf("%#q", arg)
+		}
+		return fmt.Sprintf("%q", arg)
+	default:
+		log.Panicf("unrecognized argument: %v type %T", arg, arg)
+	}
+	return ""
+}
+
+func code(file string, arg ...interface{}) (s string, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = fmt.Errorf("%v", r)
+		}
+	}()
+
+	text := contents(file)
+	var command string
+	switch len(arg) {
+	case 0:
+		// text is already whole file.
+		command = fmt.Sprintf("code %q", file)
+	case 1:
+		command = fmt.Sprintf("code %q %s", file, stringFor(arg[0]))
+		text = oneLine(file, text, arg[0])
+	case 2:
+		command = fmt.Sprintf("code %q %s %s", file, stringFor(arg[0]), stringFor(arg[1]))
+		text = multipleLines(file, text, arg[0], arg[1])
+	default:
+		return "", fmt.Errorf("incorrect code invocation: code %q %q", file, arg)
+	}
+	// Trim spaces from output.
+	text = strings.Trim(text, "\n")
+	// Replace tabs by spaces, which work better in HTML.
+	text = strings.Replace(text, "\t", "    ", -1)
+	var buf bytes.Buffer
+	// HTML-escape text and syntax-color comments like elsewhere.
+	FormatText(&buf, []byte(text), -1, true, "", nil)
+	// Include the command as a comment.
+	text = fmt.Sprintf("<pre><!--{{%s}}\n-->%s</pre>", command, buf.Bytes())
+	return text, nil
+}
+
+// parseArg returns the integer or string value of the argument and tells which it is.
+func parseArg(arg interface{}, file string, max int) (ival int, sval string, isInt bool) {
+	switch n := arg.(type) {
+	case int:
+		if n <= 0 || n > max {
+			log.Panicf("%q:%d is out of range", file, n)
+		}
+		return n, "", true
+	case string:
+		return 0, n, false
+	}
+	log.Panicf("unrecognized argument %v type %T", arg, arg)
+	return
+}
+
+// oneLine returns the single line generated by a two-argument code invocation.
+func oneLine(file, text string, arg interface{}) string {
+	lines := strings.SplitAfter(contents(file), "\n")
+	line, pattern, isInt := parseArg(arg, file, len(lines))
+	if isInt {
+		return lines[line-1]
+	}
+	return lines[match(file, 0, lines, pattern)-1]
+}
+
+// multipleLines returns the text generated by a three-argument code invocation.
+func multipleLines(file, text string, arg1, arg2 interface{}) string {
+	lines := strings.SplitAfter(contents(file), "\n")
+	line1, pattern1, isInt1 := parseArg(arg1, file, len(lines))
+	line2, pattern2, isInt2 := parseArg(arg2, file, len(lines))
+	if !isInt1 {
+		line1 = match(file, 0, lines, pattern1)
+	}
+	if !isInt2 {
+		line2 = match(file, line1, lines, pattern2)
+	} else if line2 < line1 {
+		log.Panicf("lines out of order for %q: %d %d", text, line1, line2)
+	}
+	for k := line1 - 1; k < line2; k++ {
+		if strings.HasSuffix(lines[k], "OMIT\n") {
+			lines[k] = ""
+		}
+	}
+	return strings.Join(lines[line1-1:line2], "")
+}
+
+// match identifies the input line that matches the pattern in a code invocation.
+// If start>0, match lines starting there rather than at the beginning.
+// The return value is 1-indexed.
+func match(file string, start int, lines []string, pattern string) int {
+	// $ matches the end of the file.
+	if pattern == "$" {
+		if len(lines) == 0 {
+			log.Panicf("%q: empty file", file)
+		}
+		return len(lines)
+	}
+	// /regexp/ matches the line that matches the regexp.
+	if len(pattern) > 2 && pattern[0] == '/' && pattern[len(pattern)-1] == '/' {
+		re, err := regexp.Compile(pattern[1 : len(pattern)-1])
+		if err != nil {
+			log.Panic(err)
+		}
+		for i := start; i < len(lines); i++ {
+			if re.MatchString(lines[i]) {
+				return i + 1
+			}
+		}
+		log.Panicf("%s: no match for %#q", file, pattern)
+	}
+	log.Panicf("unrecognized pattern: %q", pattern)
+	return 0
+}
diff --git a/cmd/godoc/throttle.go b/cmd/godoc/throttle.go
new file mode 100644
index 0000000..ac18b44
--- /dev/null
+++ b/cmd/godoc/throttle.go
@@ -0,0 +1,88 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "time"
+
+// A Throttle permits throttling of a goroutine by
+// calling the Throttle method repeatedly.
+//
+type Throttle struct {
+	f  float64       // f = (1-r)/r for 0 < r < 1
+	dt time.Duration // minimum run time slice; >= 0
+	tr time.Duration // accumulated time running
+	ts time.Duration // accumulated time stopped
+	tt time.Time     // earliest throttle time (= time Throttle returned + dt)
+}
+
+// NewThrottle creates a new Throttle with a throttle value r and
+// a minimum allocated run time slice of dt:
+//
+//	r == 0: "empty" throttle; the goroutine is always sleeping
+//	r == 1: full throttle; the goroutine is never sleeping
+//
+// A value of r == 0.6 throttles a goroutine such that it runs
+// approx. 60% of the time, and sleeps approx. 40% of the time.
+// Values of r < 0 or r > 1 are clamped to 0 and 1, respectively.
+// Values of dt < 0 are set to 0.
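+//
+// For example (illustrative), NewThrottle(0.6, 100*time.Millisecond)
+// yields f = (1-0.6)/0.6 ≈ 0.67, so after 600ms of accumulated run time
+// the throttle aims for roughly 400ms of accumulated sleep time.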
+//
+func NewThrottle(r float64, dt time.Duration) *Throttle {
+	var f float64
+	switch {
+	case r <= 0:
+		f = -1 // indicates always sleep
+	case r >= 1:
+		f = 0 // assume r == 1 (never sleep)
+	default:
+		// 0 < r < 1
+		f = (1 - r) / r
+	}
+	if dt < 0 {
+		dt = 0
+	}
+	return &Throttle{f: f, dt: dt, tt: time.Now().Add(dt)}
+}
+
+// Throttle calls time.Sleep such that over time the ratio tr/ts between
+// accumulated run (tr) and sleep (ts) times approximates the value r/(1-r),
+// where r is the throttle value. Throttle returns immediately (w/o sleeping)
+// if less than dt has passed since the last call to Throttle.
+//
+func (p *Throttle) Throttle() {
+	if p.f < 0 {
+		select {} // always sleep
+	}
+
+	t0 := time.Now()
+	if t0.Before(p.tt) {
+		return // keep running (minimum time slice not exhausted yet)
+	}
+
+	// accumulate running time
+	p.tr += t0.Sub(p.tt) + p.dt
+
+	// compute sleep time
+	// Over time we want:
+	//
+	//	tr/ts = r/(1-r)
+	//
+	// Thus:
+	//
+	//	ts = tr*f with f = (1-r)/r
+	//
+	// After some incremental run time δr added to the total run time
+	// tr, the incremental sleep-time δs to get to the same ratio again
+	// after waking up from time.Sleep is:
+	if δs := time.Duration(float64(p.tr)*p.f) - p.ts; δs > 0 {
+		time.Sleep(δs)
+	}
+
+	// accumulate (actual) sleep time
+	t1 := time.Now()
+	p.ts += t1.Sub(t0)
+
+	// set earliest next throttle time
+	p.tt = t1.Add(p.dt)
+}
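+
+// A minimal usage sketch (items and process are hypothetical):
+//
+//	t := NewThrottle(0.1, 50*time.Millisecond)
+//	for _, item := range items {
+//		process(item) // do one slice of work
+//		t.Throttle()  // sleep as needed to approximate a 10% duty cycle
+//	}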
diff --git a/cmd/godoc/utils.go b/cmd/godoc/utils.go
new file mode 100644
index 0000000..0cdb7ff
--- /dev/null
+++ b/cmd/godoc/utils.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains support functionality for godoc.
+
+package main
+
+import (
+	pathpkg "path"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+// An RWValue wraps a value, permits mutually exclusive access to it,
+// and records the time the value was last set.
+//
+type RWValue struct {
+	mutex     sync.RWMutex
+	value     interface{}
+	timestamp time.Time // time of last set()
+}
+
+func (v *RWValue) set(value interface{}) {
+	v.mutex.Lock()
+	v.value = value
+	v.timestamp = time.Now()
+	v.mutex.Unlock()
+}
+
+func (v *RWValue) get() (interface{}, time.Time) {
+	v.mutex.RLock()
+	defer v.mutex.RUnlock()
+	return v.value, v.timestamp
+}
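+
+// A minimal usage sketch (the stored value is arbitrary):
+//
+//	var v RWValue
+//	v.set("hello")
+//	value, timestamp := v.get() // value == "hello", timestamp == time of set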
+
+// isText returns true if a significant prefix of s looks like correct UTF-8;
+// that is, if it is likely that s is human-readable text.
+//
+func isText(s []byte) bool {
+	const max = 1024 // at least utf8.UTFMax
+	if len(s) > max {
+		s = s[0:max]
+	}
+	for i, c := range string(s) {
+		if i+utf8.UTFMax > len(s) {
+			// last char may be incomplete - ignore
+			break
+		}
+		if c == 0xFFFD || c < ' ' && c != '\n' && c != '\t' && c != '\f' {
+			// decoding error or control character - not a text file
+			return false
+		}
+	}
+	return true
+}
+
+// textExt[x] records whether a file with extension x is known to be a text
+// file (true) or must be served raw (false). Extensions not present in the
+// map are decided by examining the file contents (see isTextFile).
+var textExt = map[string]bool{
+	".css": false, // must be served raw
+	".js":  false, // must be served raw
+}
+
+// isTextFile returns true if the file has a known extension indicating
+// a text file, or if a significant chunk of the specified file looks like
+// correct UTF-8; that is, if it is likely that the file contains human-
+// readable text.
+//
+func isTextFile(filename string) bool {
+	// if the extension is known, use it for decision making
+	if isText, found := textExt[pathpkg.Ext(filename)]; found {
+		return isText
+	}
+
+	// the extension is not known; read an initial chunk
+	// of the file and check if it looks like text
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	var buf [1024]byte
+	n, err := f.Read(buf[0:])
+	if err != nil {
+		return false
+	}
+
+	return isText(buf[0:n])
+}
diff --git a/cmd/godoc/zip.go b/cmd/godoc/zip.go
new file mode 100644
index 0000000..620eb4f
--- /dev/null
+++ b/cmd/godoc/zip.go
@@ -0,0 +1,236 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an implementation of the FileSystem
+// interface based on the contents of a .zip file.
+//
+// Assumptions:
+//
+// - The file paths stored in the zip file must use a slash ('/') as path
+//   separator; and they must be relative (i.e., they must not start with
+//   a '/' - this is usually the case if the file was created w/o special
+//   options).
+// - The zip file system treats the file paths found in the zip internally
+//   like absolute paths w/o a leading '/'; i.e., the paths are considered
+//   relative to the root of the file system.
+// - All path arguments to file system methods must be absolute paths.
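+//
+// For example (illustrative), a zip entry named "goroot/src/fmt/print.go"
+// is addressed through this file system as "/goroot/src/fmt/print.go".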
+
+package main
+
+import (
+	"archive/zip"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"time"
+)
+
+// zipFI is the zip-file based implementation of FileInfo
+type zipFI struct {
+	name string    // directory-local name
+	file *zip.File // nil for a directory
+}
+
+func (fi zipFI) Name() string {
+	return fi.name
+}
+
+func (fi zipFI) Size() int64 {
+	if f := fi.file; f != nil {
+		return int64(f.UncompressedSize)
+	}
+	return 0 // directory
+}
+
+func (fi zipFI) ModTime() time.Time {
+	if f := fi.file; f != nil {
+		return f.ModTime()
+	}
+	return time.Time{} // directory has no modified time entry
+}
+
+func (fi zipFI) Mode() os.FileMode {
+	if fi.file == nil {
+		// Unix directories typically are executable, hence 555.
+		return os.ModeDir | 0555
+	}
+	return 0444
+}
+
+func (fi zipFI) IsDir() bool {
+	return fi.file == nil
+}
+
+func (fi zipFI) Sys() interface{} {
+	return nil
+}
+
+// zipFS is the zip-file based implementation of FileSystem
+type zipFS struct {
+	*zip.ReadCloser
+	list zipList
+	name string
+}
+
+func (fs *zipFS) String() string {
+	return "zip(" + fs.name + ")"
+}
+
+func (fs *zipFS) Close() error {
+	fs.list = nil
+	return fs.ReadCloser.Close()
+}
+
+func zipPath(name string) string {
+	name = path.Clean(name)
+	if !path.IsAbs(name) {
+		panic(fmt.Sprintf("stat: not an absolute path: %s", name))
+	}
+	return name[1:] // strip leading '/'
+}
+
+func (fs *zipFS) stat(abspath string) (int, zipFI, error) {
+	i, exact := fs.list.lookup(abspath)
+	if i < 0 {
+		// abspath has leading '/' stripped - print it explicitly
+		return -1, zipFI{}, fmt.Errorf("file not found: /%s", abspath)
+	}
+	_, name := path.Split(abspath)
+	var file *zip.File
+	if exact {
+		file = fs.list[i] // exact match found - must be a file
+	}
+	return i, zipFI{name, file}, nil
+}
+
+func (fs *zipFS) Open(abspath string) (readSeekCloser, error) {
+	_, fi, err := fs.stat(zipPath(abspath))
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, fmt.Errorf("Open: %s is a directory", abspath)
+	}
+	r, err := fi.file.Open()
+	if err != nil {
+		return nil, err
+	}
+	return &zipSeek{fi.file, r}, nil
+}
+
+type zipSeek struct {
+	file *zip.File
+	io.ReadCloser
+}
+
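+// Seek supports only rewinding to the very beginning (offset 0, whence 0),
+// which it implements by reopening the zip entry; any other seek returns
+// an error.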
+func (f *zipSeek) Seek(offset int64, whence int) (int64, error) {
+	if whence == 0 && offset == 0 {
+		r, err := f.file.Open()
+		if err != nil {
+			return 0, err
+		}
+		f.Close()
+		f.ReadCloser = r
+		return 0, nil
+	}
+	return 0, fmt.Errorf("unsupported Seek in %s", f.file.Name)
+}
+
+func (fs *zipFS) Lstat(abspath string) (os.FileInfo, error) {
+	_, fi, err := fs.stat(zipPath(abspath))
+	return fi, err
+}
+
+func (fs *zipFS) Stat(abspath string) (os.FileInfo, error) {
+	_, fi, err := fs.stat(zipPath(abspath))
+	return fi, err
+}
+
+func (fs *zipFS) ReadDir(abspath string) ([]os.FileInfo, error) {
+	path := zipPath(abspath)
+	i, fi, err := fs.stat(path)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.IsDir() {
+		return nil, fmt.Errorf("ReadDir: %s is not a directory", abspath)
+	}
+
+	var list []os.FileInfo
+	dirname := path + "/"
+	prevname := ""
+	for _, e := range fs.list[i:] {
+		if !strings.HasPrefix(e.Name, dirname) {
+			break // not in the same directory anymore
+		}
+		name := e.Name[len(dirname):] // local name
+		file := e
+		if i := strings.IndexRune(name, '/'); i >= 0 {
+			// We infer directories from files in subdirectories.
+			// If we have x/y, return a directory entry for x.
+			name = name[0:i] // keep local directory name only
+			file = nil
+		}
+		// If we have x/y and x/z, don't return two directory entries for x.
+		// TODO(gri): It should be possible to do this more efficiently
+		// by determining the (fs.list) range of local directory entries
+		// (via two binary searches).
+		if name != prevname {
+			list = append(list, zipFI{name, file})
+			prevname = name
+		}
+	}
+
+	return list, nil
+}
+
+func NewZipFS(rc *zip.ReadCloser, name string) FileSystem {
+	list := make(zipList, len(rc.File))
+	copy(list, rc.File) // sort a copy of rc.File
+	sort.Sort(list)
+	return &zipFS{rc, list, name}
+}
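+
+// Example use (illustrative; "go.zip" is a hypothetical file):
+//
+//	rc, err := zip.OpenReader("go.zip")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fs := NewZipFS(rc, "go.zip")
+//	fi, err := fs.Stat("/README")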
+
+type zipList []*zip.File
+
+// zipList implements sort.Interface
+func (z zipList) Len() int           { return len(z) }
+func (z zipList) Less(i, j int) bool { return z[i].Name < z[j].Name }
+func (z zipList) Swap(i, j int)      { z[i], z[j] = z[j], z[i] }
+
+// lookup returns the smallest index of an entry with an exact match
+// for name, or an inexact match starting with name/. If there is no
+// such entry, the result is -1, false.
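+//
+// For example (illustrative), for a sorted list containing the names
+//
+//	a/b1, a/b2, c
+//
+// lookup("a/b2") returns (1, true); lookup("a") returns (0, false), since
+// "a/b1" is the first entry starting with "a/"; and lookup("b") returns
+// (-1, false).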
+func (z zipList) lookup(name string) (index int, exact bool) {
+	// look for exact match first (name comes before name/ in z)
+	i := sort.Search(len(z), func(i int) bool {
+		return name <= z[i].Name
+	})
+	if i >= len(z) {
+		return -1, false
+	}
+	// 0 <= i < len(z)
+	if z[i].Name == name {
+		return i, true
+	}
+
+	// look for inexact match (must be in z[i:], if present)
+	z = z[i:]
+	name += "/"
+	j := sort.Search(len(z), func(i int) bool {
+		return name <= z[i].Name
+	})
+	if j >= len(z) {
+		return -1, false
+	}
+	// 0 <= j < len(z)
+	if strings.HasPrefix(z[j].Name, name) {
+		return i + j, false
+	}
+
+	return -1, false
+}