gddo: Re-vendor all dependencies

This re-vendors all dependencies for gddo-server, lintapp, and
talksapp. It also fixes the current talksapp build failure on
Travis CI.

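The vendored tree was regenerated with godep (GodepVersion v74, per
Godeps.json). A plausible invocation, assuming the updated dependencies
are already checked out in GOPATH, is:

    godep save github.com/golang/gddo/gddo-server \
        github.com/golang/gddo/talksapp \
        github.com/golang/gddo/lintapp
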
Change-Id: I11e9a9d75a0116cde53812593a8b08067c94eb12
Reviewed-on: https://go-review.googlesource.com/33200
Reviewed-by: Tuo Shan <shantuo@google.com>
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index b79aab2..3b66e2b 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -3,7 +3,9 @@
 	"GoVersion": "go1.7",
 	"GodepVersion": "v74",
 	"Packages": [
-		"github.com/golang/gddo/gddo-server"
+		"github.com/golang/gddo/gddo-server",
+		"github.com/golang/gddo/talksapp",
+		"github.com/golang/gddo/lintapp"
 	],
 	"Deps": [
 		{
@@ -52,36 +54,40 @@
 			"Rev": "ffa8d46ada782d81cfda81a0fbd9f45ceae448e8"
 		},
 		{
+			"ImportPath": "github.com/golang/lint",
+			"Rev": "3390df4df2787994aea98de825b964ac7944b817"
+		},
+		{
 			"ImportPath": "github.com/golang/protobuf/proto",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/any",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/duration",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/empty",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/struct",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/wrappers",
-			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+			"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
 		},
 		{
 			"ImportPath": "github.com/golang/snappy",
@@ -101,35 +107,35 @@
 		},
 		{
 			"ImportPath": "golang.org/x/net/context",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/context/ctxhttp",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/http2",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/http2/hpack",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/idna",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/internal/timeseries",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/lex/httplex",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/net/trace",
-			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+			"Rev": "0e2717dc3cc05907dc23096ef3a9086ea93f567f"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2",
@@ -152,6 +158,14 @@
 			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
 		},
 		{
+			"ImportPath": "golang.org/x/tools/go/gcimporter15",
+			"Rev": "5e2ae75eb72a62985e086eed33a5982a929e4fff"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/present",
+			"Rev": "5e2ae75eb72a62985e086eed33a5982a929e4fff"
+		},
+		{
 			"ImportPath": "google.golang.org/api/internal",
 			"Rev": "3cf64a039723963488f603d140d0aec154fdcd20"
 		},
@@ -169,53 +183,98 @@
 		},
 		{
 			"ImportPath": "google.golang.org/appengine",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/aetest",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/datastore",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/app_identity",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/base",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/datastore",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/log",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/memcache",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/modules",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/remote_api",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/search",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/urlfetch",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/user",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/log",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/memcache",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/search",
-			"Comment": "v1.0.0-4-g3f4dbbc",
-			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/urlfetch",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/user",
+			"Comment": "v1.0.0-16-gc7b8227",
+			"Rev": "c7b8227c83007befd67b324a64c969ebc1d7475d"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/api/label",
diff --git a/vendor/github.com/golang/lint/.travis.yml b/vendor/github.com/golang/lint/.travis.yml
new file mode 100644
index 0000000..3865f0a
--- /dev/null
+++ b/vendor/github.com/golang/lint/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+  - 1.6
+  - 1.7
+
+install:
+  - go get -t -v ./...
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/golang/lint/CONTRIBUTING.md b/vendor/github.com/golang/lint/CONTRIBUTING.md
new file mode 100644
index 0000000..971da12
--- /dev/null
+++ b/vendor/github.com/golang/lint/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Golint
+
+## Before filing an issue:
+
+### Are you having trouble building golint?
+
+Check that you have the latest version of its dependencies. Run
+```
+go get -u github.com/golang/lint
+```
+If you still have problems, consider searching for existing issues before filing a new issue.
+
+## Before sending a pull request:
+
+Have you understood the purpose of golint? Make sure to carefully read the `README`.
diff --git a/vendor/github.com/golang/lint/LICENSE b/vendor/github.com/golang/lint/LICENSE
new file mode 100644
index 0000000..65d761b
--- /dev/null
+++ b/vendor/github.com/golang/lint/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/lint/README.md b/vendor/github.com/golang/lint/README.md
new file mode 100644
index 0000000..2906b68
--- /dev/null
+++ b/vendor/github.com/golang/lint/README.md
@@ -0,0 +1,82 @@
+Golint is a linter for Go source code.
+
+[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
+
+## Installation
+
+Golint requires Go 1.6 or later.
+
+    go get -u github.com/golang/lint/golint
+
+## Usage
+
+Invoke `golint` with one or more filenames, a directory, or a package named
+by its import path. Golint uses the same
+[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
+the `go` command and therefore
+also supports relative import paths like `./...`. Additionally, the `...`
+wildcard can be used as a suffix on relative and absolute file paths to recurse
+into them.
+
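+For example, each of the following is a valid invocation (file and
+directory names are illustrative):
+
+    golint foo.go bar.go
+    golint mypkg/
+    golint github.com/golang/lint/golint
+    golint ./...
+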
+The output of this tool is a list of suggestions in Vim quickfix format,
+which is accepted by lots of different editors.
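+
+For example, a suggestion for a hypothetical foo.go might look like:
+
+    foo.go:5:1: exported function Foo should have comment or be unexported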
+
+## Purpose
+
+Golint differs from gofmt. Gofmt reformats Go source code, whereas
+golint prints out style mistakes.
+
+Golint differs from govet. Govet is concerned with correctness, whereas
+golint is concerned with coding style. Golint is in use at Google, and it
+seeks to match the accepted style of the open source Go project.
+
+The suggestions made by golint are exactly that: suggestions.
+Golint is not perfect, and has both false positives and false negatives.
+Do not treat its output as a gold standard. We will not be adding pragmas
+or other knobs to suppress specific warnings, so do not expect or require
+code to be completely "lint-free".
+In short, this tool is not, and will never be, trustworthy enough for its
+suggestions to be enforced automatically, for example as part of a build process.
+Golint makes suggestions for many of the mechanically checkable items listed in
+[Effective Go](https://golang.org/doc/effective_go.html) and the
+[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
+
+If you find an established style that is frequently violated, and which
+you think golint could statically check,
+[file an issue](https://github.com/golang/lint/issues).
+
+## Contributions
+
+Contributions to this project are welcome, though please send mail before
+starting work on anything major. Contributors retain their copyright, so we
+need you to fill out
+[a short form](https://developers.google.com/open-source/cla/individual)
+before we can accept your contribution.
+
+## Vim
+
+Add this to your `~/.vimrc`:
+
+    set rtp+=$GOPATH/src/github.com/golang/lint/misc/vim
+
+If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
+
+Running `:Lint` will run golint on the current file and populate the quickfix list.
+
+Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`:
+
+    autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
+
+
+## Emacs
+
+Add this to your `.emacs` file:
+
+    (add-to-list 'load-path (concat (getenv "GOPATH")  "/src/github.com/golang/lint/misc/emacs"))
+    (require 'golint)
+
+If you have multiple entries in your GOPATH, replace the `(getenv "GOPATH")` call with the right directory.
+
+Running `M-x golint` will run golint on the current file.
+
+For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).
diff --git a/vendor/github.com/golang/lint/lint.go b/vendor/github.com/golang/lint/lint.go
new file mode 100644
index 0000000..59fea7c
--- /dev/null
+++ b/vendor/github.com/golang/lint/lint.go
@@ -0,0 +1,1600 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package lint contains a linter for Go source code.
+package lint
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"go/types"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/tools/go/gcimporter15"
+)
+
+const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
+
+// A Linter lints Go source code.
+type Linter struct {
+}
+
+// Problem represents a problem in some source code.
+type Problem struct {
+	Position   token.Position // position in source file
+	Text       string         // the prose that describes the problem
+	Link       string         // (optional) the link to the style guide for the problem
+	Confidence float64        // a value in (0,1] estimating the confidence in this problem's correctness
+	LineText   string         // the source line
+	Category   string         // a short name for the general category of the problem
+
+	// If the problem has a suggested fix (the minority case),
+	// ReplacementLine is a full replacement for the relevant line of the source file.
+	ReplacementLine string
+}
+
+func (p *Problem) String() string {
+	if p.Link != "" {
+		return p.Text + "\n\n" + p.Link
+	}
+	return p.Text
+}
+
+type byPosition []Problem
+
+func (p byPosition) Len() int      { return len(p) }
+func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p byPosition) Less(i, j int) bool {
+	pi, pj := p[i].Position, p[j].Position
+
+	if pi.Filename != pj.Filename {
+		return pi.Filename < pj.Filename
+	}
+	if pi.Line != pj.Line {
+		return pi.Line < pj.Line
+	}
+	if pi.Column != pj.Column {
+		return pi.Column < pj.Column
+	}
+
+	return p[i].Text < p[j].Text
+}
+
+// Lint lints src.
+func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
+	return l.LintFiles(map[string][]byte{filename: src})
+}
+
+// LintFiles lints a set of files of a single package.
+// The argument is a map of filename to source.
+func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
+	if len(files) == 0 {
+		return nil, nil
+	}
+	pkg := &pkg{
+		fset:  token.NewFileSet(),
+		files: make(map[string]*file),
+	}
+	var pkgName string
+	for filename, src := range files {
+		f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
+		if err != nil {
+			return nil, err
+		}
+		if pkgName == "" {
+			pkgName = f.Name.Name
+		} else if f.Name.Name != pkgName {
+			return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
+		}
+		pkg.files[filename] = &file{
+			pkg:      pkg,
+			f:        f,
+			fset:     pkg.fset,
+			src:      src,
+			filename: filename,
+		}
+	}
+	return pkg.lint(), nil
+}
+
+// pkg represents a package being linted.
+type pkg struct {
+	fset  *token.FileSet
+	files map[string]*file
+
+	typesPkg  *types.Package
+	typesInfo *types.Info
+
+	// sortable is the set of types in the package that implement sort.Interface.
+	sortable map[string]bool
+	// main is whether this is a "main" package.
+	main bool
+
+	problems []Problem
+}
+
+func (p *pkg) lint() []Problem {
+	if err := p.typeCheck(); err != nil {
+		/* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages.
+		if e, ok := err.(types.Error); ok {
+			pos := p.fset.Position(e.Pos)
+			conf := 1.0
+			if strings.Contains(e.Msg, "can't find import: ") {
+				// Golint is probably being run in a context that doesn't support
+				// typechecking (e.g. package files aren't found), so don't warn about it.
+				conf = 0
+			}
+			if conf > 0 {
+				p.errorfAt(pos, conf, category("typechecking"), e.Msg)
+			}
+
+			// TODO(dsymonds): Abort if !e.Soft?
+		}
+		*/
+	}
+
+	p.scanSortable()
+	p.main = p.isMain()
+
+	for _, f := range p.files {
+		f.lint()
+	}
+
+	sort.Sort(byPosition(p.problems))
+
+	return p.problems
+}
+
+// file represents a file being linted.
+type file struct {
+	pkg      *pkg
+	f        *ast.File
+	fset     *token.FileSet
+	src      []byte
+	filename string
+}
+
+func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }
+
+func (f *file) lint() {
+	f.lintPackageComment()
+	f.lintImports()
+	f.lintBlankImports()
+	f.lintExported()
+	f.lintNames()
+	f.lintVarDecls()
+	f.lintElses()
+	f.lintRanges()
+	f.lintErrorf()
+	f.lintErrors()
+	f.lintErrorStrings()
+	f.lintReceiverNames()
+	f.lintIncDec()
+	f.lintErrorReturn()
+	f.lintUnexportedReturn()
+	f.lintTimeNames()
+	f.lintContextKeyTypes()
+	f.lintContextArgs()
+}
+
+type link string
+type category string
+
+// errorf records a Problem at n's position.
+// The variadic arguments may start with link and category types,
+// and must end with a format string and any arguments.
+// It returns the new Problem.
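+//
+// A typical call (mirroring a use later in this file):
+//
+//	f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should)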
+func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem {
+	pos := f.fset.Position(n.Pos())
+	if pos.Filename == "" {
+		pos.Filename = f.filename
+	}
+	return f.pkg.errorfAt(pos, confidence, args...)
+}
+
+func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem {
+	problem := Problem{
+		Position:   pos,
+		Confidence: confidence,
+	}
+	if pos.Filename != "" {
+		// The file might not exist in our mapping if a //line directive was encountered.
+		if f, ok := p.files[pos.Filename]; ok {
+			problem.LineText = srcLine(f.src, pos)
+		}
+	}
+
+argLoop:
+	for len(args) > 1 { // always leave at least the format string in args
+		switch v := args[0].(type) {
+		case link:
+			problem.Link = string(v)
+		case category:
+			problem.Category = string(v)
+		default:
+			break argLoop
+		}
+		args = args[1:]
+	}
+
+	problem.Text = fmt.Sprintf(args[0].(string), args[1:]...)
+
+	p.problems = append(p.problems, problem)
+	return &p.problems[len(p.problems)-1]
+}
+
+var gcImporter = gcimporter.Import
+
+// importer implements go/types.Importer{,From}.
+type importer struct {
+	impFn    func(packages map[string]*types.Package, path, srcDir string) (*types.Package, error)
+	packages map[string]*types.Package
+}
+
+func (i importer) Import(path string) (*types.Package, error) {
+	return i.impFn(i.packages, path, "")
+}
+
+func (i importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
+	return i.impFn(i.packages, path, srcDir)
+}
+
+func (p *pkg) typeCheck() error {
+	config := &types.Config{
+		// By setting a no-op error reporter, the type checker does as much work as possible.
+		Error: func(error) {},
+		Importer: importer{
+			impFn:    gcImporter,
+			packages: make(map[string]*types.Package),
+		},
+	}
+	info := &types.Info{
+		Types:  make(map[ast.Expr]types.TypeAndValue),
+		Defs:   make(map[*ast.Ident]types.Object),
+		Uses:   make(map[*ast.Ident]types.Object),
+		Scopes: make(map[ast.Node]*types.Scope),
+	}
+	var anyFile *file
+	var astFiles []*ast.File
+	for _, f := range p.files {
+		anyFile = f
+		astFiles = append(astFiles, f.f)
+	}
+	pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info)
+	// Remember the typechecking info, even if config.Check failed,
+	// since we will get partial information.
+	p.typesPkg = pkg
+	p.typesInfo = info
+	return err
+}
+
+func (p *pkg) typeOf(expr ast.Expr) types.Type {
+	if p.typesInfo == nil {
+		return nil
+	}
+	return p.typesInfo.TypeOf(expr)
+}
+
+func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool {
+	n, ok := typ.(*types.Named)
+	if !ok {
+		return false
+	}
+	tn := n.Obj()
+	return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name
+}
+
+// scopeOf returns the tightest scope encompassing id.
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope {
+	var scope *types.Scope
+	if obj := p.typesInfo.ObjectOf(id); obj != nil {
+		scope = obj.Parent()
+	}
+	if scope == p.typesPkg.Scope() {
+		// We were given a top-level identifier.
+		// Use the file-level scope instead of the package-level scope.
+		pos := id.Pos()
+		for _, f := range p.files {
+			if f.f.Pos() <= pos && pos < f.f.End() {
+				scope = p.typesInfo.Scopes[f.f]
+				break
+			}
+		}
+	}
+	return scope
+}
+
+func (p *pkg) scanSortable() {
+	p.sortable = make(map[string]bool)
+
+	// bitfield for which methods exist on each type.
+	const (
+		Len = 1 << iota
+		Less
+		Swap
+	)
+	nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
+	has := make(map[string]int)
+	for _, f := range p.files {
+		f.walk(func(n ast.Node) bool {
+			fn, ok := n.(*ast.FuncDecl)
+			if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
+				return true
+			}
+			// TODO(dsymonds): We could check the signature to be more precise.
+			recv := receiverType(fn)
+			if i, ok := nmap[fn.Name.Name]; ok {
+				has[recv] |= i
+			}
+			return false
+		})
+	}
+	for typ, ms := range has {
+		if ms == Len|Less|Swap {
+			p.sortable[typ] = true
+		}
+	}
+}
+
+func (p *pkg) isMain() bool {
+	for _, f := range p.files {
+		if f.isMain() {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *file) isMain() bool {
+	if f.f.Name.Name == "main" {
+		return true
+	}
+	return false
+}
+
+// lintPackageComment checks package comments. It complains if
+// there is no package comment, or if it is not of the right form.
+// This has a notable false positive in that a package comment
+// could rightfully appear in a different file of the same package,
+// but that's not easy to fix since this linter is file-oriented.
+func (f *file) lintPackageComment() {
+	if f.isTest() {
+		return
+	}
+
+	const ref = styleGuideBase + "#package-comments"
+	prefix := "Package " + f.f.Name.Name + " "
+
+	// Look for a detached package comment.
+	// First, scan for the last comment that occurs before the "package" keyword.
+	var lastCG *ast.CommentGroup
+	for _, cg := range f.f.Comments {
+		if cg.Pos() > f.f.Package {
+			// Gone past "package" keyword.
+			break
+		}
+		lastCG = cg
+	}
+	if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) {
+		endPos := f.fset.Position(lastCG.End())
+		pkgPos := f.fset.Position(f.f.Package)
+		if endPos.Line+1 < pkgPos.Line {
+			// There isn't a great place to anchor this error;
+			// the start of the blank lines between the doc and the package statement
+			// is at least pointing at the location of the problem.
+			pos := token.Position{
+				Filename: endPos.Filename,
+				// Offset not set; it is non-trivial, and doesn't appear to be needed.
+				Line:   endPos.Line + 1,
+				Column: 1,
+			}
+			f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement")
+			return
+		}
+	}
+
+	if f.f.Doc == nil {
+		f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package")
+		return
+	}
+	s := f.f.Doc.Text()
+	if ts := strings.TrimLeft(s, " \t"); ts != s {
+		f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space")
+		s = ts
+	}
+	// Only non-main packages need to keep to this form.
+	if !f.pkg.main && !strings.HasPrefix(s, prefix) {
+		f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix)
+	}
+}
+
+// lintBlankImports complains if a non-main package has blank imports that are
+// not documented.
+func (f *file) lintBlankImports() {
+	// In package main and in tests, we don't complain about blank imports.
+	if f.pkg.main || f.isTest() {
+		return
+	}
+
+	// The first element of each contiguous group of blank imports should have
+	// an explanatory comment of some kind.
+	for i, imp := range f.f.Imports {
+		pos := f.fset.Position(imp.Pos())
+
+		if !isBlank(imp.Name) {
+			continue // Ignore non-blank imports.
+		}
+		if i > 0 {
+			prev := f.f.Imports[i-1]
+			prevPos := f.fset.Position(prev.Pos())
+			if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
+				continue // A subsequent blank in a group.
+			}
+		}
+
+		// This is the first blank import of a group.
+		if imp.Doc == nil && imp.Comment == nil {
+			ref := ""
+			f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it")
+		}
+	}
+}
+
+// lintImports examines import blocks.
+// It complains about dot imports outside of test files.
+func (f *file) lintImports() {
+	for _, is := range f.f.Imports {
+		if is.Name != nil && is.Name.Name == "." && !f.isTest() {
+			f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports")
+		}
+	}
+}
+
+const docCommentsLink = styleGuideBase + "#doc-comments"
+
+// lintExported examines the exported names.
+// It complains if any required doc comments are missing,
+// or if they are not of the right form. The exact rules are in
+// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
+// also tracks the GenDecl structure being traversed to permit
+// doc comments for constants to be on top of the const block.
+// It also complains if the names stutter when combined with
+// the package name.
+func (f *file) lintExported() {
+	if f.isTest() {
+		return
+	}
+
+	var lastGen *ast.GenDecl // last GenDecl entered.
+
+	// Set of GenDecls that have already had missing comments flagged.
+	genDeclMissingComments := make(map[*ast.GenDecl]bool)
+
+	f.walk(func(node ast.Node) bool {
+		switch v := node.(type) {
+		case *ast.GenDecl:
+			if v.Tok == token.IMPORT {
+				return false
+			}
+			// token.CONST, token.TYPE or token.VAR
+			lastGen = v
+			return true
+		case *ast.FuncDecl:
+			f.lintFuncDoc(v)
+			if v.Recv == nil {
+				// Only check for stutter on functions, not methods.
+				// Method names are not used package-qualified.
+				f.checkStutter(v.Name, "func")
+			}
+			// Don't proceed inside funcs.
+			return false
+		case *ast.TypeSpec:
+			// inside a GenDecl, which usually has the doc
+			doc := v.Doc
+			if doc == nil {
+				doc = lastGen.Doc
+			}
+			f.lintTypeDoc(v, doc)
+			f.checkStutter(v.Name, "type")
+			// Don't proceed inside types.
+			return false
+		case *ast.ValueSpec:
+			f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
+			return false
+		}
+		return true
+	})
+}
+
+var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
+
+// knownNameExceptions is a set of names that are known to be exempt from naming checks.
+// This is usually because they are constrained by having to match names in the
+// standard library.
+var knownNameExceptions = map[string]bool{
+	"LastInsertId": true, // must match database/sql
+	"kWh":          true,
+}
+
+// lintNames examines all names in the file.
+// It complains if any use underscores or incorrect known initialisms.
+func (f *file) lintNames() {
+	// Package names need slightly different handling than other names.
+	if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
+		f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name")
+	}
+
+	check := func(id *ast.Ident, thing string) {
+		if id.Name == "_" {
+			return
+		}
+		if knownNameExceptions[id.Name] {
+			return
+		}
+
+		// Handle two common styles from other languages that don't belong in Go.
+		if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
+			f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
+			return
+		}
+		if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
+			should := string(id.Name[1]+'a'-'A') + id.Name[2:]
+			f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
+		}
+
+		should := lintName(id.Name)
+		if id.Name == should {
+			return
+		}
+		if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
+			f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
+			return
+		}
+		f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should)
+	}
+	checkList := func(fl *ast.FieldList, thing string) {
+		if fl == nil {
+			return
+		}
+		for _, f := range fl.List {
+			for _, id := range f.Names {
+				check(id, thing)
+			}
+		}
+	}
+	f.walk(func(node ast.Node) bool {
+		switch v := node.(type) {
+		case *ast.AssignStmt:
+			if v.Tok == token.ASSIGN {
+				return true
+			}
+			for _, exp := range v.Lhs {
+				if id, ok := exp.(*ast.Ident); ok {
+					check(id, "var")
+				}
+			}
+		case *ast.FuncDecl:
+			if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
+				return true
+			}
+
+			thing := "func"
+			if v.Recv != nil {
+				thing = "method"
+			}
+
+			check(v.Name, thing)
+
+			checkList(v.Type.Params, thing+" parameter")
+			checkList(v.Type.Results, thing+" result")
+		case *ast.GenDecl:
+			if v.Tok == token.IMPORT {
+				return true
+			}
+			var thing string
+			switch v.Tok {
+			case token.CONST:
+				thing = "const"
+			case token.TYPE:
+				thing = "type"
+			case token.VAR:
+				thing = "var"
+			}
+			for _, spec := range v.Specs {
+				switch s := spec.(type) {
+				case *ast.TypeSpec:
+					check(s.Name, thing)
+				case *ast.ValueSpec:
+					for _, id := range s.Names {
+						check(id, thing)
+					}
+				}
+			}
+		case *ast.InterfaceType:
+			// Do not check interface method names.
+			// They are often constrained by the method names of concrete types.
+			for _, x := range v.Methods.List {
+				ft, ok := x.Type.(*ast.FuncType)
+				if !ok { // might be an embedded interface name
+					continue
+				}
+				checkList(ft.Params, "interface method parameter")
+				checkList(ft.Results, "interface method result")
+			}
+		case *ast.RangeStmt:
+			if v.Tok == token.ASSIGN {
+				return true
+			}
+			if id, ok := v.Key.(*ast.Ident); ok {
+				check(id, "range var")
+			}
+			if id, ok := v.Value.(*ast.Ident); ok {
+				check(id, "range var")
+			}
+		case *ast.StructType:
+			for _, f := range v.Fields.List {
+				for _, id := range f.Names {
+					check(id, "struct field")
+				}
+			}
+		}
+		return true
+	})
+}
+
+// lintName returns a different name if it should be different.
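+// For example, lintName("foo_id") returns "fooID",
+// and lintName("HttpServer") returns "HTTPServer".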
+func lintName(name string) (should string) {
+	// Fast path for simple cases: "_" and all lowercase.
+	if name == "_" {
+		return name
+	}
+	allLower := true
+	for _, r := range name {
+		if !unicode.IsLower(r) {
+			allLower = false
+			break
+		}
+	}
+	if allLower {
+		return name
+	}
+
+	// Split camelCase at any lower->upper transition, and split on underscores.
+	// Check each word for common initialisms.
+	runes := []rune(name)
+	w, i := 0, 0 // index of start of word, scan
+	for i+1 <= len(runes) {
+		eow := false // whether we hit the end of a word
+		if i+1 == len(runes) {
+			eow = true
+		} else if runes[i+1] == '_' {
+			// underscore; shift the remainder forward over any run of underscores
+			eow = true
+			n := 1
+			for i+n+1 < len(runes) && runes[i+n+1] == '_' {
+				n++
+			}
+
+			// Leave at most one underscore if the underscore is between two digits
+			if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
+				n--
+			}
+
+			copy(runes[i+1:], runes[i+n+1:])
+			runes = runes[:len(runes)-n]
+		} else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
+			// lower->non-lower
+			eow = true
+		}
+		i++
+		if !eow {
+			continue
+		}
+
+		// [w,i) is a word.
+		word := string(runes[w:i])
+		if u := strings.ToUpper(word); commonInitialisms[u] {
+			// Keep consistent case, which is lowercase only at the start.
+			if w == 0 && unicode.IsLower(runes[w]) {
+				u = strings.ToLower(u)
+			}
+			// All the common initialisms are ASCII,
+			// so we can replace the bytes exactly.
+			copy(runes[w:], []rune(u))
+		} else if w > 0 && strings.ToLower(word) == word {
+			// already all lowercase, and not the first word, so uppercase the first character.
+			runes[w] = unicode.ToUpper(runes[w])
+		}
+		w = i
+	}
+	return string(runes)
+}
+
+// commonInitialisms is a set of common initialisms.
+// Only add entries that are highly unlikely to be non-initialisms.
+// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
+var commonInitialisms = map[string]bool{
+	"ACL":   true,
+	"API":   true,
+	"ASCII": true,
+	"CPU":   true,
+	"CSS":   true,
+	"DNS":   true,
+	"EOF":   true,
+	"GUID":  true,
+	"HTML":  true,
+	"HTTP":  true,
+	"HTTPS": true,
+	"ID":    true,
+	"IP":    true,
+	"JSON":  true,
+	"LHS":   true,
+	"QPS":   true,
+	"RAM":   true,
+	"RHS":   true,
+	"RPC":   true,
+	"SLA":   true,
+	"SMTP":  true,
+	"SQL":   true,
+	"SSH":   true,
+	"TCP":   true,
+	"TLS":   true,
+	"TTL":   true,
+	"UDP":   true,
+	"UI":    true,
+	"UID":   true,
+	"UUID":  true,
+	"URI":   true,
+	"URL":   true,
+	"UTF8":  true,
+	"VM":    true,
+	"XML":   true,
+	"XMPP":  true,
+	"XSRF":  true,
+	"XSS":   true,
+}
+
+// lintTypeDoc examines the doc comment on a type.
+// It complains if they are missing from an exported type,
+// or if they are not of the standard form.
+func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) {
+	if !ast.IsExported(t.Name.Name) {
+		return
+	}
+	if doc == nil {
+		f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name)
+		return
+	}
+
+	s := doc.Text()
+	articles := [...]string{"A", "An", "The"}
+	for _, a := range articles {
+		if strings.HasPrefix(s, a+" ") {
+			s = s[len(a)+1:]
+			break
+		}
+	}
+	if !strings.HasPrefix(s, t.Name.Name+" ") {
+		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name)
+	}
+}
+
+var commonMethods = map[string]bool{
+	"Error":     true,
+	"Read":      true,
+	"ServeHTTP": true,
+	"String":    true,
+	"Write":     true,
+}
+
+// lintFuncDoc examines doc comments on functions and methods.
+// It complains if they are missing, or not of the right form.
+// It has specific exclusions for well-known methods (see commonMethods above).
+func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
+	if !ast.IsExported(fn.Name.Name) {
+		// func is unexported
+		return
+	}
+	kind := "function"
+	name := fn.Name.Name
+	if fn.Recv != nil && len(fn.Recv.List) > 0 {
+		// method
+		kind = "method"
+		recv := receiverType(fn)
+		if !ast.IsExported(recv) {
+			// receiver is unexported
+			return
+		}
+		if commonMethods[name] {
+			return
+		}
+		switch name {
+		case "Len", "Less", "Swap":
+			if f.pkg.sortable[recv] {
+				return
+			}
+		}
+		name = recv + "." + name
+	}
+	if fn.Doc == nil {
+		f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name)
+		return
+	}
+	s := fn.Doc.Text()
+	prefix := fn.Name.Name + " "
+	if !strings.HasPrefix(s, prefix) {
+		f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+	}
+}
+
+// lintValueSpecDoc examines package-global variables and constants.
+// It complains if they are not individually declared,
+// or if they are not suitably documented in the right form (unless they are in a block that is commented).
+func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
+	kind := "var"
+	if gd.Tok == token.CONST {
+		kind = "const"
+	}
+
+	if len(vs.Names) > 1 {
+		// Check that none are exported except for the first.
+		for _, n := range vs.Names[1:] {
+			if ast.IsExported(n.Name) {
+				f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name)
+				return
+			}
+		}
+	}
+
+	// Only one name.
+	name := vs.Names[0].Name
+	if !ast.IsExported(name) {
+		return
+	}
+
+	if vs.Doc == nil && gd.Doc == nil {
+		if genDeclMissingComments[gd] {
+			return
+		}
+		block := ""
+		if kind == "const" && gd.Lparen.IsValid() {
+			block = " (or a comment on this block)"
+		}
+		f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block)
+		genDeclMissingComments[gd] = true
+		return
+	}
+	// If this GenDecl has parens and a comment, we don't check its comment form.
+	if gd.Lparen.IsValid() && gd.Doc != nil {
+		return
+	}
+	// The relevant text to check will be on either vs.Doc or gd.Doc.
+	// Use vs.Doc preferentially.
+	doc := vs.Doc
+	if doc == nil {
+		doc = gd.Doc
+	}
+	prefix := name + " "
+	if !strings.HasPrefix(doc.Text(), prefix) {
+		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+	}
+}
+
+func (f *file) checkStutter(id *ast.Ident, thing string) {
+	pkg, name := f.f.Name.Name, id.Name
+	if !ast.IsExported(name) {
+		// unexported name
+		return
+	}
+	// A name stutters if the package name is a strict prefix
+	// and the next character of the name starts a new word.
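+	// For example, in package http an exported name HTTPServer stutters,
+	// since other packages would refer to it as http.HTTPServer.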
+	if len(name) <= len(pkg) {
+		// name is too short to stutter.
+		// This permits the name to be the same as the package name.
+		return
+	}
+	if !strings.EqualFold(pkg, name[:len(pkg)]) {
+		return
+	}
+	// We can assume the name is well-formed UTF-8.
+	// If the next rune after the package name is uppercase or an underscore
+	// then it's starting a new word and thus this name stutters.
+	rem := name[len(pkg):]
+	if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) {
+		f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem)
+	}
+}
+
+// zeroLiteral is a set of ast.BasicLit values that are zero values.
+// It is not exhaustive.
+var zeroLiteral = map[string]bool{
+	"false": true, // bool
+	// runes
+	`'\x00'`: true,
+	`'\000'`: true,
+	// strings
+	`""`: true,
+	"``": true,
+	// numerics
+	"0":   true,
+	"0.":  true,
+	"0.0": true,
+	"0i":  true,
+}
+
+// lintVarDecls examines variable declarations. It complains about declarations with
+// redundant LHS types that can be inferred from the RHS.
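+// For example, it suggests omitting the type in `var n int = 7`, and it
+// suggests dropping the `= ""` (a zero value) in `var s string = ""`.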
+func (f *file) lintVarDecls() {
+	var lastGen *ast.GenDecl // last GenDecl entered.
+
+	f.walk(func(node ast.Node) bool {
+		switch v := node.(type) {
+		case *ast.GenDecl:
+			if v.Tok != token.CONST && v.Tok != token.VAR {
+				return false
+			}
+			lastGen = v
+			return true
+		case *ast.ValueSpec:
+			if lastGen.Tok == token.CONST {
+				return false
+			}
+			if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 {
+				return false
+			}
+			rhs := v.Values[0]
+			// An underscore var appears in a common idiom for compile-time interface satisfaction,
+			// as in "var _ Interface = (*Concrete)(nil)".
+			if isIdent(v.Names[0], "_") {
+				return false
+			}
+			// If the RHS is a zero value, suggest dropping it.
+			zero := false
+			if lit, ok := rhs.(*ast.BasicLit); ok {
+				zero = zeroLiteral[lit.Value]
+			} else if isIdent(rhs, "nil") {
+				zero = true
+			}
+			if zero {
+				f.errorf(rhs, 0.9, category("zero-value"), "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0])
+				return false
+			}
+			lhsTyp := f.pkg.typeOf(v.Type)
+			rhsTyp := f.pkg.typeOf(rhs)
+
+			if !validType(lhsTyp) || !validType(rhsTyp) {
+				// Type checking failed (often due to missing imports).
+				return false
+			}
+
+			if !types.Identical(lhsTyp, rhsTyp) {
+				// Assignment to a different type is not redundant.
+				return false
+			}
+
+			// The next two conditions are for suppressing the warning in situations
+			// where we were unable to typecheck.
+
+			// If the LHS type is an interface, don't warn, since it is probably a
+			// concrete type on the RHS. Note that our feeble lexical check here
+			// will only pick up interface{} and other literal interface types;
+			// that covers most of the cases we care to exclude right now.
+			if _, ok := v.Type.(*ast.InterfaceType); ok {
+				return false
+			}
+			// If the RHS is an untyped const, only warn if the LHS type is its default type.
+			if defType, ok := f.isUntypedConst(rhs); ok && !isIdent(v.Type, defType) {
+				return false
+			}
+
+			f.errorf(v.Type, 0.8, category("type-inference"), "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0])
+			return false
+		}
+		return true
+	})
+}
+
+func validType(T types.Type) bool {
+	return T != nil &&
+		T != types.Typ[types.Invalid] &&
+		!strings.Contains(T.String(), "invalid type") // good but not foolproof
+}
+
+// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
+func (f *file) lintElses() {
+	// We don't want to flag if { } else if { } else { } constructions.
+	// They will appear as an IfStmt whose Else field is also an IfStmt.
+	// Record such a node so we ignore it when we visit it.
+	ignore := make(map[*ast.IfStmt]bool)
+
+	f.walk(func(node ast.Node) bool {
+		ifStmt, ok := node.(*ast.IfStmt)
+		if !ok || ifStmt.Else == nil {
+			return true
+		}
+		if ignore[ifStmt] {
+			return true
+		}
+		if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
+			ignore[elseif] = true
+			return true
+		}
+		if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
+			// only care about elses without conditions
+			return true
+		}
+		if len(ifStmt.Body.List) == 0 {
+			return true
+		}
+		shortDecl := false // does the if statement have a ":=" initialization statement?
+		if ifStmt.Init != nil {
+			if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE {
+				shortDecl = true
+			}
+		}
+		lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1]
+		if _, ok := lastStmt.(*ast.ReturnStmt); ok {
+			extra := ""
+			if shortDecl {
+				extra = " (move short variable declaration to its own line if necessary)"
+			}
+			f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra)
+		}
+		return true
+	})
+}
+
+// lintRanges examines range clauses. It complains about redundant constructions.
+func (f *file) lintRanges() {
+	f.walk(func(node ast.Node) bool {
+		rs, ok := node.(*ast.RangeStmt)
+		if !ok {
+			return true
+		}
+		if rs.Value == nil {
+			// for x = range m { ... }
+			return true // single var form
+		}
+		if !isIdent(rs.Value, "_") {
+			// for ?, y = range m { ... }
+			return true
+		}
+
+		p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
+
+		newRS := *rs // shallow copy
+		newRS.Value = nil
+		p.ReplacementLine = f.firstLineOf(&newRS, rs)
+
+		return true
+	})
+}
+
+// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation.
+func (f *file) lintErrorf() {
+	f.walk(func(node ast.Node) bool {
+		ce, ok := node.(*ast.CallExpr)
+		if !ok || len(ce.Args) != 1 {
+			return true
+		}
+		isErrorsNew := isPkgDot(ce.Fun, "errors", "New")
+		var isTestingError bool
+		se, ok := ce.Fun.(*ast.SelectorExpr)
+		if ok && se.Sel.Name == "Error" {
+			if typ := f.pkg.typeOf(se.X); typ != nil {
+				isTestingError = typ.String() == "*testing.T"
+			}
+		}
+		if !isErrorsNew && !isTestingError {
+			return true
+		}
+		arg := ce.Args[0]
+		ce, ok = arg.(*ast.CallExpr)
+		if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
+			return true
+		}
+		errorfPrefix := "fmt"
+		if isTestingError {
+			errorfPrefix = f.render(se.X)
+		}
+		p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix)
+
+		m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`)
+		if m != nil {
+			p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3]
+		}
+
+		return true
+	})
+}
+
+// lintErrors examines global error vars. It complains if they aren't named in the standard way.
+func (f *file) lintErrors() {
+	for _, decl := range f.f.Decls {
+		gd, ok := decl.(*ast.GenDecl)
+		if !ok || gd.Tok != token.VAR {
+			continue
+		}
+		for _, spec := range gd.Specs {
+			spec := spec.(*ast.ValueSpec)
+			if len(spec.Names) != 1 || len(spec.Values) != 1 {
+				continue
+			}
+			ce, ok := spec.Values[0].(*ast.CallExpr)
+			if !ok {
+				continue
+			}
+			if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+				continue
+			}
+
+			id := spec.Names[0]
+			prefix := "err"
+			if id.IsExported() {
+				prefix = "Err"
+			}
+			if !strings.HasPrefix(id.Name, prefix) {
+				f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix)
+			}
+		}
+	}
+}
+
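+// lintCapAndPunct reports whether the string s begins with a capital letter
+// (unless it looks like an initialism) and whether it ends in '.', ':' or '!'.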
+func lintCapAndPunct(s string) (isCap, isPunct bool) {
+	first, firstN := utf8.DecodeRuneInString(s)
+	last, _ := utf8.DecodeLastRuneInString(s)
+	isPunct = last == '.' || last == ':' || last == '!'
+	isCap = unicode.IsUpper(first)
+	if isCap && len(s) > firstN {
+		// Don't flag strings starting with something that looks like an initialism.
+		if second, _ := utf8.DecodeRuneInString(s[firstN:]); unicode.IsUpper(second) {
+			isCap = false
+		}
+	}
+	return
+}
+
+// lintErrorStrings examines error strings. It complains if they are capitalized or end in punctuation.
+func (f *file) lintErrorStrings() {
+	f.walk(func(node ast.Node) bool {
+		ce, ok := node.(*ast.CallExpr)
+		if !ok {
+			return true
+		}
+		if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+			return true
+		}
+		if len(ce.Args) < 1 {
+			return true
+		}
+		str, ok := ce.Args[0].(*ast.BasicLit)
+		if !ok || str.Kind != token.STRING {
+			return true
+		}
+		s, _ := strconv.Unquote(str.Value) // can assume well-formed Go
+		if s == "" {
+			return true
+		}
+		isCap, isPunct := lintCapAndPunct(s)
+		var msg string
+		switch {
+		case isCap && isPunct:
+			msg = "error strings should not be capitalized and should not end with punctuation"
+		case isCap:
+			msg = "error strings should not be capitalized"
+		case isPunct:
+			msg = "error strings should not end with punctuation"
+		default:
+			return true
+		}
+		// People use proper nouns and exported Go identifiers in error strings,
+		// so decrease the confidence of warnings for capitalization.
+		conf := 0.8
+		if isCap {
+			conf = 0.6
+		}
+		f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), msg)
+		return true
+	})
+}
+
+// lintReceiverNames examines receiver names. It complains about inconsistent
+// names used for the same type and names such as "this".
+func (f *file) lintReceiverNames() {
+	typeReceiver := map[string]string{}
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
+			return true
+		}
+		names := fn.Recv.List[0].Names
+		if len(names) < 1 {
+			return true
+		}
+		name := names[0].Name
+		const ref = styleGuideBase + "#receiver-names"
+		if name == "_" {
+			f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore`)
+			return true
+		}
+		if name == "this" || name == "self" {
+			f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
+			return true
+		}
+		recv := receiverType(fn)
+		if prev, ok := typeReceiver[recv]; ok && prev != name {
+			f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv)
+			return true
+		}
+		typeReceiver[recv] = name
+		return true
+	})
+}
+
+// lintIncDec examines statements that increment or decrement a variable.
+// It complains if they don't use x++ or x--.
+func (f *file) lintIncDec() {
+	f.walk(func(n ast.Node) bool {
+		as, ok := n.(*ast.AssignStmt)
+		if !ok {
+			return true
+		}
+		if len(as.Lhs) != 1 {
+			return true
+		}
+		if !isOne(as.Rhs[0]) {
+			return true
+		}
+		var suffix string
+		switch as.Tok {
+		case token.ADD_ASSIGN:
+			suffix = "++"
+		case token.SUB_ASSIGN:
+			suffix = "--"
+		default:
+			return true
+		}
+		f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix)
+		return true
+	})
+}
+
+// lintErrorReturn examines function declarations that return an error.
+// It complains if the error isn't the last parameter.
+func (f *file) lintErrorReturn() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || fn.Type.Results == nil {
+			return true
+		}
+		ret := fn.Type.Results.List
+		if len(ret) <= 1 {
+			return true
+		}
+		// An error return parameter should be the last parameter.
+		// Flag any error parameters found before the last.
+		for _, r := range ret[:len(ret)-1] {
+			if isIdent(r.Type, "error") {
+				f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items")
+				break // only flag one
+			}
+		}
+		return true
+	})
+}
+
+// lintUnexportedReturn examines exported function declarations.
+// It complains if any return an unexported type.
+func (f *file) lintUnexportedReturn() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok {
+			return true
+		}
+		if fn.Type.Results == nil {
+			return false
+		}
+		if !fn.Name.IsExported() {
+			return false
+		}
+		thing := "func"
+		if fn.Recv != nil && len(fn.Recv.List) > 0 {
+			thing = "method"
+			if !ast.IsExported(receiverType(fn)) {
+				// Don't report exported methods of unexported types,
+				// such as private implementations of sort.Interface.
+				return false
+			}
+		}
+		for _, ret := range fn.Type.Results.List {
+			typ := f.pkg.typeOf(ret.Type)
+			if exportedType(typ) {
+				continue
+			}
+			f.errorf(ret.Type, 0.8, category("unexported-type-in-api"),
+				"exported %s %s returns unexported type %s, which can be annoying to use",
+				thing, fn.Name.Name, typ)
+			break // only flag one
+		}
+		return false
+	})
+}
+
+// exportedType reports whether typ is an exported type.
+// It is imprecise, and will err on the side of returning true,
+// such as for composite types.
+func exportedType(typ types.Type) bool {
+	switch T := typ.(type) {
+	case *types.Named:
+		// Builtin types have no package.
+		return T.Obj().Pkg() == nil || T.Obj().Exported()
+	case *types.Map:
+		return exportedType(T.Key()) && exportedType(T.Elem())
+	case interface {
+		Elem() types.Type
+	}: // array, slice, pointer, chan
+		return exportedType(T.Elem())
+	}
+	// Be conservative about other types, such as struct, interface, etc.
+	return true
+}
+
+// timeSuffixes is a list of name suffixes that imply a time unit.
+// This is not an exhaustive list.
+var timeSuffixes = []string{
+	"Sec", "Secs", "Seconds",
+	"Msec", "Msecs",
+	"Milli", "Millis", "Milliseconds",
+	"Usec", "Usecs", "Microseconds",
+	"MS", "Ms",
+}
+
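+// lintTimeNames examines variable declarations of type time.Duration
+// (or *time.Duration) and complains about unit-specific name suffixes
+// such as "Secs" or "MS".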
+func (f *file) lintTimeNames() {
+	f.walk(func(node ast.Node) bool {
+		v, ok := node.(*ast.ValueSpec)
+		if !ok {
+			return true
+		}
+		for _, name := range v.Names {
+			origTyp := f.pkg.typeOf(name)
+			// Look for time.Duration or *time.Duration;
+			// the latter is common when using flag.Duration.
+			typ := origTyp
+			if pt, ok := typ.(*types.Pointer); ok {
+				typ = pt.Elem()
+			}
+			if !f.pkg.isNamedType(typ, "time", "Duration") {
+				continue
+			}
+			suffix := ""
+			for _, suf := range timeSuffixes {
+				if strings.HasSuffix(name.Name, suf) {
+					suffix = suf
+					break
+				}
+			}
+			if suffix == "" {
+				continue
+			}
+			f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix)
+		}
+		return true
+	})
+}
+
+// lintContextKeyTypes checks for call expressions to context.WithValue with
+// basic types used for the key argument.
+// See: https://golang.org/issue/17293
+func (f *file) lintContextKeyTypes() {
+	f.walk(func(node ast.Node) bool {
+		switch node := node.(type) {
+		case *ast.CallExpr:
+			f.checkContextKeyType(node)
+		}
+
+		return true
+	})
+}
+
+// checkContextKeyType reports an error if the call expression calls
+// context.WithValue with a key argument of basic type.
+func (f *file) checkContextKeyType(x *ast.CallExpr) {
+	sel, ok := x.Fun.(*ast.SelectorExpr)
+	if !ok {
+		return
+	}
+	pkg, ok := sel.X.(*ast.Ident)
+	if !ok || pkg.Name != "context" {
+		return
+	}
+	if sel.Sel.Name != "WithValue" {
+		return
+	}
+
+	// key is second argument to context.WithValue
+	if len(x.Args) != 3 {
+		return
+	}
+	key := f.pkg.typesInfo.Types[x.Args[1]]
+
+	if _, ok := key.Type.(*types.Basic); ok {
+		f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type))
+	}
+}
+
+// lintContextArgs examines function declarations that contain an
+// argument with a type of context.Context
+// It complains if that argument isn't the first parameter.
+func (f *file) lintContextArgs() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || len(fn.Type.Params.List) <= 1 {
+			return true
+		}
+		// A context.Context should be the first parameter of a function.
+		// Flag any that show up after the first.
+		for _, arg := range fn.Type.Params.List[1:] {
+			if isPkgDot(arg.Type, "context", "Context") {
+				f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function")
+				break // only flag one
+			}
+		}
+		return true
+	})
+}
+
+// receiverType returns the named type of the method receiver, sans "*",
+// or "invalid-type" if fn.Recv is ill formed.
+func receiverType(fn *ast.FuncDecl) string {
+	switch e := fn.Recv.List[0].Type.(type) {
+	case *ast.Ident:
+		return e.Name
+	case *ast.StarExpr:
+		if id, ok := e.X.(*ast.Ident); ok {
+			return id.Name
+		}
+	}
+	// The parser accepts much more than just the legal forms.
+	return "invalid-type"
+}
+
+func (f *file) walk(fn func(ast.Node) bool) {
+	ast.Walk(walker(fn), f.f)
+}
+
+func (f *file) render(x interface{}) string {
+	var buf bytes.Buffer
+	if err := printer.Fprint(&buf, f.fset, x); err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+func (f *file) debugRender(x interface{}) string {
+	var buf bytes.Buffer
+	if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+// walker adapts a function to satisfy the ast.Visitor interface.
+// The function's return value reports whether the walk should proceed into the node's children.
+type walker func(ast.Node) bool
+
+func (w walker) Visit(node ast.Node) ast.Visitor {
+	if w(node) {
+		return w
+	}
+	return nil
+}
+
+func isIdent(expr ast.Expr, ident string) bool {
+	id, ok := expr.(*ast.Ident)
+	return ok && id.Name == ident
+}
+
+// isBlank returns whether id is the blank identifier "_".
+// If id == nil, the answer is false.
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
+
+func isPkgDot(expr ast.Expr, pkg, name string) bool {
+	sel, ok := expr.(*ast.SelectorExpr)
+	return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
+}
+
+func isZero(expr ast.Expr) bool {
+	lit, ok := expr.(*ast.BasicLit)
+	return ok && lit.Kind == token.INT && lit.Value == "0"
+}
+
+func isOne(expr ast.Expr) bool {
+	lit, ok := expr.(*ast.BasicLit)
+	return ok && lit.Kind == token.INT && lit.Value == "1"
+}
+
+var basicTypeKinds = map[types.BasicKind]string{
+	types.UntypedBool:    "bool",
+	types.UntypedInt:     "int",
+	types.UntypedRune:    "rune",
+	types.UntypedFloat:   "float64",
+	types.UntypedComplex: "complex128",
+	types.UntypedString:  "string",
+}
+
+// isUntypedConst reports whether expr is an untyped constant,
+// and if so, what its default type is.
+func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) {
+	// Re-evaluate expr outside of its context to see if it's untyped.
+	// (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
+	exprStr := f.render(expr)
+	tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr)
+	if err != nil {
+		return "", false
+	}
+	if b, ok := tv.Type.(*types.Basic); ok {
+		if dt, ok := basicTypeKinds[b.Kind()]; ok {
+			return dt, true
+		}
+	}
+
+	return "", false
+}
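+
+// For example, given var x float32 = 1 << 3, re-evaluating 1<<3 on its
+// own yields an untyped int constant, so this reports ("int", true)
+// even though the assignment context gives the expression type float32.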
+
+// firstLineOf renders the given node and returns its first line,
+// prefixed with the indentation of the match node.
+func (f *file) firstLineOf(node, match ast.Node) string {
+	line := f.render(node)
+	if i := strings.Index(line, "\n"); i >= 0 {
+		line = line[:i]
+	}
+	return f.indentOf(match) + line
+}
+
+func (f *file) indentOf(node ast.Node) string {
+	line := srcLine(f.src, f.fset.Position(node.Pos()))
+	for i, r := range line {
+		switch r {
+		case ' ', '\t':
+		default:
+			return line[:i]
+		}
+	}
+	return line // unusual or empty line
+}
+
+func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) {
+	line := srcLine(f.src, f.fset.Position(node.Pos()))
+	line = strings.TrimSuffix(line, "\n")
+	rx := regexp.MustCompile(pattern)
+	return rx.FindStringSubmatch(line)
+}
+
+// srcLine returns the complete line at p, including the terminating newline.
+func srcLine(src []byte, p token.Position) string {
+	// Widen lo and hi outward to the boundaries of the line containing p.
+	lo, hi := p.Offset, p.Offset+1
+	for lo > 0 && src[lo-1] != '\n' {
+		lo--
+	}
+	for hi < len(src) && src[hi-1] != '\n' {
+		hi++
+	}
+	return string(src[lo:hi])
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index 04dcb88..aa20729 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -61,7 +61,6 @@
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-	// x, n already 0
 	for shift := uint(0); shift < 64; shift += 7 {
 		if n >= len(buf) {
 			return 0, 0
@@ -78,13 +77,7 @@
 	return 0, 0
 }
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	// x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
 	i := p.index
 	l := len(p.buf)
 
@@ -107,6 +100,107 @@
 	return
 }
 
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
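+
+// Worked example of the unrolled fast path above: 300 encodes as
+// [0xAC, 0x02]. The first byte has the continuation bit set, so
+// x = 0xAC - 0x80 = 44; then b = 0x02 and x += b<<7 = 256, giving
+// x = 300, and b&0x80 == 0 ends the decode.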
+
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // This is the format for the
 // fixed64, sfixed64, and double protocol buffer types.
@@ -340,6 +434,8 @@
 // Buffer and places the decoded result in pb.  If the struct
 // underlying pb does not match the data in the buffer, the results can be
 // unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
 	// If the object can unmarshal itself, let it.
 	if u, ok := pb.(Unmarshaler); ok {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 4fd0531..61f83c1 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -592,7 +592,11 @@
 			props = oop.Prop
 			nv := reflect.New(oop.Type.Elem())
 			dst = nv.Elem().Field(0)
-			sv.Field(oop.Field).Set(nv)
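+			// A oneof may be set at most once; reject text input
+			// that would silently overwrite a parsed variant.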
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
 		}
 		if !dst.IsValid() {
 			return p.errorf("unknown field name %q in %v", name, st)
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
index c2ae167..8c0dd25 100644
--- a/vendor/golang.org/x/net/http2/go18.go
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -6,6 +6,36 @@
 
 package http2
 
-import "crypto/tls"
+import (
+	"crypto/tls"
+	"net/http"
+)
 
 func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+// Push implements http.Pusher.
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+	internalOpts := pushOptions{}
+	if opts != nil {
+		internalOpts.Method = opts.Method
+		internalOpts.Header = opts.Header
+	}
+	return w.push(target, internalOpts)
+}
+
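+// configureServer18 mirrors net/http's Go 1.8 semantics: when the http2
+// Server has no IdleTimeout of its own, it inherits h1.IdleTimeout,
+// falling back to h1.ReadTimeout.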
+func configureServer18(h1 *http.Server, h2 *Server) error {
+	if h2.IdleTimeout == 0 {
+		if h1.IdleTimeout != 0 {
+			h2.IdleTimeout = h1.IdleTimeout
+		} else {
+			h2.IdleTimeout = h1.ReadTimeout
+		}
+	}
+	return nil
+}
+
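+// shouldLogPanic reports whether a handler panic should be logged. With
+// Go 1.8's http.ErrAbortHandler, a handler can abort its stream via
+// panic(http.ErrAbortHandler) without a stack trace being logged; any
+// other non-nil panic value is still logged.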
+func shouldLogPanic(panicValue interface{}) bool {
+	return panicValue != nil && panicValue != http.ErrAbortHandler
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 40b46ae..a91fed5 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -36,6 +36,7 @@
 	VerboseLogs    bool
 	logFrameWrites bool
 	logFrameReads  bool
+	inTests        bool
 )
 
 func init() {
@@ -77,13 +78,23 @@
 
 type streamState int
 
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
 const (
 	stateIdle streamState = iota
 	stateOpen
 	stateHalfClosedLocal
 	stateHalfClosedRemote
-	stateResvLocal
-	stateResvRemote
 	stateClosed
 )
 
@@ -92,8 +103,6 @@
 	stateOpen:             "Open",
 	stateHalfClosedLocal:  "HalfClosedLocal",
 	stateHalfClosedRemote: "HalfClosedRemote",
-	stateResvLocal:        "ResvLocal",
-	stateResvRemote:       "ResvRemote",
 	stateClosed:           "Closed",
 }
 
@@ -253,14 +262,27 @@
 	return &bufferedWriter{w: w}
 }
 
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
 var bufWriterPool = sync.Pool{
 	New: func() interface{} {
-		// TODO: pick something better? this is a bit under
-		// (3 x typical 1500 byte MTU) at least.
-		return bufio.NewWriterSize(nil, 4<<10)
+		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
 	},
 }
 
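+// Available reports how much buffer space remains: the full pool buffer
+// size when no bufio.Writer has been fetched yet. The server uses this
+// (via staysWithinBuffer) to write small frames synchronously on the
+// serve goroutine rather than spawning writeFrameAsync.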
+func (w *bufferedWriter) Available() int {
+	if w.bw == nil {
+		return bufWriterPoolBufferSize
+	}
+	return w.bw.Available()
+}
+
 func (w *bufferedWriter) Write(p []byte) (n int, err error) {
 	if w.bw == nil {
 		bw := bufWriterPool.Get().(*bufio.Writer)
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go
new file mode 100644
index 0000000..2e600dc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go18.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package http2
+
+import "net/http"
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+	// No IdleTimeout to sync prior to Go 1.8.
+	return nil
+}
+
+func shouldLogPanic(panicValue interface{}) bool {
+	return panicValue != nil
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index c986bc1..ea260da 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2,17 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable?  or maximum number of idle clients and remove the
-// oldest?
-
 // TODO: turn off the serve goroutine when idle, so
 // an idle conn only has the readFrames goroutine active. (which could
 // also be optimized probably to pin less memory in crypto/tls). This
@@ -44,6 +33,7 @@
 	"fmt"
 	"io"
 	"log"
+	"math"
 	"net"
 	"net/http"
 	"net/textproto"
@@ -114,6 +104,15 @@
 	// PermitProhibitedCipherSuites, if true, permits the use of
 	// cipher suites prohibited by the HTTP/2 spec.
 	PermitProhibitedCipherSuites bool
+
+	// IdleTimeout specifies how long until idle clients should be
+	// closed with a GOAWAY frame. PING frames are not considered
+	// activity for the purposes of IdleTimeout.
+	IdleTimeout time.Duration
+
+	// NewWriteScheduler constructs a write scheduler for a connection.
+	// If nil, a default scheduler is chosen.
+	NewWriteScheduler func() WriteScheduler
 }
 
 func (s *Server) maxReadFrameSize() uint32 {
@@ -136,9 +135,15 @@
 //
 // ConfigureServer must be called before s begins serving.
 func ConfigureServer(s *http.Server, conf *Server) error {
+	if s == nil {
+		panic("nil *http.Server")
+	}
 	if conf == nil {
 		conf = new(Server)
 	}
+	if err := configureServer18(s, conf); err != nil {
+		return err
+	}
 
 	if s.TLSConfig == nil {
 		s.TLSConfig = new(tls.Config)
@@ -183,9 +188,6 @@
 	if !haveNPN {
 		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
 	}
-	// h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
-	// to switch to "h2".
-	s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
 
 	if s.TLSNextProto == nil {
 		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
@@ -200,7 +202,6 @@
 		})
 	}
 	s.TLSNextProto[NextProtoTLS] = protoHandler
-	s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
 	return nil
 }
 
@@ -254,29 +255,35 @@
 	defer cancel()
 
 	sc := &serverConn{
-		srv:              s,
-		hs:               opts.baseConfig(),
-		conn:             c,
-		baseCtx:          baseCtx,
-		remoteAddrStr:    c.RemoteAddr().String(),
-		bw:               newBufferedWriter(c),
-		handler:          opts.handler(),
-		streams:          make(map[uint32]*stream),
-		readFrameCh:      make(chan readFrameResult),
-		wantWriteFrameCh: make(chan frameWriteMsg, 8),
-		wroteFrameCh:     make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
-		bodyReadCh:       make(chan bodyReadMsg),         // buffering doesn't matter either way
-		doneServing:      make(chan struct{}),
-		advMaxStreams:    s.maxConcurrentStreams(),
-		writeSched: writeScheduler{
-			maxFrameSize: initialMaxFrameSize,
-		},
+		srv:               s,
+		hs:                opts.baseConfig(),
+		conn:              c,
+		baseCtx:           baseCtx,
+		remoteAddrStr:     c.RemoteAddr().String(),
+		bw:                newBufferedWriter(c),
+		handler:           opts.handler(),
+		streams:           make(map[uint32]*stream),
+		readFrameCh:       make(chan readFrameResult),
+		wantWriteFrameCh:  make(chan FrameWriteRequest, 8),
+		wantStartPushCh:   make(chan startPushRequest, 8),
+		wroteFrameCh:      make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+		bodyReadCh:        make(chan bodyReadMsg),         // buffering doesn't matter either way
+		doneServing:       make(chan struct{}),
+		clientMaxStreams:  math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+		advMaxStreams:     s.maxConcurrentStreams(),
 		initialWindowSize: initialWindowSize,
+		maxFrameSize:      initialMaxFrameSize,
 		headerTableSize:   initialHeaderTableSize,
 		serveG:            newGoroutineLock(),
 		pushEnabled:       true,
 	}
 
+	if s.NewWriteScheduler != nil {
+		sc.writeSched = s.NewWriteScheduler()
+	} else {
+		sc.writeSched = NewRandomWriteScheduler()
+	}
+
 	sc.flow.add(initialWindowSize)
 	sc.inflow.add(initialWindowSize)
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@@ -356,16 +363,18 @@
 	handler          http.Handler
 	baseCtx          contextContext
 	framer           *Framer
-	doneServing      chan struct{}         // closed when serverConn.serve ends
-	readFrameCh      chan readFrameResult  // written by serverConn.readFrames
-	wantWriteFrameCh chan frameWriteMsg    // from handlers -> serve
-	wroteFrameCh     chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
-	bodyReadCh       chan bodyReadMsg      // from handlers -> serve
-	testHookCh       chan func(int)        // code to run on the serve loop
-	flow             flow                  // conn-wide (not stream-specific) outbound flow control
-	inflow           flow                  // conn-wide inbound flow control
-	tlsState         *tls.ConnectionState  // shared by all handlers, like net/http
+	doneServing      chan struct{}          // closed when serverConn.serve ends
+	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
+	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+	wantStartPushCh  chan startPushRequest  // from handlers -> serve
+	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
+	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
+	testHookCh       chan func(int)         // code to run on the serve loop
+	flow             flow                   // conn-wide (not stream-specific) outbound flow control
+	inflow           flow                   // conn-wide inbound flow control
+	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
 	remoteAddrStr    string
+	writeSched       WriteScheduler
 
 	// Everything following is owned by the serve loop; use serveG.check():
 	serveG                goroutineLock // used to verify funcs are on serve()
@@ -375,22 +384,27 @@
 	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
 	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
 	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
-	curOpenStreams        uint32 // client's number of open streams
-	maxStreamID           uint32 // max ever seen
+	curClientStreams      uint32 // number of open streams initiated by the client
+	curPushedStreams      uint32 // number of open streams initiated by server push
+	maxClientStreamID     uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+	maxPushPromiseID      uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 	streams               map[uint32]*stream
 	initialWindowSize     int32
+	maxFrameSize          int32
 	headerTableSize       uint32
 	peerMaxHeaderListSize uint32            // zero means unknown (default)
 	canonHeader           map[string]string // http2-lower-case -> Go-Canonical-Case
-	writingFrame          bool              // started write goroutine but haven't heard back on wroteFrameCh
+	writingFrame          bool              // started writing a frame (on serve goroutine or separate)
+	writingFrameAsync     bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
 	needsFrameFlush       bool              // last frame write wasn't a flush
-	writeSched            writeScheduler
-	inGoAway              bool // we've started to or sent GOAWAY
-	needToSendGoAway      bool // we need to schedule a GOAWAY frame write
+	inGoAway              bool              // we've started to or sent GOAWAY
+	inFrameScheduleLoop   bool              // whether we're in the scheduleFrameWrite loop
+	needToSendGoAway      bool              // we need to schedule a GOAWAY frame write
 	goAwayCode            ErrCode
 	shutdownTimerCh       <-chan time.Time // nil until used
 	shutdownTimer         *time.Timer      // nil until used
-	freeRequestBodyBuf    []byte           // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+	idleTimer             *time.Timer      // nil if unused
+	idleTimerCh           <-chan time.Time // nil if unused
 
 	// Owned by the writeFrameAsync goroutine:
 	headerWriteBuf bytes.Buffer
@@ -434,11 +448,11 @@
 	numTrailerValues int64
 	weight           uint8
 	state            streamState
-	sentReset        bool // only true once detached from streams map
-	gotReset         bool // only true once detacted from streams map
-	gotTrailerHeader bool // HEADER frame for trailers was seen
-	wroteHeaders     bool // whether we wrote headers (not status 100)
-	reqBuf           []byte
+	sentReset        bool   // only true once detached from streams map
+	gotReset         bool   // only true once detached from streams map
+	gotTrailerHeader bool   // HEADER frame for trailers was seen
+	wroteHeaders     bool   // whether we wrote headers (not status 100)
+	reqBuf           []byte // if non-nil, body pipe buffer to return later at EOF
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -453,7 +467,7 @@
 
 func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
 	sc.serveG.check()
-	// http://http2.github.io/http2-spec/#rfc.section.5.1
+	// http://tools.ietf.org/html/rfc7540#section-5.1
 	if st, ok := sc.streams[streamID]; ok {
 		return st.state, st
 	}
@@ -463,8 +477,14 @@
 	// a client sends a HEADERS frame on stream 7 without ever sending a
 	// frame on stream 5, then stream 5 transitions to the "closed"
 	// state when the first frame for stream 7 is sent or received."
-	if streamID <= sc.maxStreamID {
-		return stateClosed, nil
+	if streamID%2 == 1 {
+		if streamID <= sc.maxClientStreamID {
+			return stateClosed, nil
+		}
+	} else {
+		if streamID <= sc.maxPushPromiseID {
+			return stateClosed, nil
+		}
 	}
 	return stateIdle, nil
 }
@@ -603,17 +623,17 @@
 
 // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
 type frameWriteResult struct {
-	wm  frameWriteMsg // what was written (or attempted)
-	err error         // result of the writeFrame call
+	wr  FrameWriteRequest // what was written (or attempted)
+	err error             // result of the writeFrame call
 }
 
 // writeFrameAsync runs in its own goroutine and writes a single frame
 // and then reports when it's done.
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
-	err := wm.write.writeFrame(sc)
-	sc.wroteFrameCh <- frameWriteResult{wm, err}
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
+	err := wr.write.writeFrame(sc)
+	sc.wroteFrameCh <- frameWriteResult{wr, err}
 }
 
 func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -657,7 +677,7 @@
 		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
 	}
 
-	sc.writeFrame(frameWriteMsg{
+	sc.writeFrame(FrameWriteRequest{
 		write: writeSettings{
 			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
 			{SettingMaxConcurrentStreams, sc.advMaxStreams},
@@ -682,6 +702,17 @@
 	sc.setConnState(http.StateActive)
 	sc.setConnState(http.StateIdle)
 
+	if sc.srv.IdleTimeout != 0 {
+		sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
+		defer sc.idleTimer.Stop()
+		sc.idleTimerCh = sc.idleTimer.C
+	}
+
+	var gracefulShutdownCh <-chan struct{}
+	if sc.hs != nil {
+		gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
+	}
+
 	go sc.readFrames() // closed by defer sc.conn.Close above
 
 	settingsTimer := time.NewTimer(firstSettingsTimeout)
@@ -689,8 +720,10 @@
 	for {
 		loopNum++
 		select {
-		case wm := <-sc.wantWriteFrameCh:
-			sc.writeFrame(wm)
+		case wr := <-sc.wantWriteFrameCh:
+			sc.writeFrame(wr)
+		case spr := <-sc.wantStartPushCh:
+			sc.startPush(spr)
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
@@ -707,12 +740,22 @@
 		case <-settingsTimer.C:
 			sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
 			return
+		case <-gracefulShutdownCh:
+			gracefulShutdownCh = nil
+			sc.goAwayIn(ErrCodeNo, 0)
 		case <-sc.shutdownTimerCh:
 			sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
 			return
+		case <-sc.idleTimerCh:
+			sc.vlogf("connection is idle")
+			sc.goAway(ErrCodeNo)
 		case fn := <-sc.testHookCh:
 			fn(loopNum)
 		}
+
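+		// Exit once a GOAWAY has been fully written and all
+		// client-initiated streams have drained.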
+		if sc.inGoAway && sc.curClientStreams == 0 && !sc.needToSendGoAway && !sc.writingFrame {
+			return
+		}
 	}
 }
 
@@ -760,7 +803,7 @@
 	ch := errChanPool.Get().(chan error)
 	writeArg := writeDataPool.Get().(*writeData)
 	*writeArg = writeData{stream.id, data, endStream}
-	err := sc.writeFrameFromHandler(frameWriteMsg{
+	err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  writeArg,
 		stream: stream,
 		done:   ch,
@@ -796,17 +839,17 @@
 	return err
 }
 
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
 // if the connection has gone away.
 //
 // This must not be run from the serve goroutine itself, else it might
 // deadlock writing to sc.wantWriteFrameCh (which is only mildly
 // buffered and is read by serve itself). If you're on the serve
 // goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
 	sc.serveG.checkNotOn() // NOT
 	select {
-	case sc.wantWriteFrameCh <- wm:
+	case sc.wantWriteFrameCh <- wr:
 		return nil
 	case <-sc.doneServing:
 		// Serve loop is gone.
@@ -823,38 +866,38 @@
 // make it onto the wire
 //
 // If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
 	sc.serveG.check()
 
 	var ignoreWrite bool
 
 	// Don't send a 100-continue response if we've already sent headers.
 	// See golang.org/issue/14030.
-	switch wm.write.(type) {
+	switch wr.write.(type) {
 	case *writeResHeaders:
-		wm.stream.wroteHeaders = true
+		wr.stream.wroteHeaders = true
 	case write100ContinueHeadersFrame:
-		if wm.stream.wroteHeaders {
+		if wr.stream.wroteHeaders {
 			ignoreWrite = true
 		}
 	}
 
 	if !ignoreWrite {
-		sc.writeSched.add(wm)
+		sc.writeSched.Push(wr)
 	}
 	sc.scheduleFrameWrite()
 }
 
-// startFrameWrite starts a goroutine to write wm (in a separate
+// startFrameWrite starts a goroutine to write wr (in a separate
 // goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 	sc.serveG.check()
 	if sc.writingFrame {
 		panic("internal error: can only be writing one frame at a time")
 	}
 
-	st := wm.stream
+	st := wr.stream
 	if st != nil {
 		switch st.state {
 		case stateHalfClosedLocal:
@@ -865,13 +908,31 @@
 				sc.scheduleFrameWrite()
 				return
 			}
-			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wr))
+		}
+	}
+	if wpp, ok := wr.write.(*writePushPromise); ok {
+		var err error
+		wpp.promisedID, err = wpp.allocatePromisedID()
+		if err != nil {
+			sc.writingFrameAsync = false
+			if wr.done != nil {
+				wr.done <- err
+			}
+			return
 		}
 	}
 
 	sc.writingFrame = true
 	sc.needsFrameFlush = true
-	go sc.writeFrameAsync(wm)
+	if wr.write.staysWithinBuffer(sc.bw.Available()) {
+		sc.writingFrameAsync = false
+		err := wr.write.writeFrame(sc)
+		sc.wroteFrame(frameWriteResult{wr, err})
+	} else {
+		sc.writingFrameAsync = true
+		go sc.writeFrameAsync(wr)
+	}
 }
 
 // errHandlerPanicked is the error given to any callers blocked in a read from
@@ -887,25 +948,26 @@
 		panic("internal error: expected to be already writing a frame")
 	}
 	sc.writingFrame = false
+	sc.writingFrameAsync = false
 
-	wm := res.wm
-	st := wm.stream
+	wr := res.wr
+	st := wr.stream
 
-	closeStream := endsStream(wm.write)
+	closeStream := endsStream(wr.write)
 
-	if _, ok := wm.write.(handlerPanicRST); ok {
+	if _, ok := wr.write.(handlerPanicRST); ok {
 		sc.closeStream(st, errHandlerPanicked)
 	}
 
 	// Reply (if requested) to the blocked ServeHTTP goroutine.
-	if ch := wm.done; ch != nil {
+	if ch := wr.done; ch != nil {
 		select {
 		case ch <- res.err:
 		default:
-			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
 		}
 	}
-	wm.write = nil // prevent use (assume it's tainted after wm.done send)
+	wr.write = nil // prevent use (assume it's tainted after wr.done send)
 
 	if closeStream {
 		if st == nil {
@@ -916,11 +978,11 @@
 			// Here we would go to stateHalfClosedLocal in
 			// theory, but since our handler is done and
 			// the net/http package provides no mechanism
-			// for finishing writing to a ResponseWriter
-			// while still reading data (see possible TODO
-			// at top of this file), we go into closed
-			// state here anyway, after telling the peer
-			// we're hanging up on them.
+			// for closing a ResponseWriter while still
+			// reading data (see possible TODO at top of
+			// this file), we go into closed state here
+			// anyway, after telling the peer we're
+			// hanging up on them.
 			st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
 			errCancel := streamError(st.id, ErrCodeCancel)
 			sc.resetStream(errCancel)
@@ -946,47 +1008,61 @@
 // flush the write buffer.
 func (sc *serverConn) scheduleFrameWrite() {
 	sc.serveG.check()
-	if sc.writingFrame {
+	if sc.writingFrame || sc.inFrameScheduleLoop {
 		return
 	}
-	if sc.needToSendGoAway {
-		sc.needToSendGoAway = false
-		sc.startFrameWrite(frameWriteMsg{
-			write: &writeGoAway{
-				maxStreamID: sc.maxStreamID,
-				code:        sc.goAwayCode,
-			},
-		})
-		return
-	}
-	if sc.needToSendSettingsAck {
-		sc.needToSendSettingsAck = false
-		sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
-		return
-	}
-	if !sc.inGoAway {
-		if wm, ok := sc.writeSched.take(); ok {
-			sc.startFrameWrite(wm)
-			return
+	sc.inFrameScheduleLoop = true
+	for !sc.writingFrameAsync {
+		if sc.needToSendGoAway {
+			sc.needToSendGoAway = false
+			sc.startFrameWrite(FrameWriteRequest{
+				write: &writeGoAway{
+					maxStreamID: sc.maxClientStreamID,
+					code:        sc.goAwayCode,
+				},
+			})
+			continue
 		}
+		if sc.needToSendSettingsAck {
+			sc.needToSendSettingsAck = false
+			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+			continue
+		}
+		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+			if wr, ok := sc.writeSched.Pop(); ok {
+				sc.startFrameWrite(wr)
+				continue
+			}
+		}
+		if sc.needsFrameFlush {
+			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+			continue
+		}
+		break
 	}
-	if sc.needsFrameFlush {
-		sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
-		sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
-		return
-	}
+	sc.inFrameScheduleLoop = false
 }
 
 func (sc *serverConn) goAway(code ErrCode) {
 	sc.serveG.check()
+	var forceCloseIn time.Duration
+	if code != ErrCodeNo {
+		forceCloseIn = 250 * time.Millisecond
+	} else {
+		// TODO: configurable
+		forceCloseIn = 1 * time.Second
+	}
+	sc.goAwayIn(code, forceCloseIn)
+}
+
+func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
+	sc.serveG.check()
 	if sc.inGoAway {
 		return
 	}
-	if code != ErrCodeNo {
-		sc.shutDownIn(250 * time.Millisecond)
-	} else {
-		// TODO: configurable
-		sc.shutDownIn(1 * time.Second)
+	if forceCloseIn != 0 {
+		sc.shutDownIn(forceCloseIn)
 	}
 	sc.inGoAway = true
 	sc.needToSendGoAway = true
@@ -1002,7 +1078,7 @@
 
 func (sc *serverConn) resetStream(se StreamError) {
 	sc.serveG.check()
-	sc.writeFrame(frameWriteMsg{write: se})
+	sc.writeFrame(FrameWriteRequest{write: se})
 	if st, ok := sc.streams[se.StreamID]; ok {
 		st.sentReset = true
 		sc.closeStream(st, se)
@@ -1090,6 +1166,8 @@
 		return sc.processResetStream(f)
 	case *PriorityFrame:
 		return sc.processPriority(f)
+	case *GoAwayFrame:
+		return sc.processGoAway(f)
 	case *PushPromiseFrame:
 		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
 		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
@@ -1115,7 +1193,10 @@
 		// PROTOCOL_ERROR."
 		return ConnectionError(ErrCodeProtocol)
 	}
-	sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
+		return nil
+	}
+	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
 	return nil
 }
 
@@ -1123,7 +1204,14 @@
 	sc.serveG.check()
 	switch {
 	case f.StreamID != 0: // stream-level flow control
-		st := sc.streams[f.StreamID]
+		state, st := sc.state(f.StreamID)
+		if state == stateIdle {
+			// Section 5.1: "Receiving any frame other than HEADERS
+			// or PRIORITY on a stream in this state MUST be
+			// treated as a connection error (Section 5.4.1) of
+			// type PROTOCOL_ERROR."
+			return ConnectionError(ErrCodeProtocol)
+		}
 		if st == nil {
 			// "WINDOW_UPDATE can be sent by a peer that has sent a
 			// frame bearing the END_STREAM flag. This means that a
@@ -1170,11 +1258,18 @@
 		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
 	}
 	st.state = stateClosed
-	sc.curOpenStreams--
-	if sc.curOpenStreams == 0 {
+	if st.isPushed() {
+		sc.curPushedStreams--
+	} else {
+		sc.curClientStreams--
+	}
+	if sc.curClientStreams+sc.curPushedStreams == 0 {
 		sc.setConnState(http.StateIdle)
 	}
 	delete(sc.streams, st.id)
+	if len(sc.streams) == 0 && sc.srv.IdleTimeout != 0 {
+		sc.idleTimer.Reset(sc.srv.IdleTimeout)
+	}
 	if p := st.body; p != nil {
 		// Return any buffered unread bytes worth of conn-level flow control.
 		// See golang.org/issue/16481
@@ -1183,19 +1278,7 @@
 		p.CloseWithError(err)
 	}
 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
-	sc.writeSched.forgetStream(st.id)
-	if st.reqBuf != nil {
-		// Stash this request body buffer (64k) away for reuse
-		// by a future POST/PUT/etc.
-		//
-		// TODO(bradfitz): share on the server? sync.Pool?
-		// Server requires locks and might hurt contention.
-		// sync.Pool might work, or might be worse, depending
-		// on goroutine CPU migrations. (get and put on
-		// separate CPUs).  Maybe a mix of strategies. But
-		// this is an easy win for now.
-		sc.freeRequestBodyBuf = st.reqBuf
-	}
+	sc.writeSched.CloseStream(st.id)
 }
 
 func (sc *serverConn) processSettings(f *SettingsFrame) error {
@@ -1237,7 +1320,7 @@
 	case SettingInitialWindowSize:
 		return sc.processSettingInitialWindowSize(s.Val)
 	case SettingMaxFrameSize:
-		sc.writeSched.maxFrameSize = s.Val
+		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
 	case SettingMaxHeaderListSize:
 		sc.peerMaxHeaderListSize = s.Val
 	default:
@@ -1281,14 +1364,24 @@
 
 func (sc *serverConn) processData(f *DataFrame) error {
 	sc.serveG.check()
+	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
+		return nil
+	}
 	data := f.Data()
 
 	// "If a DATA frame is received whose stream is not in "open"
 	// or "half closed (local)" state, the recipient MUST respond
 	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
 	id := f.Header().StreamID
-	st, ok := sc.streams[id]
-	if !ok || st.state != stateOpen || st.gotTrailerHeader {
+	state, st := sc.state(id)
+	if id == 0 || state == stateIdle {
+		// Section 5.1: "Receiving any frame other than HEADERS
+		// or PRIORITY on a stream in this state MUST be
+		// treated as a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if st == nil || state != stateOpen || st.gotTrailerHeader {
 		// This includes sending a RST_STREAM if the stream is
 		// in stateHalfClosedLocal (which currently means that
 		// the http.Handler returned, so it's done reading &
@@ -1350,6 +1443,25 @@
 	return nil
 }
 
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+	sc.serveG.check()
+	if f.ErrCode != ErrCodeNo {
+		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+	} else {
+		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+	}
+	sc.goAwayIn(ErrCodeNo, 0)
+	// http://tools.ietf.org/html/rfc7540#section-6.8
+	// We should not create any new streams, which means we should disable push.
+	sc.pushEnabled = false
+	return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+	return st.id%2 == 0
+}
+
 // endStream closes a Request.Body's pipe. It is called when a DATA
 // frame says a request body is over (or after trailers).
 func (st *stream) endStream() {
@@ -1379,12 +1491,12 @@
 
 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	sc.serveG.check()
-	id := f.Header().StreamID
+	id := f.StreamID
 	if sc.inGoAway {
 		// Ignore.
 		return nil
 	}
-	// http://http2.github.io/http2-spec/#rfc.section.5.1.1
+	// http://tools.ietf.org/html/rfc7540#section-5.1.1
 	// Streams initiated by a client MUST use odd-numbered stream
 	// identifiers. [...] An endpoint that receives an unexpected
 	// stream identifier MUST respond with a connection error
@@ -1396,8 +1508,7 @@
 	// send a trailer for an open one. If we already have a stream
 	// open, let it process its own HEADERS frame (trailers at this
 	// point, if it's valid).
-	st := sc.streams[f.Header().StreamID]
-	if st != nil {
+	if st := sc.streams[f.StreamID]; st != nil {
 		return st.processTrailerHeaders(f)
 	}
 
@@ -1406,54 +1517,45 @@
 	// endpoint has opened or reserved. [...]  An endpoint that
 	// receives an unexpected stream identifier MUST respond with
 	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
-	if id <= sc.maxStreamID {
+	if id <= sc.maxClientStreamID {
 		return ConnectionError(ErrCodeProtocol)
 	}
-	sc.maxStreamID = id
+	sc.maxClientStreamID = id
 
-	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
-	st = &stream{
-		sc:        sc,
-		id:        id,
-		state:     stateOpen,
-		ctx:       ctx,
-		cancelCtx: cancelCtx,
+	if sc.idleTimer != nil {
+		sc.idleTimer.Stop()
 	}
-	if f.StreamEnded() {
-		st.state = stateHalfClosedRemote
-	}
-	st.cw.Init()
 
-	st.flow.conn = &sc.flow // link to conn-level counter
-	st.flow.add(sc.initialWindowSize)
-	st.inflow.conn = &sc.inflow      // link to conn-level counter
-	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
-	sc.streams[id] = st
-	if f.HasPriority() {
-		adjustStreamPriority(sc.streams, st.id, f.Priority)
-	}
-	sc.curOpenStreams++
-	if sc.curOpenStreams == 1 {
-		sc.setConnState(http.StateActive)
-	}
-	if sc.curOpenStreams > sc.advMaxStreams {
-		// "Endpoints MUST NOT exceed the limit set by their
-		// peer. An endpoint that receives a HEADERS frame
-		// that causes their advertised concurrent stream
-		// limit to be exceeded MUST treat this as a stream
-		// error (Section 5.4.2) of type PROTOCOL_ERROR or
-		// REFUSED_STREAM."
+	// http://tools.ietf.org/html/rfc7540#section-5.1.2
+	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
+	// endpoint that receives a HEADERS frame that causes their
+	// advertised concurrent stream limit to be exceeded MUST treat
+	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+	// or REFUSED_STREAM.
+	if sc.curClientStreams+1 > sc.advMaxStreams {
 		if sc.unackedSettings == 0 {
 			// They should know better.
-			return streamError(st.id, ErrCodeProtocol)
+			return streamError(id, ErrCodeProtocol)
 		}
 		// Assume it's a network race, where they just haven't
 		// received our last SETTINGS update. But actually
 		// this can't happen yet, because we don't yet provide
 		// a way for users to adjust server parameters at
 		// runtime.
-		return streamError(st.id, ErrCodeRefusedStream)
+		return streamError(id, ErrCodeRefusedStream)
+	}
+
+	initialState := stateOpen
+	if f.StreamEnded() {
+		initialState = stateHalfClosedRemote
+	}
+	st := sc.newStream(id, 0, initialState)
+
+	if f.HasPriority() {
+		if err := checkPriority(f.StreamID, f.Priority); err != nil {
+			return err
+		}
+		sc.writeSched.AdjustStream(st.id, f.Priority)
 	}
 
 	rw, req, err := sc.newWriterAndRequest(st, f)
@@ -1471,19 +1573,17 @@
 	if f.Truncated {
 		// Their header list was too long. Send a 431 error.
 		handler = handleHeaderListTooLong
-	} else if err := checkValidHTTP2Request(req); err != nil {
+	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
 		handler = new400Handler(err)
 	}
 
 	// The net/http package sets the read deadline from the
 	// http.Server.ReadTimeout during the TLS handshake, but then
 	// passes the connection off to us with the deadline already
-	// set. Disarm it here after the request headers are read, similar
-	// to how the http1 server works.
-	// Unlike http1, though, we never re-arm it yet, though.
-	// TODO(bradfitz): figure out golang.org/issue/14204
-	// (IdleTimeout) and how this relates. Maybe the default
-	// IdleTimeout is ReadTimeout.
+	// set. Disarm it here after the request headers are read,
+	// similar to how the http1 server works. Here it's
+	// technically more like the http1 Server's ReadHeaderTimeout
+	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
@@ -1522,62 +1622,78 @@
 	return nil
 }
 
-func (sc *serverConn) processPriority(f *PriorityFrame) error {
-	adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+func checkPriority(streamID uint32, p PriorityParam) error {
+	if streamID == p.StreamDep {
+		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+		// Section 5.3.3 says that a stream can depend on one of its dependencies,
+		// so it's only self-dependencies that are forbidden.
+		return streamError(streamID, ErrCodeProtocol)
+	}
 	return nil
 }
 
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
-	st, ok := streams[streamID]
-	if !ok {
-		// TODO: not quite correct (this streamID might
-		// already exist in the dep tree, but be closed), but
-		// close enough for now.
-		return
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+	if sc.inGoAway {
+		return nil
 	}
-	st.weight = priority.Weight
-	parent := streams[priority.StreamDep] // might be nil
-	if parent == st {
-		// if client tries to set this stream to be the parent of itself
-		// ignore and keep going
-		return
+	if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+		return err
+	}
+	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
+	return nil
+}
+
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+	sc.serveG.check()
+	if id == 0 {
+		panic("internal error: cannot create stream with id 0")
 	}
 
-	// section 5.3.3: If a stream is made dependent on one of its
-	// own dependencies, the formerly dependent stream is first
-	// moved to be dependent on the reprioritized stream's previous
-	// parent. The moved dependency retains its weight.
-	for piter := parent; piter != nil; piter = piter.parent {
-		if piter == st {
-			parent.parent = st.parent
-			break
-		}
+	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+	st := &stream{
+		sc:        sc,
+		id:        id,
+		state:     state,
+		ctx:       ctx,
+		cancelCtx: cancelCtx,
 	}
-	st.parent = parent
-	if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
-		for _, openStream := range streams {
-			if openStream != st && openStream.parent == st.parent {
-				openStream.parent = st
-			}
-		}
+	st.cw.Init()
+	st.flow.conn = &sc.flow // link to conn-level counter
+	st.flow.add(sc.initialWindowSize)
+	st.inflow.conn = &sc.inflow      // link to conn-level counter
+	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+	sc.streams[id] = st
+	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+	if st.isPushed() {
+		sc.curPushedStreams++
+	} else {
+		sc.curClientStreams++
 	}
+	if sc.curClientStreams+sc.curPushedStreams == 1 {
+		sc.setConnState(http.StateActive)
+	}
+
+	return st
 }
 
 func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
-	method := f.PseudoValue("method")
-	path := f.PseudoValue("path")
-	scheme := f.PseudoValue("scheme")
-	authority := f.PseudoValue("authority")
+	rp := requestParam{
+		method:    f.PseudoValue("method"),
+		scheme:    f.PseudoValue("scheme"),
+		authority: f.PseudoValue("authority"),
+		path:      f.PseudoValue("path"),
+	}
 
-	isConnect := method == "CONNECT"
+	isConnect := rp.method == "CONNECT"
 	if isConnect {
-		if path != "" || scheme != "" || authority == "" {
+		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
 			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 		}
-	} else if method == "" || path == "" ||
-		(scheme != "https" && scheme != "http") {
+	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
 		// See 8.1.2.6 Malformed Requests and Responses:
 		//
 		// Malformed requests or responses that are detected
@@ -1592,36 +1708,64 @@
 	}
 
 	bodyOpen := !f.StreamEnded()
-	if method == "HEAD" && bodyOpen {
+	if rp.method == "HEAD" && bodyOpen {
 		// HEAD requests can't have bodies
 		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 	}
-	var tlsState *tls.ConnectionState // nil if not scheme https
 
-	if scheme == "https" {
+	rp.header = make(http.Header)
+	for _, hf := range f.RegularFields() {
+		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+	}
+	if rp.authority == "" {
+		rp.authority = rp.header.Get("Host")
+	}
+
+	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if bodyOpen {
+		st.reqBuf = getRequestBodyBuf()
+		req.Body.(*requestBody).pipe = &pipe{
+			b: &fixedBuffer{buf: st.reqBuf},
+		}
+
+		if vv, ok := rp.header["Content-Length"]; ok {
+			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+		} else {
+			req.ContentLength = -1
+		}
+	}
+	return rw, req, nil
+}
+
+type requestParam struct {
+	method                  string
+	scheme, authority, path string
+	header                  http.Header
+}
+
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+
+	var tlsState *tls.ConnectionState // nil if not scheme https
+	if rp.scheme == "https" {
 		tlsState = sc.tlsState
 	}
 
-	header := make(http.Header)
-	for _, hf := range f.RegularFields() {
-		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
-	}
-
-	if authority == "" {
-		authority = header.Get("Host")
-	}
-	needsContinue := header.Get("Expect") == "100-continue"
+	needsContinue := rp.header.Get("Expect") == "100-continue"
 	if needsContinue {
-		header.Del("Expect")
+		rp.header.Del("Expect")
 	}
 	// Merge Cookie headers into one "; "-delimited value.
-	if cookies := header["Cookie"]; len(cookies) > 1 {
-		header.Set("Cookie", strings.Join(cookies, "; "))
+	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+		rp.header.Set("Cookie", strings.Join(cookies, "; "))
 	}
 
 	// Setup Trailers
 	var trailer http.Header
-	for _, v := range header["Trailer"] {
+	for _, v := range rp.header["Trailer"] {
 		for _, key := range strings.Split(v, ",") {
 			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
 			switch key {
@@ -1636,57 +1780,42 @@
 			}
 		}
 	}
-	delete(header, "Trailer")
+	delete(rp.header, "Trailer")
+
+	var url_ *url.URL
+	var requestURI string
+	if rp.method == "CONNECT" {
+		url_ = &url.URL{Host: rp.authority}
+		requestURI = rp.authority // mimic HTTP/1 server behavior
+	} else {
+		var err error
+		url_, err = url.ParseRequestURI(rp.path)
+		if err != nil {
+			return nil, nil, streamError(st.id, ErrCodeProtocol)
+		}
+		requestURI = rp.path
+	}
 
 	body := &requestBody{
 		conn:          sc,
 		stream:        st,
 		needsContinue: needsContinue,
 	}
-	var url_ *url.URL
-	var requestURI string
-	if isConnect {
-		url_ = &url.URL{Host: authority}
-		requestURI = authority // mimic HTTP/1 server behavior
-	} else {
-		var err error
-		url_, err = url.ParseRequestURI(path)
-		if err != nil {
-			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
-		}
-		requestURI = path
-	}
 	req := &http.Request{
-		Method:     method,
+		Method:     rp.method,
 		URL:        url_,
 		RemoteAddr: sc.remoteAddrStr,
-		Header:     header,
+		Header:     rp.header,
 		RequestURI: requestURI,
 		Proto:      "HTTP/2.0",
 		ProtoMajor: 2,
 		ProtoMinor: 0,
 		TLS:        tlsState,
-		Host:       authority,
+		Host:       rp.authority,
 		Body:       body,
 		Trailer:    trailer,
 	}
 	req = requestWithContext(req, st.ctx)
-	if bodyOpen {
-		// Disabled, per golang.org/issue/14960:
-		// st.reqBuf = sc.getRequestBodyBuf()
-		// TODO: remove this 64k of garbage per request (again, but without a data race):
-		buf := make([]byte, initialWindowSize)
-
-		body.pipe = &pipe{
-			b: &fixedBuffer{buf: buf},
-		}
-
-		if vv, ok := header["Content-Length"]; ok {
-			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
-		} else {
-			req.ContentLength = -1
-		}
-	}
 
 	rws := responseWriterStatePool.Get().(*responseWriterState)
 	bwSave := rws.bw
@@ -1702,13 +1831,22 @@
 	return rw, req, nil
 }
 
-func (sc *serverConn) getRequestBodyBuf() []byte {
-	sc.serveG.check()
-	if buf := sc.freeRequestBodyBuf; buf != nil {
-		sc.freeRequestBodyBuf = nil
-		return buf
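+// reqBodyCache is a small, non-blocking free list of request body
+// buffers: get and put fall through to their default cases (allocate,
+// or drop the buffer) whenever the channel is empty or full.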
+var reqBodyCache = make(chan []byte, 8)
+
+func getRequestBodyBuf() []byte {
+	select {
+	case b := <-reqBodyCache:
+		return b
+	default:
+		return make([]byte, initialWindowSize)
 	}
-	return make([]byte, initialWindowSize)
+}
+
+func putRequestBodyBuf(b []byte) {
+	select {
+	case reqBodyCache <- b:
+	default:
+	}
 }
 
 // Run on its own goroutine.
@@ -1718,15 +1856,17 @@
 		rw.rws.stream.cancelCtx()
 		if didPanic {
 			e := recover()
-			// Same as net/http:
-			const size = 64 << 10
-			buf := make([]byte, size)
-			buf = buf[:runtime.Stack(buf, false)]
-			sc.writeFrameFromHandler(frameWriteMsg{
+			sc.writeFrameFromHandler(FrameWriteRequest{
 				write:  handlerPanicRST{rw.rws.stream.id},
 				stream: rw.rws.stream,
 			})
-			sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+			// Same as net/http:
+			if shouldLogPanic(e) {
+				const size = 64 << 10
+				buf := make([]byte, size)
+				buf = buf[:runtime.Stack(buf, false)]
+				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+			}
 			return
 		}
 		rw.handlerDone()
@@ -1757,7 +1897,7 @@
 		// mutates it.
 		errc = errChanPool.Get().(chan error)
 	}
-	if err := sc.writeFrameFromHandler(frameWriteMsg{
+	if err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  headerData,
 		stream: st,
 		done:   errc,
@@ -1780,7 +1920,7 @@
 
 // called from handler goroutines.
 func (sc *serverConn) write100ContinueHeaders(st *stream) {
-	sc.writeFrameFromHandler(frameWriteMsg{
+	sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  write100ContinueHeadersFrame{st.id},
 		stream: st,
 	})
@@ -1796,11 +1936,19 @@
 // called from handler goroutines.
 // Notes that the handler for the given stream ID read n bytes of its body
 // and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
 	sc.serveG.checkNotOn() // NOT on
-	select {
-	case sc.bodyReadCh <- bodyReadMsg{st, n}:
-	case <-sc.doneServing:
+	if n > 0 {
+		select {
+		case sc.bodyReadCh <- bodyReadMsg{st, n}:
+		case <-sc.doneServing:
+		}
+	}
+	if err == io.EOF {
+		if buf := st.reqBuf; buf != nil {
+			st.reqBuf = nil // shouldn't matter; the field is unused elsewhere
+			putRequestBodyBuf(buf)
+		}
 	}
 }
 
@@ -1843,7 +1991,7 @@
 	if st != nil {
 		streamID = st.id
 	}
-	sc.writeFrame(frameWriteMsg{
+	sc.writeFrame(FrameWriteRequest{
 		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
 		stream: st,
 	})
@@ -1858,16 +2006,19 @@
 	}
 }
 
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
 type requestBody struct {
 	stream        *stream
 	conn          *serverConn
-	closed        bool
+	closed        bool  // for use by Close only
+	sawEOF        bool  // for use by Read only
 	pipe          *pipe // non-nil if we have a HTTP entity message body
 	needsContinue bool  // need to send a 100-continue
 }
 
 func (b *requestBody) Close() error {
-	if b.pipe != nil {
+	if b.pipe != nil && !b.closed {
 		b.pipe.BreakWithError(errClosedBody)
 	}
 	b.closed = true
@@ -1879,13 +2030,17 @@
 		b.needsContinue = false
 		b.conn.write100ContinueHeaders(b.stream)
 	}
-	if b.pipe == nil {
+	if b.pipe == nil || b.sawEOF {
 		return 0, io.EOF
 	}
 	n, err = b.pipe.Read(p)
-	if n > 0 {
-		b.conn.noteBodyReadFromHandler(b.stream, n)
+	if err == io.EOF {
+		b.sawEOF = true
 	}
+	if b.conn == nil && inTests {
+		return
+	}
+	b.conn.noteBodyReadFromHandler(b.stream, n, err)
 	return
 }
 
@@ -2123,8 +2278,9 @@
 	if ch == nil {
 		ch = make(chan bool, 1)
 		rws.closeNotifierCh = ch
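+		// Copy cw so the goroutine does not touch rws, which may be
+		// recycled to the pool once the handler returns.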
+		cw := rws.stream.cw
 		go func() {
-			rws.stream.cw.Wait() // wait for close
+			cw.Wait() // wait for close
 			ch <- true
 		}()
 	}
@@ -2220,6 +2376,200 @@
 	responseWriterStatePool.Put(rws)
 }
 
+// Push errors.
+var (
+	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
+	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+// pushOptions is the internal version of http.PushOptions, which we
+// cannot include here because it's only defined in Go 1.8 and later.
+type pushOptions struct {
+	Method string
+	Header http.Header
+}
+
+func (w *responseWriter) push(target string, opts pushOptions) error {
+	st := w.rws.stream
+	sc := st.sc
+	sc.serveG.checkNotOn()
+
+	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+	// http://tools.ietf.org/html/rfc7540#section-6.6
+	if st.isPushed() {
+		return ErrRecursivePush
+	}
+
+	// Default options.
+	if opts.Method == "" {
+		opts.Method = "GET"
+	}
+	if opts.Header == nil {
+		opts.Header = http.Header{}
+	}
+	wantScheme := "http"
+	if w.rws.req.TLS != nil {
+		wantScheme = "https"
+	}
+
+	// Validate the request.
+	u, err := url.Parse(target)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "" {
+		if !strings.HasPrefix(target, "/") {
+			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+		}
+		u.Scheme = wantScheme
+		u.Host = w.rws.req.Host
+	} else {
+		if u.Scheme != wantScheme {
+			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+		}
+		if u.Host == "" {
+			return errors.New("URL must have a host")
+		}
+	}
+	for k := range opts.Header {
+		if strings.HasPrefix(k, ":") {
+			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+		}
+		// These headers are meaningful only if the request has a body,
+		// but PUSH_PROMISE requests cannot have a body.
+		// http://tools.ietf.org/html/rfc7540#section-8.2
+		// Also disallow Host, since the promised URL must be absolute.
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.wantStartPushCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
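
// Illustrative sketch, not part of this patch: how a handler reaches the
// push path above through Go 1.8's http.Pusher interface, which wraps the
// internal pushOptions/push pair. The handler and asset path are
// hypothetical; push errors are advisory and the main response is still
// written if Push fails. cert.pem/key.pem are placeholder paths.
package main

import (
	"io"
	"log"
	"net/http"
)

func pushingHandler(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		if err := pusher.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	io.WriteString(w, "<html>...</html>")
}

func main() {
	http.HandleFunc("/", pushingHandler)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
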
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+		// A server that is unable to establish a new stream identifier can send a GOAWAY
+		// frame so that the client is forced to open a new connection for new streams.
+		if sc.maxPushPromiseID+2 >= 1<<31 {
+			sc.goAwayIn(ErrCodeNo, 0)
+			return 0, ErrPushLimitReached
+		}
+		sc.maxPushPromiseID += 2
+		promisedID := sc.maxPushPromiseID
+
+		// http://tools.ietf.org/html/rfc7540#section-8.2.
+		// Strictly speaking, the new stream should start in "reserved (local)", then
+		// transition to "half closed (remote)" after sending the initial HEADERS, but
+		// we start in "half closed (remote)" for simplicity.
+		// See further comments at the definition of stateHalfClosedRemote.
+		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
+			method:    msg.method,
+			scheme:    msg.url.Scheme,
+			authority: msg.url.Host,
+			path:      msg.url.RequestURI(),
+			header:    msg.header,
+		})
+		if err != nil {
+			// Should not happen, since we've already validated msg.url.
+			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+		}
+
+		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+		return promisedID, nil
+	}
+
+	sc.writeFrame(FrameWriteRequest{
+		write: &writePushPromise{
+			streamID:           msg.parent.id,
+			method:             msg.method,
+			url:                msg.url,
+			h:                  msg.header,
+			allocatePromisedID: allocatePromisedID,
+		},
+		stream: msg.parent,
+		done:   msg.done,
+	})
+}
+
 // foreachHeaderElement splits v according to the "#rule" construction
 // in RFC 2616 section 2.1 and calls fn for each non-empty element.
 func foreachHeaderElement(v string, fn func(string)) {
@@ -2247,16 +2597,16 @@
 	"Upgrade",
 }
 
-// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// checkValidHTTP2RequestHeaders checks whether h contains only headers valid in an HTTP/2 request,
 // per RFC 7540 Section 8.1.2.2.
 // The returned error is reported to users.
-func checkValidHTTP2Request(req *http.Request) error {
-	for _, h := range connHeaders {
-		if _, ok := req.Header[h]; ok {
-			return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+	for _, k := range connHeaders {
+		if _, ok := h[k]; ok {
+			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
 		}
 	}
-	te := req.Header["Te"]
+	te := h["Te"]
 	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
 		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
 	}
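
// A trimmed-down, self-contained copy of the check above, for illustration
// only (the real connHeaders list lives in this file and is assumed here);
// it shows the error a client sees when it sends a connection-specific
// header over HTTP/2.
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func checkHeaders(h http.Header) error {
	for _, k := range []string{"Connection", "Keep-Alive", "Proxy-Connection", "Transfer-Encoding", "Upgrade"} {
		if _, ok := h[k]; ok {
			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
		}
	}
	if te := h["Te"]; len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
	}
	return nil
}

func main() {
	fmt.Println(checkHeaders(http.Header{"Connection": {"keep-alive"}})) // rejected
	fmt.Println(checkHeaders(http.Header{"Te": {"trailers"}}))           // <nil>
}
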
@@ -2303,3 +2653,28 @@
 	"Transfer-Encoding":   true,
 	"Www-Authenticate":    true,
 }
+
+// h1ServerShutdownChan returns a channel that will be closed when the
+// provided *http.Server wants to shut down.
+//
+// This is a somewhat hacky way to get at http1 innards. It works
+// when the http2 code is bundled into the net/http package in the
+// standard library. The alternatives ended up making the cmd/go tool
+// depend on http Servers. This is the lightest option for now.
+// This is tested via the TestServeShutdown* tests in net/http.
+func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
+	if fn := testh1ServerShutdownChan; fn != nil {
+		return fn(hs)
+	}
+	var x interface{} = hs
+	type I interface {
+		getDoneChan() <-chan struct{}
+	}
+	if hs, ok := x.(I); ok {
+		return hs.getDoneChan()
+	}
+	return nil
+}
+
+// optional test hook for h1ServerShutdownChan.
+var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
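
// Standalone sketch, not part of this patch, of the interface-assertion
// probe used above: a locally declared interface can test at runtime whether
// a value's concrete type has a given method, without naming that type. This
// works for h1ServerShutdownChan because, once bundled into net/http, the
// probe and *http.Server live in the same package, so the unexported method
// is visible. Names below are hypothetical.
package main

import "fmt"

type hasDoneChan interface {
	getDoneChan() <-chan struct{}
}

func doneChanOf(v interface{}) <-chan struct{} {
	if d, ok := v.(hasDoneChan); ok {
		return d.getDoneChan()
	}
	return nil // capability absent; callers must tolerate a nil channel
}

func main() {
	fmt.Println(doneChanOf("no such method") == nil) // true
}
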
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index b939fed..8f5f844 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -199,6 +199,7 @@
 	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
 	readErr     error // sticky read error; owned by transportResponseBody.Read
 	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu
 
 	peerReset chan struct{} // closed on peer reset
 	resetErr  error         // populated before peerReset is closed
@@ -226,15 +227,26 @@
 	}
 	select {
 	case <-req.Cancel:
+		cs.cancelStream()
 		cs.bufPipe.CloseWithError(errRequestCanceled)
-		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
 	case <-ctx.Done():
+		cs.cancelStream()
 		cs.bufPipe.CloseWithError(ctx.Err())
-		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
 	case <-cs.done:
 	}
 }
 
+func (cs *clientStream) cancelStream() {
+	cs.cc.mu.Lock()
+	didReset := cs.didReset
+	cs.didReset = true
+	cs.cc.mu.Unlock()
+
+	if !didReset {
+		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+	}
+}
+
 // checkResetOrDone reports any error sent in a RST_STREAM frame by the
 // server, or errStreamClosed if the stream is complete.
 func (cs *clientStream) checkResetOrDone() error {
@@ -635,39 +647,17 @@
 	return nil
 }
 
-func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
-	body = req.Body
-	if body == nil {
-		return nil, 0
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+	if req.Body == nil {
+		return 0
 	}
 	if req.ContentLength != 0 {
-		return req.Body, req.ContentLength
+		return req.ContentLength
 	}
-	// Don't try to sniff the size if they're doing an expect
-	// request (Issue 16002):
-	if req.Header.Get("Expect") == "100-continue" {
-		return req.Body, -1
-	}
-
-	// We have a body but a zero content length. Test to see if
-	// it's actually zero or just unset.
-	var buf [1]byte
-	n, rerr := body.Read(buf[:])
-	if rerr != nil && rerr != io.EOF {
-		return errorReader{rerr}, -1
-	}
-	if n == 1 {
-		// Oh, guess there is data in this Body Reader after all.
-		// The ContentLength field just wasn't set.
-		// Stitch the Body back together again, re-attaching our
-		// consumed byte.
-		if rerr == io.EOF {
-			return bytes.NewReader(buf[:]), 1
-		}
-		return io.MultiReader(bytes.NewReader(buf[:]), body), -1
-	}
-	// Body is actually zero bytes.
-	return nil, 0
+	return -1
 }
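
// Quick demonstration, not part of the patch, of the three cases the new
// helper distinguishes; the helper is copied verbatim and the requests are
// constructed directly for brevity.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func actualContentLength(req *http.Request) int64 {
	if req.Body == nil {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func main() {
	noBody := &http.Request{}
	known := &http.Request{Body: ioutil.NopCloser(strings.NewReader("hi")), ContentLength: 2}
	unknown := &http.Request{Body: ioutil.NopCloser(strings.NewReader("hi"))}
	fmt.Println(actualContentLength(noBody), actualContentLength(known), actualContentLength(unknown)) // 0 2 -1
}
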
 
 func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -691,8 +681,9 @@
 		return nil, errClientConnUnusable
 	}
 
-	body, contentLen := bodyAndLength(req)
+	body := req.Body
 	hasBody := body != nil
+	contentLen := actualContentLength(req)
 
 	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
 	var requestedGzip bool
@@ -1687,9 +1678,10 @@
 			cc.bw.Flush()
 			cc.wmu.Unlock()
 		}
+		didReset := cs.didReset
 		cc.mu.Unlock()
 
-		if len(data) > 0 {
+		if len(data) > 0 && !didReset {
 			if _, err := cs.bufPipe.Write(data); err != nil {
 				rl.endStreamError(cs, err)
 				return err
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 27ef0dd..1c135fd 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -9,6 +9,7 @@
 	"fmt"
 	"log"
 	"net/http"
+	"net/url"
 	"time"
 
 	"golang.org/x/net/http2/hpack"
@@ -18,6 +19,11 @@
 // writeFramer is implemented by any type that is used to write frames.
 type writeFramer interface {
 	writeFrame(writeContext) error
+
+	// staysWithinBuffer reports whether this writer promises that
+	// it will only write less than or equal to size bytes, and it
+	// won't Flush the write context.
+	staysWithinBuffer(size int) bool
 }
 
 // writeContext is the interface needed by the various frame writer
@@ -62,8 +68,16 @@
 	return ctx.Flush()
 }
 
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
 type writeSettings []Setting
 
+func (s writeSettings) staysWithinBuffer(max int) bool {
+	const settingSize = 6 // uint16 + uint32
+	return frameHeaderLen+settingSize*len(s) <= max
+}
+
 func (s writeSettings) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteSettings([]Setting(s)...)
 }
@@ -83,6 +97,8 @@
 	return err
 }
 
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
 type writeData struct {
 	streamID  uint32
 	p         []byte
@@ -97,6 +113,10 @@
 	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
 }
 
+func (w *writeData) staysWithinBuffer(max int) bool {
+	return frameHeaderLen+len(w.p) <= max
+}
+
 // handlerPanicRST is the message sent from handler goroutines when
 // the handler panics.
 type handlerPanicRST struct {
@@ -107,22 +127,57 @@
 	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
 }
 
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 func (se StreamError) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
 }
 
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 type writePingAck struct{ pf *PingFrame }
 
 func (w writePingAck) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WritePing(true, w.pf.Data)
 }
 
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
 type writeSettingsAck struct{}
 
 func (writeSettingsAck) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteSettingsAck()
 }
 
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+	// that all peers must support (16KB). Later we could care
+	// more and send larger frames if the peer advertised it, but
+	// there's little point. Most headers are small anyway (so we
+	// generally won't have CONTINUATION frames), and extra frames
+	// only waste 9 bytes anyway.
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+			return err
+		}
+		first = false
+	}
+	return nil
+}
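
// Standalone trace, not part of the patch, of the splitting loop above:
// 40000 bytes of header block at the 16KB minimum MAX_FRAME_SIZE yield
// fragments of 16384, 16384, and 7232 bytes, with firstFrag/lastFrag
// marking the initial HEADERS frame and the final CONTINUATION frame.
package main

import "fmt"

func main() {
	const maxFrameSize = 16384
	headerBlock := make([]byte, 40000)
	first := true
	for len(headerBlock) > 0 {
		frag := headerBlock
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		headerBlock = headerBlock[len(frag):]
		fmt.Printf("firstFrag=%v lastFrag=%v len=%d\n", first, len(headerBlock) == 0, len(frag))
		first = false
	}
}
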
+
 // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
 // for HTTP response headers or trailers from a server handler.
 type writeResHeaders struct {
@@ -144,6 +199,17 @@
 	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
 }
 
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+	// TODO: this is a common one. It'd be nice to return true
+	// here and get into the fast path if we could be clever and
+	// calculate the size fast enough, or at least a conservative
+	// upper bound that usually fires. (Maybe if w.h and
+	// w.trailers are nil, so we don't need to enumerate it.)
+	// Otherwise I'm afraid that just calculating the length to
+	// answer this question would be slower than the ~2µs benefit.
+	return false
+}
+
 func (w *writeResHeaders) writeFrame(ctx writeContext) error {
 	enc, buf := ctx.HeaderEncoder()
 	buf.Reset()
@@ -169,39 +235,69 @@
 		panic("unexpected empty hpack")
 	}
 
-	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
-	// that all peers must support (16KB). Later we could care
-	// more and send larger frames if the peer advertised it, but
-	// there's little point. Most headers are small anyway (so we
-	// generally won't have CONTINUATION frames), and extra frames
-	// only waste 9 bytes anyway.
-	const maxFrameSize = 16384
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
 
-	first := true
-	for len(headerBlock) > 0 {
-		frag := headerBlock
-		if len(frag) > maxFrameSize {
-			frag = frag[:maxFrameSize]
-		}
-		headerBlock = headerBlock[len(frag):]
-		endHeaders := len(headerBlock) == 0
-		var err error
-		if first {
-			first = false
-			err = ctx.Framer().WriteHeaders(HeadersFrameParam{
-				StreamID:      w.streamID,
-				BlockFragment: frag,
-				EndStream:     w.endStream,
-				EndHeaders:    endHeaders,
-			})
-		} else {
-			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
-		}
-		if err != nil {
-			return err
-		}
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WriteHeaders(HeadersFrameParam{
+			StreamID:      w.streamID,
+			BlockFragment: frag,
+			EndStream:     w.endStream,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
 	}
-	return nil
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+	streamID uint32   // pusher stream
+	method   string   // for :method
+	url      *url.URL // for :scheme, :authority, :path
+	h        http.Header
+
+	// Creates an ID for a pushed stream. This runs on serveG just before
+	// the frame is written. The returned ID is copied to promisedID.
+	allocatePromisedID func() (uint32, error)
+	promisedID         uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+	// TODO: see writeResHeaders.staysWithinBuffer
+	return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+
+	encKV(enc, ":method", w.method)
+	encKV(enc, ":scheme", w.url.Scheme)
+	encKV(enc, ":authority", w.url.Host)
+	encKV(enc, ":path", w.url.RequestURI())
+	encodeHeaders(enc, w.h, nil)
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 {
+		panic("unexpected empty hpack")
+	}
+
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WritePushPromise(PushPromiseParam{
+			StreamID:      w.streamID,
+			PromiseID:     w.promisedID,
+			BlockFragment: frag,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+	}
 }
 
 type write100ContinueHeadersFrame struct {
@@ -220,15 +316,24 @@
 	})
 }
 
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+	// Sloppy but conservative:
+	return 9+2*(len(":status")+len("100")) <= max
+}
+
 type writeWindowUpdate struct {
 	streamID uint32 // or 0 for conn-level
 	n        uint32
 }
 
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
 }
 
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
 func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
 	if keys == nil {
 		sorter := sorterPool.Get().(*sorter)
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index c24316c..9f3e1b3 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -6,14 +6,51 @@
 
 import "fmt"
 
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+	// OpenStream opens a new stream in the write scheduler.
+	// It is illegal to call this with streamID=0 or with a streamID that is
+	// already open -- the call may panic.
+	OpenStream(streamID uint32, options OpenStreamOptions)
+
+	// CloseStream closes a stream in the write scheduler. Any frames queued on
+	// this stream should be discarded. It is illegal to call this on a stream
+	// that is not open -- the call may panic.
+	CloseStream(streamID uint32)
+
+	// AdjustStream adjusts the priority of the given stream. This may be called
+	// on a stream that has not yet been opened or has been closed. Note that
+	// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+	// https://tools.ietf.org/html/rfc7540#section-5.1
+	AdjustStream(streamID uint32, priority PriorityParam)
+
+	// Push queues a frame in the scheduler.
+	Push(wr FrameWriteRequest)
+
+	// Pop dequeues the next frame to write. Returns false if no frames can
+	// be written. Frames with a given wr.StreamID() are Pop'd in the same
+	// order they are Push'd.
+	Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+	// PusherID is zero if the stream was initiated by the client. Otherwise,
+	// PusherID names the stream that pushed the newly opened stream.
+	PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
 	// write is the interface value that does the writing, once the
-	// writeScheduler (below) has decided to select this frame
-	// to write. The write functions are all defined in write.go.
+	// WriteScheduler has selected this frame to write. The write
+	// functions are all defined in write.go.
 	write writeFramer
 
-	stream *stream // used for prioritization. nil for non-stream frames.
+	// stream is the stream on which this frame will be written.
+	// nil for non-stream frames like PING and SETTINGS.
+	stream *stream
 
 	// done, if non-nil, must be a buffered channel with space for
 	// 1 message and is sent the return value from write (or an
@@ -21,263 +58,166 @@
 	done chan error
 }
 
-// for debugging only:
-func (wm frameWriteMsg) String() string {
-	var streamID uint32
-	if wm.stream != nil {
-		streamID = wm.stream.id
-	}
-	var des string
-	if s, ok := wm.write.(fmt.Stringer); ok {
-		des = s.String()
-	} else {
-		des = fmt.Sprintf("%T", wm.write)
-	}
-	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
-	// zero are frames not associated with a specific stream.
-	// They're sent before any stream-specific freams.
-	zero writeQueue
-
-	// maxFrameSize is the maximum size of a DATA frame
-	// we'll write. Must be non-zero and between 16K-16M.
-	maxFrameSize uint32
-
-	// sq contains the stream-specific queues, keyed by stream ID.
-	// when a stream is idle, it's deleted from the map.
-	sq map[uint32]*writeQueue
-
-	// canSend is a slice of memory that's reused between frame
-	// scheduling decisions to hold the list of writeQueues (from sq)
-	// which have enough flow control data to send. After canSend is
-	// built, the best is selected.
-	canSend []*writeQueue
-
-	// pool of empty queues for reuse.
-	queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
-	if len(q.s) != 0 {
-		panic("queue must be empty")
-	}
-	ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
-	ln := len(ws.queuePool)
-	if ln == 0 {
-		return new(writeQueue)
-	}
-	q := ws.queuePool[ln-1]
-	ws.queuePool = ws.queuePool[:ln-1]
-	return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
-	st := wm.stream
-	if st == nil {
-		ws.zero.push(wm)
-	} else {
-		ws.streamQueue(st.id).push(wm)
-	}
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
-	if q, ok := ws.sq[streamID]; ok {
-		return q
-	}
-	if ws.sq == nil {
-		ws.sq = make(map[uint32]*writeQueue)
-	}
-	q := ws.getEmptyQueue()
-	ws.sq[streamID] = q
-	return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
-	if ws.maxFrameSize == 0 {
-		panic("internal error: ws.maxFrameSize not initialized or invalid")
-	}
-
-	// If there any frames not associated with streams, prefer those first.
-	// These are usually SETTINGS, etc.
-	if !ws.zero.empty() {
-		return ws.zero.shift(), true
-	}
-	if len(ws.sq) == 0 {
-		return
-	}
-
-	// Next, prioritize frames on streams that aren't DATA frames (no cost).
-	for id, q := range ws.sq {
-		if q.firstIsNoCost() {
-			return ws.takeFrom(id, q)
-		}
-	}
-
-	// Now, all that remains are DATA frames with non-zero bytes to
-	// send. So pick the best one.
-	if len(ws.canSend) != 0 {
-		panic("should be empty")
-	}
-	for _, q := range ws.sq {
-		if n := ws.streamWritableBytes(q); n > 0 {
-			ws.canSend = append(ws.canSend, q)
-		}
-	}
-	if len(ws.canSend) == 0 {
-		return
-	}
-	defer ws.zeroCanSend()
-
-	// TODO: find the best queue
-	q := ws.canSend[0]
-
-	return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is defered from take.
-func (ws *writeScheduler) zeroCanSend() {
-	for i := range ws.canSend {
-		ws.canSend[i] = nil
-	}
-	ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
-	wm := q.head()
-	ret := wm.stream.flow.available() // max we can write
-	if ret == 0 {
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+	if wr.stream == nil {
 		return 0
 	}
-	if int32(ws.maxFrameSize) < ret {
-		ret = int32(ws.maxFrameSize)
-	}
-	if ret == 0 {
-		panic("internal error: ws.maxFrameSize not initialized or invalid")
-	}
-	wd := wm.write.(*writeData)
-	if len(wd.p) < int(ret) {
-		ret = int32(len(wd.p))
-	}
-	return ret
+	return wr.stream.id
 }
 
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
-	wm = q.head()
-	// If the first item in this queue costs flow control tokens
-	// and we don't have enough, write as much as we can.
-	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
-		allowed := wm.stream.flow.available() // max we can write
-		if allowed == 0 {
-			// No quota available. Caller can try the next stream.
-			return frameWriteMsg{}, false
-		}
-		if int32(ws.maxFrameSize) < allowed {
-			allowed = int32(ws.maxFrameSize)
-		}
-		// TODO: further restrict the allowed size, because even if
-		// the peer says it's okay to write 16MB data frames, we might
-		// want to write smaller ones to properly weight competing
-		// streams' priorities.
-
-		if len(wd.p) > int(allowed) {
-			wm.stream.flow.take(allowed)
-			chunk := wd.p[:allowed]
-			wd.p = wd.p[allowed:]
-			// Make up a new write message of a valid size, rather
-			// than shifting one off the queue.
-			return frameWriteMsg{
-				stream: wm.stream,
-				write: &writeData{
-					streamID: wd.streamID,
-					p:        chunk,
-					// even if the original had endStream set, there
-					// arebytes remaining because len(wd.p) > allowed,
-					// so we know endStream is false:
-					endStream: false,
-				},
-				// our caller is blocking on the final DATA frame, not
-				// these intermediates, so no need to wait:
-				done: nil,
-			}, true
-		}
-		wm.stream.flow.take(int32(len(wd.p)))
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+	if wd, ok := wr.write.(*writeData); ok {
+		return len(wd.p)
 	}
-
-	q.shift()
-	if q.empty() {
-		ws.putEmptyQueue(q)
-		delete(ws.sq, id)
-	}
-	return wm, true
+	return 0
 }
 
-func (ws *writeScheduler) forgetStream(id uint32) {
-	q, ok := ws.sq[id]
-	if !ok {
-		return
-	}
-	delete(ws.sq, id)
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+	var empty FrameWriteRequest
 
-	// But keep it for others later.
-	for i := range q.s {
-		q.s[i] = frameWriteMsg{}
+	// Non-DATA frames are always consumed whole.
+	wd, ok := wr.write.(*writeData)
+	if !ok || len(wd.p) == 0 {
+		return wr, empty, 1
 	}
-	q.s = q.s[:0]
-	ws.putEmptyQueue(q)
+
+	// Might need to split after applying limits.
+	allowed := wr.stream.flow.available()
+	if n < allowed {
+		allowed = n
+	}
+	if wr.stream.sc.maxFrameSize < allowed {
+		allowed = wr.stream.sc.maxFrameSize
+	}
+	if allowed <= 0 {
+		return empty, empty, 0
+	}
+	if len(wd.p) > int(allowed) {
+		wr.stream.flow.take(allowed)
+		consumed := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID: wd.streamID,
+				p:        wd.p[:allowed],
+				// Even if the original had endStream set, there
+				// are bytes remaining because len(wd.p) > allowed,
+				// so we know endStream is false.
+				endStream: false,
+			},
+			// Our caller is blocking on the final DATA frame, not
+			// this intermediate frame, so no need to wait.
+			done: nil,
+		}
+		rest := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID:  wd.streamID,
+				p:         wd.p[allowed:],
+				endStream: wd.endStream,
+			},
+			done: wr.done,
+		}
+		return consumed, rest, 2
+	}
+
+	// The frame is consumed whole.
+	// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+	wr.stream.flow.take(int32(len(wd.p)))
+	return wr, empty, 1
 }
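
// Self-contained analog, not part of the patch, of Consume's three-way
// result: a plain int32 budget stands in for the stream's flow-control
// window and max frame size.
package main

import "fmt"

func consume(p []byte, budget int32) (consumed, rest []byte, n int) {
	if len(p) == 0 {
		return p, nil, 1 // non-DATA/empty frames cost nothing and pass through whole
	}
	if budget <= 0 {
		return nil, nil, 0 // no window: nothing can be written yet
	}
	if int32(len(p)) > budget {
		return p[:budget], p[budget:], 2 // split at the window boundary
	}
	return p, nil, 1 // window covers the frame: consumed whole
}

func main() {
	data := make([]byte, 100)
	head, rest, n := consume(data, 64)
	fmt.Println(n, len(head), len(rest)) // 2 64 36
}
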
 
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+	var streamID uint32
+	if wr.stream != nil {
+		streamID = wr.stream.id
+	}
+	var des string
+	if s, ok := wr.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		des = fmt.Sprintf("%T", wr.write)
+	}
+	return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", streamID, wr.done != nil, des)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
 type writeQueue struct {
-	s []frameWriteMsg
+	s []FrameWriteRequest
 }
 
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
 func (q *writeQueue) empty() bool { return len(q.s) == 0 }
 
-func (q *writeQueue) push(wm frameWriteMsg) {
-	q.s = append(q.s, wm)
+func (q *writeQueue) push(wr FrameWriteRequest) {
+	q.s = append(q.s, wr)
 }
 
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
+func (q *writeQueue) shift() FrameWriteRequest {
 	if len(q.s) == 0 {
 		panic("invalid use of queue")
 	}
-	return q.s[0]
-}
-
-func (q *writeQueue) shift() frameWriteMsg {
-	if len(q.s) == 0 {
-		panic("invalid use of queue")
-	}
-	wm := q.s[0]
+	wr := q.s[0]
 	// TODO: less copy-happy queue.
 	copy(q.s, q.s[1:])
-	q.s[len(q.s)-1] = frameWriteMsg{}
+	q.s[len(q.s)-1] = FrameWriteRequest{}
 	q.s = q.s[:len(q.s)-1]
-	return wm
+	return wr
 }
 
-func (q *writeQueue) firstIsNoCost() bool {
-	if df, ok := q.s[0].write.(*writeData); ok {
-		return len(df.p) == 0
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
+	if len(q.s) == 0 {
+		return FrameWriteRequest{}, false
 	}
-	return true
+	consumed, rest, numresult := q.s[0].Consume(n)
+	switch numresult {
+	case 0:
+		return FrameWriteRequest{}, false
+	case 1:
+		q.shift()
+	case 2:
+		q.s[0] = rest
+	}
+	return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
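+	// Zero the stale entries first so the pooled queue doesn't keep old
+	// FrameWriteRequests (and the streams they reference) reachable.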
+	for i := range q.s {
+		q.s[i] = FrameWriteRequest{}
+	}
+	q.s = q.s[:0]
+	*p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+	ln := len(*p)
+	if ln == 0 {
+		return new(writeQueue)
+	}
+	x := ln - 1
+	q := (*p)[x]
+	(*p)[x] = nil
+	*p = (*p)[:x]
+	return q
 }
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 0000000..40108b0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,444 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+	"sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+	// MaxClosedNodesInTree controls the maximum number of closed streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   "It is possible for a stream to become closed while prioritization
+	//   information ... is in transit. ... This potentially creates suboptimal
+	//   prioritization, since the stream could be given a priority that is
+	//   different from what is intended. To avoid these problems, an endpoint
+	//   SHOULD retain stream prioritization state for a period after streams
+	//   become closed. The longer state is retained, the lower the chance that
+	//   streams are assigned incorrect or default priority values."
+	MaxClosedNodesInTree int
+
+	// MaxIdleNodesInTree controls the maximum number of idle streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   Similarly, streams that are in the "idle" state can be assigned
+	//   priority or become a parent of other streams. This allows for the
+	//   creation of a grouping node in the dependency tree, which enables
+	//   more flexible expressions of priority. Idle streams begin with a
+	//   default priority (Section 5.3.5).
+	MaxIdleNodesInTree int
+
+	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
+	// data is delivered in priority order. This works around a race where
+	// stream B depends on stream A and both streams are about to call Write
+	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+	// write as much data from B as possible, but this is suboptimal because A
+	// is a higher-priority stream. With throttling enabled, we write a small
+	// amount of data from B to minimize the amount of bandwidth that B can
+	// steal from A.
+	ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteScheduler{
+		nodes:                make(map[uint32]*priorityNode),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
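
// Hedged usage sketch, not part of the patch: wiring the priority scheduler
// into a server. It assumes the http2.Server.NewWriteScheduler hook that
// accompanies this API; cert.pem/key.pem are placeholder paths.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil) // nil selects the defaults above
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
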
+
+type priorityNodeState int
+
+const (
+	priorityNodeOpen priorityNodeState = iota
+	priorityNodeClosed
+	priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+	q            writeQueue        // queue of pending frames to write
+	id           uint32            // id of the stream, or 0 for the root of the tree
+	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeState // open | closed | idle
+	bytes        int64             // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNode
+	kids       *priorityNode // start of the kids list
+	prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNode) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this funcion returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpen)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
+	w := n.kids.weight
+	needSort := false
+	for k := n.kids.next; k != nil; k = k.next {
+		if k.weight != w {
+			needSort = true
+			break
+		}
+	}
+	if !needSort {
+		for k := n.kids; k != nil; k = k.next {
+			if k.walkReadyInOrder(openParent, tmp, f) {
+				return true
+			}
+		}
+		return false
+	}
+
+	// Uncommon case: sort the child nodes. We remove the kids from the parent,
+	// then re-insert after sorting so we can reuse tmp for future sort calls.
+	*tmp = (*tmp)[:0]
+	for n.kids != nil {
+		*tmp = append(*tmp, n.kids)
+		n.kids.setParent(nil)
+	}
+	sort.Sort(sortPriorityNodeSiblings(*tmp))
+	for i := len(*tmp) - 1; i >= 0; i-- {
+		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+	}
+	for k := n.kids; k != nil; k = k.next {
+		if k.walkReadyInOrder(openParent, tmp, f) {
+			return true
+		}
+	}
+	return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+	// Prefer the subtree that has sent fewer bytes relative to its weight.
+	// See sections 5.3.2 and 5.3.4.
+	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+	if bi == 0 && bk == 0 {
+		return wi >= wk
+	}
+	if bk == 0 {
+		return false
+	}
+	return bi/bk <= wi/wk
+}
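
// Worked example, not part of the patch, of the comparison above: subtree i
// has sent three times the bytes of subtree k but carries four times its
// weight, so i is still "behind" proportionally and sorts first.
package main

import "fmt"

func less(wi, bi, wk, bk float64) bool {
	if bi == 0 && bk == 0 {
		return wi >= wk
	}
	if bk == 0 {
		return false
	}
	return bi/bk <= wi/wk
}

func main() {
	// weight+1 = 32 vs 8; subtreeBytes = 3000 vs 1000.
	fmt.Println(less(32, 3000, 8, 1000)) // true: 3000/1000 = 3 <= 32/8 = 4
}
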
+
+type priorityWriteScheduler struct {
+	// root is the root of the priority tree, where root.id = 0.
+	// The root queues control frames that are not associated with any stream.
+	root priorityNode
+
+	// nodes maps stream ids to priority tree nodes.
+	nodes map[uint32]*priorityNode
+
+	// maxID is the maximum stream id in nodes.
+	maxID uint32
+
+	// lists of nodes that have been closed or are idle, but are kept in
+	// the tree for improved prioritization. When the lengths exceed either
+	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+	closedNodes, idleNodes []*priorityNode
+
+	// From the config.
+	maxClosedNodesInTree int
+	maxIdleNodesInTree   int
+	writeThrottleLimit   int32
+	enableWriteThrottle  bool
+
+	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+	tmp []*priorityNode
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+	// The stream may currently be idle, but must not already be open or closed.
+	if curr := ws.nodes[streamID]; curr != nil {
+		if curr.state != priorityNodeIdle {
+			panic(fmt.Sprintf("stream %d already opened", streamID))
+		}
+		curr.state = priorityNodeOpen
+		return
+	}
+
+	// RFC 7540, Section 5.3.5:
+	//  "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+	//  Pushed streams initially depend on their associated stream. In both cases,
+	//  streams are assigned a default weight of 16."
+	parent := ws.nodes[options.PusherID]
+	if parent == nil {
+		parent = &ws.root
+	}
+	n := &priorityNode{
+		q:      *ws.queuePool.get(),
+		id:     streamID,
+		weight: priorityDefaultWeight,
+		state:  priorityNodeOpen,
+	}
+	n.setParent(parent)
+	ws.nodes[streamID] = n
+	if streamID > ws.maxID {
+		ws.maxID = streamID
+	}
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+	if streamID == 0 {
+		panic("violation of WriteScheduler interface: cannot close stream 0")
+	}
+	if ws.nodes[streamID] == nil {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+	}
+	if ws.nodes[streamID].state != priorityNodeOpen {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+	}
+
+	n := ws.nodes[streamID]
+	n.state = priorityNodeClosed
+	n.addBytes(-n.bytes)
+
+	q := n.q
+	ws.queuePool.put(&q)
+	n.q.s = nil
+	if ws.maxClosedNodesInTree > 0 {
+		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+	} else {
+		ws.removeNode(n)
+	}
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+	if streamID == 0 {
+		panic("adjustPriority on root")
+	}
+
+	// If streamID does not exist, there are two cases:
+	// - A closed stream that has been removed (this will have ID <= maxID)
+	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
+	n := ws.nodes[streamID]
+	if n == nil {
+		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+			return
+		}
+		ws.maxID = streamID
+		n = &priorityNode{
+			q:      *ws.queuePool.get(),
+			id:     streamID,
+			weight: priorityDefaultWeight,
+			state:  priorityNodeIdle,
+		}
+		n.setParent(&ws.root)
+		ws.nodes[streamID] = n
+		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+	}
+
+	// Section 5.3.1: A dependency on a stream that is not currently in the tree
+	// results in that stream being given a default priority (Section 5.3.5).
+	parent := ws.nodes[priority.StreamDep]
+	if parent == nil {
+		n.setParent(&ws.root)
+		n.weight = priorityDefaultWeight
+		return
+	}
+
+	// Ignore if the client tries to make a node its own parent.
+	if n == parent {
+		return
+	}
+
+	// Section 5.3.3:
+	//   "If a stream is made dependent on one of its own dependencies, the
+	//   formerly dependent stream is first moved to be dependent on the
+	//   reprioritized stream's previous parent. The moved dependency retains
+	//   its weight."
+	//
+	// That is: if parent depends on n, move parent to depend on n.parent.
+	for x := parent.parent; x != nil; x = x.parent {
+		if x == n {
+			parent.setParent(n.parent)
+			break
+		}
+	}
+
+	// Section 5.3.3: The exclusive flag causes the stream to become the sole
+	// dependency of its parent stream, causing other dependencies to become
+	// dependent on the exclusive stream.
+	if priority.Exclusive {
+		k := parent.kids
+		for k != nil {
+			next := k.next
+			if k != n {
+				k.setParent(n)
+			}
+			k = next
+		}
+	}
+
+	n.setParent(parent)
+	n.weight = priority.Weight
+}
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+	var n *priorityNode
+	if id := wr.StreamID(); id == 0 {
+		n = &ws.root
+	} else {
+		n = ws.nodes[id]
+		if n == nil {
+			panic("add on non-open stream")
+		}
+	}
+	n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+		limit := int32(math.MaxInt32)
+		if openParent {
+			limit = ws.writeThrottleLimit
+		}
+		wr, ok = n.q.consume(limit)
+		if !ok {
+			return false
+		}
+		n.addBytes(int64(wr.DataSize()))
+		// If B depends on A and B continuously has data available but A
+		// does not, gradually increase the throttling limit to allow B to
+		// steal more and more bandwidth from A.
+		if openParent {
+			ws.writeThrottleLimit += 1024
+			if ws.writeThrottleLimit < 0 {
+				ws.writeThrottleLimit = math.MaxInt32
+			}
+		} else if ws.enableWriteThrottle {
+			ws.writeThrottleLimit = 1024
+		}
+		return true
+	})
+	return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+	if maxSize == 0 {
+		return
+	}
+	if len(*list) == maxSize {
+		// Remove the oldest node, then shift left.
+		ws.removeNode((*list)[0])
+		x := (*list)[1:]
+		copy(*list, x)
+		*list = (*list)[:len(x)]
+	}
+	*list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+	for k := n.kids; k != nil; k = k.next {
+		k.setParent(n.parent)
+	}
+	n.setParent(nil)
+	delete(ws.nodes, n.id)
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..36d7919
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+	// zero are frames not associated with a specific stream.
+	zero writeQueue
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle or closed, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+	// no-op: idle streams are not tracked
+}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+	q, ok := ws.sq[streamID]
+	if !ok {
+		return
+	}
+	delete(ws.sq, streamID)
+	ws.queuePool.put(q)
+}
+
+func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+	// no-op: priorities are ignored
+}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+	id := wr.StreamID()
+	if id == 0 {
+		ws.zero.push(wr)
+		return
+	}
+	q, ok := ws.sq[id]
+	if !ok {
+		q = ws.queuePool.get()
+		ws.sq[id] = q
+	}
+	q.push(wr)
+}
+
+func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
+	// Control frames first.
+	if !ws.zero.empty() {
+		return ws.zero.shift(), true
+	}
+	// Iterate over all non-idle streams until finding one that can be consumed.
+	for _, q := range ws.sq {
+		if wr, ok := q.consume(math.MaxInt32); ok {
+			return wr, true
+		}
+	}
+	return FrameWriteRequest{}, false
+}
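
// Aside, not part of the patch: the "arbitrary" stream selection in Pop
// above comes from Go's randomized map iteration order, sketched here.
package main

import "fmt"

func main() {
	sq := map[uint32]string{1: "a", 3: "b", 5: "c"}
	for id := range sq { // iteration order varies from run to run
		fmt.Println("picked stream", id)
		break
	}
}
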
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/tools/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/tools/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bexport.go b/vendor/golang.org/x/tools/go/gcimporter15/bexport.go
new file mode 100644
index 0000000..af982ab
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcimporter15/bexport.go
@@ -0,0 +1,824 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/ast"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"log"
+	"math"
+	"math/big"
+	"sort"
+	"strings"
+)
+
+// If debugFormat is set, each integer and string value is preceded by a marker
+// and position information in the encoding. This mechanism permits an importer
+// to recognize immediately when it is out of sync. The importer recognizes this
+// mode automatically (i.e., it can import export data produced with debugging
+// support even if debugFormat is not set at the time of import). This mode will
+// lead to massively larger export data (by a factor of 2 to 3) and should only
+// be enabled during development and debugging.
+//
+// NOTE: This flag is the first flag to enable if importing dies because of
+// (suspected) format errors, and whenever a change is made to the format.
+const debugFormat = false // default: false
+
+// If trace is set, debugging output is printed to std out.
+const trace = false // default: false
+
+// Current export format version. Increase with each format change.
+// 3: added aliasTag and export of aliases
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 3
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+	fset *token.FileSet
+	out  bytes.Buffer
+
+	// object -> index maps, indexed in order of serialization
+	strIndex map[string]int
+	pkgIndex map[*types.Package]int
+	typIndex map[types.Type]int
+
+	// track objects that we've reexported already
+	reexported map[types.Object]bool
+
+	// position encoding
+	posInfoFormat bool
+	prevFile      string
+	prevLine      int
+
+	// debugging support
+	written int // bytes written
+	indent  int // for trace
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
+	p := exporter{
+		fset:          fset,
+		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
+		pkgIndex:      make(map[*types.Package]int),
+		typIndex:      make(map[types.Type]int),
+		reexported:    make(map[types.Object]bool),
+		posInfoFormat: true, // TODO(gri) might become a flag, eventually
+	}
+
+	// write version info
+	// The version string must start with "version %d" where %d is the version
+	// number. Additional debugging information may follow after a blank; that
+	// text is ignored by the importer.
+	p.rawStringln(fmt.Sprintf("version %d", exportVersion))
+	var debug string
+	if debugFormat {
+		debug = "debug"
+	}
+	p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
+	p.bool(trackAllTypes)
+	p.bool(p.posInfoFormat)
+
+	// --- generic export data ---
+
+	// populate type map with predeclared "known" types
+	for index, typ := range predeclared {
+		p.typIndex[typ] = index
+	}
+	if len(p.typIndex) != len(predeclared) {
+		log.Fatalf("gcimporter: duplicate entries in type map?")
+	}
+
+	// write package data
+	p.pkg(pkg, true)
+	if trace {
+		p.tracef("\n")
+	}
+
+	// write objects
+	objcount := 0
+	scope := pkg.Scope()
+	for _, name := range scope.Names() {
+		if !ast.IsExported(name) {
+			continue
+		}
+		if trace {
+			p.tracef("\n")
+		}
+		p.obj(scope.Lookup(name))
+		objcount++
+	}
+
+	// indicate end of list
+	if trace {
+		p.tracef("\n")
+	}
+	p.tag(endTag)
+
+	// for self-verification only (redundant)
+	p.int(objcount)
+
+	if trace {
+		p.tracef("\n")
+	}
+
+	// --- end of export data ---
+
+	return p.out.Bytes()
+}
+
+func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
+	if pkg == nil {
+		log.Fatalf("gcimporter: unexpected nil pkg")
+	}
+
+	// if we saw the package before, write its index (>= 0)
+	if i, ok := p.pkgIndex[pkg]; ok {
+		p.index('P', i)
+		return
+	}
+
+	// otherwise, remember the package, write the package tag (< 0) and package data
+	if trace {
+		p.tracef("P%d = { ", len(p.pkgIndex))
+		defer p.tracef("} ")
+	}
+	p.pkgIndex[pkg] = len(p.pkgIndex)
+
+	p.tag(packageTag)
+	p.string(pkg.Name())
+	if emptypath {
+		p.string("")
+	} else {
+		p.string(pkg.Path())
+	}
+}
+
+func (p *exporter) obj(obj types.Object) {
+	switch obj := obj.(type) {
+	case *types.Const:
+		p.tag(constTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		p.typ(obj.Type())
+		p.value(obj.Val())
+
+	case *types.TypeName:
+		p.tag(typeTag)
+		p.typ(obj.Type())
+
+	case *types.Var:
+		p.tag(varTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		p.typ(obj.Type())
+
+	case *types.Func:
+		p.tag(funcTag)
+		p.pos(obj)
+		p.qualifiedName(obj)
+		sig := obj.Type().(*types.Signature)
+		p.paramList(sig.Params(), sig.Variadic())
+		p.paramList(sig.Results(), false)
+
+	// Alias-related code. Keep for now.
+	// case *types_Alias:
+	// 	// make sure the original is exported before the alias
+	// 	// (if the alias declaration was invalid, orig will be nil)
+	// 	orig := original(obj)
+	// 	if orig != nil && !p.reexported[orig] {
+	// 		p.obj(orig)
+	// 		p.reexported[orig] = true
+	// 	}
+
+	// 	p.tag(aliasTag)
+	// 	p.pos(obj)
+	// 	p.string(obj.Name())
+	// 	p.qualifiedName(orig)
+
+	default:
+		log.Fatalf("gcimporter: unexpected object %v (%T)", obj, obj)
+	}
+}
+
+func (p *exporter) pos(obj types.Object) {
+	if !p.posInfoFormat {
+		return
+	}
+
+	file, line := p.fileLine(obj)
+	if file == p.prevFile {
+		// common case: write line delta
+		// delta == 0 means different file or no line change
+		delta := line - p.prevLine
+		p.int(delta)
+		if delta == 0 {
+			p.int(-1) // -1 means no file change
+		}
+	} else {
+		// different file
+		p.int(0)
+		// Encode filename as length of common prefix with previous
+		// filename, followed by (possibly empty) suffix. Filenames
+		// frequently share path prefixes, so this can save a lot
+		// of space and make export data size less dependent on file
+		// path length. The suffix is unlikely to be empty because
+		// file names tend to end in ".go".
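+		// For example, "a/b/d.go" following "a/b/c.go" is encoded as
+		// prefix length 4 and suffix "d.go".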
+		n := commonPrefixLen(p.prevFile, file)
+		p.int(n)           // n >= 0
+		p.string(file[n:]) // write suffix only
+		p.prevFile = file
+		p.int(line)
+	}
+	p.prevLine = line
+}
+
+func (p *exporter) fileLine(obj types.Object) (file string, line int) {
+	if p.fset != nil {
+		pos := p.fset.Position(obj.Pos())
+		file = pos.Filename
+		line = pos.Line
+	}
+	return
+}
+
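+// commonPrefixLen returns the length of the longest common prefix of a and b;
+// e.g., commonPrefixLen("a/b/c.go", "a/b/d.go") == 4.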
+func commonPrefixLen(a, b string) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	// len(a) <= len(b)
+	i := 0
+	for i < len(a) && a[i] == b[i] {
+		i++
+	}
+	return i
+}
+
+func (p *exporter) qualifiedName(obj types.Object) {
+	if obj == nil {
+		p.string("")
+		return
+	}
+	p.string(obj.Name())
+	p.pkg(obj.Pkg(), false)
+}
+
+func (p *exporter) typ(t types.Type) {
+	if t == nil {
+		log.Fatalf("gcimporter: nil type")
+	}
+
+	// Possible optimization: Anonymous pointer types *T where
+	// T is a named type are common. We could canonicalize all
+	// such types *T to a single type PT = *T. This would lead
+	// to at most one *T entry in typIndex, and all future *T's
+	// would be encoded as the respective index directly. Would
+	// save 1 byte (pointerTag) per *T and reduce the typIndex
+	// size (at the cost of a canonicalization map). We can do
+	// this later, without encoding format change.
+
+	// if we saw the type before, write its index (>= 0)
+	if i, ok := p.typIndex[t]; ok {
+		p.index('T', i)
+		return
+	}
+
+	// otherwise, remember the type, write the type tag (< 0) and type data
+	if trackAllTypes {
+		if trace {
+			p.tracef("T%d = {>\n", len(p.typIndex))
+			defer p.tracef("<\n} ")
+		}
+		p.typIndex[t] = len(p.typIndex)
+	}
+
+	switch t := t.(type) {
+	case *types.Named:
+		if !trackAllTypes {
+			// if we don't track all types, track named types now
+			p.typIndex[t] = len(p.typIndex)
+		}
+
+		p.tag(namedTag)
+		p.pos(t.Obj())
+		p.qualifiedName(t.Obj())
+		p.typ(t.Underlying())
+		if !types.IsInterface(t) {
+			p.assocMethods(t)
+		}
+
+	case *types.Array:
+		p.tag(arrayTag)
+		p.int64(t.Len())
+		p.typ(t.Elem())
+
+	case *types.Slice:
+		p.tag(sliceTag)
+		p.typ(t.Elem())
+
+	case *dddSlice:
+		p.tag(dddTag)
+		p.typ(t.elem)
+
+	case *types.Struct:
+		p.tag(structTag)
+		p.fieldList(t)
+
+	case *types.Pointer:
+		p.tag(pointerTag)
+		p.typ(t.Elem())
+
+	case *types.Signature:
+		p.tag(signatureTag)
+		p.paramList(t.Params(), t.Variadic())
+		p.paramList(t.Results(), false)
+
+	case *types.Interface:
+		p.tag(interfaceTag)
+		p.iface(t)
+
+	case *types.Map:
+		p.tag(mapTag)
+		p.typ(t.Key())
+		p.typ(t.Elem())
+
+	case *types.Chan:
+		p.tag(chanTag)
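+		// 3-Dir() maps types.SendRecv/SendOnly/RecvOnly (0/1/2) to the
+		// compiler's Cboth/Csend/Crecv (3/2/1); see the chanTag case in
+		// the importer.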
+		p.int(int(3 - t.Dir())) // hack
+		p.typ(t.Elem())
+
+	default:
+		log.Fatalf("gcimporter: unexpected type %T: %s", t, t)
+	}
+}
+
+func (p *exporter) assocMethods(named *types.Named) {
+	// Sort methods (for determinism).
+	var methods []*types.Func
+	for i := 0; i < named.NumMethods(); i++ {
+		methods = append(methods, named.Method(i))
+	}
+	sort.Sort(methodsByName(methods))
+
+	p.int(len(methods))
+
+	if trace && methods != nil {
+		p.tracef("associated methods {>\n")
+	}
+
+	for i, m := range methods {
+		if trace && i > 0 {
+			p.tracef("\n")
+		}
+
+		p.pos(m)
+		name := m.Name()
+		p.string(name)
+		if !exported(name) {
+			p.pkg(m.Pkg(), false)
+		}
+
+		sig := m.Type().(*types.Signature)
+		p.paramList(types.NewTuple(sig.Recv()), false)
+		p.paramList(sig.Params(), sig.Variadic())
+		p.paramList(sig.Results(), false)
+		p.int(0) // dummy value for go:nointerface pragma - ignored by importer
+	}
+
+	if trace && methods != nil {
+		p.tracef("<\n} ")
+	}
+}
+
+type methodsByName []*types.Func
+
+func (x methodsByName) Len() int           { return len(x) }
+func (x methodsByName) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
+
+func (p *exporter) fieldList(t *types.Struct) {
+	if trace && t.NumFields() > 0 {
+		p.tracef("fields {>\n")
+		defer p.tracef("<\n} ")
+	}
+
+	p.int(t.NumFields())
+	for i := 0; i < t.NumFields(); i++ {
+		if trace && i > 0 {
+			p.tracef("\n")
+		}
+		p.field(t.Field(i))
+		p.string(t.Tag(i))
+	}
+}
+
+func (p *exporter) field(f *types.Var) {
+	if !f.IsField() {
+		log.Fatalf("gcimporter: field expected")
+	}
+
+	p.pos(f)
+	p.fieldName(f)
+	p.typ(f.Type())
+}
+
+func (p *exporter) iface(t *types.Interface) {
+	// TODO(gri): enable importer to load embedded interfaces,
+	// then emit Embeddeds and ExplicitMethods separately here.
+	p.int(0)
+
+	n := t.NumMethods()
+	if trace && n > 0 {
+		p.tracef("methods {>\n")
+		defer p.tracef("<\n} ")
+	}
+	p.int(n)
+	for i := 0; i < n; i++ {
+		if trace && i > 0 {
+			p.tracef("\n")
+		}
+		p.method(t.Method(i))
+	}
+}
+
+func (p *exporter) method(m *types.Func) {
+	sig := m.Type().(*types.Signature)
+	if sig.Recv() == nil {
+		log.Fatalf("gcimporter: method expected")
+	}
+
+	p.pos(m)
+	p.string(m.Name())
+	if m.Name() != "_" && !ast.IsExported(m.Name()) {
+		p.pkg(m.Pkg(), false)
+	}
+
+	// interface method; no need to encode receiver.
+	p.paramList(sig.Params(), sig.Variadic())
+	p.paramList(sig.Results(), false)
+}
+
+// fieldName is like qualifiedName but it doesn't record the package for exported names.
+func (p *exporter) fieldName(f *types.Var) {
+	name := f.Name()
+
+	// anonymous field with unexported base type name: use "?" as field name
+	// (the base type name is non-empty per spec, but we are conservative in case of errors)
+	if f.Anonymous() {
+		base := f.Type()
+		if ptr, ok := base.(*types.Pointer); ok {
+			base = ptr.Elem()
+		}
+		if named, ok := base.(*types.Named); ok && !named.Obj().Exported() {
+			// anonymous field with unexported base type name
+			name = "?" // unexported name to force export of package
+		}
+	}
+	p.string(name)
+	if !f.Exported() {
+		p.pkg(f.Pkg(), false)
+	}
+}
+
+func (p *exporter) paramList(params *types.Tuple, variadic bool) {
+	// use negative length to indicate unnamed parameters
+	// (look at the first parameter only since either all
+	// names are present or all are absent)
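+	// For example, func(x, y int) is encoded with n == 2 and parameter
+	// names, while func(int, string) is encoded with n == -2 and no names.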
+	n := params.Len()
+	if n > 0 && params.At(0).Name() == "" {
+		n = -n
+	}
+	p.int(n)
+	for i := 0; i < params.Len(); i++ {
+		q := params.At(i)
+		t := q.Type()
+		if variadic && i == params.Len()-1 {
+			t = &dddSlice{t.(*types.Slice).Elem()}
+		}
+		p.typ(t)
+		if n > 0 {
+			name := q.Name()
+			p.string(name)
+			if name != "_" {
+				p.pkg(q.Pkg(), false)
+			}
+		}
+		p.string("") // no compiler-specific info
+	}
+}
+
+func (p *exporter) value(x constant.Value) {
+	if trace {
+		p.tracef("= ")
+	}
+
+	switch x.Kind() {
+	case constant.Bool:
+		tag := falseTag
+		if constant.BoolVal(x) {
+			tag = trueTag
+		}
+		p.tag(tag)
+
+	case constant.Int:
+		if v, exact := constant.Int64Val(x); exact {
+			// common case: x fits into an int64 - use compact encoding
+			p.tag(int64Tag)
+			p.int64(v)
+			return
+		}
+		// uncommon case: large x - use float encoding
+		// (powers of 2 will be encoded efficiently with exponent)
+		p.tag(floatTag)
+		p.float(constant.ToFloat(x))
+
+	case constant.Float:
+		p.tag(floatTag)
+		p.float(x)
+
+	case constant.Complex:
+		p.tag(complexTag)
+		p.float(constant.Real(x))
+		p.float(constant.Imag(x))
+
+	case constant.String:
+		p.tag(stringTag)
+		p.string(constant.StringVal(x))
+
+	case constant.Unknown:
+		// package contains type errors
+		p.tag(unknownTag)
+
+	default:
+		log.Fatalf("gcimporter: unexpected value %v (%T)", x, x)
+	}
+}
+
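+// float writes the sign of x, a binary exponent, and the mantissa bytes in
+// big-endian order. For example, 0.5 is written as sign 1, exponent 0, and
+// the single mantissa byte 0x01.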
+func (p *exporter) float(x constant.Value) {
+	if x.Kind() != constant.Float {
+		log.Fatalf("gcimporter: unexpected constant %v, want float", x)
+	}
+	// extract sign (there is no -0)
+	sign := constant.Sign(x)
+	if sign == 0 {
+		// x == 0
+		p.int(0)
+		return
+	}
+	// x != 0
+
+	var f big.Float
+	if v, exact := constant.Float64Val(x); exact {
+		// float64
+		f.SetFloat64(v)
+	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+		// TODO(gri): add big.Rat accessor to constant.Value.
+		r := valueToRat(num)
+		f.SetRat(r.Quo(r, valueToRat(denom)))
+	} else {
+		// Value too large to represent as a fraction => inaccessible.
+		// TODO(gri): add big.Float accessor to constant.Value.
+		f.SetFloat64(math.MaxFloat64) // FIXME
+	}
+
+	// extract exponent such that 0.5 <= m < 1.0
+	var m big.Float
+	exp := f.MantExp(&m)
+
+	// extract mantissa as *big.Int
+	// - set the exponent large enough so that m becomes an integer
+	// - get the *big.Int mantissa from m
+	m.SetMantExp(&m, int(m.MinPrec()))
+	mant, acc := m.Int(nil)
+	if acc != big.Exact {
+		log.Fatalf("gcimporter: internal error")
+	}
+
+	p.int(sign)
+	p.int(exp)
+	p.string(string(mant.Bytes()))
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+	// Convert little-endian to big-endian.
+	// I can't believe this is necessary.
+	bytes := constant.Bytes(x)
+	for i := 0; i < len(bytes)/2; i++ {
+		bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+	}
+	return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+func (p *exporter) bool(b bool) bool {
+	if trace {
+		p.tracef("[")
+		defer p.tracef("= %v] ", b)
+	}
+
+	x := 0
+	if b {
+		x = 1
+	}
+	p.int(x)
+	return b
+}
+
+// ----------------------------------------------------------------------------
+// Low-level encoders
+
+func (p *exporter) index(marker byte, index int) {
+	if index < 0 {
+		log.Fatalf("gcimporter: invalid index < 0")
+	}
+	if debugFormat {
+		p.marker('t')
+	}
+	if trace {
+		p.tracef("%c%d ", marker, index)
+	}
+	p.rawInt64(int64(index))
+}
+
+func (p *exporter) tag(tag int) {
+	if tag >= 0 {
+		log.Fatalf("gcimporter: invalid tag >= 0")
+	}
+	if debugFormat {
+		p.marker('t')
+	}
+	if trace {
+		p.tracef("%s ", tagString[-tag])
+	}
+	p.rawInt64(int64(tag))
+}
+
+func (p *exporter) int(x int) {
+	p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+	if debugFormat {
+		p.marker('i')
+	}
+	if trace {
+		p.tracef("%d ", x)
+	}
+	p.rawInt64(x)
+}
+
+func (p *exporter) string(s string) {
+	if debugFormat {
+		p.marker('s')
+	}
+	if trace {
+		p.tracef("%q ", s)
+	}
+	// if we saw the string before, write its index (>= 0)
+	// (the empty string is mapped to 0)
+	if i, ok := p.strIndex[s]; ok {
+		p.rawInt64(int64(i))
+		return
+	}
+	// otherwise, remember string and write its negative length and bytes
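+	// e.g., the first occurrence of "foo" is written as -3 'f' 'o' 'o';
+	// subsequent occurrences write only its (non-negative) index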
+	p.strIndex[s] = len(p.strIndex)
+	p.rawInt64(-int64(len(s)))
+	for i := 0; i < len(s); i++ {
+		p.rawByte(s[i])
+	}
+}
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used only
+// when debugFormat is enabled.
+func (p *exporter) marker(m byte) {
+	p.rawByte(m)
+	// Enable this for help tracking down the location
+	// of an incorrect marker when running in debugFormat.
+	if false && trace {
+		p.tracef("#%d ", p.written)
+	}
+	p.rawInt64(int64(p.written))
+}
+
+// rawInt64 should only be used by low-level encoders.
+func (p *exporter) rawInt64(x int64) {
+	var tmp [binary.MaxVarintLen64]byte
+	n := binary.PutVarint(tmp[:], x)
+	for i := 0; i < n; i++ {
+		p.rawByte(tmp[i])
+	}
+}
+
+// rawStringln should only be used to emit the initial version string.
+func (p *exporter) rawStringln(s string) {
+	for i := 0; i < len(s); i++ {
+		p.rawByte(s[i])
+	}
+	p.rawByte('\n')
+}
+
+// rawByte is the bottleneck interface to write to p.out.
+// rawByte escapes b as follows (any encoding that
+// hides '$' would do):
+//
+//	'$'  => '|' 'S'
+//	'|'  => '|' '|'
+//
+// Necessary so other tools can find the end of the
+// export data by searching for "$$".
+// rawByte should only be used by low-level encoders.
+func (p *exporter) rawByte(b byte) {
+	switch b {
+	case '$':
+		// write '$' as '|' 'S'
+		b = 'S'
+		fallthrough
+	case '|':
+		// write '|' as '|' '|'
+		p.out.WriteByte('|')
+		p.written++
+	}
+	p.out.WriteByte(b)
+	p.written++
+}
+
+// tracef is like fmt.Printf but it rewrites the format string
+// to take care of indentation.
+func (p *exporter) tracef(format string, args ...interface{}) {
+	if strings.IndexAny(format, "<>\n") >= 0 {
+		var buf bytes.Buffer
+		for i := 0; i < len(format); i++ {
+			// no need to deal with runes
+			ch := format[i]
+			switch ch {
+			case '>':
+				p.indent++
+				continue
+			case '<':
+				p.indent--
+				continue
+			}
+			buf.WriteByte(ch)
+			if ch == '\n' {
+				for j := p.indent; j > 0; j-- {
+					buf.WriteString(".  ")
+				}
+			}
+		}
+		format = buf.String()
+	}
+	fmt.Printf(format, args...)
+}
+
+// Debugging support.
+// (tagString is only used when tracing is enabled)
+var tagString = [...]string{
+	// Packages:
+	-packageTag: "package",
+
+	// Types:
+	-namedTag:     "named type",
+	-arrayTag:     "array",
+	-sliceTag:     "slice",
+	-dddTag:       "ddd",
+	-structTag:    "struct",
+	-pointerTag:   "pointer",
+	-signatureTag: "signature",
+	-interfaceTag: "interface",
+	-mapTag:       "map",
+	-chanTag:      "chan",
+
+	// Values:
+	-falseTag:    "false",
+	-trueTag:     "true",
+	-int64Tag:    "int64",
+	-floatTag:    "float",
+	-fractionTag: "fraction",
+	-complexTag:  "complex",
+	-stringTag:   "string",
+	-unknownTag:  "unknown",
+}
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bimport.go b/vendor/golang.org/x/tools/go/gcimporter15/bimport.go
new file mode 100644
index 0000000..ce4b687
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcimporter15/bimport.go
@@ -0,0 +1,965 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+type importer struct {
+	imports map[string]*types.Package
+	data    []byte
+	path    string
+	buf     []byte // for reading strings
+	version int    // export format version
+
+	// object lists
+	strList       []string         // in order of appearance
+	pkgList       []*types.Package // in order of appearance
+	typList       []types.Type     // in order of appearance
+	trackAllTypes bool
+
+	// position encoding
+	posInfoFormat bool
+	prevFile      string
+	prevLine      int
+	fset          *token.FileSet
+	files         map[string]*token.File
+
+	// debugging support
+	debugFormat bool
+	read        int // bytes read
+}
+
+// BImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, _ *types.Package, err error) {
+	// catch panics and return them as errors
+	defer func() {
+		if e := recover(); e != nil {
+			// The package (filename) causing the problem is added to this
+			// error by a wrapper in the caller (Import in gcimporter.go).
+			err = fmt.Errorf("cannot import, possibly version skew (%v) - reinstall package", e)
+		}
+	}()
+
+	p := importer{
+		imports: imports,
+		data:    data,
+		path:    path,
+		version: -1,           // unknown version
+		strList: []string{""}, // empty string is mapped to 0
+		fset:    fset,
+		files:   make(map[string]*token.File),
+	}
+
+	// read version info
+	var versionstr string
+	if b := p.rawByte(); b == 'c' || b == 'd' {
+		// Go1.7 encoding; first byte encodes low-level
+		// encoding format (compact vs debug).
+		// For backward-compatibility only (avoid problems with
+		// old installed packages). Newly compiled packages use
+		// the extensible format string.
+		// TODO(gri) Remove this support eventually; after Go1.8.
+		if b == 'd' {
+			p.debugFormat = true
+		}
+		p.trackAllTypes = p.rawByte() == 'a'
+		p.posInfoFormat = p.int() != 0
+		versionstr = p.string()
+		if versionstr == "v1" {
+			p.version = 0
+		}
+	} else {
+		// Go1.8 extensible encoding
+		// read version string and extract version number (ignore anything after the version number)
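+		// e.g., "version 3 extra debugging info" yields p.version == 3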
+		versionstr = p.rawStringln(b)
+		if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
+			if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
+				p.version = v
+			}
+		}
+	}
+
+	// read version specific flags - extend as necessary
+	switch p.version {
+	// case 4:
+	// 	...
+	//	fallthrough
+	case 3, 2, 1:
+		p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
+		p.trackAllTypes = p.int() != 0
+		p.posInfoFormat = p.int() != 0
+	case 0:
+		// Go1.7 encoding format - nothing to do here
+	default:
+		errorf("unknown export format version %d (%q)", p.version, versionstr)
+	}
+
+	// --- generic export data ---
+
+	// populate typList with predeclared "known" types
+	p.typList = append(p.typList, predeclared...)
+
+	// read package data
+	pkg := p.pkg()
+
+	// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
+	objcount := 0
+	for {
+		tag := p.tagOrIndex()
+		if tag == endTag {
+			break
+		}
+		p.obj(tag)
+		objcount++
+	}
+
+	// self-verification
+	if count := p.int(); count != objcount {
+		errorf("got %d objects; want %d", objcount, count)
+	}
+
+	// ignore compiler-specific import data
+
+	// complete interfaces
+	for _, typ := range p.typList {
+		// If we only record named types (!p.trackAllTypes),
+		// we must check the underlying types here. If we
+		// track all types, the Underlying() method call is
+		// not needed.
+		// TODO(gri) Remove if p.trackAllTypes is gone.
+		if it, ok := typ.Underlying().(*types.Interface); ok {
+			it.Complete()
+		}
+	}
+
+	// record all referenced packages as imports
+	list := append(([]*types.Package)(nil), p.pkgList[1:]...)
+	sort.Sort(byPath(list))
+	pkg.SetImports(list)
+
+	// package was imported completely and without errors
+	pkg.MarkComplete()
+
+	return p.read, pkg, nil
+}
+
+func errorf(format string, args ...interface{}) {
+	panic(fmt.Sprintf(format, args...))
+}
+
+func (p *importer) pkg() *types.Package {
+	// if the package was seen before, i is its index (>= 0)
+	i := p.tagOrIndex()
+	if i >= 0 {
+		return p.pkgList[i]
+	}
+
+	// otherwise, i is the package tag (< 0)
+	if i != packageTag {
+		errorf("unexpected package tag %d", i)
+	}
+
+	// read package data
+	name := p.string()
+	path := p.string()
+
+	// we should never see an empty package name
+	if name == "" {
+		errorf("empty package name in import")
+	}
+
+	// an empty path denotes the package we are currently importing;
+	// it must be the first package we see
+	if (path == "") != (len(p.pkgList) == 0) {
+		errorf("package path %q for pkg index %d", path, len(p.pkgList))
+	}
+
+	// if the package was imported before, use that one; otherwise create a new one
+	if path == "" {
+		path = p.path
+	}
+	pkg := p.imports[path]
+	if pkg == nil {
+		pkg = types.NewPackage(path, name)
+		p.imports[path] = pkg
+	} else if pkg.Name() != name {
+		errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
+	}
+	p.pkgList = append(p.pkgList, pkg)
+
+	return pkg
+}
+
+// objTag returns the tag value for each object kind.
+// obj must not be a *types.Alias.
+func objTag(obj types.Object) int {
+	switch obj.(type) {
+	case *types.Const:
+		return constTag
+	case *types.TypeName:
+		return typeTag
+	case *types.Var:
+		return varTag
+	case *types.Func:
+		return funcTag
+	// Aliases are not exported multiple times, thus we should not see them here.
+	default:
+		errorf("unexpected object: %v (%T)", obj, obj) // panics
+		panic("unreachable")
+	}
+}
+
+func sameObj(a, b types.Object) bool {
+	// Because unnamed types are not canonicalized, we cannot simply compare types for
+	// (pointer) identity.
+	// Ideally we'd check equality of constant values as well, but this is good enough.
+	return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
+}
+
+func (p *importer) declare(obj types.Object) {
+	pkg := obj.Pkg()
+	if alt := pkg.Scope().Insert(obj); alt != nil {
+		// This can only trigger if we import a (non-type) object a second time.
+		// Excluding aliases, this cannot happen because 1) we only import a package
+		// once; and 2) we ignore compiler-specific export data which may contain
+		// functions whose inlined function bodies refer to other functions that
+		// were already imported.
+		// However, aliases require reexporting the original object, so we need
+		// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
+		// method importer.obj, switch case importing functions).
+		// Note that the original itself cannot be an alias.
+		if !sameObj(obj, alt) {
+			errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
+		}
+	}
+}
+
+func (p *importer) obj(tag int) {
+	switch tag {
+	case constTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		typ := p.typ(nil)
+		val := p.value()
+		p.declare(types.NewConst(pos, pkg, name, typ, val))
+
+	case typeTag:
+		p.typ(nil)
+
+	case varTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		typ := p.typ(nil)
+		p.declare(types.NewVar(pos, pkg, name, typ))
+
+	case funcTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		params, isddd := p.paramList()
+		result, _ := p.paramList()
+		sig := types.NewSignature(nil, params, result, isddd)
+		p.declare(types.NewFunc(pos, pkg, name, sig))
+
+	case aliasTag:
+		pos := p.pos()
+		name := p.string()
+		var orig types.Object
+		if pkg, name := p.qualifiedName(); pkg != nil {
+			orig = pkg.Scope().Lookup(name)
+		}
+		// Alias-related code. Keep for now.
+		_ = pos
+		_ = name
+		_ = orig
+		// p.declare(types.NewAlias(pos, p.pkgList[0], name, orig))
+
+	default:
+		errorf("unexpected object tag %d", tag)
+	}
+}
+
+func (p *importer) pos() token.Pos {
+	if !p.posInfoFormat {
+		return token.NoPos
+	}
+
+	file := p.prevFile
+	line := p.prevLine
+	if delta := p.int(); delta != 0 {
+		// line changed
+		line += delta
+	} else if n := p.int(); n >= 0 {
+		// file changed
+		file = p.prevFile[:n] + p.string()
+		p.prevFile = file
+		line = p.int()
+	}
+	p.prevLine = line
+
+	// Synthesize a token.Pos
+
+	// Since we don't know the set of needed file positions, we
+	// reserve maxlines positions per file.
+	const maxlines = 64 * 1024
+	f := p.files[file]
+	if f == nil {
+		f = p.fset.AddFile(file, -1, maxlines)
+		p.files[file] = f
+		// Allocate the fake linebreak indices on first use.
+		// TODO(adonovan): opt: save ~512KB using a more complex scheme?
+		fakeLinesOnce.Do(func() {
+			fakeLines = make([]int, maxlines)
+			for i := range fakeLines {
+				fakeLines[i] = i
+			}
+		})
+		f.SetLines(fakeLines)
+	}
+
+	if line > maxlines {
+		line = 1
+	}
+
+	// Treat the file as if it contained only newlines
+	// and column=1: use the line number as the offset.
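+	// For example, line 5 maps to f.Pos(4): offset 4 in the fake file
+	// whose line i+1 starts at offset i.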
+	return f.Pos(line - 1)
+}
+
+var (
+	fakeLines     []int
+	fakeLinesOnce sync.Once
+)
+
+func (p *importer) qualifiedName() (pkg *types.Package, name string) {
+	name = p.string()
+	if name != "" {
+		pkg = p.pkg()
+	}
+	return
+}
+
+func (p *importer) record(t types.Type) {
+	p.typList = append(p.typList, t)
+}
+
+// A dddSlice is a types.Type representing ...T parameters.
+// It only appears for parameter types and does not escape
+// the importer.
+type dddSlice struct {
+	elem types.Type
+}
+
+func (t *dddSlice) Underlying() types.Type { return t }
+func (t *dddSlice) String() string         { return "..." + t.elem.String() }
+
+// parent is the package which declared the type; parent == nil means
+// the package currently imported. The parent package is needed for
+// exported struct fields and interface methods which don't contain
+// explicit package information in the export data.
+func (p *importer) typ(parent *types.Package) types.Type {
+	// if the type was seen before, i is its index (>= 0)
+	i := p.tagOrIndex()
+	if i >= 0 {
+		return p.typList[i]
+	}
+
+	// otherwise, i is the type tag (< 0)
+	switch i {
+	case namedTag:
+		// read type object
+		pos := p.pos()
+		parent, name := p.qualifiedName()
+		scope := parent.Scope()
+		obj := scope.Lookup(name)
+
+		// if the object doesn't exist yet, create and insert it
+		if obj == nil {
+			obj = types.NewTypeName(pos, parent, name, nil)
+			scope.Insert(obj)
+		}
+
+		if _, ok := obj.(*types.TypeName); !ok {
+			errorf("pkg = %s, name = %s => %s", parent, name, obj)
+		}
+
+		// associate new named type with obj if it doesn't exist yet
+		t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
+
+		// but record the existing type, if any
+		t := obj.Type().(*types.Named)
+		p.record(t)
+
+		// read underlying type
+		t0.SetUnderlying(p.typ(parent))
+
+		// interfaces don't have associated methods
+		if types.IsInterface(t0) {
+			return t
+		}
+
+		// read associated methods
+		for i := p.int(); i > 0; i-- {
+			// TODO(gri) replace this with something closer to fieldName
+			pos := p.pos()
+			name := p.string()
+			if !exported(name) {
+				p.pkg()
+			}
+
+			recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
+			params, isddd := p.paramList()
+			result, _ := p.paramList()
+			p.int() // go:nointerface pragma - discarded
+
+			sig := types.NewSignature(recv.At(0), params, result, isddd)
+			t0.AddMethod(types.NewFunc(pos, parent, name, sig))
+		}
+
+		return t
+
+	case arrayTag:
+		t := new(types.Array)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		n := p.int64()
+		*t = *types.NewArray(p.typ(parent), n)
+		return t
+
+	case sliceTag:
+		t := new(types.Slice)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		*t = *types.NewSlice(p.typ(parent))
+		return t
+
+	case dddTag:
+		t := new(dddSlice)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		t.elem = p.typ(parent)
+		return t
+
+	case structTag:
+		t := new(types.Struct)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		*t = *types.NewStruct(p.fieldList(parent))
+		return t
+
+	case pointerTag:
+		t := new(types.Pointer)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		*t = *types.NewPointer(p.typ(parent))
+		return t
+
+	case signatureTag:
+		t := new(types.Signature)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		params, isddd := p.paramList()
+		result, _ := p.paramList()
+		*t = *types.NewSignature(nil, params, result, isddd)
+		return t
+
+	case interfaceTag:
+		// Create a dummy entry in the type list. This is safe because we
+		// cannot expect the interface type to appear in a cycle, as any
+		// such cycle must contain a named type which would have been
+		// first defined earlier.
+		n := len(p.typList)
+		if p.trackAllTypes {
+			p.record(nil)
+		}
+
+		// no embedded interfaces with gc compiler
+		if p.int() != 0 {
+			errorf("unexpected embedded interface")
+		}
+
+		t := types.NewInterface(p.methodList(parent), nil)
+		if p.trackAllTypes {
+			p.typList[n] = t
+		}
+		return t
+
+	case mapTag:
+		t := new(types.Map)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		key := p.typ(parent)
+		val := p.typ(parent)
+		*t = *types.NewMap(key, val)
+		return t
+
+	case chanTag:
+		t := new(types.Chan)
+		if p.trackAllTypes {
+			p.record(t)
+		}
+
+		var dir types.ChanDir
+		// tag values must match the constants in cmd/compile/internal/gc/go.go
+		switch d := p.int(); d {
+		case 1 /* Crecv */ :
+			dir = types.RecvOnly
+		case 2 /* Csend */ :
+			dir = types.SendOnly
+		case 3 /* Cboth */ :
+			dir = types.SendRecv
+		default:
+			errorf("unexpected channel dir %d", d)
+		}
+		val := p.typ(parent)
+		*t = *types.NewChan(dir, val)
+		return t
+
+	default:
+		errorf("unexpected type tag %d", i) // panics
+		panic("unreachable")
+	}
+}
+
+func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
+	if n := p.int(); n > 0 {
+		fields = make([]*types.Var, n)
+		tags = make([]string, n)
+		for i := range fields {
+			fields[i] = p.field(parent)
+			tags[i] = p.string()
+		}
+	}
+	return
+}
+
+func (p *importer) field(parent *types.Package) *types.Var {
+	pos := p.pos()
+	pkg, name := p.fieldName(parent)
+	typ := p.typ(parent)
+
+	anonymous := false
+	if name == "" {
+		// anonymous field - typ must be T or *T and T must be a type name
+		switch typ := deref(typ).(type) {
+		case *types.Basic: // basic types are named types
+			pkg = nil // objects defined in Universe scope have no package
+			name = typ.Name()
+		case *types.Named:
+			name = typ.Obj().Name()
+		default:
+			errorf("anonymous field expected")
+		}
+		anonymous = true
+	}
+
+	return types.NewField(pos, pkg, name, typ, anonymous)
+}
+
+func (p *importer) methodList(parent *types.Package) (methods []*types.Func) {
+	if n := p.int(); n > 0 {
+		methods = make([]*types.Func, n)
+		for i := range methods {
+			methods[i] = p.method(parent)
+		}
+	}
+	return
+}
+
+func (p *importer) method(parent *types.Package) *types.Func {
+	pos := p.pos()
+	pkg, name := p.fieldName(parent)
+	params, isddd := p.paramList()
+	result, _ := p.paramList()
+	sig := types.NewSignature(nil, params, result, isddd)
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (p *importer) fieldName(parent *types.Package) (*types.Package, string) {
+	name := p.string()
+	pkg := parent
+	if pkg == nil {
+		// use the imported package instead
+		pkg = p.pkgList[0]
+	}
+	if p.version == 0 && name == "_" {
+		// version 0 didn't export a package for _ fields
+		// see issue #15514
+
+		// For bug-compatibility with gc, pretend all imported
+		// blank fields belong to the same dummy package.
+		// This avoids spurious "cannot assign A to B" errors
+		// from go/types caused by types changing as they are
+		// re-exported.
+		const blankpkg = "<_>"
+		pkg := p.imports[blankpkg]
+		if pkg == nil {
+			pkg = types.NewPackage(blankpkg, blankpkg)
+			p.imports[blankpkg] = pkg
+		}
+
+		return pkg, name
+	}
+	if name != "" && !exported(name) {
+		if name == "?" {
+			name = ""
+		}
+		pkg = p.pkg()
+	}
+	return pkg, name
+}
+
+func (p *importer) paramList() (*types.Tuple, bool) {
+	n := p.int()
+	if n == 0 {
+		return nil, false
+	}
+	// negative length indicates unnamed parameters
+	named := true
+	if n < 0 {
+		n = -n
+		named = false
+	}
+	// n > 0
+	params := make([]*types.Var, n)
+	isddd := false
+	for i := range params {
+		params[i], isddd = p.param(named)
+	}
+	return types.NewTuple(params...), isddd
+}
+
+func (p *importer) param(named bool) (*types.Var, bool) {
+	t := p.typ(nil)
+	td, isddd := t.(*dddSlice)
+	if isddd {
+		t = types.NewSlice(td.elem)
+	}
+
+	var pkg *types.Package
+	var name string
+	if named {
+		name = p.string()
+		if name == "" {
+			errorf("expected named parameter")
+		}
+		if name != "_" {
+			pkg = p.pkg()
+		}
+		if i := strings.Index(name, "·"); i > 0 {
+			name = name[:i] // cut off gc-specific parameter numbering
+		}
+	}
+
+	// read and discard compiler-specific info
+	p.string()
+
+	return types.NewVar(token.NoPos, pkg, name, t), isddd
+}
+
+func exported(name string) bool {
+	ch, _ := utf8.DecodeRuneInString(name)
+	return unicode.IsUpper(ch)
+}
+
+func (p *importer) value() constant.Value {
+	switch tag := p.tagOrIndex(); tag {
+	case falseTag:
+		return constant.MakeBool(false)
+	case trueTag:
+		return constant.MakeBool(true)
+	case int64Tag:
+		return constant.MakeInt64(p.int64())
+	case floatTag:
+		return p.float()
+	case complexTag:
+		re := p.float()
+		im := p.float()
+		return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+	case stringTag:
+		return constant.MakeString(p.string())
+	case unknownTag:
+		return constant.MakeUnknown()
+	default:
+		errorf("unexpected value tag %d", tag) // panics
+		panic("unreachable")
+	}
+}
+
+func (p *importer) float() constant.Value {
+	sign := p.int()
+	if sign == 0 {
+		return constant.MakeInt64(0)
+	}
+
+	exp := p.int()
+	mant := []byte(p.string()) // big endian
+
+	// remove leading 0's if any
+	for len(mant) > 0 && mant[0] == 0 {
+		mant = mant[1:]
+	}
+
+	// convert to little endian
+	// TODO(gri) go/constant should have a more direct conversion function
+	//           (e.g., once it supports a big.Float based implementation)
+	for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
+		mant[i], mant[j] = mant[j], mant[i]
+	}
+
+	// adjust exponent (constant.MakeFromBytes creates an integer value,
+	// but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
+	exp -= len(mant) << 3
+	if len(mant) > 0 {
+		for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
+			exp++
+		}
+	}
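+	// e.g., the exporter's encoding of 0.5 (sign 1, exp 0, mantissa byte
+	// 0x01) arrives here with exp == -1, and mant*2^exp == 1/2 below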
+
+	x := constant.MakeFromBytes(mant)
+	switch {
+	case exp < 0:
+		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+		x = constant.BinaryOp(x, token.QUO, d)
+	case exp > 0:
+		x = constant.Shift(x, token.SHL, uint(exp))
+	}
+
+	if sign < 0 {
+		x = constant.UnaryOp(token.SUB, x, 0)
+	}
+	return x
+}
+
+// ----------------------------------------------------------------------------
+// Low-level decoders
+
+func (p *importer) tagOrIndex() int {
+	if p.debugFormat {
+		p.marker('t')
+	}
+
+	return int(p.rawInt64())
+}
+
+func (p *importer) int() int {
+	x := p.int64()
+	if int64(int(x)) != x {
+		errorf("exported integer too large")
+	}
+	return int(x)
+}
+
+func (p *importer) int64() int64 {
+	if p.debugFormat {
+		p.marker('i')
+	}
+
+	return p.rawInt64()
+}
+
+func (p *importer) string() string {
+	if p.debugFormat {
+		p.marker('s')
+	}
+	// if the string was seen before, i is its index (>= 0)
+	// (the empty string is at index 0)
+	i := p.rawInt64()
+	if i >= 0 {
+		return p.strList[i]
+	}
+	// otherwise, i is the negative string length (< 0)
+	if n := int(-i); n <= cap(p.buf) {
+		p.buf = p.buf[:n]
+	} else {
+		p.buf = make([]byte, n)
+	}
+	for i := range p.buf {
+		p.buf[i] = p.rawByte()
+	}
+	s := string(p.buf)
+	p.strList = append(p.strList, s)
+	return s
+}
+
+func (p *importer) marker(want byte) {
+	if got := p.rawByte(); got != want {
+		errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
+	}
+
+	pos := p.read
+	if n := int(p.rawInt64()); n != pos {
+		errorf("incorrect position: got %d; want %d", n, pos)
+	}
+}
+
+// rawInt64 should only be used by low-level decoders.
+func (p *importer) rawInt64() int64 {
+	i, err := binary.ReadVarint(p)
+	if err != nil {
+		errorf("read error: %v", err)
+	}
+	return i
+}
+
+// rawStringln should only be used to read the initial version string.
+func (p *importer) rawStringln(b byte) string {
+	p.buf = p.buf[:0]
+	for b != '\n' {
+		p.buf = append(p.buf, b)
+		b = p.rawByte()
+	}
+	return string(p.buf)
+}
+
+// needed for binary.ReadVarint in rawInt64
+func (p *importer) ReadByte() (byte, error) {
+	return p.rawByte(), nil
+}
+
+// rawByte is the bottleneck interface for reading p.data.
+// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
+// rawByte should only be used by low-level decoders.
+func (p *importer) rawByte() byte {
+	b := p.data[0]
+	r := 1
+	if b == '|' {
+		b = p.data[1]
+		r = 2
+		switch b {
+		case 'S':
+			b = '$'
+		case '|':
+			// nothing to do
+		default:
+			errorf("unexpected escape sequence in export data")
+		}
+	}
+	p.data = p.data[r:]
+	p.read += r
+	return b
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
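+// Being negative keeps them distinguishable from the non-negative
+// list indices read by tagOrIndex.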
+const (
+	// Objects
+	packageTag = -(iota + 1)
+	constTag
+	typeTag
+	varTag
+	funcTag
+	endTag
+
+	// Types
+	namedTag
+	arrayTag
+	sliceTag
+	dddTag
+	structTag
+	pointerTag
+	signatureTag
+	interfaceTag
+	mapTag
+	chanTag
+
+	// Values
+	falseTag
+	trueTag
+	int64Tag
+	floatTag
+	fractionTag // not used by gc
+	complexTag
+	stringTag
+	nilTag     // only used by gc (appears in exported inlined function bodies)
+	unknownTag // not used by gc (only appears in packages with errors)
+
+	// Aliases
+	aliasTag
+)
+
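+// The order of predeclared fixes the predeclared type indices in the export
+// data: the exporter's typIndex and the importer's typList are both seeded
+// from this list, so the order must not change.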
+var predeclared = []types.Type{
+	// basic types
+	types.Typ[types.Bool],
+	types.Typ[types.Int],
+	types.Typ[types.Int8],
+	types.Typ[types.Int16],
+	types.Typ[types.Int32],
+	types.Typ[types.Int64],
+	types.Typ[types.Uint],
+	types.Typ[types.Uint8],
+	types.Typ[types.Uint16],
+	types.Typ[types.Uint32],
+	types.Typ[types.Uint64],
+	types.Typ[types.Uintptr],
+	types.Typ[types.Float32],
+	types.Typ[types.Float64],
+	types.Typ[types.Complex64],
+	types.Typ[types.Complex128],
+	types.Typ[types.String],
+
+	// aliases
+	types.Universe.Lookup("byte").Type(),
+	types.Universe.Lookup("rune").Type(),
+
+	// error
+	types.Universe.Lookup("error").Type(),
+
+	// untyped types
+	types.Typ[types.UntypedBool],
+	types.Typ[types.UntypedInt],
+	types.Typ[types.UntypedRune],
+	types.Typ[types.UntypedFloat],
+	types.Typ[types.UntypedComplex],
+	types.Typ[types.UntypedString],
+	types.Typ[types.UntypedNil],
+
+	// package unsafe
+	types.Typ[types.UnsafePointer],
+
+	// invalid type
+	types.Typ[types.Invalid], // only appears in packages with errors
+
+	// used internally by gc; never used by this package or in .a files
+	anyType{},
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string         { return "any" }
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go b/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go
new file mode 100644
index 0000000..f33dc56
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+	// See $GOROOT/include/ar.h.
+	hdr := make([]byte, 16+12+6+6+8+10+2)
+	_, err = io.ReadFull(r, hdr)
+	if err != nil {
+		return
+	}
+	// leave for debugging
+	if false {
+		fmt.Printf("header: %s", hdr)
+	}
+	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+	size, err = strconv.Atoi(s)
+	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+		err = fmt.Errorf("invalid archive header")
+		return
+	}
+	name = strings.TrimSpace(string(hdr[:16]))
+	return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the line before the export data, either "$$\n" or "$$B\n".
+//
+func FindExportData(r *bufio.Reader) (hdr string, err error) {
+	// Read first line to make sure this is an object file.
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		err = fmt.Errorf("can't find export data (%v)", err)
+		return
+	}
+
+	if string(line) == "!<arch>\n" {
+		// Archive file. Scan to __.PKGDEF.
+		var name string
+		if name, _, err = readGopackHeader(r); err != nil {
+			return
+		}
+
+		// First entry should be __.PKGDEF.
+		if name != "__.PKGDEF" {
+			err = fmt.Errorf("go archive is missing __.PKGDEF")
+			return
+		}
+
+		// Read first line of __.PKGDEF data, so that line
+		// is once again the first line of the input.
+		if line, err = r.ReadSlice('\n'); err != nil {
+			err = fmt.Errorf("can't find export data (%v)", err)
+			return
+		}
+	}
+
+	// Now at __.PKGDEF in archive or still at beginning of file.
+	// Either way, line should begin with "go object ".
+	if !strings.HasPrefix(string(line), "go object ") {
+		err = fmt.Errorf("not a Go object file")
+		return
+	}
+
+	// Skip over object header to export data.
+	// Begins after first line starting with $$.
+	for line[0] != '$' {
+		if line, err = r.ReadSlice('\n'); err != nil {
+			err = fmt.Errorf("can't find export data (%v)", err)
+			return
+		}
+	}
+	hdr = string(line)
+
+	return
+}
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go
new file mode 100644
index 0000000..370203b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go
@@ -0,0 +1,1041 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
+// but it also contains the original source-based importer code for Go1.6.
+// Once we stop supporting 1.6, we can remove that code.
+
+// Package gcimporter15 provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// Deprecated: this package will be deleted in October 2017.
+// New code should use golang.org/x/tools/go/gcexportdata.
+//
+package gcimporter
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"go/build"
+	exact "go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"text/scanner"
+)
+
+// debugging/development support
+const debug = false
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
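+// For example, FindPkg("fmt", ".") typically yields the compiled package
+// file "$GOROOT/pkg/$GOOS_$GOARCH/fmt.a" and the id "fmt".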
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+	if path == "" {
+		return
+	}
+
+	var noext string
+	switch {
+	default:
+		// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+		// Don't require the source files to be present.
+		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+			srcDir = abs
+		}
+		bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+		if bp.PkgObj == "" {
+			return
+		}
+		noext = strings.TrimSuffix(bp.PkgObj, ".a")
+		id = bp.ImportPath
+
+	case build.IsLocalImport(path):
+		// "./x" -> "/this/directory/x.ext", "/this/directory/x"
+		noext = filepath.Join(srcDir, path)
+		id = noext
+
+	case filepath.IsAbs(path):
+		// for completeness only - go/build.Import
+		// does not support absolute imports
+		// "/x" -> "/x.ext", "/x"
+		noext = path
+		id = path
+	}
+
+	if false { // for debugging
+		if path != id {
+			fmt.Printf("%s -> %s\n", path, id)
+		}
+	}
+
+	// try extensions
+	for _, ext := range pkgExts {
+		filename = noext + ext
+		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+			return
+		}
+	}
+
+	filename = "" // not found
+	return
+}
+
+// ImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the packages map indexed by id,
+// and returns the object.
+//
+// The packages map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If packages[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (though
+// doing so is harmless apart from the extra time it takes).
+//
+func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
+	// support for parser error handling
+	defer func() {
+		switch r := recover().(type) {
+		case nil:
+			// nothing to do
+		case importError:
+			err = r
+		default:
+			panic(r) // internal error
+		}
+	}()
+
+	var p parser
+	p.init(filename, id, data, packages)
+	pkg = p.parseExport()
+
+	return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
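+//
+// A minimal use (a sketch; assumes the package "fmt" has been installed):
+//
+//	packages := make(map[string]*types.Package)
+//	pkg, err := Import(packages, "fmt", ".")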
+//
+func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types.Package, err error) {
+	filename, id := FindPkg(path, srcDir)
+	if filename == "" {
+		if path == "unsafe" {
+			return types.Unsafe, nil
+		}
+		err = fmt.Errorf("can't find import: %s", id)
+		return
+	}
+
+	// no need to re-import if the package was imported completely before
+	if pkg = packages[id]; pkg != nil && pkg.Complete() {
+		return
+	}
+
+	// open file
+	f, err := os.Open(filename)
+	if err != nil {
+		return
+	}
+	defer func() {
+		f.Close()
+		if err != nil {
+			// add file name to error
+			err = fmt.Errorf("reading export data: %s: %v", filename, err)
+		}
+	}()
+
+	var hdr string
+	buf := bufio.NewReader(f)
+	if hdr, err = FindExportData(buf); err != nil {
+		return
+	}
+
+	switch hdr {
+	case "$$\n":
+		return ImportData(packages, filename, id, buf)
+	case "$$B\n":
+		var data []byte
+		data, err = ioutil.ReadAll(buf)
+		if err == nil {
+			fset := token.NewFileSet()
+			_, pkg, err = BImportData(fset, packages, data, id)
+			return
+		}
+	default:
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+	}
+
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Parser
+
+// TODO(gri) Imported objects don't have position information.
+//           Ideally use the debug table line info; alternatively
+//           create some fake position (or the position of the
+//           import). That way error messages referring to imported
+//           objects can print meaningful information.
+
+// parser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type parser struct {
+	scanner    scanner.Scanner
+	tok        rune                      // current token
+	lit        string                    // literal string; only valid for Ident, Int, String tokens
+	id         string                    // package id of imported package
+	sharedPkgs map[string]*types.Package // package id -> package object (across importer)
+	localPkgs  map[string]*types.Package // package id -> package object (just this package)
+}
+
+func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
+	p.scanner.Init(src)
+	p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+	p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+	p.scanner.Whitespace = 1<<'\t' | 1<<' '
+	p.scanner.Filename = filename // for good error messages
+	p.next()
+	p.id = id
+	p.sharedPkgs = packages
+	if debug {
+		// check consistency of packages map
+		for _, pkg := range packages {
+			if pkg.Name() == "" {
+				fmt.Printf("no package name for %s\n", pkg.Path())
+			}
+		}
+	}
+}
+
+func (p *parser) next() {
+	p.tok = p.scanner.Scan()
+	switch p.tok {
+	case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
+		p.lit = p.scanner.TokenText()
+	default:
+		p.lit = ""
+	}
+	if debug {
+		fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+	}
+}
+
+func declTypeName(pkg *types.Package, name string) *types.TypeName {
+	scope := pkg.Scope()
+	if obj := scope.Lookup(name); obj != nil {
+		return obj.(*types.TypeName)
+	}
+	obj := types.NewTypeName(token.NoPos, pkg, name, nil)
+	// a named type may be referred to before the underlying type
+	// is known - set it up
+	types.NewNamed(obj, nil, nil)
+	scope.Insert(obj)
+	return obj
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+	pos scanner.Position
+	err error
+}
+
+func (e importError) Error() string {
+	return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *parser) error(err interface{}) {
+	if s, ok := err.(string); ok {
+		err = errors.New(s)
+	}
+	// panic with a runtime.Error if err is not an error
+	panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+	p.error(fmt.Sprintf(format, args...))
+}
+
+func (p *parser) expect(tok rune) string {
+	lit := p.lit
+	if p.tok != tok {
+		p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+	}
+	p.next()
+	return lit
+}
+
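+// expectSpecial consumes the multi-character token tok (e.g. "..." or "<-"),
+// which the scanner delivers one rune at a time; white space between the
+// runes is not permitted.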
+func (p *parser) expectSpecial(tok string) {
+	sep := 'x' // not white space
+	i := 0
+	for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
+		sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+		p.next()
+		i++
+	}
+	if i < len(tok) {
+		p.errorf("expected %q, got %q", tok, tok[0:i])
+	}
+}
+
+func (p *parser) expectKeyword(keyword string) {
+	lit := p.expect(scanner.Ident)
+	if lit != keyword {
+		p.errorf("expected keyword %s, got %q", keyword, lit)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Qualified and unqualified names
+
+// PackageId = string_lit .
+//
+func (p *parser) parsePackageId() string {
+	id, err := strconv.Unquote(p.expect(scanner.String))
+	if err != nil {
+		p.error(err)
+	}
+	// id == "" stands for the imported package id
+	// (only known at time of package installation)
+	if id == "" {
+		id = p.id
+	}
+	return id
+}
+
+// PackageName = ident .
+//
+func (p *parser) parsePackageName() string {
+	return p.expect(scanner.Ident)
+}
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *parser) parseDotIdent() string {
+	ident := ""
+	if p.tok != scanner.Int {
+		sep := 'x' // not white space
+		for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+			ident += p.lit
+			sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+			p.next()
+		}
+	}
+	if ident == "" {
+		p.expect(scanner.Ident) // use expect() for error handling
+	}
+	return ident
+}
+
+// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
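+// For example: @"encoding/json".Marshal .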
+//
+func (p *parser) parseQualifiedName() (id, name string) {
+	p.expect('@')
+	id = p.parsePackageId()
+	p.expect('.')
+	// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
+	if p.tok == '?' {
+		p.next()
+	} else {
+		name = p.parseDotIdent()
+	}
+	return
+}
+
+// getPkg returns the package for a given id. If the package is
+// not found, create the package and add it to the p.localPkgs
+// and p.sharedPkgs maps. name is the (expected) name of the
+// package. If name == "", the package name is expected to be
+// set later via an import clause in the export data.
+//
+// id identifies a package, usually by a canonical package path like
+// "encoding/json" but possibly by a non-canonical import path like
+// "./json".
+//
+func (p *parser) getPkg(id, name string) *types.Package {
+	// package unsafe is not in the packages maps - handle explicitly
+	if id == "unsafe" {
+		return types.Unsafe
+	}
+
+	pkg := p.localPkgs[id]
+	if pkg == nil {
+		// first import of id from this package
+		pkg = p.sharedPkgs[id]
+		if pkg == nil {
+			// first import of id by this importer;
+			// add (possibly unnamed) pkg to shared packages
+			pkg = types.NewPackage(id, name)
+			p.sharedPkgs[id] = pkg
+		}
+		// add (possibly unnamed) pkg to local packages
+		if p.localPkgs == nil {
+			p.localPkgs = make(map[string]*types.Package)
+		}
+		p.localPkgs[id] = pkg
+	} else if name != "" {
+		// package exists already and we have an expected package name;
+		// make sure names match or set package name if necessary
+		if pname := pkg.Name(); pname == "" {
+			pkg.SetName(name)
+		} else if pname != name {
+			p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
+		}
+	}
+	return pkg
+}
+
+// parseExportedName is like parseQualifiedName, but
+// the package id is resolved to an imported *types.Package.
+//
+func (p *parser) parseExportedName() (pkg *types.Package, name string) {
+	id, name := p.parseQualifiedName()
+	pkg = p.getPkg(id, "")
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *parser) parseBasicType() types.Type {
+	id := p.expect(scanner.Ident)
+	obj := types.Universe.Lookup(id)
+	if obj, ok := obj.(*types.TypeName); ok {
+		return obj.Type()
+	}
+	p.errorf("not a basic type: %s", id)
+	return nil
+}
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *parser) parseArrayType(parent *types.Package) types.Type {
+	// "[" already consumed and lookahead known not to be "]"
+	lit := p.expect(scanner.Int)
+	p.expect(']')
+	elem := p.parseType(parent)
+	n, err := strconv.ParseInt(lit, 10, 64)
+	if err != nil {
+		p.error(err)
+	}
+	return types.NewArray(elem, n)
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *parser) parseMapType(parent *types.Package) types.Type {
+	p.expectKeyword("map")
+	p.expect('[')
+	key := p.parseType(parent)
+	p.expect(']')
+	elem := p.parseType(parent)
+	return types.NewMap(key, elem)
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// For unqualified and anonymous names, the returned package is the parent
+// package unless parent == nil, in which case the returned package is the
+// package being imported. (The parent package is not nil if the name
+// is an unqualified struct field or interface method name belonging to a
+// type declared in another package.)
+//
+// For qualified names, the returned package is nil (and not created if
+// it doesn't exist yet) unless materializePkg is set (which creates an
+// unnamed package with valid package path). In the latter case, a
+// subsequent import clause is expected to provide a name for the package.
+//
+func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
+	pkg = parent
+	if pkg == nil {
+		pkg = p.sharedPkgs[p.id]
+	}
+	switch p.tok {
+	case scanner.Ident:
+		name = p.lit
+		p.next()
+	case '?':
+		// anonymous
+		p.next()
+	case '@':
+		// exported name prefixed with package path
+		pkg = nil
+		var id string
+		id, name = p.parseQualifiedName()
+		if materializePkg {
+			pkg = p.getPkg(id, "")
+		}
+	default:
+		p.error("name expected")
+	}
+	return
+}
+
+func deref(typ types.Type) types.Type {
+	if p, _ := typ.(*types.Pointer); p != nil {
+		return p.Elem()
+	}
+	return typ
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
+	pkg, name := p.parseName(parent, true)
+
+	if name == "_" {
+		// Blank fields should be package-qualified because they
+		// are unexported identifiers, but gc does not qualify them.
+		// Assuming that the ident belongs to the current package
+		// causes types to change during re-exporting, leading
+		// to spurious "can't assign A to B" errors from go/types.
+		// As a workaround, pretend all blank fields belong
+		// to the same unique dummy package.
+		const blankpkg = "<_>"
+		pkg = p.getPkg(blankpkg, blankpkg)
+	}
+
+	typ := p.parseType(parent)
+	anonymous := false
+	if name == "" {
+		// anonymous field - typ must be T or *T and T must be a type name
+		switch typ := deref(typ).(type) {
+		case *types.Basic: // basic types are named types
+			pkg = nil // objects defined in Universe scope have no package
+			name = typ.Name()
+		case *types.Named:
+			name = typ.Obj().Name()
+		default:
+			p.errorf("anonymous field expected")
+		}
+		anonymous = true
+	}
+	tag := ""
+	if p.tok == scanner.String {
+		s := p.expect(scanner.String)
+		var err error
+		tag, err = strconv.Unquote(s)
+		if err != nil {
+			p.errorf("invalid struct tag %s: %s", s, err)
+		}
+	}
+	return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList  = Field { ";" Field } .
+//
+func (p *parser) parseStructType(parent *types.Package) types.Type {
+	var fields []*types.Var
+	var tags []string
+
+	p.expectKeyword("struct")
+	p.expect('{')
+	for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+		if i > 0 {
+			p.expect(';')
+		}
+		fld, tag := p.parseField(parent)
+		if tag != "" && tags == nil {
+			tags = make([]string, i)
+		}
+		if tags != nil {
+			tags = append(tags, tag)
+		}
+		fields = append(fields, fld)
+	}
+	p.expect('}')
+
+	return types.NewStruct(fields, tags)
+}
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
+//
+func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
+	_, name := p.parseName(nil, false)
+	// remove gc-specific parameter numbering
+	if i := strings.Index(name, "·"); i >= 0 {
+		name = name[:i]
+	}
+	if p.tok == '.' {
+		p.expectSpecial("...")
+		isVariadic = true
+	}
+	typ := p.parseType(nil)
+	if isVariadic {
+		typ = types.NewSlice(typ)
+	}
+	// ignore argument tag (e.g. "noescape")
+	if p.tok == scanner.String {
+		p.next()
+	}
+	// TODO(gri) should we provide a package?
+	par = types.NewVar(token.NoPos, nil, name, typ)
+	return
+}
+
+// Parameters    = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
+	p.expect('(')
+	for p.tok != ')' && p.tok != scanner.EOF {
+		if len(list) > 0 {
+			p.expect(',')
+		}
+		par, variadic := p.parseParameter()
+		list = append(list, par)
+		if variadic {
+			if isVariadic {
+				p.error("... not on final argument")
+			}
+			isVariadic = true
+		}
+	}
+	p.expect(')')
+
+	return
+}
+
+// Signature = Parameters [ Result ] .
+// Result    = Type | Parameters .
+//
+func (p *parser) parseSignature(recv *types.Var) *types.Signature {
+	params, isVariadic := p.parseParameters()
+
+	// optional result type
+	var results []*types.Var
+	if p.tok == '(' {
+		var variadic bool
+		results, variadic = p.parseParameters()
+		if variadic {
+			p.error("... not permitted on result type")
+		}
+	}
+
+	return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
+}
+
+// InterfaceType = "interface" "{" [ MethodList ] "}" .
+// MethodList    = Method { ";" Method } .
+// Method        = Name Signature .
+//
+// The methods of embedded interfaces are always "inlined"
+// by the compiler and thus embedded interfaces are never
+// visible in the export data.
+//
+func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
+	var methods []*types.Func
+
+	p.expectKeyword("interface")
+	p.expect('{')
+	for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+		if i > 0 {
+			p.expect(';')
+		}
+		pkg, name := p.parseName(parent, true)
+		sig := p.parseSignature(nil)
+		methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
+	}
+	p.expect('}')
+
+	// Complete requires the type's embedded interfaces to be fully defined,
+	// but we do not define any
+	return types.NewInterface(methods, nil).Complete()
+}
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
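+//
+// For example: "chan int" (bidirectional), "chan <- int" (send-only),
+// and "<- chan int" (receive-only).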
+//
+func (p *parser) parseChanType(parent *types.Package) types.Type {
+	dir := types.SendRecv
+	if p.tok == scanner.Ident {
+		p.expectKeyword("chan")
+		if p.tok == '<' {
+			p.expectSpecial("<-")
+			dir = types.SendOnly
+		}
+	} else {
+		p.expectSpecial("<-")
+		p.expectKeyword("chan")
+		dir = types.RecvOnly
+	}
+	elem := p.parseType(parent)
+	return types.NewChan(dir, elem)
+}
+
+// Type =
+//	BasicType | TypeName | ArrayType | SliceType | StructType |
+//      PointerType | FuncType | InterfaceType | MapType | ChanType |
+//      "(" Type ")" .
+//
+// BasicType   = ident .
+// TypeName    = ExportedName .
+// SliceType   = "[" "]" Type .
+// PointerType = "*" Type .
+// FuncType    = "func" Signature .
+//
+func (p *parser) parseType(parent *types.Package) types.Type {
+	switch p.tok {
+	case scanner.Ident:
+		switch p.lit {
+		default:
+			return p.parseBasicType()
+		case "struct":
+			return p.parseStructType(parent)
+		case "func":
+			// FuncType
+			p.next()
+			return p.parseSignature(nil)
+		case "interface":
+			return p.parseInterfaceType(parent)
+		case "map":
+			return p.parseMapType(parent)
+		case "chan":
+			return p.parseChanType(parent)
+		}
+	case '@':
+		// TypeName
+		pkg, name := p.parseExportedName()
+		return declTypeName(pkg, name).Type()
+	case '[':
+		p.next() // look ahead
+		if p.tok == ']' {
+			// SliceType
+			p.next()
+			return types.NewSlice(p.parseType(parent))
+		}
+		return p.parseArrayType(parent)
+	case '*':
+		// PointerType
+		p.next()
+		return types.NewPointer(p.parseType(parent))
+	case '<':
+		return p.parseChanType(parent)
+	case '(':
+		// "(" Type ")"
+		p.next()
+		typ := p.parseType(parent)
+		p.expect(')')
+		return typ
+	}
+	p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" PackageName PackageId .
+//
+func (p *parser) parseImportDecl() {
+	p.expectKeyword("import")
+	name := p.parsePackageName()
+	p.getPkg(p.parsePackageId(), name)
+}
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *parser) parseInt() string {
+	s := ""
+	switch p.tok {
+	case '-':
+		s = "-"
+		p.next()
+	case '+':
+		p.next()
+	}
+	return s + p.expect(scanner.Int)
+}
+
+// number = int_lit [ "p" int_lit ] .
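+//
+// For example, "-3p-1" denotes the untyped float constant -3/2
+// (mantissa -3 scaled by 2^-1).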
+//
+func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
+	// mantissa
+	mant := exact.MakeFromLiteral(p.parseInt(), token.INT, 0)
+	if mant == nil {
+		panic("invalid mantissa")
+	}
+
+	if p.lit == "p" {
+		// exponent (base 2)
+		p.next()
+		exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
+		if err != nil {
+			p.error(err)
+		}
+		if exp < 0 {
+			denom := exact.MakeInt64(1)
+			denom = exact.Shift(denom, token.SHL, uint(-exp))
+			typ = types.Typ[types.UntypedFloat]
+			val = exact.BinaryOp(mant, token.QUO, denom)
+			return
+		}
+		if exp > 0 {
+			mant = exact.Shift(mant, token.SHL, uint(exp))
+		}
+		typ = types.Typ[types.UntypedFloat]
+		val = mant
+		return
+	}
+
+	typ = types.Typ[types.UntypedInt]
+	val = mant
+	return
+}
+
+// ConstDecl   = "const" ExportedName [ Type ] "=" Literal .
+// Literal     = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
+// bool_lit    = "true" | "false" .
+// complex_lit = "(" float_lit "+" float_lit "i" ")" .
+// rune_lit    = "(" int_lit "+" int_lit ")" .
+// string_lit  = `"` { unicode_char } `"` .
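+//
+// For example (illustrative, approximate export syntax):
+//	const @"foo".Answer int = 42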
+//
+func (p *parser) parseConstDecl() {
+	p.expectKeyword("const")
+	pkg, name := p.parseExportedName()
+
+	var typ0 types.Type
+	if p.tok != '=' {
+		// constant types are never structured - no need for parent type
+		typ0 = p.parseType(nil)
+	}
+
+	p.expect('=')
+	var typ types.Type
+	var val exact.Value
+	switch p.tok {
+	case scanner.Ident:
+		// bool_lit
+		if p.lit != "true" && p.lit != "false" {
+			p.error("expected true or false")
+		}
+		typ = types.Typ[types.UntypedBool]
+		val = exact.MakeBool(p.lit == "true")
+		p.next()
+
+	case '-', scanner.Int:
+		// int_lit
+		typ, val = p.parseNumber()
+
+	case '(':
+		// complex_lit or rune_lit
+		p.next()
+		if p.tok == scanner.Char {
+			p.next()
+			p.expect('+')
+			typ = types.Typ[types.UntypedRune]
+			_, val = p.parseNumber()
+			p.expect(')')
+			break
+		}
+		_, re := p.parseNumber()
+		p.expect('+')
+		_, im := p.parseNumber()
+		p.expectKeyword("i")
+		p.expect(')')
+		typ = types.Typ[types.UntypedComplex]
+		val = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+
+	case scanner.Char:
+		// rune_lit
+		typ = types.Typ[types.UntypedRune]
+		val = exact.MakeFromLiteral(p.lit, token.CHAR, 0)
+		p.next()
+
+	case scanner.String:
+		// string_lit
+		typ = types.Typ[types.UntypedString]
+		val = exact.MakeFromLiteral(p.lit, token.STRING, 0)
+		p.next()
+
+	default:
+		p.errorf("expected literal got %s", scanner.TokenString(p.tok))
+	}
+
+	if typ0 == nil {
+		typ0 = typ
+	}
+
+	pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
+}
+
+// TypeDecl = "type" ExportedName Type .
+//
+func (p *parser) parseTypeDecl() {
+	p.expectKeyword("type")
+	pkg, name := p.parseExportedName()
+	obj := declTypeName(pkg, name)
+
+	// The type object may have been imported before and thus already
+	// have a type associated with it. We still need to parse the type
+	// structure, but throw it away if the object already has a type.
+	// This ensures that all imports refer to the same type object for
+	// a given type declaration.
+	typ := p.parseType(pkg)
+
+	if name := obj.Type().(*types.Named); name.Underlying() == nil {
+		name.SetUnderlying(typ)
+	}
+}
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *parser) parseVarDecl() {
+	p.expectKeyword("var")
+	pkg, name := p.parseExportedName()
+	typ := p.parseType(pkg)
+	pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
+}
+
+// Func = Signature [ Body ] .
+// Body = "{" ... "}" .
+//
+func (p *parser) parseFunc(recv *types.Var) *types.Signature {
+	sig := p.parseSignature(recv)
+	if p.tok == '{' {
+		p.next()
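+		// Skip the inlined body by tracking brace nesting until it closes.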
+		for i := 1; i > 0; p.next() {
+			switch p.tok {
+			case '{':
+				i++
+			case '}':
+				i--
+			}
+		}
+	}
+	return sig
+}
+
+// MethodDecl = "func" Receiver Name Func .
+// Receiver   = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+//
+func (p *parser) parseMethodDecl() {
+	// "func" already consumed
+	p.expect('(')
+	recv, _ := p.parseParameter() // receiver
+	p.expect(')')
+
+	// determine receiver base type object
+	base := deref(recv.Type()).(*types.Named)
+
+	// parse method name, signature, and possibly inlined body
+	_, name := p.parseName(nil, false)
+	sig := p.parseFunc(recv)
+
+	// methods always belong to the same package as the base type object
+	pkg := base.Obj().Pkg()
+
+	// add method to type unless type was imported before
+	// and method exists already
+	// TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
+	base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
+}
+
+// FuncDecl = "func" ExportedName Func .
+//
+func (p *parser) parseFuncDecl() {
+	// "func" already consumed
+	pkg, name := p.parseExportedName()
+	typ := p.parseFunc(nil)
+	pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
+}
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *parser) parseDecl() {
+	if p.tok == scanner.Ident {
+		switch p.lit {
+		case "import":
+			p.parseImportDecl()
+		case "const":
+			p.parseConstDecl()
+		case "type":
+			p.parseTypeDecl()
+		case "var":
+			p.parseVarDecl()
+		case "func":
+			p.next() // look ahead
+			if p.tok == '(' {
+				p.parseMethodDecl()
+			} else {
+				p.parseFuncDecl()
+			}
+		}
+	}
+	p.expect('\n')
+}
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export        = PackageClause { Decl } "$$" .
+// PackageClause = "package" PackageName [ "safe" ] "\n" .
+//
+func (p *parser) parseExport() *types.Package {
+	p.expectKeyword("package")
+	name := p.parsePackageName()
+	if p.tok == scanner.Ident && p.lit == "safe" {
+		// package was compiled with -u option - ignore
+		p.next()
+	}
+	p.expect('\n')
+
+	pkg := p.getPkg(p.id, name)
+
+	for p.tok != '$' && p.tok != scanner.EOF {
+		p.parseDecl()
+	}
+
+	if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+		// don't call next()/expect() since reading past the
+		// export data may cause scanner errors (e.g. NUL chars)
+		p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+	}
+
+	if n := p.scanner.ErrorCount; n != 0 {
+		p.errorf("expected no scanner errors, got %d", n)
+	}
+
+	// Record all locally referenced packages as imports.
+	var imports []*types.Package
+	for id, pkg2 := range p.localPkgs {
+		if pkg2.Name() == "" {
+			p.errorf("%s package has no name", id)
+		}
+		if id == p.id {
+			continue // avoid self-edge
+		}
+		imports = append(imports, pkg2)
+	}
+	sort.Sort(byPath(imports))
+	pkg.SetImports(imports)
+
+	// package was imported completely and without errors
+	pkg.MarkComplete()
+
+	return pkg
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int           { return len(a) }
+func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/present/args.go b/vendor/golang.org/x/tools/present/args.go
new file mode 100644
index 0000000..49ee1a9
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/args.go
@@ -0,0 +1,229 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"errors"
+	"regexp"
+	"strconv"
+	"unicode/utf8"
+)
+
+// This file is stolen from go/src/cmd/godoc/codewalk.go.
+// It's an evaluator for the file address syntax implemented by acme and sam,
+// but using Go-native regular expressions.
+// To keep things reasonably close, this version uses (?m:re) for all user-provided
+// regular expressions. That is the only change to the code from codewalk.go.
+// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
+// for details on the syntax.
+
+// addrToByteRange evaluates the given address starting at offset start in data.
+// It returns the lo and hi byte offset of the matched region within data.
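+// For example, the address "/^func main/,/^}/" selects the range from the
+// line matching /^func main/ through the next line matching /^}/.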
+func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
+	if addr == "" {
+		lo, hi = start, len(data)
+		return
+	}
+	var (
+		dir        byte
+		prevc      byte
+		charOffset bool
+	)
+	lo = start
+	hi = start
+	for addr != "" && err == nil {
+		c := addr[0]
+		switch c {
+		default:
+			err = errors.New("invalid address syntax near " + string(c))
+		case ',':
+			if len(addr) == 1 {
+				hi = len(data)
+			} else {
+				_, hi, err = addrToByteRange(addr[1:], hi, data)
+			}
+			return
+
+		case '+', '-':
+			if prevc == '+' || prevc == '-' {
+				lo, hi, err = addrNumber(data, lo, hi, prevc, 1, charOffset)
+			}
+			dir = c
+
+		case '$':
+			lo = len(data)
+			hi = len(data)
+			if len(addr) > 1 {
+				dir = '+'
+			}
+
+		case '#':
+			charOffset = true
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			var i int
+			for i = 1; i < len(addr); i++ {
+				if addr[i] < '0' || addr[i] > '9' {
+					break
+				}
+			}
+			var n int
+			n, err = strconv.Atoi(addr[0:i])
+			if err != nil {
+				break
+			}
+			lo, hi, err = addrNumber(data, lo, hi, dir, n, charOffset)
+			dir = 0
+			charOffset = false
+			prevc = c
+			addr = addr[i:]
+			continue
+
+		case '/':
+			var i, j int
+		Regexp:
+			for i = 1; i < len(addr); i++ {
+				switch addr[i] {
+				case '\\':
+					i++
+				case '/':
+					j = i + 1
+					break Regexp
+				}
+			}
+			if j == 0 {
+				j = i
+			}
+			pattern := addr[1:i]
+			lo, hi, err = addrRegexp(data, lo, hi, dir, pattern)
+			prevc = c
+			addr = addr[j:]
+			continue
+		}
+		prevc = c
+		addr = addr[1:]
+	}
+
+	if err == nil && dir != 0 {
+		lo, hi, err = addrNumber(data, lo, hi, dir, 1, charOffset)
+	}
+	if err != nil {
+		return 0, 0, err
+	}
+	return lo, hi, nil
+}
+
+// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
+// dir is '+' or '-', n is the count, and charOffset is true if the syntax
+// used was #n.  Applying +n (or +#n) means to advance n lines
+// (or characters) after hi.  Applying -n (or -#n) means to back up n lines
+// (or characters) before lo.
+// The return value is the new lo, hi.
+func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, error) {
+	switch dir {
+	case 0:
+		lo = 0
+		hi = 0
+		fallthrough
+
+	case '+':
+		if charOffset {
+			pos := hi
+			for ; n > 0 && pos < len(data); n-- {
+				_, size := utf8.DecodeRune(data[pos:])
+				pos += size
+			}
+			if n == 0 {
+				return pos, pos, nil
+			}
+			break
+		}
+		// find next beginning of line
+		if hi > 0 {
+			for hi < len(data) && data[hi-1] != '\n' {
+				hi++
+			}
+		}
+		lo = hi
+		if n == 0 {
+			return lo, hi, nil
+		}
+		for ; hi < len(data); hi++ {
+			if data[hi] != '\n' {
+				continue
+			}
+			switch n--; n {
+			case 1:
+				lo = hi + 1
+			case 0:
+				return lo, hi + 1, nil
+			}
+		}
+
+	case '-':
+		if charOffset {
+			// Scan backward for bytes that are not UTF-8 continuation bytes.
+			pos := lo
+			for ; pos > 0 && n > 0; pos-- {
+				if data[pos]&0xc0 != 0x80 {
+					n--
+				}
+			}
+			if n == 0 {
+				return pos, pos, nil
+			}
+			break
+		}
+		// find earlier beginning of line
+		for lo > 0 && data[lo-1] != '\n' {
+			lo--
+		}
+		hi = lo
+		if n == 0 {
+			return lo, hi, nil
+		}
+		for ; lo >= 0; lo-- {
+			if lo > 0 && data[lo-1] != '\n' {
+				continue
+			}
+			switch n--; n {
+			case 1:
+				hi = lo
+			case 0:
+				return lo, hi, nil
+			}
+		}
+	}
+
+	return 0, 0, errors.New("address out of range")
+}
+
+// addrRegexp searches for pattern in the given direction starting at lo, hi.
+// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
+// Backward searches are unimplemented.
+func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, error) {
+	// We want ^ and $ to work as in sam/acme, so use ?m.
+	re, err := regexp.Compile("(?m:" + pattern + ")")
+	if err != nil {
+		return 0, 0, err
+	}
+	if dir == '-' {
+		// Could implement reverse search using binary search
+		// through file, but that seems like overkill.
+		return 0, 0, errors.New("reverse search not implemented")
+	}
+	m := re.FindIndex(data[hi:])
+	if len(m) > 0 {
+		m[0] += hi
+		m[1] += hi
+	} else if hi > 0 {
+		// No match.  Wrap to beginning of data.
+		m = re.FindIndex(data)
+	}
+	if len(m) == 0 {
+		return 0, 0, errors.New("no match for " + pattern)
+	}
+	return m[0], m[1], nil
+}
diff --git a/vendor/golang.org/x/tools/present/background.go b/vendor/golang.org/x/tools/present/background.go
new file mode 100644
index 0000000..0a6216a
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/background.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"strings"
+)
+
+func init() {
+	Register("background", parseBackground)
+}
+
+type Background struct {
+	URL string
+}
+
+func (i Background) TemplateName() string { return "background" }
+
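+// parseBackground parses a ".background <url>" invocation.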
+func parseBackground(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	args := strings.Fields(text)
+	background := Background{URL: args[1]}
+	return background, nil
+}
diff --git a/vendor/golang.org/x/tools/present/caption.go b/vendor/golang.org/x/tools/present/caption.go
new file mode 100644
index 0000000..00e0b5d
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/caption.go
@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import "strings"
+
+func init() {
+	Register("caption", parseCaption)
+}
+
+type Caption struct {
+	Text string
+}
+
+func (c Caption) TemplateName() string { return "caption" }
+
+func parseCaption(_ *Context, _ string, _ int, text string) (Elem, error) {
+	text = strings.TrimSpace(strings.TrimPrefix(text, ".caption"))
+	return Caption{text}, nil
+}
diff --git a/vendor/golang.org/x/tools/present/code.go b/vendor/golang.org/x/tools/present/code.go
new file mode 100644
index 0000000..67a79db
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/code.go
@@ -0,0 +1,308 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"html/template"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// PlayEnabled specifies whether runnable playground snippets should be
+// displayed in the present user interface.
+var PlayEnabled = false
+
+// TODO(adg): replace the PlayEnabled flag with something less spaghetti-like.
+// Instead this will probably be determined by a template execution Context
+// value that contains various global metadata required when rendering
+// templates.
+
+// NotesEnabled specifies whether presenter notes should be displayed in the
+// present user interface.
+var NotesEnabled = false
+
+func init() {
+	Register("code", parseCode)
+	Register("play", parseCode)
+}
+
+type Code struct {
+	Text     template.HTML
+	Play     bool   // runnable code
+	Edit     bool   // editable code
+	FileName string // file name
+	Ext      string // file extension
+	Raw      []byte // content of the file
+}
+
+func (c Code) TemplateName() string { return "code" }
+
+// The input line is a .code or .play entry with a file name and an optional HLfoo marker on the end.
+// Anything between the file and HL (if any) is an address expression, which we treat as a string here.
+// We pick off the HL first, for easy parsing.
+var (
+	highlightRE = regexp.MustCompile(`\s+HL([a-zA-Z0-9_]+)?$`)
+	hlCommentRE = regexp.MustCompile(`(.+) // HL(.*)$`)
+	codeRE      = regexp.MustCompile(`\.(code|play)\s+((?:(?:-edit|-numbers)\s+)*)([^\s]+)(?:\s+(.*))?$`)
+)
+
+// parseCode parses a code present directive. Its syntax:
+//   .code [-numbers] [-edit] <filename> [address] [highlight]
+// The directive may also be ".play" if the snippet is executable.
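+// For example: .play -edit demo.go /^func main/,/^}/ HLxxx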
+func parseCode(ctx *Context, sourceFile string, sourceLine int, cmd string) (Elem, error) {
+	cmd = strings.TrimSpace(cmd)
+
+	// Pull off the HL, if any, from the end of the input line.
+	highlight := ""
+	if hl := highlightRE.FindStringSubmatchIndex(cmd); len(hl) == 4 {
+		if hl[2] < 0 || hl[3] < 0 {
+			return nil, fmt.Errorf("%s:%d invalid highlight syntax", sourceFile, sourceLine)
+		}
+		highlight = cmd[hl[2]:hl[3]]
+		cmd = cmd[:hl[2]-2]
+	}
+
+	// Parse the remaining command line.
+	// Arguments:
+	// args[0]: whole match
+	// args[1]: "code" or "play" (the directive, without the leading period)
+	// args[2]: flags ("-edit -numbers")
+	// args[3]: file name
+	// args[4]: optional address
+	args := codeRE.FindStringSubmatch(cmd)
+	if len(args) != 5 {
+		return nil, fmt.Errorf("%s:%d: syntax error for .code/.play invocation", sourceFile, sourceLine)
+	}
+	command, flags, file, addr := args[1], args[2], args[3], strings.TrimSpace(args[4])
+	play := command == "play" && PlayEnabled
+
+	// Read in code file and (optionally) match address.
+	filename := filepath.Join(filepath.Dir(sourceFile), file)
+	textBytes, err := ctx.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("%s:%d: %v", sourceFile, sourceLine, err)
+	}
+	lo, hi, err := addrToByteRange(addr, 0, textBytes)
+	if err != nil {
+		return nil, fmt.Errorf("%s:%d: %v", sourceFile, sourceLine, err)
+	}
+	if lo > hi {
+		// The search in addrToByteRange can wrap around so we might
+		// end up with the range ending before its starting point
+		hi, lo = lo, hi
+	}
+
+	// Acme pattern matches can stop mid-line,
+	// so run to end of line in both directions if not at line start/end.
+	for lo > 0 && textBytes[lo-1] != '\n' {
+		lo--
+	}
+	if hi > 0 {
+		for hi < len(textBytes) && textBytes[hi-1] != '\n' {
+			hi++
+		}
+	}
+
+	lines := codeLines(textBytes, lo, hi)
+
+	data := &codeTemplateData{
+		Lines:   formatLines(lines, highlight),
+		Edit:    strings.Contains(flags, "-edit"),
+		Numbers: strings.Contains(flags, "-numbers"),
+	}
+
+	// Include before and after in a hidden span for playground code.
+	if play {
+		data.Prefix = textBytes[:lo]
+		data.Suffix = textBytes[hi:]
+	}
+
+	var buf bytes.Buffer
+	if err := codeTemplate.Execute(&buf, data); err != nil {
+		return nil, err
+	}
+	return Code{
+		Text:     template.HTML(buf.String()),
+		Play:     play,
+		Edit:     data.Edit,
+		FileName: filepath.Base(filename),
+		Ext:      filepath.Ext(filename),
+		Raw:      rawCode(lines),
+	}, nil
+}
+
+// formatLines returns a new slice of codeLine with the given lines
+// replacing tabs with spaces and adding highlighting where needed.
+func formatLines(lines []codeLine, highlight string) []codeLine {
+	formatted := make([]codeLine, len(lines))
+	for i, line := range lines {
+		// Replace tabs with spaces, which work better in HTML.
+		line.L = strings.Replace(line.L, "\t", "    ", -1)
+
+		// Highlight lines that end with "// HL[highlight]"
+		// and strip the magic comment.
+		if m := hlCommentRE.FindStringSubmatch(line.L); m != nil {
+			line.L = m[1]
+			line.HL = m[2] == highlight
+		}
+
+		formatted[i] = line
+	}
+	return formatted
+}
+
+// rawCode returns the code represented by the given codeLines without any kind
+// of formatting.
+func rawCode(lines []codeLine) []byte {
+	b := new(bytes.Buffer)
+	for _, line := range lines {
+		b.WriteString(line.L)
+		b.WriteByte('\n')
+	}
+	return b.Bytes()
+}
+
+type codeTemplateData struct {
+	Lines          []codeLine
+	Prefix, Suffix []byte
+	Edit, Numbers  bool
+}
+
+var leadingSpaceRE = regexp.MustCompile(`^[ \t]*`)
+
+var codeTemplate = template.Must(template.New("code").Funcs(template.FuncMap{
+	"trimSpace":    strings.TrimSpace,
+	"leadingSpace": leadingSpaceRE.FindString,
+}).Parse(codeTemplateHTML))
+
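+// In codeTemplateHTML below, the {{/* ... */}} comments swallow the
+// newlines between template actions so that no stray whitespace is
+// emitted inside the rendered <pre> block.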
+const codeTemplateHTML = `
+{{with .Prefix}}<pre style="display: none"><span>{{printf "%s" .}}</span></pre>{{end}}
+
+<pre{{if .Edit}} contenteditable="true" spellcheck="false"{{end}}{{if .Numbers}} class="numbers"{{end}}>{{/*
+	*/}}{{range .Lines}}<span num="{{.N}}">{{/*
+	*/}}{{if .HL}}{{leadingSpace .L}}<b>{{trimSpace .L}}</b>{{/*
+	*/}}{{else}}{{.L}}{{end}}{{/*
+*/}}</span>
+{{end}}</pre>
+
+{{with .Suffix}}<pre style="display: none"><span>{{printf "%s" .}}</span></pre>{{end}}
+`
+
+// codeLine represents a line of code extracted from a source file.
+type codeLine struct {
+	L  string // The line of code.
+	N  int    // The line number from the source file.
+	HL bool   // Whether the line should be highlighted.
+}
+
+// codeLines takes a source file and returns the lines that
+// span the byte range specified by start and end.
+// It discards lines that end in "OMIT".
+func codeLines(src []byte, start, end int) (lines []codeLine) {
+	startLine := 1
+	for i, b := range src {
+		if i == start {
+			break
+		}
+		if b == '\n' {
+			startLine++
+		}
+	}
+	s := bufio.NewScanner(bytes.NewReader(src[start:end]))
+	for n := startLine; s.Scan(); n++ {
+		l := s.Text()
+		if strings.HasSuffix(l, "OMIT") {
+			continue
+		}
+		lines = append(lines, codeLine{L: l, N: n})
+	}
+	// Trim leading and trailing blank lines.
+	for len(lines) > 0 && len(lines[0].L) == 0 {
+		lines = lines[1:]
+	}
+	for len(lines) > 0 && len(lines[len(lines)-1].L) == 0 {
+		lines = lines[:len(lines)-1]
+	}
+	return
+}
+
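+// parseArgs converts the raw argument strings of an invocation into typed
+// values: decimal integers, /regexp/ patterns (kept as strings), the
+// literal "$", or nil for the placeholder "_".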
+func parseArgs(name string, line int, args []string) (res []interface{}, err error) {
+	res = make([]interface{}, len(args))
+	for i, v := range args {
+		if len(v) == 0 {
+			return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
+		}
+		switch v[0] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			n, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
+			}
+			res[i] = n
+		case '/':
+			if len(v) < 2 || v[len(v)-1] != '/' {
+				return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
+			}
+			res[i] = v
+		case '$':
+			res[i] = "$"
+		case '_':
+			if len(v) == 1 {
+				// Do nothing; "_" indicates an intentionally empty parameter.
+				break
+			}
+			fallthrough
+		default:
+			return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
+		}
+	}
+	return
+}
+
+// parseArg returns the integer or string value of the argument and tells which it is.
+func parseArg(arg interface{}, max int) (ival int, sval string, isInt bool, err error) {
+	switch n := arg.(type) {
+	case int:
+		if n <= 0 || n > max {
+			return 0, "", false, fmt.Errorf("%d is out of range", n)
+		}
+		return n, "", true, nil
+	case string:
+		return 0, n, false, nil
+	}
+	return 0, "", false, fmt.Errorf("unrecognized argument %v type %T", arg, arg)
+}
+
+// match identifies the input line that matches the pattern in a code invocation.
+// If start>0, match lines starting there rather than at the beginning.
+// The return value is 1-indexed.
+func match(file string, start int, lines []string, pattern string) (int, error) {
+	// $ matches the end of the file.
+	if pattern == "$" {
+		if len(lines) == 0 {
+			return 0, fmt.Errorf("%q: empty file", file)
+		}
+		return len(lines), nil
+	}
+	// /regexp/ matches the line that matches the regexp.
+	if len(pattern) > 2 && pattern[0] == '/' && pattern[len(pattern)-1] == '/' {
+		re, err := regexp.Compile(pattern[1 : len(pattern)-1])
+		if err != nil {
+			return 0, err
+		}
+		for i := start; i < len(lines); i++ {
+			if re.MatchString(lines[i]) {
+				return i + 1, nil
+			}
+		}
+		return 0, fmt.Errorf("%s: no match for %#q", file, pattern)
+	}
+	return 0, fmt.Errorf("unrecognized pattern: %q", pattern)
+}
diff --git a/vendor/golang.org/x/tools/present/doc.go b/vendor/golang.org/x/tools/present/doc.go
new file mode 100644
index 0000000..d251df9
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/doc.go
@@ -0,0 +1,258 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+The present file format
+
+Present files have the following format.  The first non-blank non-comment
+line is the title, so the header looks like
+
+	Title of document
+	Subtitle of document
+	15:04 2 Jan 2006
+	Tags: foo, bar, baz
+	<blank line>
+	Author Name
+	Job title, Company
+	joe@example.com
+	http://url/
+	@twitter_name
+
+The subtitle, date, and tags lines are optional.
+
+The date line may be written without a time:
+	2 Jan 2006
+In this case, the time will be interpreted as 11am UTC on that date.
+
+The tags line is a comma-separated list of tags that may be used to categorize
+the document.
+
+The author section may contain a mixture of text, twitter names, and links.
+For slide presentations, only the plain text lines will be displayed on the
+first slide.
+
+Multiple presenters may be specified, separated by a blank line.
+
+After that come slides/sections, each after a blank line:
+
+	* Title of slide or section (must have asterisk)
+
+	Some Text
+
+	** Subsection
+
+	- bullets
+	- more bullets
+	- a bullet with
+
+	*** Sub-subsection
+
+	Some More text
+
+	  Preformatted text
+	  is indented (however you like)
+
+	Further Text, including invocations like:
+
+	.code x.go /^func main/,/^}/
+	.play y.go
+	.image image.jpg
+	.background image.jpg
+	.iframe http://foo
+	.link http://foo label
+	.html file.html
+	.caption _Gopher_ by [[http://www.reneefrench.com][Renée French]]
+
+	Again, more text
+
+Blank lines are OK (not mandatory) after the title and after the
+text.  Text, bullets, and .code etc. are all optional; title is
+not.
+
+Lines starting with # in column 1 are commentary.
+
+Fonts:
+
+Within the input for plain text or lists, text bracketed by font
+markers will be presented in italic, bold, or program font.
+Marker characters are _ (italic), * (bold) and ` (program font).
+Unmatched markers appear as plain text.
+Within marked text, a single marker character becomes a space
+and a doubled single marker quotes the marker character.
+
+	_italic_
+	*bold*
+	`program`
+	_this_is_all_italic_
+	_Why_use_scoped__ptr_? Use plain ***ptr* instead.
+
+Inline links:
+
+Links can be included in any text with the form [[url][label]], or
+[[url]] to use the URL itself as the label.
+
+Functions:
+
+A number of template functions are available through invocations
+in the input text. Each such invocation contains a period as the
+first character on the line, followed immediately by the name of
+the function, followed by any arguments. A typical invocation might
+be
+	.play demo.go /^func show/,/^}/
+(except that the ".play" must be at the beginning of the line and
+not be indented like this.)
+
+Here follows a description of the functions:
+
+code:
+
+Injects program source into the output by extracting code from files
+and injecting it as HTML-escaped <pre> blocks.  The argument is
+a file name followed by an optional address that specifies what
+section of the file to display. The address syntax is similar in
+its simplest form to that of ed, but comes from sam and is more
+general. See
+	http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
+for full details. The displayed block is always rounded out to a
+full line at both ends.
+
+If no pattern is present, the entire file is displayed.
+
+Any line in the program that ends with the four characters
+	OMIT
+is deleted from the source before inclusion, making it easy
+to write things like
+	.code test.go /START OMIT/,/END OMIT/
+to find snippets like this
+	tedious_code = boring_function()
+	// START OMIT
+	interesting_code = fascinating_function()
+	// END OMIT
+and see only this:
+	interesting_code = fascinating_function()
+
+Also, inside the displayed text a line that ends
+	// HL
+will be highlighted in the display; the 'h' key in the browser will
+toggle extra emphasis of any highlighted lines. A highlighting mark
+may have a suffix word, such as
+	// HLxxx
+Such highlights are enabled only if the code invocation ends with
+"HL" followed by the word:
+	.code test.go /^type Foo/,/^}/ HLxxx
+
+The .code function may take one or more flags immediately preceding
+the filename. This command shows test.go in an editable text area:
+	.code -edit test.go
+This command shows test.go with line numbers:
+	.code -numbers test.go
+
+play:
+
+The function "play" is the same as "code" but puts a button
+on the displayed source so the program can be run from the browser.
+Although only the selected text is shown, all the source is included
+in the HTML output so it can be presented to the compiler.
+
+link:
+
+Create a hyperlink. The syntax is 1 or 2 space-separated arguments.
+The first argument is always the HTTP URL.  If there is a second
+argument, it is the text label to display for this link.
+
+	.link http://golang.org golang.org
+
+image:
+
+The template uses the function "image" to inject picture files.
+
+The syntax is simple: 1 or 3 space-separated arguments.
+The first argument is always the file name.
+If there are more arguments, they are the height and width;
+both must be present, or substituted with an underscore.
+Replacing a dimension argument with the underscore parameter
+preserves the aspect ratio of the image when scaling.
+
+	.image images/betsy.jpg 100 200
+
+	.image images/janet.jpg _ 300
+
+video:
+
+The template uses the function "video" to inject video files.
+
+The syntax is simple: 2 or 4 space-separated arguments.
+The first argument is always the file name.
+The second argument is always the file content-type.
+If there are more arguments, they are the height and width;
+both must be present, or substituted with an underscore.
+Replacing a dimension argument with the underscore parameter
+preserves the aspect ratio of the video when scaling.
+
+	.video videos/evangeline.mp4 video/mp4 400 600
+
+	.video videos/mabel.ogg video/ogg 500 _
+
+background:
+
+The template uses the function "background" to set the background image for
+a slide.  The only argument is the file name of the image.
+
+	.background images/susan.jpg
+
+caption:
+
+The template uses the function "caption" to inject figure captions.
+
+The text after ".caption" is embedded in a figcaption element after
+processing styling and links as in standard text lines.
+
+	.caption _Gopher_ by [[http://www.reneefrench.com][Renée French]]
+
+iframe:
+
+The function "iframe" injects iframes (pages inside pages).
+Its syntax is the same as that of image.
+
+html:
+
+The function html includes the contents of the specified file as
+unescaped HTML. This is useful for including custom HTML elements
+that cannot be created using only the slide format.
+It is your responsibility to make sure the included HTML is valid and safe.
+
+	.html file.html
+
+Presenter notes:
+
+Presenter notes may be enabled by appending the "-notes" flag when you run
+your "present" binary.
+
+This will allow you to open a second window by pressing 'N' from your browser
+displaying your slides. The second window is completely synced with your main
+window, except that presenter notes are only visible on the second window.
+
+Lines that begin with ": " are treated as presenter notes.
+
+	* Title of slide
+
+	Some Text
+
+	: Presenter notes (first paragraph)
+	: Presenter notes (subsequent paragraph(s))
+
+Notes may appear anywhere within the slide text. For example:
+
+	* Title of slide
+
+	: Presenter notes (first paragraph)
+
+	Some Text
+
+	: Presenter notes (subsequent paragraph(s))
+
+This has the same result as the example above.
+
+*/
+package present
diff --git a/vendor/golang.org/x/tools/present/html.go b/vendor/golang.org/x/tools/present/html.go
new file mode 100644
index 0000000..cca90ef
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/html.go
@@ -0,0 +1,31 @@
+package present
+
+import (
+	"errors"
+	"html/template"
+	"path/filepath"
+	"strings"
+)
+
+func init() {
+	Register("html", parseHTML)
+}
+
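+// parseHTML parses an ".html <file>" invocation, reading the named file
+// and including its contents as unescaped HTML.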
+func parseHTML(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	p := strings.Fields(text)
+	if len(p) != 2 {
+		return nil, errors.New("invalid .html args")
+	}
+	name := filepath.Join(filepath.Dir(fileName), p[1])
+	b, err := ctx.ReadFile(name)
+	if err != nil {
+		return nil, err
+	}
+	return HTML{template.HTML(b)}, nil
+}
+
+type HTML struct {
+	template.HTML
+}
+
+func (s HTML) TemplateName() string { return "html" }
diff --git a/vendor/golang.org/x/tools/present/iframe.go b/vendor/golang.org/x/tools/present/iframe.go
new file mode 100644
index 0000000..2f3c5e5
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/iframe.go
@@ -0,0 +1,45 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"fmt"
+	"strings"
+)
+
+func init() {
+	Register("iframe", parseIframe)
+}
+
+type Iframe struct {
+	URL    string
+	Width  int
+	Height int
+}
+
+func (i Iframe) TemplateName() string { return "iframe" }
+
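+// parseIframe parses an ".iframe <url> [height width]" invocation.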
+func parseIframe(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	args := strings.Fields(text)
+	i := Iframe{URL: args[1]}
+	a, err := parseArgs(fileName, lineno, args[2:])
+	if err != nil {
+		return nil, err
+	}
+	switch len(a) {
+	case 0:
+		// no size parameters
+	case 2:
+		if v, ok := a[0].(int); ok {
+			i.Height = v
+		}
+		if v, ok := a[1].(int); ok {
+			i.Width = v
+		}
+	default:
+		return nil, fmt.Errorf("incorrect iframe invocation: %q", text)
+	}
+	return i, nil
+}
diff --git a/vendor/golang.org/x/tools/present/image.go b/vendor/golang.org/x/tools/present/image.go
new file mode 100644
index 0000000..cfa2af9
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/image.go
@@ -0,0 +1,50 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"fmt"
+	"strings"
+)
+
+func init() {
+	Register("image", parseImage)
+}
+
+type Image struct {
+	URL    string
+	Width  int
+	Height int
+}
+
+func (i Image) TemplateName() string { return "image" }
+
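+// parseImage parses an ".image <url> [height width]" invocation.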
+func parseImage(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	args := strings.Fields(text)
+	img := Image{URL: args[1]}
+	a, err := parseArgs(fileName, lineno, args[2:])
+	if err != nil {
+		return nil, err
+	}
+	switch len(a) {
+	case 0:
+		// no size parameters
+	case 2:
+		// If a parameter is empty (underscore) or invalid
+		// leave the field set to zero. The "image" action
+		// template will then omit that img tag attribute and
+		// the browser will calculate the value to preserve
+		// the aspect ratio.
+		if v, ok := a[0].(int); ok {
+			img.Height = v
+		}
+		if v, ok := a[1].(int); ok {
+			img.Width = v
+		}
+	default:
+		return nil, fmt.Errorf("incorrect image invocation: %q", text)
+	}
+	return img, nil
+}
diff --git a/vendor/golang.org/x/tools/present/link.go b/vendor/golang.org/x/tools/present/link.go
new file mode 100644
index 0000000..6b0968f
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/link.go
@@ -0,0 +1,97 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+)
+
+func init() {
+	Register("link", parseLink)
+}
+
+type Link struct {
+	URL   *url.URL
+	Label string
+}
+
+func (l Link) TemplateName() string { return "link" }
+
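+// parseLink parses a ".link <url> [label]" invocation. If no label is
+// given, the URL itself (minus its scheme prefix) serves as the label.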
+func parseLink(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	args := strings.Fields(text)
+	url, err := url.Parse(args[1])
+	if err != nil {
+		return nil, err
+	}
+	label := ""
+	if len(args) > 2 {
+		label = strings.Join(args[2:], " ")
+	} else {
+		scheme := url.Scheme + "://"
+		if url.Scheme == "mailto" {
+			scheme = "mailto:"
+		}
+		label = strings.Replace(url.String(), scheme, "", 1)
+	}
+	return Link{url, label}, nil
+}
+
+func renderLink(href, text string) string {
+	text = font(text)
+	if text == "" {
+		text = href
+	}
+	// Open links in new window only when their url is absolute.
+	target := "_blank"
+	if u, err := url.Parse(href); err != nil {
+		log.Println("renderLink parsing url:", err)
+	} else if !u.IsAbs() || u.Scheme == "javascript" {
+		target = "_self"
+	}
+
+	return fmt.Sprintf(`<a href="%s" target="%s">%s</a>`, href, target, text)
+}
+
+// parseInlineLink parses an inline link at the start of s, and returns
+// a rendered HTML link and the total length of the raw inline link.
+// If no inline link is present, it returns all zeroes.
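+// For example, parseInlineLink("[[http://golang.org][Go]] ...") returns an
+// <a> element for http://golang.org labeled "Go" and the length of the
+// raw [[...]] text.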
+func parseInlineLink(s string) (link string, length int) {
+	if !strings.HasPrefix(s, "[[") {
+		return
+	}
+	end := strings.Index(s, "]]")
+	if end == -1 {
+		return
+	}
+	urlEnd := strings.Index(s, "]")
+	rawURL := s[2:urlEnd]
+	const badURLChars = `<>"{}|\^[] ` + "`" // per RFC2396 section 2.4.3
+	if strings.ContainsAny(rawURL, badURLChars) {
+		return
+	}
+	if urlEnd == end {
+		simpleUrl := ""
+		url, err := url.Parse(rawURL)
+		if err == nil {
+			// If the URL is http://foo.com, drop the http://
+			// In other words, render [[http://golang.org]] as:
+			//   <a href="http://golang.org">golang.org</a>
+			if strings.HasPrefix(rawURL, url.Scheme+"://") {
+				simpleUrl = strings.TrimPrefix(rawURL, url.Scheme+"://")
+			} else if strings.HasPrefix(rawURL, url.Scheme+":") {
+				simpleUrl = strings.TrimPrefix(rawURL, url.Scheme+":")
+			}
+		}
+		return renderLink(rawURL, simpleUrl), end + 2
+	}
+	if s[urlEnd:urlEnd+2] != "][" {
+		return
+	}
+	text := s[urlEnd+2 : end]
+	return renderLink(rawURL, text), end + 2
+}
diff --git a/vendor/golang.org/x/tools/present/parse.go b/vendor/golang.org/x/tools/present/parse.go
new file mode 100644
index 0000000..b26d65b
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/parse.go
@@ -0,0 +1,534 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"html/template"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+var (
+	parsers = make(map[string]ParseFunc)
+	funcs   = template.FuncMap{}
+)
+
+// Template returns an empty template with the action functions in its FuncMap.
+func Template() *template.Template {
+	return template.New("").Funcs(funcs)
+}
+
+// Render renders the doc to the given writer using the provided template.
+func (d *Doc) Render(w io.Writer, t *template.Template) error {
+	data := struct {
+		*Doc
+		Template     *template.Template
+		PlayEnabled  bool
+		NotesEnabled bool
+	}{d, t, PlayEnabled, NotesEnabled}
+	return t.ExecuteTemplate(w, "root", data)
+}
+
+// Render renders the section to the given writer using the provided template.
+func (s *Section) Render(w io.Writer, t *template.Template) error {
+	data := struct {
+		*Section
+		Template    *template.Template
+		PlayEnabled bool
+	}{s, t, PlayEnabled}
+	return t.ExecuteTemplate(w, "section", data)
+}
+
+type ParseFunc func(ctx *Context, fileName string, lineNumber int, inputLine string) (Elem, error)
+
+// Register binds the named action, which does not begin with a period, to the
+// specified parser to be invoked when the name, with a period, appears in the
+// present input text.
+func Register(name string, parser ParseFunc) {
+	if len(name) == 0 || name[0] == '.' {
+		panic("bad name in Register: " + name)
+	}
+	parsers["."+name] = parser
+}
+
+// Doc represents an entire document.
+type Doc struct {
+	Title      string
+	Subtitle   string
+	Time       time.Time
+	Authors    []Author
+	TitleNotes []string
+	Sections   []Section
+	Tags       []string
+}
+
+// Author represents the person who wrote and/or is presenting the document.
+type Author struct {
+	Elem []Elem
+}
+
+// TextElem returns the leading text elements of the author details.
+// This is used to display the author's name, job title, and company
+// without the contact details.
+func (p *Author) TextElem() (elems []Elem) {
+	for _, el := range p.Elem {
+		if _, ok := el.(Text); !ok {
+			break
+		}
+		elems = append(elems, el)
+	}
+	return
+}
+
+// Section represents a section of a document (such as a presentation slide)
+// comprising a title and a list of elements.
+type Section struct {
+	Number []int
+	Title  string
+	Elem   []Elem
+	Notes  []string
+}
+
+func (s Section) Sections() (sections []Section) {
+	for _, e := range s.Elem {
+		if section, ok := e.(Section); ok {
+			sections = append(sections, section)
+		}
+	}
+	return
+}
+
+// Level returns the level of the given section.
+// The document title is level 1, main section 2, etc.
+func (s Section) Level() int {
+	return len(s.Number) + 1
+}
+
+// FormattedNumber returns a string containing the concatenation of the
+// numbers identifying a Section.
+func (s Section) FormattedNumber() string {
+	b := &bytes.Buffer{}
+	for _, n := range s.Number {
+		fmt.Fprintf(b, "%v.", n)
+	}
+	return b.String()
+}
+
+func (s Section) TemplateName() string { return "section" }
+
+// Elem defines the interface for a present element. That is, something that
+// can provide the name of the template used to render the element.
+type Elem interface {
+	TemplateName() string
+}
+
+// renderElem implements the elem template function, used to render
+// sub-templates.
+func renderElem(t *template.Template, e Elem) (template.HTML, error) {
+	var data interface{} = e
+	if s, ok := e.(Section); ok {
+		data = struct {
+			Section
+			Template *template.Template
+		}{s, t}
+	}
+	return execTemplate(t, e.TemplateName(), data)
+}
+
+func init() {
+	funcs["elem"] = renderElem
+}
+
+// execTemplate is a helper to execute a template and return the output as a
+// template.HTML value.
+func execTemplate(t *template.Template, name string, data interface{}) (template.HTML, error) {
+	b := new(bytes.Buffer)
+	err := t.ExecuteTemplate(b, name, data)
+	if err != nil {
+		return "", err
+	}
+	return template.HTML(b.String()), nil
+}
+
+// Text represents an optionally preformatted paragraph.
+type Text struct {
+	Lines []string
+	Pre   bool
+}
+
+func (t Text) TemplateName() string { return "text" }
+
+// List represents a bulleted list.
+type List struct {
+	Bullet []string
+}
+
+func (l List) TemplateName() string { return "list" }
+
+// Lines is a helper for parsing line-based input.
+type Lines struct {
+	line int // 0-indexed, so it holds the 1-indexed number of the last line returned
+	text []string
+}
+
+func readLines(r io.Reader) (*Lines, error) {
+	var lines []string
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		lines = append(lines, s.Text())
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return &Lines{0, lines}, nil
+}
+
+func (l *Lines) next() (text string, ok bool) {
+	for {
+		current := l.line
+		l.line++
+		if current >= len(l.text) {
+			return "", false
+		}
+		text = l.text[current]
+		// Lines starting with # are comments.
+		if len(text) == 0 || text[0] != '#' {
+			ok = true
+			break
+		}
+	}
+	return
+}
+
+func (l *Lines) back() {
+	l.line--
+}
+
+func (l *Lines) nextNonEmpty() (text string, ok bool) {
+	for {
+		text, ok = l.next()
+		if !ok {
+			return
+		}
+		if len(text) > 0 {
+			break
+		}
+	}
+	return
+}
+
+// A Context specifies the supporting context for parsing a presentation.
+type Context struct {
+	// ReadFile reads the file named by filename and returns the contents.
+	ReadFile func(filename string) ([]byte, error)
+}
+
+// ParseMode represents flags for the Parse function.
+type ParseMode int
+
+const (
+	// If set, parse only the title and subtitle.
+	TitlesOnly ParseMode = 1
+)
+
+// Parse parses a document from r.
+func (ctx *Context) Parse(r io.Reader, name string, mode ParseMode) (*Doc, error) {
+	doc := new(Doc)
+	lines, err := readLines(r)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := lines.line; i < len(lines.text); i++ {
+		if strings.HasPrefix(lines.text[i], "*") {
+			break
+		}
+
+		if isSpeakerNote(lines.text[i]) {
+			doc.TitleNotes = append(doc.TitleNotes, lines.text[i][2:])
+		}
+	}
+
+	err = parseHeader(doc, lines)
+	if err != nil {
+		return nil, err
+	}
+	if mode&TitlesOnly != 0 {
+		return doc, nil
+	}
+
+	// Authors
+	if doc.Authors, err = parseAuthors(lines); err != nil {
+		return nil, err
+	}
+	// Sections
+	if doc.Sections, err = parseSections(ctx, name, lines, []int{}); err != nil {
+		return nil, err
+	}
+	return doc, nil
+}
+
+// Parse parses a document from r. Parse reads assets used by the presentation
+// from the file system using ioutil.ReadFile.
+func Parse(r io.Reader, name string, mode ParseMode) (*Doc, error) {
+	ctx := Context{ReadFile: ioutil.ReadFile}
+	return ctx.Parse(r, name, mode)
+}
+
+// isHeading matches any section heading.
+var isHeading = regexp.MustCompile(`^\*+ `)
+
+// lesserHeading returns true if text is a heading of a lesser or equal level
+// than that denoted by prefix.
+func lesserHeading(text, prefix string) bool {
+	return isHeading.MatchString(text) && !strings.HasPrefix(text, prefix+"*")
+}
+
+// parseSections parses Sections from lines for the section level indicated by
+// number (a nil number indicates the top level).
+func parseSections(ctx *Context, name string, lines *Lines, number []int) ([]Section, error) {
+	var sections []Section
+	for i := 1; ; i++ {
+		// Next non-empty line is title.
+		text, ok := lines.nextNonEmpty()
+		for ok && text == "" {
+			text, ok = lines.next()
+		}
+		if !ok {
+			break
+		}
+		prefix := strings.Repeat("*", len(number)+1)
+		if !strings.HasPrefix(text, prefix+" ") {
+			lines.back()
+			break
+		}
+		section := Section{
+			Number: append(append([]int{}, number...), i),
+			Title:  text[len(prefix)+1:],
+		}
+		text, ok = lines.nextNonEmpty()
+		for ok && !lesserHeading(text, prefix) {
+			var e Elem
+			r, _ := utf8.DecodeRuneInString(text)
+			switch {
+			case unicode.IsSpace(r):
+				i := strings.IndexFunc(text, func(r rune) bool {
+					return !unicode.IsSpace(r)
+				})
+				if i < 0 {
+					break
+				}
+				indent := text[:i]
+				var s []string
+				for ok && (strings.HasPrefix(text, indent) || text == "") {
+					if text != "" {
+						text = text[i:]
+					}
+					s = append(s, text)
+					text, ok = lines.next()
+				}
+				lines.back()
+				pre := strings.Join(s, "\n")
+				pre = strings.Replace(pre, "\t", "    ", -1) // browsers treat tabs badly
+				pre = strings.TrimRightFunc(pre, unicode.IsSpace)
+				e = Text{Lines: []string{pre}, Pre: true}
+			case strings.HasPrefix(text, "- "):
+				var b []string
+				for ok && strings.HasPrefix(text, "- ") {
+					b = append(b, text[2:])
+					text, ok = lines.next()
+				}
+				lines.back()
+				e = List{Bullet: b}
+			case isSpeakerNote(text):
+				section.Notes = append(section.Notes, text[2:])
+			case strings.HasPrefix(text, prefix+"* "):
+				lines.back()
+				subsecs, err := parseSections(ctx, name, lines, section.Number)
+				if err != nil {
+					return nil, err
+				}
+				for _, ss := range subsecs {
+					section.Elem = append(section.Elem, ss)
+				}
+			case strings.HasPrefix(text, "."):
+				args := strings.Fields(text)
+				parser := parsers[args[0]]
+				if parser == nil {
+				return nil, fmt.Errorf("%s:%d: unknown command %q", name, lines.line, text)
+				}
+				t, err := parser(ctx, name, lines.line, text)
+				if err != nil {
+					return nil, err
+				}
+				e = t
+			default:
+				var l []string
+				for ok && strings.TrimSpace(text) != "" {
+					if text[0] == '.' { // Command breaks text block.
+						lines.back()
+						break
+					}
+					if strings.HasPrefix(text, `\.`) { // Backslash escapes initial period.
+						text = text[1:]
+					}
+					l = append(l, text)
+					text, ok = lines.next()
+				}
+				if len(l) > 0 {
+					e = Text{Lines: l}
+				}
+			}
+			if e != nil {
+				section.Elem = append(section.Elem, e)
+			}
+			text, ok = lines.nextNonEmpty()
+		}
+		if isHeading.MatchString(text) {
+			lines.back()
+		}
+		sections = append(sections, section)
+	}
+	return sections, nil
+}
+
+func parseHeader(doc *Doc, lines *Lines) error {
+	var ok bool
+	// First non-empty line starts header.
+	doc.Title, ok = lines.nextNonEmpty()
+	if !ok {
+		return errors.New("unexpected EOF; expected title")
+	}
+	for {
+		text, ok := lines.next()
+		if !ok {
+			return errors.New("unexpected EOF")
+		}
+		if text == "" {
+			break
+		}
+		if isSpeakerNote(text) {
+			continue
+		}
+		const tagPrefix = "Tags:"
+		if strings.HasPrefix(text, tagPrefix) {
+			tags := strings.Split(text[len(tagPrefix):], ",")
+			for i := range tags {
+				tags[i] = strings.TrimSpace(tags[i])
+			}
+			doc.Tags = append(doc.Tags, tags...)
+		} else if t, ok := parseTime(text); ok {
+			doc.Time = t
+		} else if doc.Subtitle == "" {
+			doc.Subtitle = text
+		} else {
+			return fmt.Errorf("unexpected header line: %q", text)
+		}
+	}
+	return nil
+}
+
+func parseAuthors(lines *Lines) (authors []Author, err error) {
+	// This grammar demarcates authors with blanks.
+
+	// Skip blank lines.
+	if _, ok := lines.nextNonEmpty(); !ok {
+		return nil, errors.New("unexpected EOF")
+	}
+	lines.back()
+
+	var a *Author
+	for {
+		text, ok := lines.next()
+		if !ok {
+			return nil, errors.New("unexpected EOF")
+		}
+
+		// If we find a section heading, we're done.
+		if strings.HasPrefix(text, "* ") {
+			lines.back()
+			break
+		}
+
+		if isSpeakerNote(text) {
+			continue
+		}
+
+		// If we encounter a blank we're done with this author.
+		if a != nil && len(text) == 0 {
+			authors = append(authors, *a)
+			a = nil
+			continue
+		}
+		if a == nil {
+			a = new(Author)
+		}
+
+		// Parse the line. Those that
+		// - begin with @ are twitter names,
+		// - contain slashes are links, or
+		// - contain an @ symbol are an email address.
+		// The rest is just text.
+		var el Elem
+		switch {
+		case strings.HasPrefix(text, "@"):
+			el = parseURL("http://twitter.com/" + text[1:])
+		case strings.Contains(text, ":"):
+			el = parseURL(text)
+		case strings.Contains(text, "@"):
+			el = parseURL("mailto:" + text)
+		}
+		if l, ok := el.(Link); ok {
+			l.Label = text
+			el = l
+		}
+		if el == nil {
+			el = Text{Lines: []string{text}}
+		}
+		a.Elem = append(a.Elem, el)
+	}
+	if a != nil {
+		authors = append(authors, *a)
+	}
+	return authors, nil
+}
+
+func parseURL(text string) Elem {
+	u, err := url.Parse(text)
+	if err != nil {
+		log.Printf("Parse(%q): %v", text, err)
+		return nil
+	}
+	return Link{URL: u}
+}
+
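+// parseTime parses a header date line in either "15:04 2 Jan 2006" or
+// "2 Jan 2006" form.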
+func parseTime(text string) (t time.Time, ok bool) {
+	t, err := time.Parse("15:04 2 Jan 2006", text)
+	if err == nil {
+		return t, true
+	}
+	t, err = time.Parse("2 Jan 2006", text)
+	if err == nil {
+		// at 11am UTC it is the same date everywhere
+		t = t.Add(time.Hour * 11)
+		return t, true
+	}
+	return time.Time{}, false
+}
+
+func isSpeakerNote(s string) bool {
+	return strings.HasPrefix(s, ": ")
+}
diff --git a/vendor/golang.org/x/tools/present/style.go b/vendor/golang.org/x/tools/present/style.go
new file mode 100644
index 0000000..1cd240d
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/style.go
@@ -0,0 +1,166 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"bytes"
+	"html"
+	"html/template"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+/*
+	Fonts are demarcated by an initial and final char bracketing a
+	space-delimited word, plus possibly some terminal punctuation.
+	The chars are
+		_ for italic
+		* for bold
+		` (back quote) for fixed width.
+	Inner appearances of the char become spaces. For instance,
+		_this_is_italic_!
+	becomes
+		<i>this is italic</i>!
+*/
+
+func init() {
+	funcs["style"] = Style
+}
+
+// Style returns s with HTML entities escaped and font indicators turned into
+// HTML font tags.
+func Style(s string) template.HTML {
+	return template.HTML(font(html.EscapeString(s)))
+}
+
+// font returns s with font indicators turned into HTML font tags.
+func font(s string) string {
+	if strings.IndexAny(s, "[`_*") == -1 {
+		return s
+	}
+	words := split(s)
+	var b bytes.Buffer
+Word:
+	for w, word := range words {
+		if len(word) < 2 {
+			continue Word
+		}
+		if link, _ := parseInlineLink(word); link != "" {
+			words[w] = link
+			continue Word
+		}
+		const punctuation = `.,;:()!?—–'"`
+		const marker = "_*`"
+		// Initial punctuation is OK but must be peeled off.
+		first := strings.IndexAny(word, marker)
+		if first == -1 {
+			continue Word
+		}
+		// Is the marker prefixed only by punctuation?
+		for _, r := range word[:first] {
+			if !strings.ContainsRune(punctuation, r) {
+				continue Word
+			}
+		}
+		open, word := word[:first], word[first:]
+		char := word[0] // ASCII is OK.
+		close := ""
+		switch char {
+		default:
+			continue Word
+		case '_':
+			open += "<i>"
+			close = "</i>"
+		case '*':
+			open += "<b>"
+			close = "</b>"
+		case '`':
+			open += "<code>"
+			close = "</code>"
+		}
+		// Terminal punctuation is OK but must be peeled off.
+		last := strings.LastIndex(word, word[:1])
+		if last == 0 {
+			continue Word
+		}
+		head, tail := word[:last+1], word[last+1:]
+		for _, r := range tail {
+			if !strings.ContainsRune(punctuation, r) {
+				continue Word
+			}
+		}
+		b.Reset()
+		b.WriteString(open)
+		var wid int
+		for i := 1; i < len(head)-1; i += wid {
+			var r rune
+			r, wid = utf8.DecodeRuneInString(head[i:])
+			if r != rune(char) {
+				// Ordinary character.
+				b.WriteRune(r)
+				continue
+			}
+			if head[i+1] != char {
+				// Inner char becomes space.
+				b.WriteRune(' ')
+				continue
+			}
+			// Doubled char becomes real char.
+			// Not worth worrying about "_x__".
+			b.WriteByte(char)
+			wid++ // Consumed two chars, both ASCII.
+		}
+		b.WriteString(close) // Write closing tag.
+		b.WriteString(tail)  // Restore trailing punctuation.
+		words[w] = b.String()
+	}
+	return strings.Join(words, "")
+}
+
+// split is like strings.Fields but also returns the runs of spaces
+// and treats inline links as distinct words.
+func split(s string) []string {
+	var (
+		words = make([]string, 0, 10)
+		start = 0
+	)
+
+	// appendWord appends the string s[start:end] to the words slice.
+	// If the word contains the beginning of a link, the non-link portion
+	// of the word and the entire link are appended as separate words,
+	// and the start index is advanced to the end of the link.
+	appendWord := func(end int) {
+		if j := strings.Index(s[start:end], "[["); j > -1 {
+			if _, l := parseInlineLink(s[start+j:]); l > 0 {
+				// Append portion before link, if any.
+				if j > 0 {
+					words = append(words, s[start:start+j])
+				}
+				// Append link itself.
+				words = append(words, s[start+j:start+j+l])
+				// Advance start index to end of link.
+				start = start + j + l
+				return
+			}
+		}
+		// No link; just add the word.
+		words = append(words, s[start:end])
+		start = end
+	}
+
+	wasSpace := false
+	for i, r := range s {
+		isSpace := unicode.IsSpace(r)
+		if i > start && isSpace != wasSpace {
+			appendWord(i)
+		}
+		wasSpace = isSpace
+	}
+	for start < len(s) {
+		appendWord(len(s))
+	}
+	return words
+}
diff --git a/vendor/golang.org/x/tools/present/video.go b/vendor/golang.org/x/tools/present/video.go
new file mode 100644
index 0000000..913822e
--- /dev/null
+++ b/vendor/golang.org/x/tools/present/video.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package present
+
+import (
+	"fmt"
+	"strings"
+)
+
+func init() {
+	Register("video", parseVideo)
+}
+
+type Video struct {
+	URL        string
+	SourceType string
+	Width      int
+	Height     int
+}
+
+func (v Video) TemplateName() string { return "video" }
+
+func parseVideo(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
+	args := strings.Fields(text)
+	vid := Video{URL: args[1], SourceType: args[2]}
+	a, err := parseArgs(fileName, lineno, args[3:])
+	if err != nil {
+		return nil, err
+	}
+	switch len(a) {
+	case 0:
+		// no size parameters
+	case 2:
+		// If a parameter is empty (underscore) or invalid
+		// leave the field set to zero. The "video" action
+		// template will then omit that vid tag attribute and
+		// the browser will calculate the value to preserve
+		// the aspect ratio.
+		if v, ok := a[0].(int); ok {
+			vid.Height = v
+		}
+		if v, ok := a[1].(int); ok {
+			vid.Width = v
+		}
+	default:
+		return nil, fmt.Errorf("incorrect video invocation: %q", text)
+	}
+	return vid, nil
+}
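+
+// An illustrative .video invocation (a sketch; the file name and sizes are
+// made up):
+//
+//	.video talk.mp4 video/mp4 300 400
+//
+// The URL and source type are required. The optional size arguments are
+// height then width; an empty parameter (underscore) leaves that dimension
+// unset so the browser preserves the aspect ratio.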
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
new file mode 100644
index 0000000..86ce8c2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/doc.go
@@ -0,0 +1,42 @@
+/*
+Package aetest provides an API for running dev_appserver for use in tests.
+
+An example test file:
+
+	package foo_test
+
+	import (
+		"testing"
+
+		"google.golang.org/appengine/memcache"
+		"google.golang.org/appengine/aetest"
+	)
+
+	func TestFoo(t *testing.T) {
+		ctx, done, err := aetest.NewContext()
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer done()
+
+		it := &memcache.Item{
+			Key:   "some-key",
+			Value: []byte("some-value"),
+		}
+		err = memcache.Set(ctx, it)
+		if err != nil {
+			t.Fatalf("Set err: %v", err)
+		}
+		it, err = memcache.Get(ctx, "some-key")
+		if err != nil {
+			t.Fatalf("Get err: %v; want no error", err)
+		}
+		if g, w := string(it.Value), "some-value"; g != w {
+			t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+		}
+	}
+
+The environment variable APPENGINE_DEV_APPSERVER specifies the location of the
+dev_appserver.py executable to use. If unset, the system PATH is consulted.
+*/
+package aetest
diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go
new file mode 100644
index 0000000..a8f99d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance.go
@@ -0,0 +1,51 @@
+package aetest
+
+import (
+	"io"
+	"net/http"
+
+	"golang.org/x/net/context"
+	"google.golang.org/appengine"
+)
+
+// Instance represents a running instance of the development API Server.
+type Instance interface {
+	// Close kills the child api_server.py process, releasing its resources.
+	io.Closer
+	// NewRequest returns an *http.Request associated with this instance.
+	NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
+}
+
+// Options is used to specify options when creating an Instance.
+type Options struct {
+	// AppID specifies the App ID to use during tests.
+	// By default, "testapp".
+	AppID string
+	// StronglyConsistentDatastore is whether the local datastore should be
+	// strongly consistent. This will diverge from production behaviour.
+	StronglyConsistentDatastore bool
+}
+
+// NewContext starts an instance of the development API server, and returns
+// a context that will route all API calls to that server, as well as a
+// closure that must be called when the Context is no longer required.
+func NewContext() (context.Context, func(), error) {
+	inst, err := NewInstance(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := inst.NewRequest("GET", "/", nil)
+	if err != nil {
+		inst.Close()
+		return nil, nil, err
+	}
+	ctx := appengine.NewContext(req)
+	return ctx, func() {
+		inst.Close()
+	}, nil
+}
+
+// PrepareDevAppserver is a hook which, if set, will be called before the
+// dev_appserver.py is started, each time it is started. If aetest.NewContext
+// is invoked from the goapp test tool, this hook is unnecessary.
+var PrepareDevAppserver func() error
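+
+// An illustrative use of an Instance directly, rather than through
+// NewContext (a sketch; error handling elided):
+//
+//	inst, err := NewInstance(&Options{StronglyConsistentDatastore: true})
+//	if err != nil {
+//		// handle err
+//	}
+//	defer inst.Close()
+//	req, _ := inst.NewRequest("GET", "/", nil)
+//	ctx := appengine.NewContext(req)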
diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go
new file mode 100644
index 0000000..fbceaa5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go
@@ -0,0 +1,21 @@
+// +build appengine
+
+package aetest
+
+import "appengine/aetest"
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+	aetest.PrepareDevAppserver = PrepareDevAppserver
+	var aeOpts *aetest.Options
+	if opts != nil {
+		aeOpts = &aetest.Options{
+			AppID: opts.AppID,
+			StronglyConsistentDatastore: opts.StronglyConsistentDatastore,
+		}
+	}
+	return aetest.NewInstance(aeOpts)
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go
new file mode 100644
index 0000000..9d78999
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go
@@ -0,0 +1,277 @@
+// +build !appengine
+
+package aetest
+
+import (
+	"bufio"
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/appengine/internal"
+)
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+	i := &instance{
+		opts:  opts,
+		appID: "testapp",
+	}
+	if opts != nil && opts.AppID != "" {
+		i.appID = opts.AppID
+	}
+	if err := i.startChild(); err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+func newSessionID() string {
+	var buf [16]byte
+	io.ReadFull(rand.Reader, buf[:])
+	return fmt.Sprintf("%x", buf[:])
+}
+
+// instance implements the Instance interface.
+type instance struct {
+	opts     *Options
+	child    *exec.Cmd
+	apiURL   *url.URL // base URL of API HTTP server
+	adminURL string   // base URL of admin HTTP server
+	appDir   string
+	appID    string
+	relFuncs []func() // funcs to release any associated contexts
+}
+
+// NewRequest returns an *http.Request associated with this instance.
+func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+	req, err := http.NewRequest(method, urlStr, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Associate this request.
+	release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {
+		ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID)
+		return ctx
+	})
+	i.relFuncs = append(i.relFuncs, release)
+
+	return req, nil
+}
+
+// Close kills the child api_server.py process, releasing its resources.
+func (i *instance) Close() (err error) {
+	for _, rel := range i.relFuncs {
+		rel()
+	}
+	i.relFuncs = nil
+	child := i.child
+	if child == nil {
+		return nil
+	}
+	defer func() {
+		i.child = nil
+		err1 := os.RemoveAll(i.appDir)
+		if err == nil {
+			err = err1
+		}
+	}()
+
+	if p := child.Process; p != nil {
+		errc := make(chan error, 1)
+		go func() {
+			errc <- child.Wait()
+		}()
+
+		// Call the quit handler on the admin server.
+		res, err := http.Get(i.adminURL + "/quit")
+		if err != nil {
+			p.Kill()
+			return fmt.Errorf("unable to call /quit handler: %v", err)
+		}
+		res.Body.Close()
+
+		select {
+		case <-time.After(15 * time.Second):
+			p.Kill()
+			return errors.New("timeout killing child process")
+		case err = <-errc:
+			// Do nothing.
+		}
+	}
+	return
+}
+
+func fileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil
+}
+
+func findPython() (path string, err error) {
+	for _, name := range []string{"python2.7", "python"} {
+		path, err = exec.LookPath(name)
+		if err == nil {
+			return
+		}
+	}
+	return
+}
+
+func findDevAppserver() (string, error) {
+	if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" {
+		if fileExists(p) {
+			return p, nil
+		}
+		return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p)
+	}
+	return exec.LookPath("dev_appserver.py")
+}
+
+var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`)
+var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`)
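+
+// Illustrative stderr lines the two patterns above match (a sketch; the
+// addresses are made up):
+//
+//	Starting API server at: http://localhost:54321
+//	Starting admin server at: http://localhost:8000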
+
+func (i *instance) startChild() (err error) {
+	if PrepareDevAppserver != nil {
+		if err := PrepareDevAppserver(); err != nil {
+			return err
+		}
+	}
+	python, err := findPython()
+	if err != nil {
+		return fmt.Errorf("Could not find python interpreter: %v", err)
+	}
+	devAppserver, err := findDevAppserver()
+	if err != nil {
+		return fmt.Errorf("Could not find dev_appserver.py: %v", err)
+	}
+
+	i.appDir, err = ioutil.TempDir("", "appengine-aetest")
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(i.appDir)
+		}
+	}()
+	err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755)
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644)
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644)
+	if err != nil {
+		return err
+	}
+
+	appserverArgs := []string{
+		devAppserver,
+		"--port=0",
+		"--api_port=0",
+		"--admin_port=0",
+		"--automatic_restart=false",
+		"--skip_sdk_update_check=true",
+		"--clear_datastore=true",
+		"--clear_search_indexes=true",
+		"--datastore_path", filepath.Join(i.appDir, "datastore"),
+	}
+	if i.opts != nil && i.opts.StronglyConsistentDatastore {
+		appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent")
+	}
+	appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app"))
+
+	i.child = exec.Command(python,
+		appserverArgs...,
+	)
+	i.child.Stdout = os.Stdout
+	var stderr io.Reader
+	stderr, err = i.child.StderrPipe()
+	if err != nil {
+		return err
+	}
+	stderr = io.TeeReader(stderr, os.Stderr)
+	if err = i.child.Start(); err != nil {
+		return err
+	}
+
+	// Read stderr until we have read the URLs of the API server and admin interface.
+	errc := make(chan error, 1)
+	go func() {
+		s := bufio.NewScanner(stderr)
+		for s.Scan() {
+			if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+				u, err := url.Parse(match[1])
+				if err != nil {
+					errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err)
+					return
+				}
+				i.apiURL = u
+			}
+			if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+				i.adminURL = match[1]
+			}
+			if i.adminURL != "" && i.apiURL != nil {
+				break
+			}
+		}
+		errc <- s.Err()
+	}()
+
+	select {
+	case <-time.After(15 * time.Second):
+		if p := i.child.Process; p != nil {
+			p.Kill()
+		}
+		return errors.New("timeout starting child process")
+	case err := <-errc:
+		if err != nil {
+			return fmt.Errorf("error reading child process stderr: %v", err)
+		}
+	}
+	if i.adminURL == "" {
+		return errors.New("unable to find admin server URL")
+	}
+	if i.apiURL == nil {
+		return errors.New("unable to find API server URL")
+	}
+	return nil
+}
+
+func (i *instance) appYAML() string {
+	return fmt.Sprintf(appYAMLTemplate, i.appID)
+}
+
+const appYAMLTemplate = `
+application: %s
+version: 1
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /.*
+  script: _go_app
+`
+
+const appSource = `
+package main
+import "google.golang.org/appengine"
+func main() { appengine.Main() }
+`
diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go
new file mode 100644
index 0000000..bf9266f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/user.go
@@ -0,0 +1,36 @@
+package aetest
+
+import (
+	"hash/crc32"
+	"net/http"
+	"strconv"
+
+	"google.golang.org/appengine/user"
+)
+
+// Login causes the provided Request to act as though issued by the given user.
+func Login(u *user.User, req *http.Request) {
+	req.Header.Set("X-AppEngine-User-Email", u.Email)
+	id := u.ID
+	if id == "" {
+		id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))
+	}
+	req.Header.Set("X-AppEngine-User-Id", id)
+	req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email)
+	req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider)
+	if u.Admin {
+		req.Header.Set("X-AppEngine-User-Is-Admin", "1")
+	} else {
+		req.Header.Set("X-AppEngine-User-Is-Admin", "0")
+	}
+}
+
+// Logout causes the provided Request to act as though issued by a logged-out
+// user.
+func Logout(req *http.Request) {
+	req.Header.Del("X-AppEngine-User-Email")
+	req.Header.Del("X-AppEngine-User-Id")
+	req.Header.Del("X-AppEngine-User-Is-Admin")
+	req.Header.Del("X-AppEngine-User-Federated-Identity")
+	req.Header.Del("X-AppEngine-User-Federated-Provider")
+}
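+
+// Illustrative use in a test (a sketch; the email and path are made up):
+//
+//	req, _ := inst.NewRequest("GET", "/admin", nil)
+//	Login(&user.User{Email: "admin@example.com", Admin: true}, req)
+//	// ...exercise the handler as an admin user...
+//	Logout(req)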
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
index 8865c49..e1e4b3d 100644
--- a/vendor/google.golang.org/appengine/appengine.go
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -28,7 +28,8 @@
 // See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
 // for details on how to do your own health checking.
 //
-// Main is not yet supported on App Engine Standard.
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
 //
 // Main never returns.
 //
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 0000000..576bc50
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,407 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+	// ErrInvalidEntityType is returned when functions like Get or Next are
+	// passed a dst or src argument of invalid type.
+	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+	// ErrInvalidKey is returned when an invalid key is presented.
+	ErrInvalidKey = errors.New("datastore: invalid key")
+	// ErrNoSuchEntity is returned when no entity was found for a given key.
+	ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+	StructType reflect.Type
+	FieldName  string
+	Reason     string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+		e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key. If the key is invalid,
+// protoToKey will return the invalid key along with ErrInvalidKey.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+	appID := r.GetApp()
+	namespace := r.GetNameSpace()
+	for _, e := range r.Path.Element {
+		k = &Key{
+			kind:      e.GetType(),
+			stringID:  e.GetName(),
+			intID:     e.GetId(),
+			parent:    k,
+			appID:     appID,
+			namespace: namespace,
+		}
+		if !k.valid() {
+			return k, ErrInvalidKey
+		}
+	}
+	return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+	appID := k.appID
+	if appID == "" {
+		appID = defaultAppID
+	}
+	n := 0
+	for i := k; i != nil; i = i.parent {
+		n++
+	}
+	e := make([]*pb.Path_Element, n)
+	for i := k; i != nil; i = i.parent {
+		n--
+		e[n] = &pb.Path_Element{
+			Type: &i.kind,
+		}
+		// At most one of {Name,Id} should be set.
+		// Neither will be set for incomplete keys.
+		if i.stringID != "" {
+			e[n].Name = &i.stringID
+		} else if i.intID != 0 {
+			e[n].Id = &i.intID
+		}
+	}
+	var namespace *string
+	if k.namespace != "" {
+		namespace = proto.String(k.namespace)
+	}
+	return &pb.Reference{
+		App:       proto.String(appID),
+		NameSpace: namespace,
+		Path: &pb.Path{
+			Element: e,
+		},
+	}
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+	ret := make([]*pb.Reference, len(key))
+	for i, k := range key {
+		ret[i] = keyToProto(appID, k)
+	}
+	return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+	invalid := false
+	for _, k := range key {
+		if !k.valid() {
+			invalid = true
+			break
+		}
+	}
+	if !invalid {
+		return nil
+	}
+	err := make(appengine.MultiError, len(key))
+	for i, k := range key {
+		if !k.valid() {
+			err[i] = ErrInvalidKey
+		}
+	}
+	return err
+}
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+	appID := r.GetApp()
+	namespace := r.GetNameSpace()
+	for _, e := range r.Pathelement {
+		k = &Key{
+			kind:      e.GetType(),
+			stringID:  e.GetName(),
+			intID:     e.GetId(),
+			parent:    k,
+			appID:     appID,
+			namespace: namespace,
+		}
+		if !k.valid() {
+			return nil, ErrInvalidKey
+		}
+	}
+	return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+	ref := keyToProto(defaultAppID, k)
+	pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+	for i, e := range ref.Path.Element {
+		pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+			Type: e.Type,
+			Id:   e.Id,
+			Name: e.Name,
+		}
+	}
+	return &pb.PropertyValue_ReferenceValue{
+		App:         ref.App,
+		NameSpace:   ref.NameSpace,
+		Pathelement: pe,
+	}
+}
+
+type multiArgType int
+
+const (
+	multiArgTypeInvalid multiArgType = iota
+	multiArgTypePropertyLoadSaver
+	multiArgTypeStruct
+	multiArgTypeStructPtr
+	multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+	if v.Kind() != reflect.Slice {
+		return multiArgTypeInvalid, nil
+	}
+	if v.Type() == typeOfPropertyList {
+		return multiArgTypeInvalid, nil
+	}
+	elemType = v.Type().Elem()
+	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+		return multiArgTypePropertyLoadSaver, elemType
+	}
+	switch elemType.Kind() {
+	case reflect.Struct:
+		return multiArgTypeStruct, elemType
+	case reflect.Interface:
+		return multiArgTypeInterface, elemType
+	case reflect.Ptr:
+		elemType = elemType.Elem()
+		if elemType.Kind() == reflect.Struct {
+			return multiArgTypeStructPtr, elemType
+		}
+	}
+	return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c context.Context, key *Key, dst interface{}) error {
+	if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+		return ErrInvalidEntityType
+	}
+	err := GetMulti(c, []*Key{key}, []interface{}{dst})
+	if me, ok := err.(appengine.MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c context.Context, key []*Key, dst interface{}) error {
+	v := reflect.ValueOf(dst)
+	multiArgType, _ := checkMultiArg(v)
+	if multiArgType == multiArgTypeInvalid {
+		return errors.New("datastore: dst has invalid type")
+	}
+	if len(key) != v.Len() {
+		return errors.New("datastore: key and dst slices have different length")
+	}
+	if len(key) == 0 {
+		return nil
+	}
+	if err := multiValid(key); err != nil {
+		return err
+	}
+	req := &pb.GetRequest{
+		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+	}
+	res := &pb.GetResponse{}
+	if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
+		return err
+	}
+	if len(key) != len(res.Entity) {
+		return errors.New("datastore: internal error: server returned the wrong number of entities")
+	}
+	multiErr, any := make(appengine.MultiError, len(key)), false
+	for i, e := range res.Entity {
+		if e.Entity == nil {
+			multiErr[i] = ErrNoSuchEntity
+		} else {
+			elem := v.Index(i)
+			if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+				elem = elem.Addr()
+			}
+			if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+				elem.Set(reflect.New(elem.Type().Elem()))
+			}
+			multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+		}
+		if multiErr[i] != nil {
+			any = true
+		}
+	}
+	if any {
+		return multiErr
+	}
+	return nil
+}
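+
+// Illustrative handling of partial failure from GetMulti (a sketch; keys and
+// dst are assumed to be populated by the caller):
+//
+//	err := GetMulti(c, keys, dst)
+//	if me, ok := err.(appengine.MultiError); ok {
+//		for i, e := range me {
+//			if e == ErrNoSuchEntity {
+//				// keys[i] has no stored entity.
+//			}
+//		}
+//	}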
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
+	k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+	if err != nil {
+		if me, ok := err.(appengine.MultiError); ok {
+			return nil, me[0]
+		}
+		return nil, err
+	}
+	return k[0], nil
+}
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
+	v := reflect.ValueOf(src)
+	multiArgType, _ := checkMultiArg(v)
+	if multiArgType == multiArgTypeInvalid {
+		return nil, errors.New("datastore: src has invalid type")
+	}
+	if len(key) != v.Len() {
+		return nil, errors.New("datastore: key and src slices have different length")
+	}
+	if len(key) == 0 {
+		return nil, nil
+	}
+	appID := internal.FullyQualifiedAppID(c)
+	if err := multiValid(key); err != nil {
+		return nil, err
+	}
+	req := &pb.PutRequest{}
+	for i := range key {
+		elem := v.Index(i)
+		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+			elem = elem.Addr()
+		}
+		sProto, err := saveEntity(appID, key[i], elem.Interface())
+		if err != nil {
+			return nil, err
+		}
+		req.Entity = append(req.Entity, sProto)
+	}
+	res := &pb.PutResponse{}
+	if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
+		return nil, err
+	}
+	if len(key) != len(res.Key) {
+		return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+	}
+	ret := make([]*Key, len(key))
+	for i := range ret {
+		var err error
+		ret[i], err = protoToKey(res.Key[i])
+		if err != nil || ret[i].Incomplete() {
+			return nil, errors.New("datastore: internal error: server returned an invalid key")
+		}
+	}
+	return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c context.Context, key *Key) error {
+	err := DeleteMulti(c, []*Key{key})
+	if me, ok := err.(appengine.MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c context.Context, key []*Key) error {
+	if len(key) == 0 {
+		return nil
+	}
+	if err := multiValid(key); err != nil {
+		return err
+	}
+	req := &pb.DeleteRequest{
+		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+	}
+	res := &pb.DeleteResponse{}
+	return internal.Call(c, "datastore_v3", "Delete", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+	// pb.Query is the only type that has a name_space field.
+	// All other namespace support in datastore is in the keys.
+	switch m := m.(type) {
+	case *pb.Query:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	}
+}
+
+func init() {
+	internal.NamespaceMods["datastore_v3"] = namespaceMod
+	internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+	internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 0000000..0d1cb5c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,351 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+  - signed integers (int, int8, int16, int32 and int64),
+  - bool,
+  - string,
+  - float32 and float64,
+  - []byte (up to 1 megabyte in length),
+  - any type whose underlying type is one of the above predeclared types,
+  - ByteString,
+  - *Key,
+  - time.Time (stored with microsecond precision),
+  - appengine.BlobKey,
+  - appengine.GeoPoint,
+  - structs whose fields are all valid value types,
+  - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+	type Entity struct {
+		Value string
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+
+		k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
+		e := new(Entity)
+		if err := datastore.Get(ctx, k, e); err != nil {
+			http.Error(w, err.Error(), 500)
+			return
+		}
+
+		old := e.Value
+		e.Value = r.URL.Path
+
+		if _, err := datastore.Put(ctx, k, e); err != nil {
+			http.Error(w, err.Error(), 500)
+			return
+		}
+
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+	}
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different App Engine requests. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 1500
+bytes cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be
+indexed.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and J are not indexed.
+	// D's tag is equivalent to having no tag at all (E).
+	// I is ignored entirely by the datastore.
+	// J has tag information for both the datastore and json packages.
+	type TaggedStruct struct {
+		A int `datastore:"a,noindex"`
+		B int `datastore:"b"`
+		C int `datastore:",noindex"`
+		D int `datastore:""`
+		E int
+		I int `datastore:"-"`
+		J int `datastore:",noindex" json:"j"`
+	}
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+	type Inner1 struct {
+		W int32
+		X string
+	}
+
+	type Inner2 struct {
+		Y float64
+	}
+
+	type Inner3 struct {
+		Z bool
+	}
+
+	type Outer struct {
+		A int16
+		I []Inner1
+		J Inner2
+		Inner3
+	}
+
+then an Outer's properties would be equivalent to those of:
+
+	type OuterEquivalent struct {
+		A     int16
+		IDotW []int32  `datastore:"I.W"`
+		IDotX []string `datastore:"I.X"`
+		JDotY float64  `datastore:"J.Y"`
+		Z     bool
+	}
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+	type CustomPropsExample struct {
+		I, J int
+		// Sum is not stored, but should always be equal to I + J.
+		Sum int `datastore:"-"`
+	}
+
+	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+		// Load I and J as usual.
+		if err := datastore.LoadStruct(x, ps); err != nil {
+			return err
+		}
+		// Derive the Sum field.
+		x.Sum = x.I + x.J
+		return nil
+	}
+
+	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+		// Validate the Sum field.
+		if x.Sum != x.I + x.J {
+			return errors.New("CustomPropsExample has inconsistent sum")
+		}
+		// Save I and J as usual. The code below is equivalent to calling
+		// "return datastore.SaveStruct(x)", but is done manually for
+		// demonstration purposes.
+		return []datastore.Property{
+			{
+				Name:  "I",
+				Value: int64(x.I),
+			},
+			{
+				Name:  "J",
+				Value: int64(x.J),
+			},
+		}
+	}
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+  - Ancestor and Filter constrain the entities returned by running a query.
+  - Order affects the order in which they are returned.
+  - Project constrains the fields returned.
+  - Distinct de-duplicates projected entities.
+  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+  - Start, End, Offset and Limit define which sub-sequence of matching entities
+    to return. Start and End take cursors, Offset and Limit take integers. Start
+    and Offset affect the first result, End and Limit affect the last result.
+    If both Start and Offset are set, then the offset is relative to Start.
+    If both End and Limit are set, then the earliest constraint wins. Limit is
+    relative to Start+Offset, not relative to End. As a special case, a
+    negative limit means unlimited.
+
+Example code:
+
+	type Widget struct {
+		Description string
+		Price       int
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		q := datastore.NewQuery("Widget").
+			Filter("Price <", 1000).
+			Order("-Price")
+		b := new(bytes.Buffer)
+		for t := q.Run(ctx); ; {
+			var x Widget
+			key, err := t.Next(&x)
+			if err == datastore.Done {
+				break
+			}
+			if err != nil {
+				serveError(ctx, w, err)
+				return
+			}
+			fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		io.Copy(w, b)
+	}
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+	type Counter struct {
+		Count int
+	}
+
+	func inc(ctx context.Context, key *datastore.Key) (int, error) {
+		var x Counter
+		if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+			return 0, err
+		}
+		x.Count++
+		if _, err := datastore.Put(ctx, key, &x); err != nil {
+			return 0, err
+		}
+		return x.Count, nil
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		var count int
+		err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+			var err1 error
+			count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+			return err1
+		}, nil)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		fmt.Fprintf(w, "Count=%d", count)
+	}
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
+
+Example code:
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		// Print all the kinds in the datastore, with all the indexed
+		// properties (and their representations) for each.
+		ctx := appengine.NewContext(r)
+
+		kinds, err := datastore.Kinds(ctx)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		for _, kind := range kinds {
+			fmt.Fprintf(w, "%s:\n", kind)
+			props, err := datastore.KindProperties(ctx, kind)
+			if err != nil {
+				fmt.Fprintln(w, "\t(unable to retrieve properties)")
+				continue
+			}
+			for p, rep := range props {
+				fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+			}
+		}
+	}
+*/
+package datastore
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 0000000..ac1f002
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+	kind      string
+	stringID  string
+	intID     int64
+	parent    *Key
+	appID     string
+	namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+	return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+	return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+	return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+	return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+	return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+	return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+	return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+	if k == nil {
+		return false
+	}
+	for ; k != nil; k = k.parent {
+		if k.kind == "" || k.appID == "" {
+			return false
+		}
+		if k.stringID != "" && k.intID != 0 {
+			return false
+		}
+		if k.parent != nil {
+			if k.parent.Incomplete() {
+				return false
+			}
+			if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+	for k != nil && o != nil {
+		if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+			return false
+		}
+		k, o = k.parent, o.parent
+	}
+	return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+	for k.parent != nil {
+		k = k.parent
+	}
+	return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+	if k.parent != nil {
+		k.parent.marshal(b)
+	}
+	b.WriteByte('/')
+	b.WriteString(k.kind)
+	b.WriteByte(',')
+	if k.stringID != "" {
+		b.WriteString(k.stringID)
+	} else {
+		b.WriteString(strconv.FormatInt(k.intID, 10))
+	}
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+	if k == nil {
+		return ""
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 512))
+	k.marshal(b)
+	return b.String()
+}
+
+type gobKey struct {
+	Kind      string
+	StringID  string
+	IntID     int64
+	Parent    *gobKey
+	AppID     string
+	Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+	if k == nil {
+		return nil
+	}
+	return &gobKey{
+		Kind:      k.kind,
+		StringID:  k.stringID,
+		IntID:     k.intID,
+		Parent:    keyToGobKey(k.parent),
+		AppID:     k.appID,
+		Namespace: k.namespace,
+	}
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+	if gk == nil {
+		return nil
+	}
+	return &Key{
+		kind:      gk.Kind,
+		stringID:  gk.StringID,
+		intID:     gk.IntID,
+		parent:    gobKeyToKey(gk.Parent),
+		appID:     gk.AppID,
+		namespace: gk.Namespace,
+	}
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+	buf := new(bytes.Buffer)
+	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+	gk := new(gobKey)
+	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+		return err
+	}
+	*k = *gobKeyToKey(gk)
+	return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+		return errors.New("datastore: bad JSON key")
+	}
+	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+	if err != nil {
+		return err
+	}
+	*k = *k2
+	return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+	ref := keyToProto("", k)
+
+	b, err := proto.Marshal(ref)
+	if err != nil {
+		panic(err)
+	}
+
+	// Trailing padding is stripped.
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+	// Re-add padding.
+	if m := len(encoded) % 4; m != 0 {
+		encoded += strings.Repeat("=", 4-m)
+	}
+
+	b, err := base64.URLEncoding.DecodeString(encoded)
+	if err != nil {
+		return nil, err
+	}
+
+	ref := new(pb.Reference)
+	if err := proto.Unmarshal(b, ref); err != nil {
+		return nil, err
+	}
+
+	return protoToKey(ref)
+}
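+
+// Illustrative round trip (a sketch): Encode yields a web-safe string and
+// DecodeKey should recover an equal key:
+//
+//	enc := k.Encode()
+//	k2, err := DecodeKey(enc)
+//	// err == nil && k.Equal(k2)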
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
+	return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// At least one of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
+	// If there's a parent key, use its namespace.
+	// Otherwise, use any namespace attached to the context.
+	var namespace string
+	if parent != nil {
+		namespace = parent.namespace
+	} else {
+		namespace = internal.NamespaceFromContext(c)
+	}
+
+	return &Key{
+		kind:      kind,
+		stringID:  stringID,
+		intID:     intID,
+		parent:    parent,
+		appID:     internal.FullyQualifiedAppID(c),
+		namespace: namespace,
+	}
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
+func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+	if kind == "" {
+		return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+	}
+	if n < 0 {
+		return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+	}
+	if n == 0 {
+		return 0, 0, nil
+	}
+	req := &pb.AllocateIdsRequest{
+		ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+		Size:     proto.Int64(int64(n)),
+	}
+	res := &pb.AllocateIdsResponse{}
+	if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
+		return 0, 0, err
+	}
+	// The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+	// is inclusive at the low end and exclusive at the high end, so we add 1.
+	low = res.GetStart()
+	high = res.GetEnd() + 1
+	if low+int64(n) != high {
+		return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+	}
+	return low, high, nil
+}
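+
+// An illustrative pairing of AllocateIDs with NewKey (a sketch; the kind is
+// made up and error handling elided):
+//
+//	low, high, err := AllocateIDs(c, "Widget", nil, 3)
+//	// NewKey(c, "Widget", "", id, nil) is safe for low <= id < high.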
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 0000000..7878cbf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,430 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/appengine"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+	typeOfBlobKey    = reflect.TypeOf(appengine.BlobKey(""))
+	typeOfByteSlice  = reflect.TypeOf([]byte(nil))
+	typeOfByteString = reflect.TypeOf(ByteString(nil))
+	typeOfGeoPoint   = reflect.TypeOf(appengine.GeoPoint{})
+	typeOfTime       = reflect.TypeOf(time.Time{})
+	typeOfKeyPtr     = reflect.TypeOf(&Key{})
+	typeOfEntityPtr  = reflect.TypeOf(&Entity{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(pValue interface{}, v reflect.Value) string {
+	entityType := "empty"
+	switch pValue.(type) {
+	case int64:
+		entityType = "int"
+	case bool:
+		entityType = "bool"
+	case string:
+		entityType = "string"
+	case float64:
+		entityType = "float"
+	case *Key:
+		entityType = "*datastore.Key"
+	case time.Time:
+		entityType = "time.Time"
+	case appengine.BlobKey:
+		entityType = "appengine.BlobKey"
+	case appengine.GeoPoint:
+		entityType = "appengine.GeoPoint"
+	case ByteString:
+		entityType = "datastore.ByteString"
+	case []byte:
+		entityType = "[]byte"
+	}
+	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
+	// been seen so far. The map is constructed lazily.
+	m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+	var v reflect.Value
+	var sliceIndex int
+
+	name := p.Name
+	for name != "" {
+		// First we try to find a field with name matching
+		// the value of 'name' exactly.
+		decoder, ok := codec.fields[name]
+		if ok {
+			name = ""
+		} else {
+			// Now try for legacy flattened nested field (named eg. "A.B.C.D").
+
+			parent := name
+			child := ""
+
+			// Cut off the last field (delimited by ".") and find its parent
+			// in the codec.
+			// eg. for name "A.B.C.D", split off "A.B.C" and try to
+			// find a field in the codec with this name.
+			// Loop again with "A.B", etc.
+			for !ok {
+				i := strings.LastIndex(parent, ".")
+				if i < 0 {
+					return "no such struct field"
+				}
+				if i == len(name)-1 {
+					return "field name cannot end with '.'"
+				}
+				parent, child = name[:i], name[i+1:]
+				decoder, ok = codec.fields[parent]
+			}
+
+			name = child
+		}
+
+		v = initField(structValue, decoder.path)
+		if !v.IsValid() {
+			return "no such struct field"
+		}
+		if !v.CanSet() {
+			return "cannot set struct field"
+		}
+
+		if decoder.structCodec != nil {
+			codec = decoder.structCodec
+			structValue = v
+		}
+
+		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
+			if l.m == nil {
+				l.m = make(map[string]int)
+			}
+			sliceIndex = l.m[p.Name]
+			l.m[p.Name] = sliceIndex + 1
+			for v.Len() <= sliceIndex {
+				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+			}
+			structValue = v.Index(sliceIndex)
+			requireSlice = false
+		}
+	}
+
+	var slice reflect.Value
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		slice = v
+		v = reflect.New(v.Type().Elem()).Elem()
+	} else if requireSlice {
+		return "multiple-valued property requires a slice field type"
+	}
+
+	// Convert indexValues to a Go value with a meaning derived from the
+	// destination type.
+	pValue := p.Value
+	if iv, ok := pValue.(indexValue); ok {
+		meaning := pb.Property_NO_MEANING
+		switch v.Type() {
+		case typeOfBlobKey:
+			meaning = pb.Property_BLOBKEY
+		case typeOfByteSlice:
+			meaning = pb.Property_BLOB
+		case typeOfByteString:
+			meaning = pb.Property_BYTESTRING
+		case typeOfGeoPoint:
+			meaning = pb.Property_GEORSS_POINT
+		case typeOfTime:
+			meaning = pb.Property_GD_WHEN
+		case typeOfEntityPtr:
+			meaning = pb.Property_ENTITY_PROTO
+		}
+		var err error
+		pValue, err = propValue(iv.value, meaning)
+		if err != nil {
+			return err.Error()
+		}
+	}
+
+	if errReason := setVal(v, pValue); errReason != "" {
+		// Set the slice back to its zero value.
+		if slice.IsValid() {
+			slice.Set(reflect.Zero(slice.Type()))
+		}
+		return errReason
+	}
+
+	if slice.IsValid() {
+		slice.Index(sliceIndex).Set(v)
+	}
+
+	return ""
+}
+
+// setVal sets v to the value pValue.
+func setVal(v reflect.Value, pValue interface{}) string {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x, ok := pValue.(int64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(pValue, v)
+		}
+		if v.OverflowInt(x) {
+			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+		}
+		v.SetInt(x)
+	case reflect.Bool:
+		x, ok := pValue.(bool)
+		if !ok && pValue != nil {
+			return typeMismatchReason(pValue, v)
+		}
+		v.SetBool(x)
+	case reflect.String:
+		switch x := pValue.(type) {
+		case appengine.BlobKey:
+			v.SetString(string(x))
+		case ByteString:
+			v.SetString(string(x))
+		case string:
+			v.SetString(x)
+		default:
+			if pValue != nil {
+				return typeMismatchReason(pValue, v)
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		x, ok := pValue.(float64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(pValue, v)
+		}
+		if v.OverflowFloat(x) {
+			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+		}
+		v.SetFloat(x)
+	case reflect.Ptr:
+		x, ok := pValue.(*Key)
+		if !ok && pValue != nil {
+			return typeMismatchReason(pValue, v)
+		}
+		if _, ok := v.Interface().(*Key); !ok {
+			return typeMismatchReason(pValue, v)
+		}
+		v.Set(reflect.ValueOf(x))
+	case reflect.Struct:
+		switch v.Type() {
+		case typeOfTime:
+			x, ok := pValue.(time.Time)
+			if !ok && pValue != nil {
+				return typeMismatchReason(pValue, v)
+			}
+			v.Set(reflect.ValueOf(x))
+		case typeOfGeoPoint:
+			x, ok := pValue.(appengine.GeoPoint)
+			if !ok && pValue != nil {
+				return typeMismatchReason(pValue, v)
+			}
+			v.Set(reflect.ValueOf(x))
+		default:
+			ent, ok := pValue.(*Entity)
+			if !ok {
+				return typeMismatchReason(pValue, v)
+			}
+
+			// Recursively load nested struct
+			pls, err := newStructPLS(v.Addr().Interface())
+			if err != nil {
+				return err.Error()
+			}
+
+			// if ent has a Key value and our struct has a Key field,
+			// load the Entity's Key value into the Key field on the struct.
+			if ent.Key != nil && pls.codec.keyField != -1 {
+				pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
+			}
+
+			err = pls.Load(ent.Properties)
+			if err != nil {
+				return err.Error()
+			}
+		}
+	case reflect.Slice:
+		x, ok := pValue.([]byte)
+		if !ok {
+			if y, yok := pValue.(ByteString); yok {
+				x, ok = []byte(y), true
+			}
+		}
+		if !ok && pValue != nil {
+			return typeMismatchReason(pValue, v)
+		}
+		if v.Type().Elem().Kind() != reflect.Uint8 {
+			return typeMismatchReason(pValue, v)
+		}
+		v.SetBytes(x)
+	default:
+		return typeMismatchReason(pValue, v)
+	}
+	return ""
+}
+
+// initField is similar to reflect's Value.FieldByIndex, in that it
+// returns the nested struct field corresponding to index, but it
+// initializes any nil pointers encountered when traversing the structure.
+func initField(val reflect.Value, index []int) reflect.Value {
+	for _, i := range index[:len(index)-1] {
+		val = val.Field(i)
+		if val.Kind() == reflect.Ptr {
+			if val.IsNil() {
+				val.Set(reflect.New(val.Type().Elem()))
+			}
+			val = val.Elem()
+		}
+	}
+	return val.Field(index[len(index)-1])
+}
+
+// loadEntity loads an EntityProto into a PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+	ent, err := protoToEntity(src)
+	if err != nil {
+		return err
+	}
+	if e, ok := dst.(PropertyLoadSaver); ok {
+		return e.Load(ent.Properties)
+	}
+	return LoadStruct(dst, ent.Properties)
+}
+
+func (s structPLS) Load(props []Property) error {
+	var fieldName, reason string
+	var l propertyLoader
+	for _, p := range props {
+		if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+			// We don't return early, as we try to load as many properties as possible.
+			// It is valid to load an entity into a struct that cannot fully represent it.
+			// That case returns an error, but the caller is free to ignore it.
+			fieldName, reason = p.Name, errStr
+		}
+	}
+	if reason != "" {
+		return &ErrFieldMismatch{
+			StructType: s.v.Type(),
+			FieldName:  fieldName,
+			Reason:     reason,
+		}
+	}
+	return nil
+}
+
+func protoToEntity(src *pb.EntityProto) (*Entity, error) {
+	props, rawProps := src.Property, src.RawProperty
+	outProps := make([]Property, 0, len(props)+len(rawProps))
+	for {
+		var (
+			x       *pb.Property
+			noIndex bool
+		)
+		if len(props) > 0 {
+			x, props = props[0], props[1:]
+		} else if len(rawProps) > 0 {
+			x, rawProps = rawProps[0], rawProps[1:]
+			noIndex = true
+		} else {
+			break
+		}
+
+		var value interface{}
+		if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+			value = indexValue{x.Value}
+		} else {
+			var err error
+			value, err = propValue(x.Value, x.GetMeaning())
+			if err != nil {
+				return nil, err
+			}
+		}
+		outProps = append(outProps, Property{
+			Name:     x.GetName(),
+			Value:    value,
+			NoIndex:  noIndex,
+			Multiple: x.GetMultiple(),
+		})
+	}
+
+	var key *Key
+	if src.Key != nil {
+		// Ignore any error, since nested entity values
+		// are allowed to have an invalid key.
+		key, _ = protoToKey(src.Key)
+	}
+	return &Entity{key, outProps}, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+	switch {
+	case v.Int64Value != nil:
+		if m == pb.Property_GD_WHEN {
+			return fromUnixMicro(*v.Int64Value), nil
+		}
+		return *v.Int64Value, nil
+	case v.BooleanValue != nil:
+		return *v.BooleanValue, nil
+	case v.StringValue != nil:
+		switch m {
+		case pb.Property_BLOB:
+			return []byte(*v.StringValue), nil
+		case pb.Property_BLOBKEY:
+			return appengine.BlobKey(*v.StringValue), nil
+		case pb.Property_BYTESTRING:
+			return ByteString(*v.StringValue), nil
+		case pb.Property_ENTITY_PROTO:
+			var ent pb.EntityProto
+			if err := proto.Unmarshal([]byte(*v.StringValue), &ent); err != nil {
+				return nil, err
+			}
+			return protoToEntity(&ent)
+		default:
+			return *v.StringValue, nil
+		}
+	case v.DoubleValue != nil:
+		return *v.DoubleValue, nil
+	case v.Referencevalue != nil:
+		key, err := referenceValueToKey(v.Referencevalue)
+		if err != nil {
+			return nil, err
+		}
+		return key, nil
+	case v.Pointvalue != nil:
+		// NOTE: Strangely, latitude maps to X, longitude to Y.
+		return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+	}
+	return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+	value *pb.PropertyValue
+}
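+
+// For example, values from a projection query can be reconstructed by
+// loading into a concrete struct (a minimal sketch; "Article" with a string
+// field "Title" is hypothetical, and ctx is an App Engine context):
+//
+//	q := NewQuery("Article").Project("Title")
+//	for t := q.Run(ctx); ; {
+//		var a Article
+//		_, err := t.Next(&a)
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// a.Title is now a plain string rather than an opaque indexValue.
+//	}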
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 0000000..67995f9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+	namespaceKind = "__namespace__"
+	kindKind      = "__kind__"
+	propertyKind  = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(namespaceKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	// The empty namespace key uses a numeric ID (==1), but luckily
+	// the string ID defaults to "" for numeric IDs anyway.
+	return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(kindKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+	n := make([]string, 0, len(keys))
+	for _, k := range keys {
+		n = append(n, k.StringID())
+	}
+	return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+//   "INT64":     signed integers and time.Time
+//   "DOUBLE":    float32 and float64
+//   "BOOLEAN":   bool
+//   "STRING":    string, []byte and ByteString
+//   "POINT":     appengine.GeoPoint
+//   "REFERENCE": *Key
+//   "USER":      (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+	// TODO(djd): Support range queries.
+	kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+	q := NewQuery(propertyKind).Ancestor(kindKey)
+
+	propMap := map[string][]string{}
+	props := []struct {
+		Repr []string `datastore:"property_representation"`
+	}{}
+
+	keys, err := q.GetAll(ctx, &props)
+	if err != nil {
+		return nil, err
+	}
+	for i, p := range props {
+		propMap[keys[i].StringID()] = p.Repr
+	}
+	return propMap, nil
+}
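+
+// A minimal usage sketch (ctx is an App Engine context and "Article" is a
+// hypothetical kind):
+//
+//	repr, err := KindProperties(ctx, "Article")
+//	if err != nil {
+//		return err
+//	}
+//	for name, types := range repr {
+//		log.Printf("%s is indexed as %v", name, types)
+//	}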
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 0000000..55ab9fb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+	// Name is the property name.
+	Name string
+	// Value is the property value. The valid types are:
+	//	- int64
+	//	- bool
+	//	- string
+	//	- float64
+	//	- ByteString
+	//	- *Key
+	//	- time.Time
+	//	- appengine.BlobKey
+	//	- appengine.GeoPoint
+	//	- []byte (up to 1 megabyte in length)
+	//	- *Entity (representing a nested struct)
+	// This set is smaller than the set of valid struct field types that the
+	// datastore can load and save. A Property Value cannot be a slice (apart
+	// from []byte); use multiple Properties instead. Also, a Value's type
+	// must be explicitly on the list above; it is not sufficient for the
+	// underlying type to be on that list. For example, a Value of "type
+	// myInt64 int64" is invalid. Smaller-width integers and floats are also
+	// invalid. Again, this is more restrictive than the set of valid struct
+	// field types.
+	//
+	// A Value will have an opaque type when loading entities from an index,
+	// such as via a projection query. Load entities into a struct instead
+	// of a PropertyLoadSaver when using a projection query.
+	//
+	// A Value may also be the nil interface value; this is equivalent to
+	// Python's None but not directly representable by a Go struct. Loading
+	// a nil-valued property into a struct will set that field to the zero
+	// value.
+	Value interface{}
+	// NoIndex is whether the datastore cannot index this property.
+	NoIndex bool
+	// Multiple is whether the entity can have multiple properties with
+	// the same name. Even if a particular instance only has one property with
+	// a certain name, Multiple should be true if a struct would best represent
+	// it as a field of type []T instead of type T.
+	Multiple bool
+}
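+
+// For example, a single indexed string property can be expressed as
+// (the name and value below are arbitrary):
+//
+//	p := Property{Name: "Title", Value: "hello"}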
+
+// An Entity is the value type for a nested struct.
+// This type is only used for a Property's Value.
+type Entity struct {
+	Key        *Key
+	Properties []Property
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+	Load([]Property) error
+	Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+	*l = append(*l, p...)
+	return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+	return *l, nil
+}
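+
+// A PropertyList is useful when the schema is not known in advance
+// (a minimal sketch; ctx is an App Engine context and k an existing *Key):
+//
+//	var pl PropertyList
+//	if err := Get(ctx, k, &pl); err != nil {
+//		return err
+//	}
+//	// pl now holds one Property per stored property.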
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for _, s := range strings.Split(name, ".") {
+		if s == "" {
+			return false
+		}
+		first := true
+		for _, c := range s {
+			if first {
+				first = false
+				if c != '_' && !unicode.IsLetter(c) {
+					return false
+				}
+			} else {
+				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+	// fields gives the field codec for the structTag with the given name.
+	fields map[string]fieldCodec
+	// hasSlice is whether a struct or any of its nested or embedded structs
+	// has a slice-typed field (other than []byte).
+	hasSlice bool
+	// keyField is the index of a *Key field with structTag __key__.
+	// This field is not relevant for the top level struct, only for
+	// nested structs.
+	keyField int
+	// complete is whether the structCodec is complete. An incomplete
+	// structCodec may be encountered when walking a recursive struct.
+	complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+	// path is the index path to the field
+	path    []int
+	noIndex bool
+	// structCodec is the codec for the struct field at index 'path',
+	// or nil if the field is not a struct.
+	structCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated.
+var (
+	structCodecsMutex sync.Mutex
+	structCodecs      = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+	structCodecsMutex.Lock()
+	defer structCodecsMutex.Unlock()
+	return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+	c, ok := structCodecs[t]
+	if ok {
+		return c, nil
+	}
+	c = &structCodec{
+		fields: make(map[string]fieldCodec),
+		// We initialize keyField to -1 so that the zero-value is not
+		// misinterpreted as index 0.
+		keyField: -1,
+	}
+
+	// Add c to the structCodecs map before we are sure it is good. If t is
+	// a recursive type, it needs to find the incomplete entry for itself in
+	// the map.
+	structCodecs[t] = c
+	defer func() {
+		if retErr != nil {
+			delete(structCodecs, t)
+		}
+	}()
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		// Skip unexported fields.
+		// Note that if f is an anonymous, unexported struct field,
+		// we will not promote its fields. We will skip f entirely.
+		if f.PkgPath != "" {
+			continue
+		}
+
+		tags := strings.Split(f.Tag.Get("datastore"), ",")
+		name := tags[0]
+		opts := make(map[string]bool)
+		for _, t := range tags[1:] {
+			opts[t] = true
+		}
+		switch {
+		case name == "":
+			if !f.Anonymous {
+				name = f.Name
+			}
+		case name == "-":
+			continue
+		case name == "__key__":
+			if f.Type != typeOfKeyPtr {
+				return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
+			}
+			c.keyField = i
+		case !validPropertyName(name):
+			return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+		}
+
+		substructType, fIsSlice := reflect.Type(nil), false
+		switch f.Type.Kind() {
+		case reflect.Struct:
+			substructType = f.Type
+		case reflect.Slice:
+			if f.Type.Elem().Kind() == reflect.Struct {
+				substructType = f.Type.Elem()
+			}
+			fIsSlice = f.Type != typeOfByteSlice
+			c.hasSlice = c.hasSlice || fIsSlice
+		}
+
+		var sub *structCodec
+		if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+			var err error
+			sub, err = getStructCodecLocked(substructType)
+			if err != nil {
+				return nil, err
+			}
+			if !sub.complete {
+				return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+			}
+			if fIsSlice && sub.hasSlice {
+				return nil, fmt.Errorf(
+					"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+			}
+			c.hasSlice = c.hasSlice || sub.hasSlice
+			// If name is empty at this point, f is an anonymous struct field.
+			// In this case, we promote the substruct's fields up to this level
+			// in the linked list of struct codecs.
+			if name == "" {
+				for subname, subfield := range sub.fields {
+					if _, ok := c.fields[subname]; ok {
+						return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
+					}
+					c.fields[subname] = fieldCodec{
+						path:        append([]int{i}, subfield.path...),
+						noIndex:     subfield.noIndex || opts["noindex"],
+						structCodec: subfield.structCodec,
+					}
+				}
+				continue
+			}
+		}
+
+		if _, ok := c.fields[name]; ok {
+			return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+		}
+		c.fields[name] = fieldCodec{
+			path:        []int{i},
+			noIndex:     opts["noindex"],
+			structCodec: sub,
+		}
+	}
+	c.complete = true
+	return c, nil
+}
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+	v     reflect.Value
+	codec *structCodec
+}
+
+// newStructPLS returns a structPLS, which implements the
+// PropertyLoadSaver interface, for the struct pointer p.
+func newStructPLS(p interface{}) (*structPLS, error) {
+	v := reflect.ValueOf(p)
+	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+		return nil, ErrInvalidEntityType
+	}
+	v = v.Elem()
+	codec, err := getStructCodec(v.Type())
+	if err != nil {
+		return nil, err
+	}
+	return &structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+	x, err := newStructPLS(dst)
+	if err != nil {
+		return err
+	}
+	return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+	x, err := newStructPLS(src)
+	if err != nil {
+		return nil, err
+	}
+	return x.Save()
+}
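+
+// SaveStruct and LoadStruct are symmetric, so a struct can be round-tripped
+// through its property form (a minimal sketch; Article is a hypothetical
+// struct type):
+//
+//	props, err := SaveStruct(&a)
+//	if err != nil {
+//		return err
+//	}
+//	var b Article
+//	if err := LoadStruct(&b, props); err != nil {
+//		return err
+//	}
+//	// b now matches a, except for unexported and "-"-tagged fields.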
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 0000000..3847b0f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,724 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+type operator int
+
+const (
+	lessThan operator = iota
+	lessEq
+	equal
+	greaterEq
+	greaterThan
+)
+
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+	lessThan:    pb.Query_Filter_LESS_THAN.Enum(),
+	lessEq:      pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+	equal:       pb.Query_Filter_EQUAL.Enum(),
+	greaterEq:   pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+	greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+	FieldName string
+	Op        operator
+	Value     interface{}
+}
+
+type sortDirection int
+
+const (
+	ascending sortDirection = iota
+	descending
+)
+
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+	ascending:  pb.Query_Order_ASCENDING.Enum(),
+	descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+	FieldName string
+	Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+	return &Query{
+		kind:  kind,
+		limit: -1,
+	}
+}
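+
+// A typical query chains the derivative methods below (a minimal sketch;
+// the "Article" kind and its fields are hypothetical):
+//
+//	q := NewQuery("Article").
+//		Filter("Published =", true).
+//		Order("-Date").
+//		Limit(10)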
+
+// Query represents a datastore query.
+type Query struct {
+	kind       string
+	ancestor   *Key
+	filter     []filter
+	order      []order
+	projection []string
+
+	distinct bool
+	keysOnly bool
+	eventual bool
+	limit    int32
+	offset   int32
+	start    *pb.CompiledCursor
+	end      *pb.CompiledCursor
+
+	err error
+}
+
+func (q *Query) clone() *Query {
+	x := *q
+	// Copy the contents of the slice-typed fields to a new backing store.
+	if len(q.filter) > 0 {
+		x.filter = make([]filter, len(q.filter))
+		copy(x.filter, q.filter)
+	}
+	if len(q.order) > 0 {
+		x.order = make([]order, len(q.order))
+		copy(x.order, q.order)
+	}
+	return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+	q = q.clone()
+	if ancestor == nil {
+		q.err = errors.New("datastore: nil query ancestor")
+		return q
+	}
+	q.ancestor = ancestor
+	return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+	q = q.clone()
+	q.eventual = true
+	return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by an optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+	q = q.clone()
+	filterStr = strings.TrimSpace(filterStr)
+	if len(filterStr) < 1 {
+		q.err = errors.New("datastore: invalid filter: " + filterStr)
+		return q
+	}
+	f := filter{
+		FieldName: strings.TrimRight(filterStr, " ><=!"),
+		Value:     value,
+	}
+	switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+	case "<=":
+		f.Op = lessEq
+	case ">=":
+		f.Op = greaterEq
+	case "<":
+		f.Op = lessThan
+	case ">":
+		f.Op = greaterThan
+	case "=":
+		f.Op = equal
+	default:
+		q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+		return q
+	}
+	q.filter = append(q.filter, f)
+	return q
+}
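+
+// For example (a sketch; the field names are hypothetical):
+//
+//	q = q.Filter("Views >=", 100)
+//	q = q.Filter("Author =", "gopher")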
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+func (q *Query) Order(fieldName string) *Query {
+	q = q.clone()
+	fieldName = strings.TrimSpace(fieldName)
+	o := order{
+		Direction: ascending,
+		FieldName: fieldName,
+	}
+	if strings.HasPrefix(fieldName, "-") {
+		o.Direction = descending
+		o.FieldName = strings.TrimSpace(fieldName[1:])
+	} else if strings.HasPrefix(fieldName, "+") {
+		q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+		return q
+	}
+	if len(o.FieldName) == 0 {
+		q.err = errors.New("datastore: empty order")
+		return q
+	}
+	q.order = append(q.order, o)
+	return q
+}
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+	q = q.clone()
+	q.projection = append([]string(nil), fieldNames...)
+	return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+	q = q.clone()
+	q.distinct = true
+	return q
+}
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+	q = q.clone()
+	q.keysOnly = true
+	return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+	q = q.clone()
+	if limit < math.MinInt32 || limit > math.MaxInt32 {
+		q.err = errors.New("datastore: query limit overflow")
+		return q
+	}
+	q.limit = int32(limit)
+	return q
+}
+
+// Offset returns a derivative query that has an offset of how many keys to
+// skip over before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+	q = q.clone()
+	if offset < 0 {
+		q.err = errors.New("datastore: negative query offset")
+		return q
+	}
+	if offset > math.MaxInt32 {
+		q.err = errors.New("datastore: query offset overflow")
+		return q
+	}
+	q.offset = int32(offset)
+	return q
+}
+
+// Start returns a derivative query with the given start point.
+func (q *Query) Start(c Cursor) *Query {
+	q = q.clone()
+	if c.cc == nil {
+		q.err = errors.New("datastore: invalid cursor")
+		return q
+	}
+	q.start = c.cc
+	return q
+}
+
+// End returns a derivative query with the given end point.
+func (q *Query) End(c Cursor) *Query {
+	q = q.clone()
+	if c.cc == nil {
+		q.err = errors.New("datastore: invalid cursor")
+		return q
+	}
+	q.end = c.cc
+	return q
+}
+
+// toProto converts the query to a protocol buffer.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+	if len(q.projection) != 0 && q.keysOnly {
+		return errors.New("datastore: query cannot both project and be keys-only")
+	}
+	dst.Reset()
+	dst.App = proto.String(appID)
+	if q.kind != "" {
+		dst.Kind = proto.String(q.kind)
+	}
+	if q.ancestor != nil {
+		dst.Ancestor = keyToProto(appID, q.ancestor)
+		if q.eventual {
+			dst.Strong = proto.Bool(false)
+		}
+	}
+	if q.projection != nil {
+		dst.PropertyName = q.projection
+		if q.distinct {
+			dst.GroupByPropertyName = q.projection
+		}
+	}
+	if q.keysOnly {
+		dst.KeysOnly = proto.Bool(true)
+		dst.RequirePerfectPlan = proto.Bool(true)
+	}
+	for _, qf := range q.filter {
+		if qf.FieldName == "" {
+			return errors.New("datastore: empty query filter field name")
+		}
+		p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+		if errStr != "" {
+			return errors.New("datastore: bad query filter value type: " + errStr)
+		}
+		xf := &pb.Query_Filter{
+			Op:       operatorToProto[qf.Op],
+			Property: []*pb.Property{p},
+		}
+		if xf.Op == nil {
+			return errors.New("datastore: unknown query filter operator")
+		}
+		dst.Filter = append(dst.Filter, xf)
+	}
+	for _, qo := range q.order {
+		if qo.FieldName == "" {
+			return errors.New("datastore: empty query order field name")
+		}
+		xo := &pb.Query_Order{
+			Property:  proto.String(qo.FieldName),
+			Direction: sortDirectionToProto[qo.Direction],
+		}
+		if xo.Direction == nil {
+			return errors.New("datastore: unknown query order direction")
+		}
+		dst.Order = append(dst.Order, xo)
+	}
+	if q.limit >= 0 {
+		dst.Limit = proto.Int32(q.limit)
+	}
+	if q.offset != 0 {
+		dst.Offset = proto.Int32(q.offset)
+	}
+	dst.CompiledCursor = q.start
+	dst.EndCompiledCursor = q.end
+	dst.Compile = proto.Bool(true)
+	return nil
+}
+
+// Count returns the number of results for the query.
+//
+// The running time and number of API calls made by Count scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (q *Query) Count(c context.Context) (int, error) {
+	// Check that the query is well-formed.
+	if q.err != nil {
+		return 0, q.err
+	}
+
+	// Run a copy of the query, with keysOnly true (if we're not a projection,
+	// since the two are incompatible), and an adjusted offset. We also set the
+	// limit to zero, as we don't want any actual entity data, just the number
+	// of skipped results.
+	newQ := q.clone()
+	newQ.keysOnly = len(newQ.projection) == 0
+	newQ.limit = 0
+	if q.limit < 0 {
+		// If the original query was unlimited, set the new query's offset to maximum.
+		newQ.offset = math.MaxInt32
+	} else {
+		newQ.offset = q.offset + q.limit
+		if newQ.offset < 0 {
+			// Do the best we can, in the presence of overflow.
+			newQ.offset = math.MaxInt32
+		}
+	}
+	req := &pb.Query{}
+	if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
+		return 0, err
+	}
+	res := &pb.QueryResult{}
+	if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
+		return 0, err
+	}
+
+	// n is the count we will return. For example, suppose that our original
+	// query had an offset of 4 and a limit of 2008: the count will be 2008,
+	// provided that there are at least 2012 matching entities. However, the
+	// RPCs will only skip 1000 results at a time. The RPC sequence is:
+	//   call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset
+	//   response has (skippedResults, moreResults) = (1000, true)
+	//   n += 1000  // n == 1000
+	//   call Next     with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n
+	//   response has (skippedResults, moreResults) = (1000, true)
+	//   n += 1000  // n == 2000
+	//   call Next     with (offset, limit) = (12, 0)    // 12 == newQ.offset - n
+	//   response has (skippedResults, moreResults) = (12, false)
+	//   n += 12    // n == 2012
+	//   // exit the loop
+	//   n -= 4     // n == 2008
+	var n int32
+	for {
+		// The QueryResult should have no actual entity data, just skipped results.
+		if len(res.Result) != 0 {
+			return 0, errors.New("datastore: internal error: Count request returned too much data")
+		}
+		n += res.GetSkippedResults()
+		if !res.GetMoreResults() {
+			break
+		}
+		if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+			return 0, err
+		}
+	}
+	n -= q.offset
+	if n < 0 {
+		// If the offset was greater than the number of matching entities,
+		// return 0 instead of negative.
+		n = 0
+	}
+	return int(n), nil
+}
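+
+// A short usage sketch (ctx is an App Engine context; the "Article" kind is
+// hypothetical):
+//
+//	n, err := NewQuery("Article").Limit(1000).Count(ctx)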
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+	if res.Cursor == nil {
+		return errors.New("datastore: internal error: server did not return a cursor")
+	}
+	req := &pb.NextRequest{
+		Cursor: res.Cursor,
+	}
+	if limit >= 0 {
+		req.Count = proto.Int32(limit)
+	}
+	if offset != 0 {
+		req.Offset = proto.Int32(offset)
+	}
+	if res.CompiledCursor != nil {
+		req.Compile = proto.Bool(true)
+	}
+	res.Reset()
+	return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly
+// with the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
+	var (
+		dv               reflect.Value
+		mat              multiArgType
+		elemType         reflect.Type
+		errFieldMismatch error
+	)
+	if !q.keysOnly {
+		dv = reflect.ValueOf(dst)
+		if dv.Kind() != reflect.Ptr || dv.IsNil() {
+			return nil, ErrInvalidEntityType
+		}
+		dv = dv.Elem()
+		mat, elemType = checkMultiArg(dv)
+		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+			return nil, ErrInvalidEntityType
+		}
+	}
+
+	var keys []*Key
+	for t := q.Run(c); ; {
+		k, e, err := t.next()
+		if err == Done {
+			break
+		}
+		if err != nil {
+			return keys, err
+		}
+		if !q.keysOnly {
+			ev := reflect.New(elemType)
+			if elemType.Kind() == reflect.Map {
+				// This is a special case. The zero values of a map type are
+				// not immediately useful; they have to be make'd.
+				//
+				// Funcs and channels are similar, in that a zero value is not useful,
+				// but even a freshly make'd channel isn't useful: there's no fixed
+				// channel buffer size that is always going to be large enough, and
+				// there's no goroutine to drain the other end. Theoretically, these
+				// types could be supported, for example by sniffing for a constructor
+				// method or requiring prior registration, but for now it's not a
+				// frequent enough concern to be worth it. Programmers can work around
+				// it by explicitly using Iterator.Next instead of the Query.GetAll
+				// convenience method.
+				x := reflect.MakeMap(elemType)
+				ev.Elem().Set(x)
+			}
+			if err = loadEntity(ev.Interface(), e); err != nil {
+				if _, ok := err.(*ErrFieldMismatch); ok {
+					// We continue loading entities even in the face of field mismatch errors.
+					// If we encounter any other error, that other error is returned. Otherwise,
+					// an ErrFieldMismatch is returned.
+					errFieldMismatch = err
+				} else {
+					return keys, err
+				}
+			}
+			if mat != multiArgTypeStructPtr {
+				ev = ev.Elem()
+			}
+			dv.Set(reflect.Append(dv, ev))
+		}
+		keys = append(keys, k)
+	}
+	return keys, errFieldMismatch
+}
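+
+// A minimal usage sketch (ctx is an App Engine context; Article is a
+// hypothetical struct type):
+//
+//	var articles []Article
+//	keys, err := NewQuery("Article").Limit(10).GetAll(ctx, &articles)
+//	if err != nil {
+//		return err
+//	}
+//	// keys[i] corresponds to articles[i].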
+
+// Run runs the query in the given context.
+func (q *Query) Run(c context.Context) *Iterator {
+	if q.err != nil {
+		return &Iterator{err: q.err}
+	}
+	t := &Iterator{
+		c:      c,
+		limit:  q.limit,
+		q:      q,
+		prevCC: q.start,
+	}
+	var req pb.Query
+	if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
+		t.err = err
+		return t
+	}
+	if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
+		t.err = err
+		return t
+	}
+	offset := q.offset - t.res.GetSkippedResults()
+	for offset > 0 && t.res.GetMoreResults() {
+		t.prevCC = t.res.CompiledCursor
+		if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
+			t.err = err
+			break
+		}
+		skip := t.res.GetSkippedResults()
+		if skip < 0 {
+			t.err = errors.New("datastore: internal error: negative number of skipped_results")
+			break
+		}
+		offset -= skip
+	}
+	if offset < 0 {
+		t.err = errors.New("datastore: internal error: query offset was overshot")
+	}
+	return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+	c   context.Context
+	err error
+	// res is the result of the most recent RunQuery or Next API call.
+	res pb.QueryResult
+	// i is how many elements of res.Result we have iterated over.
+	i int
+	// limit is the limit on the number of results this iterator should return.
+	// A negative value means unlimited.
+	limit int32
+	// q is the original query which yielded this iterator.
+	q *Query
+	// prevCC is the compiled cursor that marks the end of the previous batch
+	// of results.
+	prevCC *pb.CompiledCursor
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+	k, e, err := t.next()
+	if err != nil {
+		return nil, err
+	}
+	if dst != nil && !t.q.keysOnly {
+		err = loadEntity(dst, e)
+	}
+	return k, err
+}
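+
+// The canonical iteration loop is (a sketch; Article is a hypothetical
+// struct type):
+//
+//	for t := q.Run(ctx); ; {
+//		var a Article
+//		k, err := t.Next(&a)
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// use k and a
+//	}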
+
+func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
+	if t.err != nil {
+		return nil, nil, t.err
+	}
+
+	// Issue datastore_v3/Next RPCs as necessary.
+	for t.i == len(t.res.Result) {
+		if !t.res.GetMoreResults() {
+			t.err = Done
+			return nil, nil, t.err
+		}
+		t.prevCC = t.res.CompiledCursor
+		if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
+			t.err = err
+			return nil, nil, t.err
+		}
+		if t.res.GetSkippedResults() != 0 {
+			t.err = errors.New("datastore: internal error: iterator has skipped results")
+			return nil, nil, t.err
+		}
+		t.i = 0
+		if t.limit >= 0 {
+			t.limit -= int32(len(t.res.Result))
+			if t.limit < 0 {
+				t.err = errors.New("datastore: internal error: query returned more results than the limit")
+				return nil, nil, t.err
+			}
+		}
+	}
+
+	// Extract the key from the t.i'th element of t.res.Result.
+	e := t.res.Result[t.i]
+	t.i++
+	if e.Key == nil {
+		return nil, nil, errors.New("datastore: internal error: server did not return a key")
+	}
+	k, err := protoToKey(e.Key)
+	if err != nil || k.Incomplete() {
+		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+	}
+	return k, e, nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+	if t.err != nil && t.err != Done {
+		return Cursor{}, t.err
+	}
+	// If we are at either end of the current batch of results,
+	// return the compiled cursor at that end.
+	skipped := t.res.GetSkippedResults()
+	if t.i == 0 && skipped == 0 {
+		if t.prevCC == nil {
+			// A nil pointer (of type *pb.CompiledCursor) means no constraint:
+			// passing it as the end cursor of a new query means unlimited results
+			// (glossing over the integer limit parameter for now).
+			// A non-nil pointer to an empty pb.CompiledCursor means the start:
+			// passing it as the end cursor of a new query means 0 results.
+			// If prevCC was nil, then the original query had no start cursor, but
+			// Iterator.Cursor should return "the start" instead of unlimited.
+			return Cursor{&zeroCC}, nil
+		}
+		return Cursor{t.prevCC}, nil
+	}
+	if t.i == len(t.res.Result) {
+		return Cursor{t.res.CompiledCursor}, nil
+	}
+	// Otherwise, re-run the query, offset to this iterator's position, starting from
+	// the most recent compiled cursor. This is done on a best-effort basis, as it
+	// is racy; if a concurrent process has added or removed entities, then the
+	// cursor returned may be inconsistent.
+	q := t.q.clone()
+	q.start = t.prevCC
+	q.offset = skipped + int32(t.i)
+	q.limit = 0
+	q.keysOnly = len(q.projection) == 0
+	t1 := q.Run(t.c)
+	_, _, err := t1.next()
+	if err != Done {
+		if err == nil {
+			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+		}
+		return Cursor{}, err
+	}
+	return Cursor{t1.res.CompiledCursor}, nil
+}
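+
+// Cursors are typically persisted between requests as strings (a minimal
+// sketch; q and t are an existing query and iterator):
+//
+//	cur, err := t.Cursor()
+//	if err != nil {
+//		return err
+//	}
+//	s := cur.String() // store s, e.g. in the response to the client
+//
+//	// ...later, in another request:
+//	cur2, err := DecodeCursor(s)
+//	if err != nil {
+//		return err
+//	}
+//	t2 := q.Start(cur2).Run(ctx)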
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+	cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+	if c.cc == nil {
+		return ""
+	}
+	b, err := proto.Marshal(c.cc)
+	if err != nil {
+		// The only way to construct a Cursor with a non-nil cc field is to
+		// unmarshal from the byte representation. We panic if the unmarshal
+		// succeeds but the marshaling of the unchanged protobuf value fails.
+		panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+	}
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+	if s == "" {
+		return Cursor{&zeroCC}, nil
+	}
+	if n := len(s) % 4; n != 0 {
+		s += strings.Repeat("=", 4-n)
+	}
+	b, err := base64.URLEncoding.DecodeString(s)
+	if err != nil {
+		return Cursor{}, err
+	}
+	cc := &pb.CompiledCursor{}
+	if err := proto.Unmarshal(b, cc); err != nil {
+		return Cursor{}, err
+	}
+	return Cursor{cc}, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 0000000..b09e8b3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,294 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	"google.golang.org/appengine"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+	// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+	// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+	// be represented in the numerator of a single int64 divide.
+	return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+	return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
+}
+
+var (
+	minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+	maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+	var (
+		pv          pb.PropertyValue
+		unsupported bool
+	)
+	switch v.Kind() {
+	case reflect.Invalid:
+		// No-op.
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		pv.Int64Value = proto.Int64(v.Int())
+	case reflect.Bool:
+		pv.BooleanValue = proto.Bool(v.Bool())
+	case reflect.String:
+		pv.StringValue = proto.String(v.String())
+	case reflect.Float32, reflect.Float64:
+		pv.DoubleValue = proto.Float64(v.Float())
+	case reflect.Ptr:
+		if k, ok := v.Interface().(*Key); ok {
+			if k != nil {
+				pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+			}
+		} else {
+			unsupported = true
+		}
+	case reflect.Struct:
+		switch t := v.Interface().(type) {
+		case time.Time:
+			if t.Before(minTime) || t.After(maxTime) {
+				return nil, "time value out of range"
+			}
+			pv.Int64Value = proto.Int64(toUnixMicro(t))
+		case appengine.GeoPoint:
+			if !t.Valid() {
+				return nil, "invalid GeoPoint value"
+			}
+			// NOTE: Strangely, latitude maps to X, longitude to Y.
+			pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
+		default:
+			unsupported = true
+		}
+	case reflect.Slice:
+		if b, ok := v.Interface().([]byte); ok {
+			pv.StringValue = proto.String(string(b))
+		} else {
+			// nvToProto should already catch slice values.
+			// If we get here, we have a slice of slice values.
+			unsupported = true
+		}
+	default:
+		unsupported = true
+	}
+	if unsupported {
+		return nil, "unsupported datastore value type: " + v.Type().String()
+	}
+	p = &pb.Property{
+		Name:     proto.String(name),
+		Value:    &pv,
+		Multiple: proto.Bool(multiple),
+	}
+	if v.IsValid() {
+		switch v.Interface().(type) {
+		case []byte:
+			p.Meaning = pb.Property_BLOB.Enum()
+		case ByteString:
+			p.Meaning = pb.Property_BYTESTRING.Enum()
+		case appengine.BlobKey:
+			p.Meaning = pb.Property_BLOBKEY.Enum()
+		case time.Time:
+			p.Meaning = pb.Property_GD_WHEN.Enum()
+		case appengine.GeoPoint:
+			p.Meaning = pb.Property_GEORSS_POINT.Enum()
+		}
+	}
+	return p, ""
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into an EntityProto.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+	var err error
+	var props []Property
+	if e, ok := src.(PropertyLoadSaver); ok {
+		props, err = e.Save()
+	} else {
+		props, err = SaveStruct(src)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return propertiesToProto(defaultAppID, key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
+	p := Property{
+		Name:     name,
+		NoIndex:  noIndex,
+		Multiple: multiple,
+	}
+	switch x := v.Interface().(type) {
+	case *Key:
+		p.Value = x
+	case time.Time:
+		p.Value = x
+	case appengine.BlobKey:
+		p.Value = x
+	case appengine.GeoPoint:
+		p.Value = x
+	case ByteString:
+		p.Value = x
+	default:
+		switch v.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			p.Value = v.Int()
+		case reflect.Bool:
+			p.Value = v.Bool()
+		case reflect.String:
+			p.Value = v.String()
+		case reflect.Float32, reflect.Float64:
+			p.Value = v.Float()
+		case reflect.Slice:
+			if v.Type().Elem().Kind() == reflect.Uint8 {
+				p.NoIndex = true
+				p.Value = v.Bytes()
+			}
+		case reflect.Struct:
+			if !v.CanAddr() {
+				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+			}
+			sub, err := newStructPLS(v.Addr().Interface())
+			if err != nil {
+				return fmt.Errorf("datastore: unsupported struct field: %v", err)
+			}
+			return sub.save(props, name+".", noIndex, multiple)
+		}
+	}
+	if p.Value == nil {
+		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+	}
+	*props = append(*props, p)
+	return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+	var props []Property
+	if err := s.save(&props, "", false, false); err != nil {
+		return nil, err
+	}
+	return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
+	for name, f := range s.codec.fields {
+		name = prefix + name
+		v := s.v.FieldByIndex(f.path)
+		if !v.IsValid() || !v.CanSet() {
+			continue
+		}
+		noIndex1 := noIndex || f.noIndex
+		// For slice fields that aren't []byte, save each element.
+		if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+			for j := 0; j < v.Len(); j++ {
+				if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		// Otherwise, save the field itself.
+		if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
+	e := &pb.EntityProto{
+		Key: keyToProto(defaultAppID, key),
+	}
+	if key.parent == nil {
+		e.EntityGroup = &pb.Path{}
+	} else {
+		e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
+	}
+	prevMultiple := make(map[string]bool)
+
+	for _, p := range props {
+		if pm, ok := prevMultiple[p.Name]; ok {
+			if !pm || !p.Multiple {
+				return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
+			}
+		} else {
+			prevMultiple[p.Name] = p.Multiple
+		}
+
+		x := &pb.Property{
+			Name:     proto.String(p.Name),
+			Value:    new(pb.PropertyValue),
+			Multiple: proto.Bool(p.Multiple),
+		}
+		switch v := p.Value.(type) {
+		case int64:
+			x.Value.Int64Value = proto.Int64(v)
+		case bool:
+			x.Value.BooleanValue = proto.Bool(v)
+		case string:
+			x.Value.StringValue = proto.String(v)
+			if p.NoIndex {
+				x.Meaning = pb.Property_TEXT.Enum()
+			}
+		case float64:
+			x.Value.DoubleValue = proto.Float64(v)
+		case *Key:
+			if v != nil {
+				x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
+			}
+		case time.Time:
+			if v.Before(minTime) || v.After(maxTime) {
+				return nil, fmt.Errorf("datastore: time value out of range")
+			}
+			x.Value.Int64Value = proto.Int64(toUnixMicro(v))
+			x.Meaning = pb.Property_GD_WHEN.Enum()
+		case appengine.BlobKey:
+			x.Value.StringValue = proto.String(string(v))
+			x.Meaning = pb.Property_BLOBKEY.Enum()
+		case appengine.GeoPoint:
+			if !v.Valid() {
+				return nil, fmt.Errorf("datastore: invalid GeoPoint value")
+			}
+			// NOTE: Strangely, latitude maps to X, longitude to Y.
+			x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
+			x.Meaning = pb.Property_GEORSS_POINT.Enum()
+		case []byte:
+			x.Value.StringValue = proto.String(string(v))
+			x.Meaning = pb.Property_BLOB.Enum()
+			if !p.NoIndex {
+				return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
+			}
+		case ByteString:
+			x.Value.StringValue = proto.String(string(v))
+			x.Meaning = pb.Property_BYTESTRING.Enum()
+		default:
+			if p.Value != nil {
+				return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
+			}
+		}
+
+		if p.NoIndex {
+			e.RawProperty = append(e.RawProperty, x)
+		} else {
+			e.Property = append(e.Property, x)
+			if len(e.Property) > maxIndexedProperties {
+				return nil, errors.New("datastore: too many indexed properties")
+			}
+		}
+	}
+	return e, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 0000000..a7f3f2b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"errors"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+	internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+		x.Transaction = t
+	})
+	internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+		x.Transaction = t
+	})
+	internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+		x.Transaction = t
+	})
+	internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+		x.Transaction = t
+	})
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts. The number of attempts can be configured by specifying
+// TransactionOptions.Attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Since f may be called multiple times, f should usually be idempotent.
+// datastore.Get is not idempotent when unmarshaling slice fields.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
+	xg := false
+	if opts != nil {
+		xg = opts.XG
+	}
+	attempts := 3
+	if opts != nil && opts.Attempts > 0 {
+		attempts = opts.Attempts
+	}
+	for i := 0; i < attempts; i++ {
+		if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {
+			return err
+		}
+	}
+	return ErrConcurrentTransaction
+}
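+
+// A typical read-modify-write looks like this (a minimal sketch; Counter is
+// a hypothetical struct type and k an existing *Key):
+//
+//	err := RunInTransaction(c, func(tc context.Context) error {
+//		var ct Counter
+//		if err := Get(tc, k, &ct); err != nil && err != ErrNoSuchEntity {
+//			return err
+//		}
+//		ct.Count++
+//		_, err := Put(tc, k, &ct)
+//		return err
+//	}, nil)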
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+	// XG is whether the transaction can cross multiple entity groups. In
+	// comparison, a single group transaction is one where all datastore keys
+	// used have the same root key. Note that cross group transactions do not
+	// have the same behavior as single group transactions. In particular,
+	// global queries are much more likely to observe partially applied
+	// transactions across different entity groups.
+	// It is valid to set XG to true even if the transaction is within a
+	// single entity group.
+	XG bool
+	// Attempts controls the number of retries to perform when commits fail
+	// due to a conflicting transaction. If omitted, it defaults to 3.
+	Attempts int
+}
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
index e9c56d4..aba0f83 100644
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -32,7 +32,8 @@
 )
 
 const (
-	apiPath = "/rpc_http"
+	apiPath             = "/rpc_http"
+	defaultTicketSuffix = "/default.20150612t184001.0"
 )
 
 var (
@@ -269,8 +270,13 @@
 	return withContext(parent, c)
 }
 
-func getDefaultTicket() string {
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
 	defaultTicketOnce.Do(func() {
+		if IsDevAppServer() {
+			defaultTicket = "testapp" + defaultTicketSuffix
+			return
+		}
 		appID := partitionlessAppID()
 		escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
 		majVersion := VersionID(nil)
@@ -291,7 +297,7 @@
 	}
 
 	// Compute background security ticket.
-	ticket := getDefaultTicket()
+	ticket := DefaultTicket()
 
 	ctxs.bg = &context{
 		req: &http.Request{
@@ -485,9 +491,15 @@
 	}
 
 	ticket := c.req.Header.Get(ticketHeader)
-	// Fall back to use background ticket when the request ticket is not available in Flex.
+	// Use a test ticket in a test environment.
 	if ticket == "" {
-		ticket = getDefaultTicket()
+		if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+			ticket = appid.(string) + defaultTicketSuffix
+		}
+	}
+	// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
+	if ticket == "" {
+		ticket = DefaultTicket()
 	}
 	req := &remotepb.Request{
 		ServiceName: &service,
@@ -564,6 +576,9 @@
 }
 
 func logf(c *context, level int64, format string, args ...interface{}) {
+	if c == nil {
+		panic("not an App Engine context")
+	}
 	s := fmt.Sprintf(format, args...)
 	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
 	c.addLogLine(&logpb.UserAppLogLine{
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
index 2db33a7..8c3eece 100644
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -5,6 +5,8 @@
 package internal
 
 import (
+	"os"
+
 	"github.com/golang/protobuf/proto"
 	netcontext "golang.org/x/net/context"
 )
@@ -84,3 +86,31 @@
 func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
 	return withNamespace(ctx, namespace)
 }
+
+// SetTestEnv sets the environment variables used to test the background ticket in Flex.
+func SetTestEnv() func() {
+	var environ = []struct {
+		key, value string
+	}{
+		{"GAE_LONG_APP_ID", "my-app-id"},
+		{"GAE_MINOR_VERSION", "067924799508853122"},
+		{"GAE_MODULE_INSTANCE", "0"},
+		{"GAE_MODULE_NAME", "default"},
+		{"GAE_MODULE_VERSION", "20150612t184001"},
+	}
+
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		environ[i].value = old // v is a copy, so write back through the slice
+	}
+	return func() { // Restore old environment after the test completes.
+		for _, v := range environ {
+			if v.value == "" {
+				os.Unsetenv(v.key)
+				continue
+			}
+			os.Setenv(v.key, v.value)
+		}
+	}
+}
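+
+// The returned func restores the previous values, so a test can simply do
+// (a usage sketch):
+//
+//	defer SetTestEnv()()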
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 0000000..252fef8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,938 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+	MemcacheServiceError
+	AppOverride
+	MemcacheGetRequest
+	MemcacheGetResponse
+	MemcacheSetRequest
+	MemcacheSetResponse
+	MemcacheDeleteRequest
+	MemcacheDeleteResponse
+	MemcacheIncrementRequest
+	MemcacheIncrementResponse
+	MemcacheBatchIncrementRequest
+	MemcacheBatchIncrementResponse
+	MemcacheFlushRequest
+	MemcacheFlushResponse
+	MemcacheStatsRequest
+	MergedNamespaceStats
+	MemcacheStatsResponse
+	MemcacheGrabTailRequest
+	MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+	MemcacheServiceError_OK                MemcacheServiceError_ErrorCode = 0
+	MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+	MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+	MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+	MemcacheServiceError_INVALID_VALUE     MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "UNSPECIFIED_ERROR",
+	2: "NAMESPACE_NOT_SET",
+	3: "PERMISSION_DENIED",
+	6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+	"OK":                0,
+	"UNSPECIFIED_ERROR": 1,
+	"NAMESPACE_NOT_SET": 2,
+	"PERMISSION_DENIED": 3,
+	"INVALID_VALUE":     6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+	p := new(MemcacheServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+	return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheServiceError_ErrorCode(value)
+	return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+	MemcacheSetRequest_SET     MemcacheSetRequest_SetPolicy = 1
+	MemcacheSetRequest_ADD     MemcacheSetRequest_SetPolicy = 2
+	MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+	MemcacheSetRequest_CAS     MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+	1: "SET",
+	2: "ADD",
+	3: "REPLACE",
+	4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+	"SET":     1,
+	"ADD":     2,
+	"REPLACE": 3,
+	"CAS":     4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+	p := new(MemcacheSetRequest_SetPolicy)
+	*p = x
+	return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+	return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheSetRequest_SetPolicy(value)
+	return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+	MemcacheSetResponse_STORED     MemcacheSetResponse_SetStatusCode = 1
+	MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+	MemcacheSetResponse_ERROR      MemcacheSetResponse_SetStatusCode = 3
+	MemcacheSetResponse_EXISTS     MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+	1: "STORED",
+	2: "NOT_STORED",
+	3: "ERROR",
+	4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+	"STORED":     1,
+	"NOT_STORED": 2,
+	"ERROR":      3,
+	"EXISTS":     4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+	p := new(MemcacheSetResponse_SetStatusCode)
+	*p = x
+	return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+	return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheSetResponse_SetStatusCode(value)
+	return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+	MemcacheDeleteResponse_DELETED   MemcacheDeleteResponse_DeleteStatusCode = 1
+	MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+	1: "DELETED",
+	2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+	"DELETED":   1,
+	"NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+	p := new(MemcacheDeleteResponse_DeleteStatusCode)
+	*p = x
+	return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+	return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheDeleteResponse_DeleteStatusCode(value)
+	return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+	MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+	MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+	1: "INCREMENT",
+	2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+	"INCREMENT": 1,
+	"DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+	p := new(MemcacheIncrementRequest_Direction)
+	*p = x
+	return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+	return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheIncrementRequest_Direction(value)
+	return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+	MemcacheIncrementResponse_OK          MemcacheIncrementResponse_IncrementStatusCode = 1
+	MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+	MemcacheIncrementResponse_ERROR       MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+	1: "OK",
+	2: "NOT_CHANGED",
+	3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+	"OK":          1,
+	"NOT_CHANGED": 2,
+	"ERROR":       3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+	p := new(MemcacheIncrementResponse_IncrementStatusCode)
+	*p = x
+	return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+	return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+	if err != nil {
+		return err
+	}
+	*x = MemcacheIncrementResponse_IncrementStatusCode(value)
+	return nil
+}
+
+type MemcacheServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset()         { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage()    {}
+
+type AppOverride struct {
+	AppId                    *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	NumMemcachegBackends     *int32  `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+	IgnoreShardlock          *bool   `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+	MemcachePoolHint         *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+	MemcacheShardingStrategy []byte  `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+	XXX_unrecognized         []byte  `json:"-"`
+}
+
+func (m *AppOverride) Reset()         { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage()    {}
+
+func (m *AppOverride) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+	if m != nil && m.NumMemcachegBackends != nil {
+		return *m.NumMemcachegBackends
+	}
+	return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+	if m != nil && m.IgnoreShardlock != nil {
+		return *m.IgnoreShardlock
+	}
+	return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+	if m != nil && m.MemcachePoolHint != nil {
+		return *m.MemcachePoolHint
+	}
+	return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+	if m != nil {
+		return m.MemcacheShardingStrategy
+	}
+	return nil
+}
+
+type MemcacheGetRequest struct {
+	Key              [][]byte     `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	NameSpace        *string      `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+	ForCas           *bool        `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+	Override         *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset()         { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage()    {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+	if m != nil && m.ForCas != nil {
+		return *m.ForCas
+	}
+	return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheGetResponse struct {
+	Item             []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+	XXX_unrecognized []byte                      `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset()         { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage()    {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+type MemcacheGetResponse_Item struct {
+	Key              []byte  `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+	Value            []byte  `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+	Flags            *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+	CasId            *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+	ExpiresInSeconds *int32  `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset()         { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage()    {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+	if m != nil && m.Flags != nil {
+		return *m.Flags
+	}
+	return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+	if m != nil && m.CasId != nil {
+		return *m.CasId
+	}
+	return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+	if m != nil && m.ExpiresInSeconds != nil {
+		return *m.ExpiresInSeconds
+	}
+	return 0
+}
+
+type MemcacheSetRequest struct {
+	Item             []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+	NameSpace        *string                    `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+	Override         *AppOverride               `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte                     `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset()         { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage()    {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheSetRequest_Item struct {
+	Key              []byte                        `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+	Value            []byte                        `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+	Flags            *uint32                       `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+	SetPolicy        *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+	ExpirationTime   *uint32                       `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+	CasId            *uint64                       `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+	ForCas           *bool                         `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset()         { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage()    {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+	if m != nil && m.Flags != nil {
+		return *m.Flags
+	}
+	return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+	if m != nil && m.SetPolicy != nil {
+		return *m.SetPolicy
+	}
+	return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+	if m != nil && m.ExpirationTime != nil {
+		return *m.ExpirationTime
+	}
+	return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+	if m != nil && m.CasId != nil {
+		return *m.CasId
+	}
+	return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+	if m != nil && m.ForCas != nil {
+		return *m.ForCas
+	}
+	return false
+}
+
+type MemcacheSetResponse struct {
+	SetStatus        []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+	XXX_unrecognized []byte                              `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset()         { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage()    {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+	if m != nil {
+		return m.SetStatus
+	}
+	return nil
+}
+
+type MemcacheDeleteRequest struct {
+	Item             []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+	NameSpace        *string                       `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+	Override         *AppOverride                  `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset()         { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage()    {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+	Key              []byte  `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+	DeleteTime       *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset()         { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage()    {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+	if m != nil && m.DeleteTime != nil {
+		return *m.DeleteTime
+	}
+	return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+	DeleteStatus     []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+	XXX_unrecognized []byte                                    `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset()         { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage()    {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+	if m != nil {
+		return m.DeleteStatus
+	}
+	return nil
+}
+
+type MemcacheIncrementRequest struct {
+	Key              []byte                              `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+	NameSpace        *string                             `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+	Delta            *uint64                             `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+	Direction        *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+	InitialValue     *uint64                             `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+	InitialFlags     *uint32                             `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+	Override         *AppOverride                        `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte                              `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset()         { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage()    {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+	if m != nil && m.Delta != nil {
+		return *m.Delta
+	}
+	return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+	if m != nil && m.InitialValue != nil {
+		return *m.InitialValue
+	}
+	return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+	if m != nil && m.InitialFlags != nil {
+		return *m.InitialFlags
+	}
+	return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheIncrementResponse struct {
+	NewValue         *uint64                                        `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+	IncrementStatus  *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+	XXX_unrecognized []byte                                         `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset()         { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage()    {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+	if m != nil && m.NewValue != nil {
+		return *m.NewValue
+	}
+	return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+	if m != nil && m.IncrementStatus != nil {
+		return *m.IncrementStatus
+	}
+	return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+	NameSpace        *string                     `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+	Item             []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+	Override         *AppOverride                `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte                      `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset()         { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage()    {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+	Item             []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+	XXX_unrecognized []byte                       `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset()         { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage()    {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+type MemcacheFlushRequest struct {
+	Override         *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset()         { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage()    {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheFlushResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset()         { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage()    {}
+
+type MemcacheStatsRequest struct {
+	Override         *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset()         { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage()    {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MergedNamespaceStats struct {
+	Hits             *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+	Misses           *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+	ByteHits         *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+	Items            *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+	Bytes            *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+	OldestItemAge    *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset()         { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage()    {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+	if m != nil && m.Hits != nil {
+		return *m.Hits
+	}
+	return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+	if m != nil && m.Misses != nil {
+		return *m.Misses
+	}
+	return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+	if m != nil && m.ByteHits != nil {
+		return *m.ByteHits
+	}
+	return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+	if m != nil && m.Items != nil {
+		return *m.Items
+	}
+	return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+	if m != nil && m.Bytes != nil {
+		return *m.Bytes
+	}
+	return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+	if m != nil && m.OldestItemAge != nil {
+		return *m.OldestItemAge
+	}
+	return 0
+}
+
+type MemcacheStatsResponse struct {
+	Stats            *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+	XXX_unrecognized []byte                `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset()         { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage()    {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+	if m != nil {
+		return m.Stats
+	}
+	return nil
+}
+
+type MemcacheGrabTailRequest struct {
+	ItemCount        *int32       `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+	NameSpace        *string      `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+	Override         *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset()         { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage()    {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+	if m != nil && m.ItemCount != nil {
+		return *m.ItemCount
+	}
+	return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+	if m != nil {
+		return m.Override
+	}
+	return nil
+}
+
+type MemcacheGrabTailResponse struct {
+	Item             []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+	XXX_unrecognized []byte                           `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset()         { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage()    {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+	if m != nil {
+		return m.Item
+	}
+	return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+	Value            []byte  `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	Flags            *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset()         { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage()    {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+	if m != nil && m.Flags != nil {
+		return *m.Flags
+	}
+	return 0
+}
+
+func init() {
+}
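
A minimal sketch, not part of the patch, of driving the generated types directly — roughly what the memcache client elsewhere in this repository does before issuing the RPC. Vendored internal packages are not importable from outside the tree, so treat the import path as illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine/internal/memcache"
)

func main() {
	req := &memcache.MemcacheSetRequest{
		Item: []*memcache.MemcacheSetRequest_Item{{
			Key:       []byte("greeting"),
			Value:     []byte("hello"),
			SetPolicy: memcache.MemcacheSetRequest_ADD.Enum(), // only store if absent
		}},
		NameSpace: proto.String("demo"),
	}
	buf, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes on the wire\n", len(buf))
}
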
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 0000000..5f0edcd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+  enum ErrorCode {
+    OK = 0;
+    UNSPECIFIED_ERROR = 1;
+    NAMESPACE_NOT_SET = 2;
+    PERMISSION_DENIED = 3;
+    INVALID_VALUE = 6;
+  }
+}
+
+message AppOverride {
+  required string app_id = 1;
+
+  optional int32 num_memcacheg_backends = 2 [deprecated=true];
+  optional bool ignore_shardlock = 3 [deprecated=true];
+  optional string memcache_pool_hint = 4 [deprecated=true];
+  optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+  repeated bytes key = 1;
+  optional string name_space = 2 [default = ""];
+  optional bool for_cas = 4;
+  optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+  repeated group Item = 1 {
+    required bytes key = 2;
+    required bytes value = 3;
+    optional fixed32 flags = 4;
+    optional fixed64 cas_id = 5;
+    optional int32 expires_in_seconds = 6;
+  }
+}
+
+message MemcacheSetRequest {
+  enum SetPolicy {
+    SET = 1;
+    ADD = 2;
+    REPLACE = 3;
+    CAS = 4;
+  }
+  repeated group Item = 1 {
+    required bytes key = 2;
+    required bytes value = 3;
+
+    optional fixed32 flags = 4;
+    optional SetPolicy set_policy = 5 [default = SET];
+    optional fixed32 expiration_time = 6 [default = 0];
+
+    optional fixed64 cas_id = 8;
+    optional bool for_cas = 9;
+  }
+  optional string name_space = 7 [default = ""];
+  optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+  enum SetStatusCode {
+    STORED = 1;
+    NOT_STORED = 2;
+    ERROR = 3;
+    EXISTS = 4;
+  }
+  repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+  repeated group Item = 1 {
+    required bytes key = 2;
+    optional fixed32 delete_time = 3 [default = 0];
+  }
+  optional string name_space = 4 [default = ""];
+  optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+  enum DeleteStatusCode {
+    DELETED = 1;
+    NOT_FOUND = 2;
+  }
+  repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+  enum Direction {
+    INCREMENT = 1;
+    DECREMENT = 2;
+  }
+  required bytes key = 1;
+  optional string name_space = 4 [default = ""];
+
+  optional uint64 delta = 2 [default = 1];
+  optional Direction direction = 3 [default = INCREMENT];
+
+  optional uint64 initial_value = 5;
+  optional fixed32 initial_flags = 6;
+  optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+  enum IncrementStatusCode {
+    OK = 1;
+    NOT_CHANGED = 2;
+    ERROR = 3;
+  }
+
+  optional uint64 new_value = 1;
+  optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+  optional string name_space = 1 [default = ""];
+  repeated MemcacheIncrementRequest item = 2;
+  optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+  repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+  optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+  optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+  required uint64 hits = 1;
+  required uint64 misses = 2;
+  required uint64 byte_hits = 3;
+
+  required uint64 items = 4;
+  required uint64 bytes = 5;
+
+  required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+  optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+  required int32 item_count = 1;
+  optional string name_space = 2 [default = ""];
+  optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+  repeated group Item = 1 {
+    required bytes value = 2;
+    optional fixed32 flags = 3;
+  }
+}
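
A minimal sketch, not part of the patch, of the proto2 default semantics declared above: optional fields carrying [default = ...] report that default from the generated getters whenever the field is unset, which is why the getters in the generated Go file fall back to the Default_* constants. Same import-path caveat as before:

package main

import (
	"fmt"

	"google.golang.org/appengine/internal/memcache"
)

func main() {
	req := &memcache.MemcacheIncrementRequest{Key: []byte("counter")}
	// delta has [default = 1] and direction has [default = INCREMENT],
	// so the getters return those values even though neither was set.
	fmt.Println(req.GetDelta())     // 1
	fmt.Println(req.GetDirection()) // INCREMENT
}
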
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
index 7d8d11d..3b280e4 100644
--- a/vendor/google.golang.org/appengine/internal/search/search.pb.go
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -65,6 +65,12 @@
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
 type Scope_Type int32
 
 const (
@@ -112,6 +118,7 @@
 	*x = Scope_Type(value)
 	return nil
 }
+func (Scope_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
 
 type Entry_Permission int32
 
@@ -148,6 +155,7 @@
 	*x = Entry_Permission(value)
 	return nil
 }
+func (Entry_Permission) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
 
 type FieldValue_ContentType int32
 
@@ -193,6 +201,7 @@
 	*x = FieldValue_ContentType(value)
 	return nil
 }
+func (FieldValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
 
 type FacetValue_ContentType int32
 
@@ -226,6 +235,41 @@
 	*x = FacetValue_ContentType(value)
 	return nil
 }
+func (FacetValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
+
+type Document_OrderIdSource int32
+
+const (
+	Document_DEFAULTED Document_OrderIdSource = 0
+	Document_SUPPLIED  Document_OrderIdSource = 1
+)
+
+var Document_OrderIdSource_name = map[int32]string{
+	0: "DEFAULTED",
+	1: "SUPPLIED",
+}
+var Document_OrderIdSource_value = map[string]int32{
+	"DEFAULTED": 0,
+	"SUPPLIED":  1,
+}
+
+func (x Document_OrderIdSource) Enum() *Document_OrderIdSource {
+	p := new(Document_OrderIdSource)
+	*p = x
+	return p
+}
+func (x Document_OrderIdSource) String() string {
+	return proto.EnumName(Document_OrderIdSource_name, int32(x))
+}
+func (x *Document_OrderIdSource) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Document_OrderIdSource_value, data, "Document_OrderIdSource")
+	if err != nil {
+		return err
+	}
+	*x = Document_OrderIdSource(value)
+	return nil
+}
+func (Document_OrderIdSource) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
 
 type Document_Storage int32
 
@@ -256,6 +300,7 @@
 	*x = Document_Storage(value)
 	return nil
 }
+func (Document_Storage) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 1} }
 
 type SearchServiceError_ErrorCode int32
 
@@ -304,6 +349,9 @@
 	*x = SearchServiceError_ErrorCode(value)
 	return nil
 }
+func (SearchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{11, 0}
+}
 
 type IndexSpec_Consistency int32
 
@@ -337,6 +385,7 @@
 	*x = IndexSpec_Consistency(value)
 	return nil
 }
+func (IndexSpec_Consistency) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
 
 type IndexSpec_Source int32
 
@@ -373,6 +422,7 @@
 	*x = IndexSpec_Source(value)
 	return nil
 }
+func (IndexSpec_Source) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 1} }
 
 type IndexSpec_Mode int32
 
@@ -406,6 +456,7 @@
 	*x = IndexSpec_Mode(value)
 	return nil
 }
+func (IndexSpec_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 2} }
 
 type IndexDocumentParams_Freshness int32
 
@@ -439,6 +490,9 @@
 	*x = IndexDocumentParams_Freshness(value)
 	return nil
 }
+func (IndexDocumentParams_Freshness) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{15, 0}
+}
 
 type ScorerSpec_Scorer int32
 
@@ -472,6 +526,7 @@
 	*x = ScorerSpec_Scorer(value)
 	return nil
 }
+func (ScorerSpec_Scorer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{31, 0} }
 
 type SearchParams_CursorType int32
 
@@ -508,6 +563,7 @@
 	*x = SearchParams_CursorType(value)
 	return nil
 }
+func (SearchParams_CursorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} }
 
 type SearchParams_ParsingMode int32
 
@@ -541,6 +597,7 @@
 	*x = SearchParams_ParsingMode(value)
 	return nil
 }
+func (SearchParams_ParsingMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 1} }
 
 type Scope struct {
 	Type             *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
@@ -548,9 +605,10 @@
 	XXX_unrecognized []byte      `json:"-"`
 }
 
-func (m *Scope) Reset()         { *m = Scope{} }
-func (m *Scope) String() string { return proto.CompactTextString(m) }
-func (*Scope) ProtoMessage()    {}
+func (m *Scope) Reset()                    { *m = Scope{} }
+func (m *Scope) String() string            { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage()               {}
+func (*Scope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
 
 func (m *Scope) GetType() Scope_Type {
 	if m != nil && m.Type != nil {
@@ -569,13 +627,14 @@
 type Entry struct {
 	Scope            *Scope            `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
 	Permission       *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
-	DisplayName      *string           `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"`
+	DisplayName      *string           `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
 	XXX_unrecognized []byte            `json:"-"`
 }
 
-func (m *Entry) Reset()         { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage()    {}
+func (m *Entry) Reset()                    { *m = Entry{} }
+func (m *Entry) String() string            { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()               {}
+func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
 
 func (m *Entry) GetScope() *Scope {
 	if m != nil {
@@ -604,9 +663,10 @@
 	XXX_unrecognized []byte   `json:"-"`
 }
 
-func (m *AccessControlList) Reset()         { *m = AccessControlList{} }
-func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
-func (*AccessControlList) ProtoMessage()    {}
+func (m *AccessControlList) Reset()                    { *m = AccessControlList{} }
+func (m *AccessControlList) String() string            { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage()               {}
+func (*AccessControlList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
 
 func (m *AccessControlList) GetOwner() string {
 	if m != nil && m.Owner != nil {
@@ -625,14 +685,15 @@
 type FieldValue struct {
 	Type             *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
 	Language         *string                 `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
-	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
-	Geo              *FieldValue_Geo         `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"`
+	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	Geo              *FieldValue_Geo         `protobuf:"group,4,opt,name=Geo,json=geo" json:"geo,omitempty"`
 	XXX_unrecognized []byte                  `json:"-"`
 }
 
-func (m *FieldValue) Reset()         { *m = FieldValue{} }
-func (m *FieldValue) String() string { return proto.CompactTextString(m) }
-func (*FieldValue) ProtoMessage()    {}
+func (m *FieldValue) Reset()                    { *m = FieldValue{} }
+func (m *FieldValue) String() string            { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage()               {}
+func (*FieldValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 
 const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
 const Default_FieldValue_Language string = "en"
@@ -671,9 +732,10 @@
 	XXX_unrecognized []byte   `json:"-"`
 }
 
-func (m *FieldValue_Geo) Reset()         { *m = FieldValue_Geo{} }
-func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
-func (*FieldValue_Geo) ProtoMessage()    {}
+func (m *FieldValue_Geo) Reset()                    { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string            { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage()               {}
+func (*FieldValue_Geo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
 
 func (m *FieldValue_Geo) GetLat() float64 {
 	if m != nil && m.Lat != nil {
@@ -695,9 +757,10 @@
 	XXX_unrecognized []byte      `json:"-"`
 }
 
-func (m *Field) Reset()         { *m = Field{} }
-func (m *Field) String() string { return proto.CompactTextString(m) }
-func (*Field) ProtoMessage()    {}
+func (m *Field) Reset()                    { *m = Field{} }
+func (m *Field) String() string            { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage()               {}
+func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
 
 func (m *Field) GetName() string {
 	if m != nil && m.Name != nil {
@@ -719,9 +782,10 @@
 	XXX_unrecognized []byte                   `json:"-"`
 }
 
-func (m *FieldTypes) Reset()         { *m = FieldTypes{} }
-func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
-func (*FieldTypes) ProtoMessage()    {}
+func (m *FieldTypes) Reset()                    { *m = FieldTypes{} }
+func (m *FieldTypes) String() string            { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage()               {}
+func (*FieldTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
 
 func (m *FieldTypes) GetName() string {
 	if m != nil && m.Name != nil {
@@ -738,16 +802,17 @@
 }
 
 type IndexShardSettings struct {
-	PrevNumShards            []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"`
-	NumShards                *int32  `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"`
-	PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" json:"prev_num_shards_search_false,omitempty"`
-	LocalReplica             *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"`
+	PrevNumShards            []int32 `protobuf:"varint,1,rep,name=prev_num_shards,json=prevNumShards" json:"prev_num_shards,omitempty"`
+	NumShards                *int32  `protobuf:"varint,2,req,name=num_shards,json=numShards,def=1" json:"num_shards,omitempty"`
+	PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false,json=prevNumShardsSearchFalse" json:"prev_num_shards_search_false,omitempty"`
+	LocalReplica             *string `protobuf:"bytes,4,opt,name=local_replica,json=localReplica,def=" json:"local_replica,omitempty"`
 	XXX_unrecognized         []byte  `json:"-"`
 }
 
-func (m *IndexShardSettings) Reset()         { *m = IndexShardSettings{} }
-func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }
-func (*IndexShardSettings) ProtoMessage()    {}
+func (m *IndexShardSettings) Reset()                    { *m = IndexShardSettings{} }
+func (m *IndexShardSettings) String() string            { return proto.CompactTextString(m) }
+func (*IndexShardSettings) ProtoMessage()               {}
+func (*IndexShardSettings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
 
 const Default_IndexShardSettings_NumShards int32 = 1
 
@@ -781,13 +846,14 @@
 
 type FacetValue struct {
 	Type             *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
-	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
 	XXX_unrecognized []byte                  `json:"-"`
 }
 
-func (m *FacetValue) Reset()         { *m = FacetValue{} }
-func (m *FacetValue) String() string { return proto.CompactTextString(m) }
-func (*FacetValue) ProtoMessage()    {}
+func (m *FacetValue) Reset()                    { *m = FacetValue{} }
+func (m *FacetValue) String() string            { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage()               {}
+func (*FacetValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
 
 const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
 
@@ -811,9 +877,10 @@
 	XXX_unrecognized []byte      `json:"-"`
 }
 
-func (m *Facet) Reset()         { *m = Facet{} }
-func (m *Facet) String() string { return proto.CompactTextString(m) }
-func (*Facet) ProtoMessage()    {}
+func (m *Facet) Reset()                    { *m = Facet{} }
+func (m *Facet) String() string            { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage()               {}
+func (*Facet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
 
 func (m *Facet) GetName() string {
 	if m != nil && m.Name != nil {
@@ -831,13 +898,14 @@
 
 type DocumentMetadata struct {
 	Version            *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
-	CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"`
+	CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version,json=committedStVersion" json:"committed_st_version,omitempty"`
 	XXX_unrecognized   []byte `json:"-"`
 }
 
-func (m *DocumentMetadata) Reset()         { *m = DocumentMetadata{} }
-func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }
-func (*DocumentMetadata) ProtoMessage()    {}
+func (m *DocumentMetadata) Reset()                    { *m = DocumentMetadata{} }
+func (m *DocumentMetadata) String() string            { return proto.CompactTextString(m) }
+func (*DocumentMetadata) ProtoMessage()               {}
+func (*DocumentMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
 
 func (m *DocumentMetadata) GetVersion() int64 {
 	if m != nil && m.Version != nil {
@@ -854,20 +922,23 @@
 }
 
 type Document struct {
-	Id               *string           `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Language         *string           `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
-	Field            []*Field          `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
-	OrderId          *int32            `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"`
-	Storage          *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
-	Facet            []*Facet          `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
-	XXX_unrecognized []byte            `json:"-"`
+	Id               *string                 `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	Language         *string                 `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+	Field            []*Field                `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+	OrderId          *int32                  `protobuf:"varint,4,opt,name=order_id,json=orderId" json:"order_id,omitempty"`
+	OrderIdSource    *Document_OrderIdSource `protobuf:"varint,6,opt,name=order_id_source,json=orderIdSource,enum=search.Document_OrderIdSource,def=1" json:"order_id_source,omitempty"`
+	Storage          *Document_Storage       `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+	Facet            []*Facet                `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+	XXX_unrecognized []byte                  `json:"-"`
 }
 
-func (m *Document) Reset()         { *m = Document{} }
-func (m *Document) String() string { return proto.CompactTextString(m) }
-func (*Document) ProtoMessage()    {}
+func (m *Document) Reset()                    { *m = Document{} }
+func (m *Document) String() string            { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage()               {}
+func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
 
 const Default_Document_Language string = "en"
+const Default_Document_OrderIdSource Document_OrderIdSource = Document_SUPPLIED
 const Default_Document_Storage Document_Storage = Document_DISK
 
 func (m *Document) GetId() string {
@@ -898,6 +969,13 @@
 	return 0
 }
 
+func (m *Document) GetOrderIdSource() Document_OrderIdSource {
+	if m != nil && m.OrderIdSource != nil {
+		return *m.OrderIdSource
+	}
+	return Default_Document_OrderIdSource
+}
+
 func (m *Document) GetStorage() Document_Storage {
 	if m != nil && m.Storage != nil {
 		return *m.Storage
@@ -916,20 +994,22 @@
 	XXX_unrecognized []byte `json:"-"`
 }
 
-func (m *SearchServiceError) Reset()         { *m = SearchServiceError{} }
-func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
-func (*SearchServiceError) ProtoMessage()    {}
+func (m *SearchServiceError) Reset()                    { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string            { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage()               {}
+func (*SearchServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
 
 type RequestStatus struct {
 	Code             *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
-	ErrorDetail      *string                       `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
-	CanonicalCode    *int32                        `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"`
+	ErrorDetail      *string                       `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"`
+	CanonicalCode    *int32                        `protobuf:"varint,3,opt,name=canonical_code,json=canonicalCode" json:"canonical_code,omitempty"`
 	XXX_unrecognized []byte                        `json:"-"`
 }
 
-func (m *RequestStatus) Reset()         { *m = RequestStatus{} }
-func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
-func (*RequestStatus) ProtoMessage()    {}
+func (m *RequestStatus) Reset()                    { *m = RequestStatus{} }
+func (m *RequestStatus) String() string            { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage()               {}
+func (*RequestStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
 
 func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
 	if m != nil && m.Code != nil {
@@ -962,9 +1042,10 @@
 	XXX_unrecognized []byte                 `json:"-"`
 }
 
-func (m *IndexSpec) Reset()         { *m = IndexSpec{} }
-func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
-func (*IndexSpec) ProtoMessage()    {}
+func (m *IndexSpec) Reset()                    { *m = IndexSpec{} }
+func (m *IndexSpec) String() string            { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage()               {}
+func (*IndexSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
 
 const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
 const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
@@ -1013,15 +1094,16 @@
 }
 
 type IndexMetadata struct {
-	IndexSpec        *IndexSpec             `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+	IndexSpec        *IndexSpec             `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
 	Field            []*FieldTypes          `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
 	Storage          *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
 	XXX_unrecognized []byte                 `json:"-"`
 }
 
-func (m *IndexMetadata) Reset()         { *m = IndexMetadata{} }
-func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
-func (*IndexMetadata) ProtoMessage()    {}
+func (m *IndexMetadata) Reset()                    { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string            { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage()               {}
+func (*IndexMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
 
 func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
 	if m != nil {
@@ -1045,14 +1127,15 @@
 }
 
 type IndexMetadata_Storage struct {
-	AmountUsed       *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"`
+	AmountUsed       *int64 `protobuf:"varint,1,opt,name=amount_used,json=amountUsed" json:"amount_used,omitempty"`
 	Limit            *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }
 
-func (m *IndexMetadata_Storage) Reset()         { *m = IndexMetadata_Storage{} }
-func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
-func (*IndexMetadata_Storage) ProtoMessage()    {}
+func (m *IndexMetadata_Storage) Reset()                    { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string            { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage()               {}
+func (*IndexMetadata_Storage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14, 0} }
 
 func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
 	if m != nil && m.AmountUsed != nil {
@@ -1071,13 +1154,14 @@
 type IndexDocumentParams struct {
 	Document         []*Document                    `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
 	Freshness        *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
-	IndexSpec        *IndexSpec                     `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"`
+	IndexSpec        *IndexSpec                     `protobuf:"bytes,3,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
 	XXX_unrecognized []byte                         `json:"-"`
 }
 
-func (m *IndexDocumentParams) Reset()         { *m = IndexDocumentParams{} }
-func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
-func (*IndexDocumentParams) ProtoMessage()    {}
+func (m *IndexDocumentParams) Reset()                    { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string            { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage()               {}
+func (*IndexDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
 
 const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
 
@@ -1104,13 +1188,14 @@
 
 type IndexDocumentRequest struct {
 	Params           *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte               `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte               `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte               `json:"-"`
 }
 
-func (m *IndexDocumentRequest) Reset()         { *m = IndexDocumentRequest{} }
-func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
-func (*IndexDocumentRequest) ProtoMessage()    {}
+func (m *IndexDocumentRequest) Reset()                    { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string            { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage()               {}
+func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
 
 func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
 	if m != nil {
@@ -1128,13 +1213,14 @@
 
 type IndexDocumentResponse struct {
 	Status           []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
-	DocId            []string         `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"`
+	DocId            []string         `protobuf:"bytes,2,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
 	XXX_unrecognized []byte           `json:"-"`
 }
 
-func (m *IndexDocumentResponse) Reset()         { *m = IndexDocumentResponse{} }
-func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
-func (*IndexDocumentResponse) ProtoMessage()    {}
+func (m *IndexDocumentResponse) Reset()                    { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string            { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage()               {}
+func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
 
 func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
 	if m != nil {
@@ -1151,14 +1237,15 @@
 }
 
 type DeleteDocumentParams struct {
-	DocId            []string   `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"`
-	IndexSpec        *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"`
+	DocId            []string   `protobuf:"bytes,1,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
+	IndexSpec        *IndexSpec `protobuf:"bytes,2,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
 	XXX_unrecognized []byte     `json:"-"`
 }
 
-func (m *DeleteDocumentParams) Reset()         { *m = DeleteDocumentParams{} }
-func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
-func (*DeleteDocumentParams) ProtoMessage()    {}
+func (m *DeleteDocumentParams) Reset()                    { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string            { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage()               {}
+func (*DeleteDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
 
 func (m *DeleteDocumentParams) GetDocId() []string {
 	if m != nil {
@@ -1176,13 +1263,14 @@
 
 type DeleteDocumentRequest struct {
 	Params           *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte                `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte                `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte                `json:"-"`
 }
 
-func (m *DeleteDocumentRequest) Reset()         { *m = DeleteDocumentRequest{} }
-func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteDocumentRequest) ProtoMessage()    {}
+func (m *DeleteDocumentRequest) Reset()                    { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage()               {}
+func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
 
 func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
 	if m != nil {
@@ -1203,9 +1291,10 @@
 	XXX_unrecognized []byte           `json:"-"`
 }
 
-func (m *DeleteDocumentResponse) Reset()         { *m = DeleteDocumentResponse{} }
-func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteDocumentResponse) ProtoMessage()    {}
+func (m *DeleteDocumentResponse) Reset()                    { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string            { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage()               {}
+func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
 
 func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
 	if m != nil {
@@ -1215,17 +1304,18 @@
 }
 
 type ListDocumentsParams struct {
-	IndexSpec        *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
-	StartDocId       *string    `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"`
-	IncludeStartDoc  *bool      `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"`
+	IndexSpec        *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+	StartDocId       *string    `protobuf:"bytes,2,opt,name=start_doc_id,json=startDocId" json:"start_doc_id,omitempty"`
+	IncludeStartDoc  *bool      `protobuf:"varint,3,opt,name=include_start_doc,json=includeStartDoc,def=1" json:"include_start_doc,omitempty"`
 	Limit            *int32     `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
-	KeysOnly         *bool      `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"`
+	KeysOnly         *bool      `protobuf:"varint,5,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
 	XXX_unrecognized []byte     `json:"-"`
 }
 
-func (m *ListDocumentsParams) Reset()         { *m = ListDocumentsParams{} }
-func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
-func (*ListDocumentsParams) ProtoMessage()    {}
+func (m *ListDocumentsParams) Reset()                    { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string            { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage()               {}
+func (*ListDocumentsParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
 
 const Default_ListDocumentsParams_IncludeStartDoc bool = true
 const Default_ListDocumentsParams_Limit int32 = 100
@@ -1267,13 +1357,14 @@
 
 type ListDocumentsRequest struct {
 	Params           *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte               `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte               `protobuf:"bytes,2,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte               `json:"-"`
 }
 
-func (m *ListDocumentsRequest) Reset()         { *m = ListDocumentsRequest{} }
-func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
-func (*ListDocumentsRequest) ProtoMessage()    {}
+func (m *ListDocumentsRequest) Reset()                    { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage()               {}
+func (*ListDocumentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
 
 func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
 	if m != nil {
@@ -1295,9 +1386,10 @@
 	XXX_unrecognized []byte         `json:"-"`
 }
 
-func (m *ListDocumentsResponse) Reset()         { *m = ListDocumentsResponse{} }
-func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
-func (*ListDocumentsResponse) ProtoMessage()    {}
+func (m *ListDocumentsResponse) Reset()                    { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage()               {}
+func (*ListDocumentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
 
 func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
 	if m != nil {
@@ -1314,20 +1406,21 @@
 }
 
 type ListIndexesParams struct {
-	FetchSchema       *bool             `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"`
+	FetchSchema       *bool             `protobuf:"varint,1,opt,name=fetch_schema,json=fetchSchema" json:"fetch_schema,omitempty"`
 	Limit             *int32            `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
 	Namespace         *string           `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
-	StartIndexName    *string           `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"`
-	IncludeStartIndex *bool             `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"`
-	IndexNamePrefix   *string           `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"`
+	StartIndexName    *string           `protobuf:"bytes,4,opt,name=start_index_name,json=startIndexName" json:"start_index_name,omitempty"`
+	IncludeStartIndex *bool             `protobuf:"varint,5,opt,name=include_start_index,json=includeStartIndex,def=1" json:"include_start_index,omitempty"`
+	IndexNamePrefix   *string           `protobuf:"bytes,6,opt,name=index_name_prefix,json=indexNamePrefix" json:"index_name_prefix,omitempty"`
 	Offset            *int32            `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
 	Source            *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
 	XXX_unrecognized  []byte            `json:"-"`
 }
 
-func (m *ListIndexesParams) Reset()         { *m = ListIndexesParams{} }
-func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
-func (*ListIndexesParams) ProtoMessage()    {}
+func (m *ListIndexesParams) Reset()                    { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string            { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage()               {}
+func (*ListIndexesParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
 
 const Default_ListIndexesParams_Limit int32 = 20
 const Default_ListIndexesParams_IncludeStartIndex bool = true
@@ -1391,13 +1484,14 @@
 
 type ListIndexesRequest struct {
 	Params           *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte             `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte             `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte             `json:"-"`
 }
 
-func (m *ListIndexesRequest) Reset()         { *m = ListIndexesRequest{} }
-func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
-func (*ListIndexesRequest) ProtoMessage()    {}
+func (m *ListIndexesRequest) Reset()                    { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage()               {}
+func (*ListIndexesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
 
 func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
 	if m != nil {
@@ -1415,13 +1509,14 @@
 
 type ListIndexesResponse struct {
 	Status           *RequestStatus   `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
-	IndexMetadata    []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"`
+	IndexMetadata    []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata,json=indexMetadata" json:"index_metadata,omitempty"`
 	XXX_unrecognized []byte           `json:"-"`
 }
 
-func (m *ListIndexesResponse) Reset()         { *m = ListIndexesResponse{} }
-func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
-func (*ListIndexesResponse) ProtoMessage()    {}
+func (m *ListIndexesResponse) Reset()                    { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage()               {}
+func (*ListIndexesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
 
 func (m *ListIndexesResponse) GetStatus() *RequestStatus {
 	if m != nil {
@@ -1439,13 +1534,14 @@
 
 type DeleteSchemaParams struct {
 	Source           *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
-	IndexSpec        []*IndexSpec      `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"`
+	IndexSpec        []*IndexSpec      `protobuf:"bytes,2,rep,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
 	XXX_unrecognized []byte            `json:"-"`
 }
 
-func (m *DeleteSchemaParams) Reset()         { *m = DeleteSchemaParams{} }
-func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
-func (*DeleteSchemaParams) ProtoMessage()    {}
+func (m *DeleteSchemaParams) Reset()                    { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage()               {}
+func (*DeleteSchemaParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
 
 const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
 
@@ -1465,13 +1561,14 @@
 
 type DeleteSchemaRequest struct {
 	Params           *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte              `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte              `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte              `json:"-"`
 }
 
-func (m *DeleteSchemaRequest) Reset()         { *m = DeleteSchemaRequest{} }
-func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteSchemaRequest) ProtoMessage()    {}
+func (m *DeleteSchemaRequest) Reset()                    { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage()               {}
+func (*DeleteSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
 
 func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
 	if m != nil {
@@ -1492,9 +1589,10 @@
 	XXX_unrecognized []byte           `json:"-"`
 }
 
-func (m *DeleteSchemaResponse) Reset()         { *m = DeleteSchemaResponse{} }
-func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteSchemaResponse) ProtoMessage()    {}
+func (m *DeleteSchemaResponse) Reset()                    { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage()               {}
+func (*DeleteSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
 
 func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
 	if m != nil {
@@ -1504,16 +1602,17 @@
 }
 
 type SortSpec struct {
-	SortExpression      *string  `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"`
-	SortDescending      *bool    `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"`
-	DefaultValueText    *string  `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"`
-	DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"`
+	SortExpression      *string  `protobuf:"bytes,1,req,name=sort_expression,json=sortExpression" json:"sort_expression,omitempty"`
+	SortDescending      *bool    `protobuf:"varint,2,opt,name=sort_descending,json=sortDescending,def=1" json:"sort_descending,omitempty"`
+	DefaultValueText    *string  `protobuf:"bytes,4,opt,name=default_value_text,json=defaultValueText" json:"default_value_text,omitempty"`
+	DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric,json=defaultValueNumeric" json:"default_value_numeric,omitempty"`
 	XXX_unrecognized    []byte   `json:"-"`
 }
 
-func (m *SortSpec) Reset()         { *m = SortSpec{} }
-func (m *SortSpec) String() string { return proto.CompactTextString(m) }
-func (*SortSpec) ProtoMessage()    {}
+func (m *SortSpec) Reset()                    { *m = SortSpec{} }
+func (m *SortSpec) String() string            { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage()               {}
+func (*SortSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
 
 const Default_SortSpec_SortDescending bool = true
 
@@ -1548,13 +1647,14 @@
 type ScorerSpec struct {
 	Scorer                *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
 	Limit                 *int32             `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
-	MatchScorerParameters *string            `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"`
+	MatchScorerParameters *string            `protobuf:"bytes,9,opt,name=match_scorer_parameters,json=matchScorerParameters" json:"match_scorer_parameters,omitempty"`
 	XXX_unrecognized      []byte             `json:"-"`
 }
 
-func (m *ScorerSpec) Reset()         { *m = ScorerSpec{} }
-func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
-func (*ScorerSpec) ProtoMessage()    {}
+func (m *ScorerSpec) Reset()                    { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string            { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage()               {}
+func (*ScorerSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
 
 const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
 const Default_ScorerSpec_Limit int32 = 1000
@@ -1582,13 +1682,14 @@
 
 type FieldSpec struct {
 	Name             []string                `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
-	Expression       []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"`
+	Expression       []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression,json=expression" json:"expression,omitempty"`
 	XXX_unrecognized []byte                  `json:"-"`
 }
 
-func (m *FieldSpec) Reset()         { *m = FieldSpec{} }
-func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
-func (*FieldSpec) ProtoMessage()    {}
+func (m *FieldSpec) Reset()                    { *m = FieldSpec{} }
+func (m *FieldSpec) String() string            { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage()               {}
+func (*FieldSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
 
 func (m *FieldSpec) GetName() []string {
 	if m != nil {
@@ -1610,9 +1711,10 @@
 	XXX_unrecognized []byte  `json:"-"`
 }
 
-func (m *FieldSpec_Expression) Reset()         { *m = FieldSpec_Expression{} }
-func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
-func (*FieldSpec_Expression) ProtoMessage()    {}
+func (m *FieldSpec_Expression) Reset()                    { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string            { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage()               {}
+func (*FieldSpec_Expression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32, 0} }
 
 func (m *FieldSpec_Expression) GetName() string {
 	if m != nil && m.Name != nil {
@@ -1635,9 +1737,10 @@
 	XXX_unrecognized []byte  `json:"-"`
 }
 
-func (m *FacetRange) Reset()         { *m = FacetRange{} }
-func (m *FacetRange) String() string { return proto.CompactTextString(m) }
-func (*FacetRange) ProtoMessage()    {}
+func (m *FacetRange) Reset()                    { *m = FacetRange{} }
+func (m *FacetRange) String() string            { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage()               {}
+func (*FacetRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
 
 func (m *FacetRange) GetName() string {
 	if m != nil && m.Name != nil {
@@ -1661,15 +1764,16 @@
 }
 
 type FacetRequestParam struct {
-	ValueLimit       *int32        `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"`
+	ValueLimit       *int32        `protobuf:"varint,1,opt,name=value_limit,json=valueLimit" json:"value_limit,omitempty"`
 	Range            []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
-	ValueConstraint  []string      `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"`
+	ValueConstraint  []string      `protobuf:"bytes,3,rep,name=value_constraint,json=valueConstraint" json:"value_constraint,omitempty"`
 	XXX_unrecognized []byte        `json:"-"`
 }
 
-func (m *FacetRequestParam) Reset()         { *m = FacetRequestParam{} }
-func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
-func (*FacetRequestParam) ProtoMessage()    {}
+func (m *FacetRequestParam) Reset()                    { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string            { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage()               {}
+func (*FacetRequestParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
 
 func (m *FacetRequestParam) GetValueLimit() int32 {
 	if m != nil && m.ValueLimit != nil {
@@ -1693,13 +1797,14 @@
 }
 
 type FacetAutoDetectParam struct {
-	ValueLimit       *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"`
+	ValueLimit       *int32 `protobuf:"varint,1,opt,name=value_limit,json=valueLimit,def=10" json:"value_limit,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }
 
-func (m *FacetAutoDetectParam) Reset()         { *m = FacetAutoDetectParam{} }
-func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
-func (*FacetAutoDetectParam) ProtoMessage()    {}
+func (m *FacetAutoDetectParam) Reset()                    { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string            { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage()               {}
+func (*FacetAutoDetectParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
 
 const Default_FacetAutoDetectParam_ValueLimit int32 = 10
 
@@ -1716,9 +1821,10 @@
 	XXX_unrecognized []byte             `json:"-"`
 }
 
-func (m *FacetRequest) Reset()         { *m = FacetRequest{} }
-func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
-func (*FacetRequest) ProtoMessage()    {}
+func (m *FacetRequest) Reset()                    { *m = FacetRequest{} }
+func (m *FacetRequest) String() string            { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage()               {}
+func (*FacetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
 
 func (m *FacetRequest) GetName() string {
 	if m != nil && m.Name != nil {
@@ -1741,9 +1847,10 @@
 	XXX_unrecognized []byte                 `json:"-"`
 }
 
-func (m *FacetRefinement) Reset()         { *m = FacetRefinement{} }
-func (m *FacetRefinement) String() string { return proto.CompactTextString(m) }
-func (*FacetRefinement) ProtoMessage()    {}
+func (m *FacetRefinement) Reset()                    { *m = FacetRefinement{} }
+func (m *FacetRefinement) String() string            { return proto.CompactTextString(m) }
+func (*FacetRefinement) ProtoMessage()               {}
+func (*FacetRefinement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
 
 func (m *FacetRefinement) GetName() string {
 	if m != nil && m.Name != nil {
@@ -1772,9 +1879,10 @@
 	XXX_unrecognized []byte  `json:"-"`
 }
 
-func (m *FacetRefinement_Range) Reset()         { *m = FacetRefinement_Range{} }
-func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }
-func (*FacetRefinement_Range) ProtoMessage()    {}
+func (m *FacetRefinement_Range) Reset()                    { *m = FacetRefinement_Range{} }
+func (m *FacetRefinement_Range) String() string            { return proto.CompactTextString(m) }
+func (*FacetRefinement_Range) ProtoMessage()               {}
+func (*FacetRefinement_Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} }
 
 func (m *FacetRefinement_Range) GetStart() string {
 	if m != nil && m.Start != nil {
@@ -1791,29 +1899,30 @@
 }
 
 type SearchParams struct {
-	IndexSpec              *IndexSpec                `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+	IndexSpec              *IndexSpec                `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
 	Query                  *string                   `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
 	Cursor                 *string                   `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
 	Offset                 *int32                    `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
-	CursorType             *SearchParams_CursorType  `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+	CursorType             *SearchParams_CursorType  `protobuf:"varint,5,opt,name=cursor_type,json=cursorType,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
 	Limit                  *int32                    `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
-	MatchedCountAccuracy   *int32                    `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"`
-	SortSpec               []*SortSpec               `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"`
-	ScorerSpec             *ScorerSpec               `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"`
-	FieldSpec              *FieldSpec                `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"`
-	KeysOnly               *bool                     `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"`
-	ParsingMode            *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
-	AutoDiscoverFacetCount *int32                    `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"`
-	IncludeFacet           []*FacetRequest           `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"`
-	FacetRefinement        []*FacetRefinement        `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"`
-	FacetAutoDetectParam   *FacetAutoDetectParam     `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"`
-	FacetDepth             *int32                    `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"`
+	MatchedCountAccuracy   *int32                    `protobuf:"varint,7,opt,name=matched_count_accuracy,json=matchedCountAccuracy" json:"matched_count_accuracy,omitempty"`
+	SortSpec               []*SortSpec               `protobuf:"bytes,8,rep,name=sort_spec,json=sortSpec" json:"sort_spec,omitempty"`
+	ScorerSpec             *ScorerSpec               `protobuf:"bytes,9,opt,name=scorer_spec,json=scorerSpec" json:"scorer_spec,omitempty"`
+	FieldSpec              *FieldSpec                `protobuf:"bytes,10,opt,name=field_spec,json=fieldSpec" json:"field_spec,omitempty"`
+	KeysOnly               *bool                     `protobuf:"varint,12,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+	ParsingMode            *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,json=parsingMode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+	AutoDiscoverFacetCount *int32                    `protobuf:"varint,15,opt,name=auto_discover_facet_count,json=autoDiscoverFacetCount,def=0" json:"auto_discover_facet_count,omitempty"`
+	IncludeFacet           []*FacetRequest           `protobuf:"bytes,16,rep,name=include_facet,json=includeFacet" json:"include_facet,omitempty"`
+	FacetRefinement        []*FacetRefinement        `protobuf:"bytes,17,rep,name=facet_refinement,json=facetRefinement" json:"facet_refinement,omitempty"`
+	FacetAutoDetectParam   *FacetAutoDetectParam     `protobuf:"bytes,18,opt,name=facet_auto_detect_param,json=facetAutoDetectParam" json:"facet_auto_detect_param,omitempty"`
+	FacetDepth             *int32                    `protobuf:"varint,19,opt,name=facet_depth,json=facetDepth,def=1000" json:"facet_depth,omitempty"`
 	XXX_unrecognized       []byte                    `json:"-"`
 }
 
-func (m *SearchParams) Reset()         { *m = SearchParams{} }
-func (m *SearchParams) String() string { return proto.CompactTextString(m) }
-func (*SearchParams) ProtoMessage()    {}
+func (m *SearchParams) Reset()                    { *m = SearchParams{} }
+func (m *SearchParams) String() string            { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage()               {}
+func (*SearchParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
 
 const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
 const Default_SearchParams_Limit int32 = 20
@@ -1942,13 +2051,14 @@
 
 type SearchRequest struct {
 	Params           *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
-	AppId            []byte        `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	AppId            []byte        `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
 	XXX_unrecognized []byte        `json:"-"`
 }
 
-func (m *SearchRequest) Reset()         { *m = SearchRequest{} }
-func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
-func (*SearchRequest) ProtoMessage()    {}
+func (m *SearchRequest) Reset()                    { *m = SearchRequest{} }
+func (m *SearchRequest) String() string            { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage()               {}
+func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
 
 func (m *SearchRequest) GetParams() *SearchParams {
 	if m != nil {
@@ -1971,9 +2081,10 @@
 	XXX_unrecognized []byte           `json:"-"`
 }
 
-func (m *FacetResultValue) Reset()         { *m = FacetResultValue{} }
-func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
-func (*FacetResultValue) ProtoMessage()    {}
+func (m *FacetResultValue) Reset()                    { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string            { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage()               {}
+func (*FacetResultValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
 
 func (m *FacetResultValue) GetName() string {
 	if m != nil && m.Name != nil {
@@ -2002,9 +2113,10 @@
 	XXX_unrecognized []byte              `json:"-"`
 }
 
-func (m *FacetResult) Reset()         { *m = FacetResult{} }
-func (m *FacetResult) String() string { return proto.CompactTextString(m) }
-func (*FacetResult) ProtoMessage()    {}
+func (m *FacetResult) Reset()                    { *m = FacetResult{} }
+func (m *FacetResult) String() string            { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage()               {}
+func (*FacetResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
 
 func (m *FacetResult) GetName() string {
 	if m != nil && m.Name != nil {
@@ -2028,9 +2140,10 @@
 	XXX_unrecognized []byte    `json:"-"`
 }
 
-func (m *SearchResult) Reset()         { *m = SearchResult{} }
-func (m *SearchResult) String() string { return proto.CompactTextString(m) }
-func (*SearchResult) ProtoMessage()    {}
+func (m *SearchResult) Reset()                    { *m = SearchResult{} }
+func (m *SearchResult) String() string            { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage()               {}
+func (*SearchResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
 
 func (m *SearchResult) GetDocument() *Document {
 	if m != nil {
@@ -2061,18 +2174,19 @@
 }
 
 type SearchResponse struct {
-	Result           []*SearchResult           `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
-	MatchedCount     *int64                    `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"`
-	Status           *RequestStatus            `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
-	Cursor           *string                   `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
-	FacetResult      []*FacetResult            `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"`
-	XXX_extensions   map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized []byte                    `json:"-"`
+	Result                       []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+	MatchedCount                 *int64          `protobuf:"varint,2,req,name=matched_count,json=matchedCount" json:"matched_count,omitempty"`
+	Status                       *RequestStatus  `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+	Cursor                       *string         `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+	FacetResult                  []*FacetResult  `protobuf:"bytes,5,rep,name=facet_result,json=facetResult" json:"facet_result,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
-func (m *SearchResponse) Reset()         { *m = SearchResponse{} }
-func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
-func (*SearchResponse) ProtoMessage()    {}
+func (m *SearchResponse) Reset()                    { *m = SearchResponse{} }
+func (m *SearchResponse) String() string            { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage()               {}
+func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
 
 var extRange_SearchResponse = []proto.ExtensionRange{
 	{1000, 9999},
@@ -2081,12 +2195,6 @@
 func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_SearchResponse
 }
-func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 func (m *SearchResponse) GetResult() []*SearchResult {
 	if m != nil {
@@ -2124,4 +2232,257 @@
 }
 
 func init() {
+	proto.RegisterType((*Scope)(nil), "search.Scope")
+	proto.RegisterType((*Entry)(nil), "search.Entry")
+	proto.RegisterType((*AccessControlList)(nil), "search.AccessControlList")
+	proto.RegisterType((*FieldValue)(nil), "search.FieldValue")
+	proto.RegisterType((*FieldValue_Geo)(nil), "search.FieldValue.Geo")
+	proto.RegisterType((*Field)(nil), "search.Field")
+	proto.RegisterType((*FieldTypes)(nil), "search.FieldTypes")
+	proto.RegisterType((*IndexShardSettings)(nil), "search.IndexShardSettings")
+	proto.RegisterType((*FacetValue)(nil), "search.FacetValue")
+	proto.RegisterType((*Facet)(nil), "search.Facet")
+	proto.RegisterType((*DocumentMetadata)(nil), "search.DocumentMetadata")
+	proto.RegisterType((*Document)(nil), "search.Document")
+	proto.RegisterType((*SearchServiceError)(nil), "search.SearchServiceError")
+	proto.RegisterType((*RequestStatus)(nil), "search.RequestStatus")
+	proto.RegisterType((*IndexSpec)(nil), "search.IndexSpec")
+	proto.RegisterType((*IndexMetadata)(nil), "search.IndexMetadata")
+	proto.RegisterType((*IndexMetadata_Storage)(nil), "search.IndexMetadata.Storage")
+	proto.RegisterType((*IndexDocumentParams)(nil), "search.IndexDocumentParams")
+	proto.RegisterType((*IndexDocumentRequest)(nil), "search.IndexDocumentRequest")
+	proto.RegisterType((*IndexDocumentResponse)(nil), "search.IndexDocumentResponse")
+	proto.RegisterType((*DeleteDocumentParams)(nil), "search.DeleteDocumentParams")
+	proto.RegisterType((*DeleteDocumentRequest)(nil), "search.DeleteDocumentRequest")
+	proto.RegisterType((*DeleteDocumentResponse)(nil), "search.DeleteDocumentResponse")
+	proto.RegisterType((*ListDocumentsParams)(nil), "search.ListDocumentsParams")
+	proto.RegisterType((*ListDocumentsRequest)(nil), "search.ListDocumentsRequest")
+	proto.RegisterType((*ListDocumentsResponse)(nil), "search.ListDocumentsResponse")
+	proto.RegisterType((*ListIndexesParams)(nil), "search.ListIndexesParams")
+	proto.RegisterType((*ListIndexesRequest)(nil), "search.ListIndexesRequest")
+	proto.RegisterType((*ListIndexesResponse)(nil), "search.ListIndexesResponse")
+	proto.RegisterType((*DeleteSchemaParams)(nil), "search.DeleteSchemaParams")
+	proto.RegisterType((*DeleteSchemaRequest)(nil), "search.DeleteSchemaRequest")
+	proto.RegisterType((*DeleteSchemaResponse)(nil), "search.DeleteSchemaResponse")
+	proto.RegisterType((*SortSpec)(nil), "search.SortSpec")
+	proto.RegisterType((*ScorerSpec)(nil), "search.ScorerSpec")
+	proto.RegisterType((*FieldSpec)(nil), "search.FieldSpec")
+	proto.RegisterType((*FieldSpec_Expression)(nil), "search.FieldSpec.Expression")
+	proto.RegisterType((*FacetRange)(nil), "search.FacetRange")
+	proto.RegisterType((*FacetRequestParam)(nil), "search.FacetRequestParam")
+	proto.RegisterType((*FacetAutoDetectParam)(nil), "search.FacetAutoDetectParam")
+	proto.RegisterType((*FacetRequest)(nil), "search.FacetRequest")
+	proto.RegisterType((*FacetRefinement)(nil), "search.FacetRefinement")
+	proto.RegisterType((*FacetRefinement_Range)(nil), "search.FacetRefinement.Range")
+	proto.RegisterType((*SearchParams)(nil), "search.SearchParams")
+	proto.RegisterType((*SearchRequest)(nil), "search.SearchRequest")
+	proto.RegisterType((*FacetResultValue)(nil), "search.FacetResultValue")
+	proto.RegisterType((*FacetResult)(nil), "search.FacetResult")
+	proto.RegisterType((*SearchResult)(nil), "search.SearchResult")
+	proto.RegisterType((*SearchResponse)(nil), "search.SearchResponse")
+	proto.RegisterEnum("search.Scope_Type", Scope_Type_name, Scope_Type_value)
+	proto.RegisterEnum("search.Entry_Permission", Entry_Permission_name, Entry_Permission_value)
+	proto.RegisterEnum("search.FieldValue_ContentType", FieldValue_ContentType_name, FieldValue_ContentType_value)
+	proto.RegisterEnum("search.FacetValue_ContentType", FacetValue_ContentType_name, FacetValue_ContentType_value)
+	proto.RegisterEnum("search.Document_OrderIdSource", Document_OrderIdSource_name, Document_OrderIdSource_value)
+	proto.RegisterEnum("search.Document_Storage", Document_Storage_name, Document_Storage_value)
+	proto.RegisterEnum("search.SearchServiceError_ErrorCode", SearchServiceError_ErrorCode_name, SearchServiceError_ErrorCode_value)
+	proto.RegisterEnum("search.IndexSpec_Consistency", IndexSpec_Consistency_name, IndexSpec_Consistency_value)
+	proto.RegisterEnum("search.IndexSpec_Source", IndexSpec_Source_name, IndexSpec_Source_value)
+	proto.RegisterEnum("search.IndexSpec_Mode", IndexSpec_Mode_name, IndexSpec_Mode_value)
+	proto.RegisterEnum("search.IndexDocumentParams_Freshness", IndexDocumentParams_Freshness_name, IndexDocumentParams_Freshness_value)
+	proto.RegisterEnum("search.ScorerSpec_Scorer", ScorerSpec_Scorer_name, ScorerSpec_Scorer_value)
+	proto.RegisterEnum("search.SearchParams_CursorType", SearchParams_CursorType_name, SearchParams_CursorType_value)
+	proto.RegisterEnum("search.SearchParams_ParsingMode", SearchParams_ParsingMode_name, SearchParams_ParsingMode_value)
+}
+
+func init() { proto.RegisterFile("search.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 2960 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x59, 0xcf, 0x73, 0xdb, 0xc6,
+	0xf5, 0x17, 0x40, 0x91, 0x22, 0x1f, 0x49, 0x09, 0x5a, 0xfd, 0x30, 0xed, 0xf8, 0x9b, 0x28, 0x70,
+	0x9c, 0x28, 0xf9, 0xda, 0xfa, 0xca, 0xb2, 0x27, 0xf1, 0x57, 0xcd, 0xb4, 0xa1, 0x49, 0x58, 0x66,
+	0x4d, 0x91, 0xca, 0x12, 0x74, 0xe2, 0xce, 0x34, 0x28, 0x0a, 0xac, 0x64, 0x4c, 0x49, 0x80, 0x01,
+	0x40, 0xd7, 0xba, 0x75, 0x72, 0xeb, 0xa5, 0xd3, 0x4e, 0x4f, 0x3d, 0x75, 0x32, 0xbd, 0x74, 0x7a,
+	0xed, 0xbd, 0xa7, 0xf6, 0xd6, 0x5b, 0x4f, 0xfd, 0x07, 0x3a, 0x9d, 0x76, 0xa6, 0x7f, 0x43, 0x67,
+	0xdf, 0x2e, 0x40, 0x80, 0xa2, 0xa3, 0xd8, 0x37, 0xe2, 0xed, 0xdb, 0xb7, 0x6f, 0xdf, 0xe7, 0xbd,
+	0xcf, 0xbe, 0x5d, 0x42, 0x2d, 0x62, 0x76, 0xe8, 0x3c, 0xdb, 0x9b, 0x84, 0x41, 0x1c, 0x90, 0x92,
+	0xf8, 0xd2, 0xff, 0xad, 0x40, 0x71, 0xe0, 0x04, 0x13, 0x46, 0xde, 0x85, 0xe5, 0xf8, 0x7c, 0xc2,
+	0x1a, 0xca, 0x8e, 0xb2, 0xbb, 0x7a, 0x40, 0xf6, 0xa4, 0x3a, 0x0e, 0xee, 0x99, 0xe7, 0x13, 0x46,
+	0x71, 0x9c, 0x6c, 0x42, 0xf1, 0xb9, 0x3d, 0x9a, 0xb2, 0x86, 0xba, 0xa3, 0xec, 0x56, 0xa8, 0xf8,
+	0xd0, 0x7f, 0xa7, 0xc0, 0x32, 0x57, 0x22, 0x0d, 0xd8, 0x1c, 0x0e, 0x0c, 0x6a, 0x3d, 0x78, 0x6a,
+	0xb5, 0x9a, 0xbd, 0x7e, 0xaf, 0xd3, 0x6a, 0x76, 0xad, 0x4e, 0x5b, 0x53, 0xc8, 0x3a, 0xd4, 0x93,
+	0x11, 0xe3, 0xb8, 0xd9, 0xe9, 0x6a, 0x2a, 0xb9, 0x0a, 0x5b, 0x47, 0xb4, 0x3f, 0x3c, 0xb9, 0xa0,
+	0x5d, 0x20, 0x04, 0x56, 0xd3, 0x21, 0xa1, 0xbe, 0x4c, 0x36, 0x60, 0x2d, 0x95, 0xb5, 0xfb, 0xc7,
+	0xcd, 0x4e, 0x4f, 0x2b, 0x92, 0x3a, 0x54, 0x9a, 0xdd, 0xae, 0xc5, 0x4d, 0x0f, 0xb4, 0x12, 0x79,
+	0x03, 0xae, 0xf0, 0xcf, 0xe6, 0xd0, 0x7c, 0x64, 0xf4, 0xcc, 0x4e, 0xab, 0x69, 0x1a, 0x6d, 0x39,
+	0xb8, 0xa2, 0xff, 0x49, 0x81, 0xa2, 0xe1, 0xc7, 0xe1, 0x39, 0xb9, 0x01, 0xc5, 0x88, 0xef, 0x0c,
+	0xb7, 0x5b, 0x3d, 0xa8, 0xe7, 0xb6, 0x4b, 0xc5, 0x18, 0xb9, 0x0f, 0x30, 0x61, 0xe1, 0xd8, 0x8b,
+	0x22, 0x2f, 0xf0, 0x71, 0xbf, 0xab, 0x07, 0x8d, 0x44, 0x13, 0xed, 0xec, 0x9d, 0xa4, 0xe3, 0x34,
+	0xa3, 0x4b, 0xde, 0x86, 0x9a, 0xeb, 0x45, 0x93, 0x91, 0x7d, 0x6e, 0xf9, 0xf6, 0x98, 0x35, 0x0a,
+	0x18, 0xab, 0xaa, 0x94, 0xf5, 0xec, 0x31, 0xd3, 0xef, 0x02, 0xcc, 0x26, 0x93, 0x32, 0x2c, 0x53,
+	0xa3, 0xc9, 0xc3, 0x54, 0x81, 0xe2, 0x67, 0xb4, 0x63, 0x1a, 0x9a, 0x4a, 0x34, 0xa8, 0x3d, 0x1c,
+	0x76, 0xbb, 0x56, 0xab, 0xdf, 0x33, 0x69, 0xbf, 0xab, 0x15, 0x74, 0x0a, 0xeb, 0x4d, 0xc7, 0x61,
+	0x51, 0xd4, 0x0a, 0xfc, 0x38, 0x0c, 0x46, 0x5d, 0x2f, 0x8a, 0x39, 0x22, 0xc1, 0x4f, 0x7d, 0x16,
+	0xe2, 0x5e, 0x2a, 0x54, 0x7c, 0x90, 0xf7, 0x60, 0x85, 0xf9, 0x71, 0xe8, 0xb1, 0xa8, 0xa1, 0xee,
+	0x14, 0xb2, 0x7b, 0x44, 0xcf, 0x69, 0x32, 0xaa, 0xff, 0x41, 0x05, 0x78, 0xe8, 0xb1, 0x91, 0xfb,
+	0x84, 0x23, 0x49, 0xee, 0xe7, 0xf2, 0xe0, 0xcd, 0x64, 0xd2, 0x4c, 0x63, 0x8f, 0xaf, 0xcd, 0xfc,
+	0x98, 0xc3, 0x7d, 0xb8, 0x6c, 0x1a, 0x9f, 0x9b, 0x32, 0x33, 0xde, 0x84, 0xf2, 0xc8, 0xf6, 0xcf,
+	0xa6, 0xf6, 0x99, 0x4c, 0x8e, 0x43, 0x95, 0xf9, 0x34, 0x95, 0xf1, 0xa0, 0x44, 0x71, 0xe8, 0xf9,
+	0x67, 0x96, 0x48, 0x20, 0x19, 0x14, 0x21, 0x13, 0x8b, 0xef, 0x42, 0xe1, 0x8c, 0x05, 0x8d, 0xe5,
+	0x1d, 0x65, 0x17, 0x0e, 0xb6, 0x17, 0xac, 0x7d, 0xc4, 0x02, 0xca, 0x55, 0xae, 0xbd, 0x0f, 0x85,
+	0x23, 0x16, 0x10, 0x0d, 0x0a, 0x23, 0x3b, 0x6e, 0x14, 0x77, 0xd4, 0x5d, 0x85, 0xf2, 0x9f, 0x28,
+	0xf1, 0xcf, 0x1a, 0x25, 0x29, 0xf1, 0xcf, 0xf4, 0xef, 0x43, 0x35, 0xe3, 0x32, 0x0f, 0x35, 0x77,
+	0x5a, 0x5b, 0xe2, 0xbf, 0x1e, 0x99, 0xc7, 0x5d, 0x4d, 0xe1, 0xbf, 0x9a, 0x66, 0xff, 0x58, 0x53,
+	0xf9, 0xaf, 0x76, 0xd3, 0x34, 0xb4, 0x02, 0x01, 0x28, 0xf5, 0x86, 0xc7, 0x0f, 0x0c, 0xaa, 0x2d,
+	0x93, 0x15, 0x28, 0x1c, 0x19, 0x7d, 0xad, 0xa8, 0x1b, 0x50, 0x44, 0x6f, 0x08, 0x81, 0x65, 0x44,
+	0x56, 0xd9, 0x51, 0x77, 0x2b, 0x14, 0x7f, 0x93, 0xdd, 0x59, 0x69, 0xa8, 0xbb, 0xd5, 0x59, 0x0d,
+	0xcd, 0xfc, 0x4f, 0xca, 0xc5, 0x94, 0x21, 0xe7, 0x0e, 0x45, 0x0b, 0x6d, 0x1d, 0x48, 0x18, 0x38,
+	0x76, 0x97, 0xc2, 0x20, 0x00, 0xd0, 0xff, 0xa2, 0x00, 0xe9, 0xf8, 0x2e, 0x7b, 0x31, 0x78, 0x66,
+	0x87, 0xee, 0x80, 0xc5, 0xb1, 0xe7, 0x9f, 0x45, 0xe4, 0x5d, 0x58, 0x9b, 0x84, 0xec, 0xb9, 0xe5,
+	0x4f, 0xc7, 0x56, 0xc4, 0x47, 0xa2, 0x86, 0xb2, 0x53, 0xd8, 0x2d, 0xd2, 0x3a, 0x17, 0xf7, 0xa6,
+	0x63, 0x54, 0x8f, 0xc8, 0x0e, 0x40, 0x46, 0x85, 0xef, 0xa1, 0x78, 0xa8, 0xdc, 0xa1, 0x15, 0x3f,
+	0xd5, 0xf8, 0x2e, 0x5c, 0x9f, 0xb3, 0x64, 0x09, 0xbf, 0xac, 0x53, 0x7b, 0x14, 0x71, 0x44, 0xb9,
+	0xd9, 0x46, 0xce, 0xec, 0x00, 0x15, 0x1e, 0xf2, 0x71, 0x72, 0x13, 0xea, 0xa3, 0xc0, 0xb1, 0x47,
+	0x56, 0xc8, 0x26, 0x23, 0xcf, 0xb1, 0x11, 0xe8, 0xca, 0xe1, 0x12, 0xad, 0xa1, 0x98, 0x0a, 0xa9,
+	0xfe, 0x0b, 0x05, 0xe0, 0xa1, 0xed, 0xb0, 0xf8, 0x9b, 0x33, 0x32, 0xd5, 0xc8, 0x67, 0x24, 0x07,
+	0x52, 0x66, 0xe4, 0xe5, 0x19, 0xa7, 0xdf, 0xb8, 0x90, 0x1c, 0x32, 0x11, 0x32, 0xf0, 0x23, 0xea,
+	0x7c, 0xb5, 0x57, 0x43, 0x3d, 0xf5, 0x2f, 0x41, 0xfd, 0x0b, 0xd0, 0xda, 0x81, 0x33, 0x1d, 0x33,
+	0x3f, 0x3e, 0x66, 0xb1, 0xed, 0xda, 0xb1, 0x4d, 0x1a, 0xb0, 0xf2, 0x9c, 0x85, 0x48, 0x30, 0x7c,
+	0x7f, 0x05, 0x9a, 0x7c, 0x92, 0x7d, 0xd8, 0x74, 0x82, 0xf1, 0xd8, 0x8b, 0x63, 0xe6, 0x5a, 0x51,
+	0x6c, 0x25, 0x6a, 0x2a, 0xaa, 0x91, 0x74, 0x6c, 0x10, 0x3f, 0x11, 0x23, 0xfa, 0x7f, 0x54, 0x28,
+	0x27, 0x0b, 0x90, 0x55, 0x50, 0x3d, 0x57, 0x52, 0x82, 0xea, 0xb9, 0x97, 0x56, 0xe7, 0x0d, 0x28,
+	0x9e, 0xf2, 0xe4, 0x42, 0x10, 0x33, 0x6c, 0x81, 0x19, 0x47, 0xc5, 0x18, 0xb9, 0x0a, 0xe5, 0x20,
+	0x74, 0x59, 0x68, 0x79, 0x2e, 0x62, 0x57, 0xa4, 0x2b, 0xf8, 0xdd, 0x71, 0xc9, 0x09, 0xac, 0x25,
+	0x43, 0x56, 0x14, 0x4c, 0x43, 0x87, 0x35, 0x4a, 0x79, 0xc0, 0x12, 0xd7, 0xf6, 0xfa, 0x62, 0xca,
+	0x00, 0xb5, 0x0e, 0xcb, 0x83, 0xe1, 0xc9, 0x49, 0xb7, 0x63, 0xb4, 0x69, 0x3d, 0xc8, 0x0e, 0x90,
+	0xfb, 0xb0, 0x12, 0xc5, 0x41, 0xc8, 0x1d, 0x2e, 0xe6, 0xb9, 0x37, 0xb5, 0x34, 0x10, 0xe3, 0x87,
+	0xcb, 0xed, 0xce, 0xe0, 0x31, 0x4d, 0xd4, 0x71, 0x2f, 0x3c, 0xfa, 0x8d, 0xf2, 0xdc, 0x5e, 0xb8,
+	0x90, 0x8a, 0x31, 0xfd, 0x16, 0xd4, 0x73, 0x8e, 0xf0, 0x93, 0xa4, 0x6d, 0x3c, 0x6c, 0x0e, 0xbb,
+	0xa6, 0xd1, 0xd6, 0x96, 0x48, 0x0d, 0x52, 0xcf, 0x34, 0x45, 0xdf, 0x80, 0x15, 0xb9, 0x18, 0x52,
+	0x44, 0x67, 0xf0, 0x58, 0x5b, 0xd2, 0x7f, 0xaf, 0x00, 0x11, 0xf9, 0x3d, 0x60, 0xe1, 0x73, 0xcf,
+	0x61, 0x46, 0x18, 0x06, 0xa1, 0xfe, 0x2b, 0x05, 0x2a, 0xf8, 0xab, 0x15, 0xb8, 0x8c, 0x94, 0x40,
+	0xed, 0x3f, 0xd6, 0x96, 0xf8, 0xe9, 0xd5, 0xe9, 0x3d, 0x69, 0x76, 0x3b, 0x6d, 0x8b, 0x1a, 0x9f,
+	0x0e, 0x8d, 0x81, 0xa9, 0x29, 0x5c, 0x68, 0xd2, 0x66, 0x6f, 0xd0, 0x31, 0x7a, 0xa6, 0x65, 0x50,
+	0xda, 0xa7, 0x9a, 0xca, 0xcf, 0xbe, 0x4e, 0xcf, 0x34, 0x68, 0xaf, 0xd9, 0x95, 0xb2, 0x02, 0xd9,
+	0x82, 0xf5, 0x13, 0x83, 0x1e, 0x77, 0x06, 0x83, 0x4e, 0xbf, 0x67, 0xb5, 0x8d, 0x1e, 0x77, 0x6b,
+	0x99, 0x54, 0x61, 0xc5, 0xec, 0x1c, 0x1b, 0xfd, 0xa1, 0xa9, 0x15, 0xc9, 0x35, 0xd8, 0x6e, 0xf5,
+	0x7b, 0xad, 0x21, 0xa5, 0xdc, 0x1a, 0xda, 0x6d, 0xb6, 0xcc, 0x4e, 0xbf, 0xa7, 0x95, 0xf4, 0x5f,
+	0x2b, 0x50, 0xa7, 0xec, 0xcb, 0x29, 0x8b, 0xe2, 0x41, 0x6c, 0xc7, 0xd3, 0x88, 0x97, 0x95, 0x13,
+	0xb8, 0x22, 0x97, 0x57, 0x0f, 0xde, 0x49, 0x4f, 0xc0, 0x0b, 0xfb, 0xd9, 0x4b, 0xf7, 0x42, 0x71,
+	0x06, 0x2f, 0x2b, 0xc6, 0x45, 0x96, 0xcb, 0x62, 0xdb, 0x1b, 0xc9, 0x4e, 0xa0, 0x8a, 0xb2, 0x36,
+	0x8a, 0xc8, 0x4d, 0x58, 0x75, 0x6c, 0x3f, 0xf0, 0x3d, 0x5e, 0xed, 0xb8, 0x4c, 0x01, 0xd3, 0xa5,
+	0x9e, 0x4a, 0xb9, 0x3d, 0xfd, 0xeb, 0x02, 0x54, 0x04, 0x63, 0x4d, 0x98, 0xb3, 0xb0, 0xba, 0x8e,
+	0xa1, 0xea, 0x04, 0x7e, 0xe4, 0x45, 0x31, 0xf3, 0x9d, 0x73, 0x79, 0x08, 0xff, 0x4f, 0xe2, 0x6c,
+	0x3a, 0x97, 0x53, 0x40, 0xa2, 0x74, 0x58, 0x3b, 0x31, 0xa8, 0xd5, 0xee, 0xb7, 0x86, 0xc7, 0x46,
+	0xcf, 0xa4, 0xd9, 0xf9, 0xe4, 0x3a, 0x54, 0xb8, 0xd9, 0x68, 0x62, 0x3b, 0x09, 0x1d, 0xcc, 0x04,
+	0xd9, 0x62, 0x94, 0xd9, 0x9d, 0x14, 0xe3, 0x7d, 0x28, 0xc9, 0xa4, 0x9e, 0x4b, 0xc5, 0x99, 0x07,
+	0x32, 0x9d, 0x4b, 0x03, 0xa3, 0x49, 0x5b, 0x8f, 0xa8, 0xd4, 0x27, 0xf7, 0x60, 0x79, 0xcc, 0xf7,
+	0x2f, 0x8a, 0x61, 0xfb, 0xe2, 0xbc, 0xe3, 0xc0, 0x65, 0x87, 0xe5, 0x13, 0xda, 0xe9, 0xd3, 0x8e,
+	0xf9, 0x94, 0xa2, 0xb6, 0xfe, 0xbf, 0x48, 0x4b, 0xa9, 0xdb, 0x00, 0xa5, 0xa3, 0x6e, 0xff, 0x41,
+	0xb3, 0xab, 0x2d, 0xf1, 0xae, 0x20, 0xbb, 0x3f, 0x4d, 0xd1, 0x3f, 0x84, 0x92, 0x4c, 0x61, 0x00,
+	0xb9, 0xbc, 0xb6, 0x84, 0xe9, 0xdc, 0x34, 0x9b, 0x03, 0xb3, 0x4f, 0x0d, 0xd1, 0x7e, 0xb5, 0xba,
+	0xfd, 0x61, 0xdb, 0xe2, 0x82, 0xe6, 0x91, 0xa1, 0xa9, 0xfa, 0x3b, 0xb0, 0xcc, 0x17, 0xe7, 0x99,
+	0x9e, 0x2c, 0xaf, 0x2d, 0x91, 0x55, 0x80, 0x07, 0xcd, 0xd6, 0x63, 0xde, 0x69, 0xf5, 0x78, 0xe6,
+	0xff, 0x43, 0x81, 0x3a, 0x7a, 0x9b, 0x72, 0xd6, 0x3e, 0x80, 0xc7, 0x05, 0x56, 0x34, 0x61, 0x0e,
+	0xa2, 0x55, 0x3d, 0x58, 0xbf, 0xb0, 0x31, 0x5a, 0xf1, 0x52, 0x64, 0x77, 0x13, 0x72, 0x11, 0xad,
+	0x48, 0xfe, 0x64, 0xc4, 0x43, 0x30, 0x61, 0x98, 0x8f, 0x66, 0x45, 0x5f, 0xc0, 0xd6, 0x2c, 0x8f,
+	0x75, 0xe2, 0x43, 0x52, 0xf9, 0x69, 0xcd, 0x5f, 0xfb, 0x64, 0x56, 0xa0, 0x6f, 0x41, 0xd5, 0x1e,
+	0x07, 0x53, 0x3f, 0xb6, 0xa6, 0x11, 0x73, 0x25, 0xaf, 0x82, 0x10, 0x0d, 0x23, 0xe6, 0xf2, 0x8e,
+	0x69, 0xe4, 0x8d, 0xbd, 0x58, 0x72, 0xa9, 0xf8, 0xd0, 0xbf, 0x52, 0x61, 0x03, 0x17, 0x49, 0xe8,
+	0xe5, 0xc4, 0x0e, 0xed, 0x71, 0x44, 0x6e, 0x41, 0xd9, 0x95, 0x12, 0x3c, 0x38, 0xab, 0x07, 0xda,
+	0x3c, 0x11, 0xd1, 0x54, 0x83, 0x3c, 0x81, 0xca, 0x69, 0xc8, 0xa2, 0x67, 0x3e, 0x8b, 0x22, 0x99,
+	0xae, 0x37, 0x73, 0x5b, 0xc8, 0x5b, 0xdf, 0x7b, 0x98, 0x28, 0x1f, 0xd6, 0x07, 0x4f, 0x7b, 0xad,
+	0x47, 0xb4, 0xdf, 0xeb, 0x0f, 0x07, 0xdd, 0xa7, 0x0f, 0xd4, 0x86, 0x42, 0x67, 0xa6, 0xe6, 0x82,
+	0x5e, 0xb8, 0x3c, 0xe8, 0xfa, 0x5d, 0xa8, 0xa4, 0xc6, 0x39, 0xfc, 0x39, 0xf3, 0x82, 0x90, 0x3e,
+	0x7b, 0x64, 0xf4, 0x78, 0x7b, 0xf9, 0x84, 0xf3, 0x09, 0xe6, 0xd2, 0x8f, 0x61, 0x33, 0xe7, 0xa5,
+	0xe4, 0x0c, 0x72, 0x17, 0x4a, 0x13, 0x74, 0x58, 0xe2, 0xfd, 0xc6, 0x37, 0xec, 0x89, 0x4a, 0x55,
+	0xb2, 0x05, 0x25, 0x7b, 0x32, 0xe1, 0x87, 0x05, 0xc7, 0xb2, 0x46, 0x8b, 0xf6, 0x64, 0xd2, 0x71,
+	0xf5, 0x1f, 0xc2, 0xd6, 0xdc, 0x1a, 0xd1, 0x24, 0xf0, 0x23, 0x46, 0x6e, 0x43, 0x29, 0x42, 0x72,
+	0x92, 0x71, 0xde, 0x4a, 0x16, 0xc9, 0x31, 0x17, 0x95, 0x4a, 0xdc, 0xbc, 0x1b, 0x38, 0xdc, 0x3c,
+	0x4f, 0xab, 0x0a, 0x2d, 0xba, 0x81, 0xd3, 0x71, 0x75, 0x0b, 0x36, 0xdb, 0x6c, 0xc4, 0x62, 0x36,
+	0x87, 0xe3, 0x4c, 0x5d, 0xc9, 0xa8, 0xcf, 0x05, 0x56, 0xfd, 0x16, 0x81, 0x75, 0x61, 0x2b, 0xbf,
+	0x40, 0x12, 0xa4, 0x7b, 0x73, 0x41, 0xba, 0x9e, 0xe6, 0xc9, 0x02, 0x7f, 0x2e, 0x8b, 0xd2, 0x11,
+	0x6c, 0xcf, 0xaf, 0xf2, 0x5a, 0x61, 0xd2, 0xff, 0xa6, 0xc0, 0x06, 0xbf, 0x28, 0x24, 0x76, 0x22,
+	0x19, 0x8f, 0x57, 0x2f, 0xe3, 0x1d, 0xde, 0x4f, 0xd9, 0x61, 0x6c, 0xa5, 0x61, 0xe7, 0x04, 0x0a,
+	0x28, 0x6b, 0xcb, 0x60, 0xae, 0x7b, 0xbe, 0x33, 0x9a, 0xba, 0xcc, 0x4a, 0x35, 0x71, 0x5b, 0xe5,
+	0xc3, 0xe5, 0x38, 0x9c, 0x32, 0xba, 0x26, 0x87, 0x07, 0x72, 0x0e, 0xb9, 0x9a, 0xd4, 0x22, 0x32,
+	0xee, 0x61, 0xe1, 0xce, 0xfe, 0xbe, 0x2c, 0x48, 0xf2, 0x06, 0x54, 0x7e, 0xc2, 0xce, 0x23, 0x2b,
+	0xf0, 0x47, 0xe7, 0xc8, 0xbb, 0x65, 0x5a, 0xe6, 0x82, 0xbe, 0x3f, 0x3a, 0xe7, 0x89, 0x9a, 0xdb,
+	0xd4, 0xa5, 0x89, 0xba, 0x20, 0x04, 0x0b, 0x20, 0x50, 0xb3, 0x10, 0xc4, 0xb0, 0x35, 0xb7, 0xc6,
+	0x02, 0x04, 0xd4, 0xcb, 0x13, 0x35, 0xcb, 0x20, 0xea, 0x65, 0x0c, 0xa2, 0xff, 0x55, 0x85, 0x75,
+	0xbe, 0x2c, 0x42, 0xc0, 0x12, 0xb4, 0xde, 0x86, 0xda, 0x29, 0x8b, 0x9d, 0x67, 0x56, 0xe4, 0x3c,
+	0x63, 0x63, 0x1b, 0x59, 0xad, 0x4c, 0xab, 0x28, 0x1b, 0xa0, 0x88, 0x34, 0xb2, 0xb4, 0x56, 0x3c,
+	0x54, 0x0f, 0xd2, 0x48, 0x7e, 0xf3, 0xb1, 0xb7, 0x0b, 0x9a, 0x00, 0x4b, 0xa4, 0x03, 0x9e, 0xc1,
+	0xd8, 0x99, 0xd3, 0x55, 0x94, 0xa3, 0x23, 0xfc, 0xd2, 0x4a, 0xee, 0xc1, 0x46, 0x1e, 0x5e, 0x9c,
+	0x21, 0xb0, 0x91, 0x00, 0xaf, 0x67, 0x01, 0xc6, 0x99, 0xe4, 0x03, 0x9e, 0x14, 0x89, 0x65, 0x6b,
+	0x12, 0xb2, 0x53, 0xef, 0x05, 0x9e, 0x87, 0x15, 0x9e, 0x0e, 0xd2, 0xf6, 0x09, 0x8a, 0xc9, 0x36,
+	0x94, 0x82, 0xd3, 0xd3, 0x88, 0xc5, 0x8d, 0x15, 0x3c, 0x81, 0xe5, 0x57, 0xe6, 0x00, 0x2e, 0xbf,
+	0xda, 0x01, 0xac, 0x7f, 0x01, 0x24, 0x13, 0xcd, 0x24, 0x4d, 0xee, 0xcc, 0xa5, 0xc9, 0xd5, 0x6c,
+	0x9a, 0xe4, 0x22, 0x7f, 0x59, 0x9d, 0x7e, 0x25, 0xcb, 0x2b, 0x5d, 0xe0, 0xf5, 0x72, 0xe4, 0x63,
+	0x58, 0x15, 0x41, 0x1a, 0xcb, 0x23, 0x4e, 0x66, 0xca, 0xd6, 0xc2, 0xf3, 0x8f, 0xd6, 0xbd, 0xec,
+	0xa7, 0xfe, 0x33, 0x05, 0x88, 0x60, 0x0b, 0x91, 0x0b, 0x32, 0x69, 0x66, 0x51, 0x53, 0x5e, 0xb1,
+	0x6d, 0x99, 0x67, 0xc5, 0xc2, 0xa5, 0xac, 0xf8, 0x23, 0xd8, 0xc8, 0x7a, 0x90, 0x04, 0xfa, 0x60,
+	0x2e, 0xd0, 0xd7, 0xf2, 0x9c, 0x98, 0x75, 0xf7, 0xb2, 0x48, 0x1b, 0x09, 0xb1, 0x27, 0x2b, 0xbc,
+	0x1e, 0x1f, 0xfe, 0x59, 0x81, 0xf2, 0x20, 0x08, 0x63, 0xa4, 0xb4, 0xf7, 0x60, 0x2d, 0x0a, 0xc2,
+	0xd8, 0x62, 0x2f, 0x26, 0x21, 0x8b, 0xe4, 0x3d, 0x4c, 0xc5, 0xd4, 0x0f, 0xc2, 0xd8, 0x48, 0xa5,
+	0xe4, 0xb6, 0x54, 0x74, 0x59, 0xe4, 0x30, 0xdf, 0xf5, 0xfc, 0x33, 0x2c, 0xb3, 0x24, 0xed, 0x51,
+	0xbd, 0x9d, 0x8e, 0x91, 0x5b, 0x40, 0x5c, 0x76, 0x6a, 0x4f, 0x47, 0xb1, 0xb8, 0x7b, 0x5a, 0x31,
+	0x7b, 0x11, 0xcb, 0xaa, 0xd2, 0xe4, 0x08, 0x5e, 0x0e, 0x4d, 0xf6, 0x82, 0x07, 0x69, 0x2b, 0xaf,
+	0xed, 0x4f, 0xc7, 0x2c, 0xf4, 0x1c, 0xac, 0x2c, 0x85, 0x6e, 0x64, 0x27, 0xf4, 0xc4, 0x90, 0xfe,
+	0x77, 0x05, 0x60, 0xe0, 0x04, 0x21, 0x0b, 0x71, 0x23, 0xdf, 0x83, 0x52, 0x84, 0x5f, 0x12, 0xea,
+	0xab, 0x99, 0x27, 0x2d, 0xa9, 0x23, 0x7f, 0x1e, 0xd6, 0x8e, 0x9b, 0x66, 0xeb, 0x91, 0x35, 0x68,
+	0xf5, 0xa9, 0x41, 0xa9, 0x9c, 0x46, 0xae, 0xe5, 0xd9, 0x63, 0xf9, 0xce, 0xfe, 0x8c, 0x89, 0x3f,
+	0x84, 0x2b, 0x63, 0x5b, 0x90, 0x0f, 0xd7, 0xb5, 0x10, 0x27, 0x16, 0xb3, 0x30, 0x6a, 0x54, 0x70,
+	0x4b, 0x5b, 0x38, 0x2c, 0xec, 0x9f, 0xa4, 0x83, 0xd8, 0x99, 0x26, 0xd6, 0xb7, 0xa9, 0xc1, 0x57,
+	0xec, 0xf4, 0x8e, 0xac, 0xec, 0xfa, 0xa2, 0xa3, 0xcd, 0x49, 0x54, 0xfd, 0xb7, 0x0a, 0x54, 0xb0,
+	0x37, 0x9c, 0xbb, 0x17, 0x14, 0xd2, 0x7b, 0xc1, 0xc7, 0x00, 0x19, 0xc8, 0x78, 0x7e, 0xc2, 0xec,
+	0xb8, 0x4d, 0xa7, 0xee, 0xcd, 0x00, 0xa4, 0x19, 0xfd, 0x6b, 0x9f, 0x00, 0x64, 0xa0, 0x4d, 0xec,
+	0x17, 0x32, 0xf7, 0x8e, 0x37, 0x73, 0xf6, 0x97, 0x71, 0x24, 0x23, 0xd1, 0x1f, 0xc9, 0x27, 0x0a,
+	0x6a, 0xfb, 0x67, 0x2c, 0xe3, 0xa1, 0x92, 0x5a, 0xd8, 0x84, 0x22, 0x72, 0x64, 0xf2, 0x50, 0x8a,
+	0x1f, 0x44, 0x83, 0x02, 0xf3, 0x5d, 0xc9, 0xc1, 0xfc, 0xa7, 0xfe, 0x73, 0x05, 0xd6, 0x85, 0x29,
+	0x91, 0xad, 0x18, 0x3e, 0xde, 0xc3, 0x8a, 0x4c, 0x10, 0x98, 0x28, 0x48, 0x86, 0x80, 0xa2, 0x2e,
+	0x42, 0xb2, 0x0b, 0xc5, 0x90, 0xaf, 0x7d, 0xa1, 0xa5, 0x4e, 0xbd, 0xa2, 0x42, 0x81, 0xbc, 0x0f,
+	0x9a, 0x30, 0xc5, 0x2f, 0x42, 0x71, 0x68, 0x7b, 0x7e, 0x8c, 0x97, 0xfc, 0x0a, 0x5d, 0x43, 0x79,
+	0x2b, 0x15, 0xeb, 0xdf, 0x81, 0x4d, 0x9c, 0xdf, 0x9c, 0xc6, 0x41, 0x9b, 0xc5, 0xcc, 0x91, 0xde,
+	0xdc, 0x58, 0xe0, 0xcd, 0xa1, 0x7a, 0x67, 0x3f, 0xeb, 0x91, 0x3e, 0x84, 0x5a, 0x76, 0x1f, 0x0b,
+	0xaf, 0x73, 0x33, 0xda, 0x55, 0xb1, 0xbb, 0xbf, 0x9a, 0x77, 0x3b, 0x13, 0x81, 0x84, 0x0c, 0xf4,
+	0xaf, 0x15, 0x58, 0x93, 0xa3, 0xa7, 0x9e, 0xcf, 0xb0, 0xc9, 0x5e, 0x64, 0x7a, 0xe1, 0xc3, 0x34,
+	0xb9, 0x9b, 0x84, 0x69, 0xee, 0x36, 0x31, 0x67, 0x71, 0x2f, 0x1b, 0xb1, 0x6b, 0xff, 0x07, 0x45,
+	0x81, 0x6b, 0x8a, 0xa1, 0xb2, 0x00, 0x43, 0x75, 0x86, 0xe1, 0x1f, 0x57, 0xa0, 0x26, 0x2e, 0xce,
+	0xaf, 0xdd, 0x5b, 0x6d, 0x42, 0xf1, 0xcb, 0x29, 0x0b, 0xcf, 0xb1, 0x03, 0xad, 0x50, 0xf1, 0xc1,
+	0x8f, 0x43, 0x67, 0x1a, 0x46, 0x41, 0x28, 0xa9, 0x43, 0x7e, 0x65, 0x8e, 0xc9, 0x6a, 0xee, 0x98,
+	0x7c, 0x08, 0x55, 0xa1, 0x61, 0xe1, 0x93, 0x99, 0xb8, 0xac, 0xbe, 0x95, 0xbf, 0xdb, 0xcb, 0x8b,
+	0x47, 0x0b, 0xf5, 0xc4, 0x9b, 0x59, 0xaf, 0xdf, 0x33, 0x28, 0x38, 0xa9, 0x64, 0xd6, 0x4a, 0x94,
+	0xe6, 0x5b, 0x89, 0x7b, 0xb0, 0x8d, 0xb5, 0xce, 0x5c, 0xcb, 0xc1, 0x3b, 0x96, 0xed, 0x38, 0xd3,
+	0xd0, 0x76, 0xce, 0xe5, 0x81, 0xbd, 0x29, 0x47, 0x5b, 0x7c, 0xb0, 0x29, 0xc7, 0xc8, 0x6d, 0xa8,
+	0x20, 0x7b, 0x62, 0x38, 0xca, 0xf9, 0x16, 0x28, 0xe1, 0x62, 0x5a, 0x8e, 0x12, 0x56, 0xbe, 0x0b,
+	0x55, 0xc9, 0x34, 0x38, 0xa1, 0x82, 0xd8, 0x91, 0x8b, 0x8c, 0x46, 0x21, 0x9a, 0x31, 0xe0, 0x3e,
+	0x00, 0xde, 0x21, 0xc5, 0x1c, 0xc0, 0x39, 0xeb, 0x17, 0x28, 0x81, 0x56, 0x4e, 0x53, 0x62, 0xc9,
+	0x35, 0x98, 0xb5, 0x7c, 0x83, 0x49, 0x1e, 0x43, 0x6d, 0x62, 0x87, 0x91, 0xe7, 0x9f, 0x59, 0x78,
+	0x81, 0xaf, 0x63, 0x2c, 0x77, 0x16, 0xc6, 0xf2, 0x44, 0x28, 0xe2, 0x55, 0xbe, 0x34, 0x30, 0x69,
+	0xa7, 0x65, 0xd2, 0xea, 0x64, 0x26, 0x24, 0x1f, 0xc3, 0x55, 0x7b, 0x1a, 0x07, 0x96, 0xeb, 0x45,
+	0x4e, 0xf0, 0x9c, 0x85, 0x16, 0xbe, 0x41, 0x89, 0x08, 0x36, 0xd6, 0x30, 0xc6, 0xca, 0x3e, 0xdd,
+	0xe6, 0x3a, 0x6d, 0xa9, 0x82, 0x19, 0x8a, 0x51, 0x24, 0xff, 0x0f, 0xf5, 0xa4, 0xed, 0x12, 0xef,
+	0x5a, 0x1a, 0x46, 0x70, 0x73, 0x51, 0xf1, 0xd0, 0x9a, 0x54, 0x15, 0x2f, 0x96, 0x0f, 0x40, 0x13,
+	0x4b, 0x85, 0x69, 0xae, 0x37, 0xd6, 0x71, 0xf6, 0x95, 0x97, 0x94, 0x02, 0x5d, 0x3b, 0x9d, 0xab,
+	0xb6, 0x01, 0x5c, 0x11, 0x36, 0xc4, 0x16, 0x90, 0x17, 0xc4, 0x11, 0xd0, 0x20, 0x18, 0xe5, 0xeb,
+	0x39, 0x53, 0x73, 0xe4, 0x41, 0x37, 0x4f, 0x17, 0x51, 0xca, 0x4d, 0xa8, 0x0a, 0xa3, 0x2e, 0x9b,
+	0xc4, 0xcf, 0x1a, 0x1b, 0x99, 0x43, 0x07, 0x70, 0xa0, 0xcd, 0xe5, 0xfa, 0x01, 0xc0, 0x2c, 0x51,
+	0x49, 0x19, 0x30, 0x55, 0xb5, 0x25, 0x7c, 0xe9, 0xe8, 0xf4, 0x8e, 0xba, 0x86, 0xa6, 0x90, 0x55,
+	0x80, 0x13, 0x83, 0x5a, 0xd4, 0x18, 0x0c, 0xbb, 0xa6, 0xa6, 0xea, 0xef, 0x42, 0x35, 0x03, 0x08,
+	0xaa, 0x22, 0x24, 0xda, 0x12, 0xa9, 0xc2, 0x0a, 0x35, 0xba, 0xcd, 0xcf, 0xf1, 0x4d, 0xcf, 0x84,
+	0xba, 0x40, 0x31, 0x61, 0xac, 0x5b, 0x73, 0xbd, 0xca, 0xe6, 0x22, 0xb0, 0x2f, 0xeb, 0x52, 0xa6,
+	0xa0, 0xc9, 0x88, 0x46, 0xc9, 0x91, 0xfd, 0x32, 0xbe, 0x12, 0xf0, 0xe3, 0x4b, 0x3b, 0x15, 0x1f,
+	0xe4, 0x23, 0x80, 0x0c, 0x52, 0xe2, 0x9a, 0xff, 0x52, 0xa4, 0x32, 0xaa, 0xfa, 0xa7, 0x50, 0xcd,
+	0x2c, 0xbb, 0x70, 0xc5, 0xbd, 0x19, 0x43, 0xf2, 0x04, 0x68, 0xcc, 0x99, 0x4d, 0xdd, 0x4d, 0xde,
+	0xab, 0x7f, 0xa3, 0x24, 0xac, 0x26, 0x8d, 0xe6, 0x5f, 0x42, 0xd4, 0x4b, 0x5e, 0x42, 0x6e, 0xcf,
+	0x1d, 0xa1, 0x0b, 0x9e, 0x95, 0x33, 0x0a, 0xc8, 0xb5, 0xbc, 0x98, 0xd1, 0x3b, 0x85, 0x8a, 0x8f,
+	0x0c, 0x01, 0x16, 0xb2, 0x04, 0xa8, 0xff, 0x4b, 0x81, 0xd5, 0xd4, 0x37, 0xd1, 0x06, 0xde, 0x82,
+	0x52, 0x88, 0x7e, 0xca, 0x36, 0x70, 0x0e, 0x3d, 0xb1, 0x07, 0x2a, 0x75, 0xc8, 0x0d, 0xa8, 0xe7,
+	0x78, 0x0c, 0x61, 0x28, 0xd0, 0x5a, 0x96, 0xbe, 0x32, 0x9d, 0x65, 0xe1, 0xdb, 0xf4, 0xf0, 0x2f,
+	0x63, 0xeb, 0x0f, 0xa1, 0x96, 0x14, 0x21, 0xfa, 0x57, 0x44, 0xff, 0x36, 0x16, 0xc4, 0x9f, 0x56,
+	0x4f, 0x67, 0x1f, 0x1f, 0x94, 0xca, 0xff, 0x5c, 0xd1, 0x7e, 0xd9, 0x7b, 0x50, 0xfe, 0x81, 0xfc,
+	0xbf, 0xf6, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x12, 0xcb, 0x31, 0xc6, 0x1d, 0x00, 0x00,
 }
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
index 219f4c3..61df650 100644
--- a/vendor/google.golang.org/appengine/internal/search/search.proto
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -99,6 +99,12 @@
   optional string language = 2 [default = "en"];
   repeated Field field = 3;
   optional int32 order_id = 4;
+  optional OrderIdSource order_id_source = 6 [default = SUPPLIED];
+
+  enum OrderIdSource {
+    DEFAULTED = 0;
+    SUPPLIED = 1;
+  }
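+
+  // order_id_source records whether order_id was supplied by the caller or
+  // defaulted by the library; the Go wrapper in search/search.go (changed
+  // later in this diff) sets DEFAULTED initially and switches to SUPPLIED
+  // when an explicit document rank is given.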
 
   enum Storage {
     DISK = 0;
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 0000000..af463fb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+	URLFetchServiceError
+	URLFetchRequest
+	URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+	URLFetchServiceError_OK                       URLFetchServiceError_ErrorCode = 0
+	URLFetchServiceError_INVALID_URL              URLFetchServiceError_ErrorCode = 1
+	URLFetchServiceError_FETCH_ERROR              URLFetchServiceError_ErrorCode = 2
+	URLFetchServiceError_UNSPECIFIED_ERROR        URLFetchServiceError_ErrorCode = 3
+	URLFetchServiceError_RESPONSE_TOO_LARGE       URLFetchServiceError_ErrorCode = 4
+	URLFetchServiceError_DEADLINE_EXCEEDED        URLFetchServiceError_ErrorCode = 5
+	URLFetchServiceError_SSL_CERTIFICATE_ERROR    URLFetchServiceError_ErrorCode = 6
+	URLFetchServiceError_DNS_ERROR                URLFetchServiceError_ErrorCode = 7
+	URLFetchServiceError_CLOSED                   URLFetchServiceError_ErrorCode = 8
+	URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+	URLFetchServiceError_TOO_MANY_REDIRECTS       URLFetchServiceError_ErrorCode = 10
+	URLFetchServiceError_MALFORMED_REPLY          URLFetchServiceError_ErrorCode = 11
+	URLFetchServiceError_CONNECTION_ERROR         URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+	0:  "OK",
+	1:  "INVALID_URL",
+	2:  "FETCH_ERROR",
+	3:  "UNSPECIFIED_ERROR",
+	4:  "RESPONSE_TOO_LARGE",
+	5:  "DEADLINE_EXCEEDED",
+	6:  "SSL_CERTIFICATE_ERROR",
+	7:  "DNS_ERROR",
+	8:  "CLOSED",
+	9:  "INTERNAL_TRANSIENT_ERROR",
+	10: "TOO_MANY_REDIRECTS",
+	11: "MALFORMED_REPLY",
+	12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+	"OK":                       0,
+	"INVALID_URL":              1,
+	"FETCH_ERROR":              2,
+	"UNSPECIFIED_ERROR":        3,
+	"RESPONSE_TOO_LARGE":       4,
+	"DEADLINE_EXCEEDED":        5,
+	"SSL_CERTIFICATE_ERROR":    6,
+	"DNS_ERROR":                7,
+	"CLOSED":                   8,
+	"INTERNAL_TRANSIENT_ERROR": 9,
+	"TOO_MANY_REDIRECTS":       10,
+	"MALFORMED_REPLY":          11,
+	"CONNECTION_ERROR":         12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+	p := new(URLFetchServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+	return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = URLFetchServiceError_ErrorCode(value)
+	return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+	URLFetchRequest_GET    URLFetchRequest_RequestMethod = 1
+	URLFetchRequest_POST   URLFetchRequest_RequestMethod = 2
+	URLFetchRequest_HEAD   URLFetchRequest_RequestMethod = 3
+	URLFetchRequest_PUT    URLFetchRequest_RequestMethod = 4
+	URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+	URLFetchRequest_PATCH  URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+	1: "GET",
+	2: "POST",
+	3: "HEAD",
+	4: "PUT",
+	5: "DELETE",
+	6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+	"GET":    1,
+	"POST":   2,
+	"HEAD":   3,
+	"PUT":    4,
+	"DELETE": 5,
+	"PATCH":  6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+	p := new(URLFetchRequest_RequestMethod)
+	*p = x
+	return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+	return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+	if err != nil {
+		return err
+	}
+	*x = URLFetchRequest_RequestMethod(value)
+	return nil
+}
+
+type URLFetchServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset()         { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage()    {}
+
+type URLFetchRequest struct {
+	Method                        *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+	Url                           *string                        `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+	Header                        []*URLFetchRequest_Header      `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+	Payload                       []byte                         `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+	FollowRedirects               *bool                          `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+	Deadline                      *float64                       `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+	MustValidateServerCertificate *bool                          `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+	XXX_unrecognized              []byte                         `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset()         { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage()    {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+	if m != nil && m.Url != nil {
+		return *m.Url
+	}
+	return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+	if m != nil && m.FollowRedirects != nil {
+		return *m.FollowRedirects
+	}
+	return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+	if m != nil && m.Deadline != nil {
+		return *m.Deadline
+	}
+	return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+	if m != nil && m.MustValidateServerCertificate != nil {
+		return *m.MustValidateServerCertificate
+	}
+	return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+	Key              *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+	Value            *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset()         { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage()    {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type URLFetchResponse struct {
+	Content               []byte                     `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+	StatusCode            *int32                     `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+	Header                []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+	ContentWasTruncated   *bool                      `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+	ExternalBytesSent     *int64                     `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+	ExternalBytesReceived *int64                     `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+	FinalUrl              *string                    `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+	ApiCpuMilliseconds    *int64                     `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+	ApiBytesSent          *int64                     `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+	ApiBytesReceived      *int64                     `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+	XXX_unrecognized      []byte                     `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset()         { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage()    {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+	if m != nil {
+		return m.Content
+	}
+	return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+	if m != nil && m.StatusCode != nil {
+		return *m.StatusCode
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+	if m != nil && m.ContentWasTruncated != nil {
+		return *m.ContentWasTruncated
+	}
+	return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+	if m != nil && m.ExternalBytesSent != nil {
+		return *m.ExternalBytesSent
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+	if m != nil && m.ExternalBytesReceived != nil {
+		return *m.ExternalBytesReceived
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+	if m != nil && m.FinalUrl != nil {
+		return *m.FinalUrl
+	}
+	return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+	if m != nil && m.ApiCpuMilliseconds != nil {
+		return *m.ApiCpuMilliseconds
+	}
+	return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+	if m != nil && m.ApiBytesSent != nil {
+		return *m.ApiBytesSent
+	}
+	return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+	if m != nil && m.ApiBytesReceived != nil {
+		return *m.ApiBytesReceived
+	}
+	return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+	Key              *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+	Value            *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset()         { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage()    {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 0000000..f695edf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+  enum ErrorCode {
+    OK = 0;
+    INVALID_URL = 1;
+    FETCH_ERROR = 2;
+    UNSPECIFIED_ERROR = 3;
+    RESPONSE_TOO_LARGE = 4;
+    DEADLINE_EXCEEDED = 5;
+    SSL_CERTIFICATE_ERROR = 6;
+    DNS_ERROR = 7;
+    CLOSED = 8;
+    INTERNAL_TRANSIENT_ERROR = 9;
+    TOO_MANY_REDIRECTS = 10;
+    MALFORMED_REPLY = 11;
+    CONNECTION_ERROR = 12;
+  }
+}
+
+message URLFetchRequest {
+  enum RequestMethod {
+    GET = 1;
+    POST = 2;
+    HEAD = 3;
+    PUT = 4;
+    DELETE = 5;
+    PATCH = 6;
+  }
+  required RequestMethod Method = 1;
+  required string Url = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bytes Payload = 6 [ctype=CORD];
+
+  optional bool FollowRedirects = 7 [default=true];
+
+  optional double Deadline = 8;
+
+  optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+  optional bytes Content = 1;
+  required int32 StatusCode = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bool ContentWasTruncated = 6 [default=false];
+  optional int64 ExternalBytesSent = 7;
+  optional int64 ExternalBytesReceived = 8;
+
+  optional string FinalUrl = 9;
+
+  optional int64 ApiCpuMilliseconds = 10 [default=0];
+  optional int64 ApiBytesSent = 11 [default=0];
+  optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 0000000..6b52ffc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,289 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+	UserServiceError
+	CreateLoginURLRequest
+	CreateLoginURLResponse
+	CreateLogoutURLRequest
+	CreateLogoutURLResponse
+	GetOAuthUserRequest
+	GetOAuthUserResponse
+	CheckOAuthSignatureRequest
+	CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+	UserServiceError_OK                    UserServiceError_ErrorCode = 0
+	UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+	UserServiceError_NOT_ALLOWED           UserServiceError_ErrorCode = 2
+	UserServiceError_OAUTH_INVALID_TOKEN   UserServiceError_ErrorCode = 3
+	UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+	UserServiceError_OAUTH_ERROR           UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "REDIRECT_URL_TOO_LONG",
+	2: "NOT_ALLOWED",
+	3: "OAUTH_INVALID_TOKEN",
+	4: "OAUTH_INVALID_REQUEST",
+	5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+	"OK": 0,
+	"REDIRECT_URL_TOO_LONG": 1,
+	"NOT_ALLOWED":           2,
+	"OAUTH_INVALID_TOKEN":   3,
+	"OAUTH_INVALID_REQUEST": 4,
+	"OAUTH_ERROR":           5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+	p := new(UserServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+	return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = UserServiceError_ErrorCode(value)
+	return nil
+}
+
+type UserServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset()         { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage()    {}
+
+type CreateLoginURLRequest struct {
+	DestinationUrl    *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+	AuthDomain        *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+	FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+	XXX_unrecognized  []byte  `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset()         { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage()    {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+	if m != nil && m.DestinationUrl != nil {
+		return *m.DestinationUrl
+	}
+	return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+	if m != nil && m.FederatedIdentity != nil {
+		return *m.FederatedIdentity
+	}
+	return ""
+}
+
+type CreateLoginURLResponse struct {
+	LoginUrl         *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset()         { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage()    {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+	if m != nil && m.LoginUrl != nil {
+		return *m.LoginUrl
+	}
+	return ""
+}
+
+type CreateLogoutURLRequest struct {
+	DestinationUrl   *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+	AuthDomain       *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset()         { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage()    {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+	if m != nil && m.DestinationUrl != nil {
+		return *m.DestinationUrl
+	}
+	return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+type CreateLogoutURLResponse struct {
+	LogoutUrl        *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset()         { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage()    {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+	if m != nil && m.LogoutUrl != nil {
+		return *m.LogoutUrl
+	}
+	return ""
+}
+
+type GetOAuthUserRequest struct {
+	Scope            *string  `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+	Scopes           []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset()         { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage()    {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+	if m != nil && m.Scope != nil {
+		return *m.Scope
+	}
+	return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+type GetOAuthUserResponse struct {
+	Email            *string  `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+	UserId           *string  `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+	AuthDomain       *string  `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+	UserOrganization *string  `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+	IsAdmin          *bool    `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+	ClientId         *string  `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+	Scopes           []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset()         { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage()    {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+	if m != nil && m.Email != nil {
+		return *m.Email
+	}
+	return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+	if m != nil && m.UserId != nil {
+		return *m.UserId
+	}
+	return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+	if m != nil && m.UserOrganization != nil {
+		return *m.UserOrganization
+	}
+	return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+	if m != nil && m.IsAdmin != nil {
+		return *m.IsAdmin
+	}
+	return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+	if m != nil && m.ClientId != nil {
+		return *m.ClientId
+	}
+	return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset()         { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage()    {}
+
+type CheckOAuthSignatureResponse struct {
+	OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset()         { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage()    {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+	if m != nil && m.OauthConsumerKey != nil {
+		return *m.OauthConsumerKey
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 0000000..f3e9693
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+  enum ErrorCode {
+    OK = 0;
+    REDIRECT_URL_TOO_LONG = 1;
+    NOT_ALLOWED = 2;
+    OAUTH_INVALID_TOKEN = 3;
+    OAUTH_INVALID_REQUEST = 4;
+    OAUTH_ERROR = 5;
+  }
+}
+
+message CreateLoginURLRequest {
+  required string destination_url = 1;
+  optional string auth_domain = 2;
+  optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+  required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+  required string destination_url = 1;
+  optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+  required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+  optional string scope = 1;
+
+  repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+  required string email = 1;
+  required string user_id = 2;
+  required string auth_domain = 3;
+  optional string user_organization = 4 [default = ""];
+  optional bool is_admin = 5 [default = false];
+  optional string client_id = 6 [default = ""];
+
+  repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+  required string oauth_consumer_key = 1;
+}
diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go
new file mode 100644
index 0000000..24d5860
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/api.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+// This file implements the logging API.
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// Debugf formats its arguments according to the format, analogous to fmt.Printf,
+// and records the text as a log message at Debug level. The message will be associated
+// with the request linked with the provided context.
+func Debugf(ctx context.Context, format string, args ...interface{}) {
+	internal.Logf(ctx, 0, format, args...)
+}
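+
+// For illustration, a minimal sketch of Debugf in a request handler (the
+// handler and message below are assumptions, not part of this package):
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		ctx := appengine.NewContext(r)
+//		log.Debugf(ctx, "serving %s", r.URL.Path)
+//	}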
+
+// Infof is like Debugf, but at Info level.
+func Infof(ctx context.Context, format string, args ...interface{}) {
+	internal.Logf(ctx, 1, format, args...)
+}
+
+// Warningf is like Debugf, but at Warning level.
+func Warningf(ctx context.Context, format string, args ...interface{}) {
+	internal.Logf(ctx, 2, format, args...)
+}
+
+// Errorf is like Debugf, but at Error level.
+func Errorf(ctx context.Context, format string, args ...interface{}) {
+	internal.Logf(ctx, 3, format, args...)
+}
+
+// Criticalf is like Debugf, but at Critical level.
+func Criticalf(ctx context.Context, format string, args ...interface{}) {
+	internal.Logf(ctx, 4, format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go
new file mode 100644
index 0000000..cd89e5c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of querying an application's logs from
+within an App Engine application.
+
+Example:
+	c := appengine.NewContext(r)
+	query := &log.Query{
+		AppLogs:  true,
+		Versions: []string{"1"},
+	}
+
+	for results := query.Run(c); ; {
+		record, err := results.Next()
+		if err == log.Done {
+			log.Infof(c, "Done processing results")
+			break
+		}
+		if err != nil {
+			log.Errorf(c, "Failed to retrieve next log: %v", err)
+			break
+		}
+		log.Infof(c, "Saw record %v", record)
+	}
+*/
+package log
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+	// StartTime specifies the earliest log to return (inclusive).
+	StartTime time.Time
+
+	// EndTime specifies the latest log to return (exclusive).
+	EndTime time.Time
+
+	// Offset specifies a position within the log stream to resume reading from,
+	// and should come from a previously returned Record's field of the same name.
+	Offset []byte
+
+	// Incomplete controls whether active (incomplete) requests should be included.
+	Incomplete bool
+
+	// AppLogs indicates if application-level logs should be included.
+	AppLogs bool
+
+	// ApplyMinLevel indicates if MinLevel should be used to filter results.
+	ApplyMinLevel bool
+
+	// If ApplyMinLevel is true, only logs for requests with at least one
+	// application log of MinLevel or higher will be returned.
+	MinLevel int
+
+	// Versions lists the major version IDs whose logs should be retrieved.
+	// Logs for specific modules can be retrieved by specifying versions
+	// in the form "module:version"; the default module is used if no module
+	// is specified.
+	Versions []string
+
+	// A list of requests to search for instead of a time-based scan. Cannot be
+	// combined with filtering options such as StartTime, EndTime, Offset,
+	// Incomplete, ApplyMinLevel, or Versions.
+	RequestIDs []string
+}
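+
+// As a sketch of the Versions field described above, a query for logs of
+// major version "1" of the default module and version "2" of a hypothetical
+// module named "api" could look like:
+//
+//	q := &log.Query{
+//		AppLogs:  true,
+//		Versions: []string{"1", "api:2"},
+//	}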
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+	Time    time.Time
+	Level   int
+	Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+	AppID            string
+	ModuleID         string
+	VersionID        string
+	RequestID        []byte
+	IP               string
+	Nickname         string
+	AppEngineRelease string
+
+	// The time when this request started.
+	StartTime time.Time
+
+	// The time when this request finished.
+	EndTime time.Time
+
+	// Opaque cursor into the result stream.
+	Offset []byte
+
+	// The time required to process the request.
+	Latency     time.Duration
+	MCycles     int64
+	Method      string
+	Resource    string
+	HTTPVersion string
+	Status      int32
+
+	// The size of the response sent back to the client, in bytes.
+	ResponseSize int64
+	Referrer     string
+	UserAgent    string
+	URLMapEntry  string
+	Combined     string
+	Host         string
+
+	// The estimated cost of this request, in dollars.
+	Cost              float64
+	TaskQueueName     string
+	TaskName          string
+	WasLoadingRequest bool
+	PendingTime       time.Duration
+	Finished          bool
+	AppLogs           []AppLog
+
+	// Mostly-unique identifier for the instance that handled the request, if available.
+	InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+	logs        []*Record
+	context     context.Context
+	request     *pb.LogReadRequest
+	resultsSeen bool
+	err         error
+}
+
+// Next returns the next log record, or Done if there are no more results.
+func (qr *Result) Next() (*Record, error) {
+	if qr.err != nil {
+		return nil, qr.err
+	}
+	if len(qr.logs) > 0 {
+		lr := qr.logs[0]
+		qr.logs = qr.logs[1:]
+		return lr, nil
+	}
+
+	if qr.request.Offset == nil && qr.resultsSeen {
+		return nil, Done
+	}
+
+	if err := qr.run(); err != nil {
+		// Errors here may be retried, so don't store the error.
+		return nil, err
+	}
+
+	return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs takes as input an array of pointers to LogLines, the internal
+// Protocol Buffer representation of a single application-level log,
+// and converts it to an array of AppLogs, the external representation
+// of an application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+	appLogs := make([]AppLog, len(logLines))
+
+	for i, line := range logLines {
+		appLogs[i] = AppLog{
+			Time:    time.Unix(0, *line.Time*1e3),
+			Level:   int(*line.Level),
+			Message: *line.LogMessage,
+		}
+	}
+
+	return appLogs
+}
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+	offset, err := proto.Marshal(rl.Offset)
+	if err != nil {
+		offset = nil
+	}
+	return &Record{
+		AppID:             *rl.AppId,
+		ModuleID:          rl.GetModuleId(),
+		VersionID:         *rl.VersionId,
+		RequestID:         rl.RequestId,
+		Offset:            offset,
+		IP:                *rl.Ip,
+		Nickname:          rl.GetNickname(),
+		AppEngineRelease:  string(rl.GetAppEngineRelease()),
+		StartTime:         time.Unix(0, *rl.StartTime*1e3),
+		EndTime:           time.Unix(0, *rl.EndTime*1e3),
+		Latency:           time.Duration(*rl.Latency) * time.Microsecond,
+		MCycles:           *rl.Mcycles,
+		Method:            *rl.Method,
+		Resource:          *rl.Resource,
+		HTTPVersion:       *rl.HttpVersion,
+		Status:            *rl.Status,
+		ResponseSize:      *rl.ResponseSize,
+		Referrer:          rl.GetReferrer(),
+		UserAgent:         rl.GetUserAgent(),
+		URLMapEntry:       *rl.UrlMapEntry,
+		Combined:          *rl.Combined,
+		Host:              rl.GetHost(),
+		Cost:              rl.GetCost(),
+		TaskQueueName:     rl.GetTaskQueueName(),
+		TaskName:          rl.GetTaskName(),
+		WasLoadingRequest: rl.GetWasLoadingRequest(),
+		PendingTime:       time.Duration(rl.GetPendingTime()) * time.Microsecond,
+		Finished:          rl.GetFinished(),
+		AppLogs:           protoToAppLogs(rl.Line),
+		InstanceID:        string(rl.GetCloneKey()),
+	}
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+func (params *Query) Run(c context.Context) *Result {
+	req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))
+	return &Result{
+		context: c,
+		request: req,
+		err:     err,
+	}
+}
+
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+	req := &pb.LogReadRequest{}
+	req.AppId = &appID
+	if !params.StartTime.IsZero() {
+		req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+	}
+	if !params.EndTime.IsZero() {
+		req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+	}
+	if len(params.Offset) > 0 {
+		var offset pb.LogOffset
+		if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+			return nil, fmt.Errorf("bad Offset: %v", err)
+		}
+		req.Offset = &offset
+	}
+	if params.Incomplete {
+		req.IncludeIncomplete = &params.Incomplete
+	}
+	if params.AppLogs {
+		req.IncludeAppLogs = &params.AppLogs
+	}
+	if params.ApplyMinLevel {
+		req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+	}
+	if params.Versions == nil {
+		// If no versions were specified, default to the default module at
+		// the major version being used by this module.
+		if i := strings.Index(versionID, "."); i >= 0 {
+			versionID = versionID[:i]
+		}
+		req.VersionId = []string{versionID}
+	} else {
+		req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+		for _, v := range params.Versions {
+			var m *string
+			if i := strings.Index(v, ":"); i >= 0 {
+				m, v = proto.String(v[:i]), v[i+1:]
+			}
+			req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+				ModuleId:  m,
+				VersionId: proto.String(v),
+			})
+		}
+	}
+	if params.RequestIDs != nil {
+		ids := make([][]byte, len(params.RequestIDs))
+		for i, v := range params.RequestIDs {
+			ids[i] = []byte(v)
+		}
+		req.RequestId = ids
+	}
+
+	return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+	res := &pb.LogReadResponse{}
+	if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil {
+		return err
+	}
+
+	r.logs = make([]*Record, len(res.Log))
+	r.request.Offset = res.Offset
+	r.resultsSeen = true
+
+	for i, log := range res.Log {
+		r.logs[i] = protoToRecord(log)
+	}
+
+	return nil
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 0000000..5155055
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,526 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+//	item0, err := memcache.Get(c, "key")
+//	if err != nil && err != memcache.ErrCacheMiss {
+//		return err
+//	}
+//	if err == nil {
+//		fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+//	} else {
+//		fmt.Fprintf(w, "memcache miss\n")
+//	}
+//
+// and
+//
+//	item1 := &memcache.Item{
+//		Key:   "foo",
+//		Value: []byte("bar"),
+//	}
+//	if err := memcache.Set(c, item1); err != nil {
+//		return err
+//	}
+package memcache
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"errors"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+	// ErrCacheMiss means that an operation failed
+	// because the item wasn't present.
+	ErrCacheMiss = errors.New("memcache: cache miss")
+	// ErrCASConflict means that a CompareAndSwap call failed due to the
+	// cached value being modified between the Get and the CompareAndSwap.
+	// If the cached value was simply evicted rather than replaced,
+	// ErrNotStored will be returned instead.
+	ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+	// ErrNoStats means that no statistics were available.
+	ErrNoStats = errors.New("memcache: no statistics available")
+	// ErrNotStored means that a conditional write operation (i.e. Add or
+	// CompareAndSwap) failed because the condition was not satisfied.
+	ErrNotStored = errors.New("memcache: item not stored")
+	// ErrServerError means that a server error occurred.
+	ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+	// Key is the Item's key (250 bytes maximum).
+	Key string
+	// Value is the Item's value.
+	Value []byte
+	// Object is the Item's value for use with a Codec.
+	Object interface{}
+	// Flags are server-opaque flags whose semantics are entirely up to the
+	// App Engine app.
+	Flags uint32
+	// Expiration is the maximum duration that the item will stay
+	// in the cache.
+	// The zero value means the Item has no expiration time.
+	// Subsecond precision is ignored.
+	// This is not set when getting items.
+	Expiration time.Duration
+	// casID is a client-opaque value used for compare-and-swap operations.
+	// Zero means that compare-and-swap is not used.
+	casID uint64
+}
+
+const (
+	secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+	thirtyYears      = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+	return &Item{
+		Key:   string(p.Key),
+		Value: p.Value,
+		Flags: p.GetFlags(),
+		casID: p.GetCasId(),
+	}
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+func singleError(err error) error {
+	if me, ok := err.(appengine.MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c context.Context, key string) (*Item, error) {
+	m, err := GetMulti(c, []string{key})
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := m[key]; !ok {
+		return nil, ErrCacheMiss
+	}
+	return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c context.Context, key []string) (map[string]*Item, error) {
+	if len(key) == 0 {
+		return nil, nil
+	}
+	keyAsBytes := make([][]byte, len(key))
+	for i, k := range key {
+		keyAsBytes[i] = []byte(k)
+	}
+	req := &pb.MemcacheGetRequest{
+		Key:    keyAsBytes,
+		ForCas: proto.Bool(true),
+	}
+	res := &pb.MemcacheGetResponse{}
+	if err := internal.Call(c, "memcache", "Get", req, res); err != nil {
+		return nil, err
+	}
+	m := make(map[string]*Item, len(res.Item))
+	for _, p := range res.Item {
+		t := protoToItem(p)
+		m[t.Key] = t
+	}
+	return m, nil
+}
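+
+// A sketch of a batched lookup with GetMulti (the keys are illustrative):
+//
+//	items, err := memcache.GetMulti(c, []string{"k1", "k2"})
+//	if err != nil {
+//		return err
+//	}
+//	if item, ok := items["k1"]; ok {
+//		fmt.Printf("k1 = %q\n", item.Value)
+//	}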
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item cannot be found.
+// The key must be at most 250 bytes in length.
+func Delete(c context.Context, key string) error {
+	return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c context.Context, key []string) error {
+	if len(key) == 0 {
+		return nil
+	}
+	req := &pb.MemcacheDeleteRequest{
+		Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+	}
+	for i, k := range key {
+		req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+	}
+	res := &pb.MemcacheDeleteResponse{}
+	if err := internal.Call(c, "memcache", "Delete", req, res); err != nil {
+		return err
+	}
+	if len(res.DeleteStatus) != len(key) {
+		return ErrServerError
+	}
+	me, any := make(appengine.MultiError, len(key)), false
+	for i, s := range res.DeleteStatus {
+		switch s {
+		case pb.MemcacheDeleteResponse_DELETED:
+			// OK
+		case pb.MemcacheDeleteResponse_NOT_FOUND:
+			me[i] = ErrCacheMiss
+			any = true
+		default:
+			me[i] = ErrServerError
+			any = true
+		}
+	}
+	if any {
+		return me
+	}
+	return nil
+}
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcache, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+	return incr(c, key, delta, &initialValue)
+}
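+
+// For example, a sketch of a simple page-view counter (the key name is an
+// assumption):
+//
+//	views, err := memcache.Increment(c, "views", 1, 0)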
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item cannot be found.
+func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {
+	return incr(c, key, delta, nil)
+}
+
+func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+	req := &pb.MemcacheIncrementRequest{
+		Key:          []byte(key),
+		InitialValue: initialValue,
+	}
+	if delta >= 0 {
+		req.Delta = proto.Uint64(uint64(delta))
+	} else {
+		req.Delta = proto.Uint64(uint64(-delta))
+		req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+	}
+	res := &pb.MemcacheIncrementResponse{}
+	err = internal.Call(c, "memcache", "Increment", req, res)
+	if err != nil {
+		return
+	}
+	if res.NewValue == nil {
+		return 0, ErrCacheMiss
+	}
+	return *res.NewValue, nil
+}
+
+// set sets the given items using the given conflict resolution policy.
+// appengine.MultiError may be returned.
+func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+	if len(item) == 0 {
+		return nil
+	}
+	req := &pb.MemcacheSetRequest{
+		Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+	}
+	for i, t := range item {
+		p := &pb.MemcacheSetRequest_Item{
+			Key: []byte(t.Key),
+		}
+		if value == nil {
+			p.Value = t.Value
+		} else {
+			p.Value = value[i]
+		}
+		if t.Flags != 0 {
+			p.Flags = proto.Uint32(t.Flags)
+		}
+		if t.Expiration != 0 {
+			// In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+			// for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+			// Throughout this .go file, we use int32.
+			// Also, in the proto, the expiration value is either a duration (in seconds)
+			// or an absolute Unix timestamp (in seconds), depending on whether the
+			// value is less than or greater than or equal to 30 years, respectively.
+			if t.Expiration < time.Second {
+				// Because an Expiration of 0 means no expiration, we take
+				// care here to translate an item with an expiration
+				// Duration between 0-1 seconds as immediately expiring
+				// (saying it expired a few seconds ago), rather than
+				// rounding it down to 0 and making it live forever.
+				p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+			} else if t.Expiration >= thirtyYears {
+				p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+			} else {
+				p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+			}
+		}
+		if t.casID != 0 {
+			p.CasId = proto.Uint64(t.casID)
+			p.ForCas = proto.Bool(true)
+		}
+		p.SetPolicy = policy.Enum()
+		req.Item[i] = p
+	}
+	res := &pb.MemcacheSetResponse{}
+	if err := internal.Call(c, "memcache", "Set", req, res); err != nil {
+		return err
+	}
+	if len(res.SetStatus) != len(item) {
+		return ErrServerError
+	}
+	me, any := make(appengine.MultiError, len(item)), false
+	for i, st := range res.SetStatus {
+		var err error
+		switch st {
+		case pb.MemcacheSetResponse_STORED:
+			// OK
+		case pb.MemcacheSetResponse_NOT_STORED:
+			err = ErrNotStored
+		case pb.MemcacheSetResponse_EXISTS:
+			err = ErrCASConflict
+		default:
+			err = ErrServerError
+		}
+		if err != nil {
+			me[i] = err
+			any = true
+		}
+	}
+	if any {
+		return me
+	}
+	return nil
+}
+
+// Set writes the given item, unconditionally.
+func Set(c context.Context, item *Item) error {
+	return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c context.Context, item []*Item) error {
+	return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c context.Context, item *Item) error {
+	return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c context.Context, item []*Item) error {
+	return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c context.Context, item *Item) error {
+	return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
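+
+// A sketch of the usual optimistic-update loop built on Get and
+// CompareAndSwap (the update function is hypothetical):
+//
+//	for {
+//		item, err := memcache.Get(c, "config")
+//		if err != nil {
+//			return err
+//		}
+//		item.Value = update(item.Value)
+//		err = memcache.CompareAndSwap(c, item)
+//		if err != memcache.ErrCASConflict {
+//			return err // nil on success
+//		}
+//		// Another writer raced us; fetch again and retry.
+//	}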
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c context.Context, item []*Item) error {
+	return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package-level
+// functions with the same names.
+type Codec struct {
+	Marshal   func(interface{}) ([]byte, error)
+	Unmarshal func([]byte, interface{}) error
+}
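+
+// For instance, round-tripping a struct with the JSON codec might look like
+// this sketch (the Profile type and key are assumptions):
+//
+//	item := &memcache.Item{Key: "profile:1", Object: &Profile{Name: "gopher"}}
+//	if err := memcache.JSON.Set(c, item); err != nil {
+//		return err
+//	}
+//	var p Profile
+//	if _, err := memcache.JSON.Get(c, "profile:1", &p); err != nil {
+//		return err
+//	}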
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+	i, err := Get(c, key)
+	if err != nil {
+		return nil, err
+	}
+	if err := cd.Unmarshal(i.Value, v); err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+	var vs [][]byte
+	var me appengine.MultiError
+	for i, item := range items {
+		v, err := cd.Marshal(item.Object)
+		if err != nil {
+			if me == nil {
+				me = make(appengine.MultiError, len(items))
+			}
+			me[i] = err
+			continue
+		}
+		if me == nil {
+			vs = append(vs, v)
+		}
+	}
+	if me != nil {
+		return me
+	}
+
+	return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+	return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+	return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+	// Gob is a Codec that uses the gob package.
+	Gob = Codec{gobMarshal, gobUnmarshal}
+	// JSON is a Codec that uses the json package.
+	JSON = Codec{json.Marshal, json.Unmarshal}
+)
+
+func gobMarshal(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+	return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache service.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+	Hits     uint64 // Counter of cache hits
+	Misses   uint64 // Counter of cache misses
+	ByteHits uint64 // Counter of bytes transferred for gets
+
+	Items uint64 // Items currently in the cache
+	Bytes uint64 // Size of all items currently in the cache
+
+	Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+	req := &pb.MemcacheStatsRequest{}
+	res := &pb.MemcacheStatsResponse{}
+	if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+		return nil, err
+	}
+	if res.Stats == nil {
+		return nil, ErrNoStats
+	}
+	return &Statistics{
+		Hits:     *res.Stats.Hits,
+		Misses:   *res.Stats.Misses,
+		ByteHits: *res.Stats.ByteHits,
+		Items:    *res.Stats.Items,
+		Bytes:    *res.Stats.Bytes,
+		Oldest:   int64(*res.Stats.OldestItemAge),
+	}, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+	req := &pb.MemcacheFlushRequest{}
+	res := &pb.MemcacheFlushResponse{}
+	return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+	switch m := m.(type) {
+	case *pb.MemcacheDeleteRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheGetRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheIncrementRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+	case *pb.MemcacheSetRequest:
+		if m.NameSpace == nil {
+			m.NameSpace = &namespace
+		}
+		// MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+	}
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+	internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
index d7bdad3..3066504 100644
--- a/vendor/google.golang.org/appengine/search/search.go
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -863,8 +863,9 @@
 		return nil, err
 	}
 	d := &pb.Document{
-		Field:   fieldsProto,
-		OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+		Field:         fieldsProto,
+		OrderId:       proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+		OrderIdSource: pb.Document_DEFAULTED.Enum(),
 	}
 	if meta != nil {
 		if meta.Rank != 0 {
@@ -872,6 +873,7 @@
 				return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
 			}
 			*d.OrderId = int32(meta.Rank)
+			d.OrderIdSource = pb.Document_SUPPLIED.Enum()
 		}
 		if len(meta.Facets) > 0 {
 			facets, err := facetsToProto(meta.Facets)
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 0000000..ba3d17c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+	Context context.Context
+
+	// Controls whether the application checks the validity of SSL certificates
+	// over HTTPS connections. A value of false (the default) instructs the
+	// application to send a request to the server only if the certificate is
+	// valid and signed by a trusted certificate authority (CA), and also
+	// includes a hostname that matches the certificate. A value of true
+	// instructs the application to perform no certificate validation.
+	AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5-second default is used.
+func Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &Transport{
+			Context: ctx,
+		},
+	}
+}
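+
+// Example (editor's sketch; the handler shape and target URL are
+// illustrative, and the usual net/http, io, time, context, and appengine
+// imports are assumed): issuing a fetch with a per-request deadline.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		ctx, cancel := context.WithTimeout(appengine.NewContext(r), 30*time.Second)
+//		defer cancel()
+//		resp, err := urlfetch.Client(ctx).Get("https://www.example.com/")
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		defer resp.Body.Close()
+//		io.Copy(w, resp.Body)
+//	}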
+
+type bodyReader struct {
+	content   []byte
+	truncated bool
+	closed    bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
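+
+// Example (editor's sketch): distinguishing truncation from a normal EOF
+// when draining a response body.
+//
+//	body, err := ioutil.ReadAll(resp.Body)
+//	if err == urlfetch.ErrTruncatedBody {
+//		// The proxy truncated the response; body holds only the
+//		// prefix that was delivered.
+//	}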
+
+func statusCodeToText(code int) string {
+	if t := http.StatusText(code); t != "" {
+		return t
+	}
+	return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+	if br.closed {
+		if br.truncated {
+			return 0, ErrTruncatedBody
+		}
+		return 0, io.EOF
+	}
+	n = copy(p, br.content)
+	if n > 0 {
+		br.content = br.content[n:]
+		return
+	}
+	if br.truncated {
+		br.closed = true
+		return 0, ErrTruncatedBody
+	}
+	return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+	br.closed = true
+	br.content = nil
+	return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+	"POST":  true,
+	"PUT":   true,
+	"PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+	if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+		return u.String()
+	}
+	aux := *u
+	aux.Opaque = "//" + aux.Host + aux.Opaque
+	return aux.String()
+}
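+
+// Editor's note (illustrative; depends on how url.Parse populated the
+// URL): for Scheme "http", Host "example.com", Opaque "/a%2Fb",
+// u.String() yields "http:/a%2Fb" (the host is dropped), while
+// urlString yields "http://example.com/a%2Fb".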
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+	if !ok {
+		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+	}
+
+	method := pb.URLFetchRequest_RequestMethod(methNum)
+
+	freq := &pb.URLFetchRequest{
+		Method:                        &method,
+		Url:                           proto.String(urlString(req.URL)),
+		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
+		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+	}
+	if deadline, ok := t.Context.Deadline(); ok {
+		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+	}
+
+	for k, vals := range req.Header {
+		for _, val := range vals {
+			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+				Key:   proto.String(k),
+				Value: proto.String(val),
+			})
+		}
+	}
+	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+		// Avoid a []byte copy if req.Body has a Bytes method.
+		switch b := req.Body.(type) {
+		case interface {
+			Bytes() []byte
+		}:
+			freq.Payload = b.Bytes()
+		default:
+			freq.Payload, err = ioutil.ReadAll(req.Body)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	fres := &pb.URLFetchResponse{}
+	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+		return nil, err
+	}
+
+	res = &http.Response{}
+	res.StatusCode = int(*fres.StatusCode)
+	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+	res.Header = make(http.Header)
+	res.Request = req
+
+	// Faked:
+	res.ProtoMajor = 1
+	res.ProtoMinor = 1
+	res.Proto = "HTTP/1.1"
+	res.Close = true
+
+	for _, h := range fres.Header {
+		hkey := http.CanonicalHeaderKey(*h.Key)
+		hval := *h.Value
+		if hkey == "Content-Length" {
+			// Will get filled in below for all but HEAD requests.
+			if req.Method == "HEAD" {
+				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+			}
+			continue
+		}
+		res.Header.Add(hkey, hval)
+	}
+
+	if req.Method != "HEAD" {
+		res.ContentLength = int64(len(fres.Content))
+	}
+
+	truncated := fres.GetContentWasTruncated()
+	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+	return
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 0000000..ffad571
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or
+// scopes is non-empty and the current user does not have at least one of the
+// scopes, this method will return an error.
+func CurrentOAuth(c context.Context, scopes ...string) (*User, error) {
+	req := &pb.GetOAuthUserRequest{}
+	if len(scopes) != 1 || scopes[0] != "" {
+		// The signature for this function used to be CurrentOAuth(Context, string).
+		// Ignore the singular "" scope to preserve existing behavior.
+		req.Scopes = scopes
+	}
+
+	res := &pb.GetOAuthUserResponse{}
+
+	err := internal.Call(c, "user", "GetOAuthUser", req, res)
+	if err != nil {
+		return nil, err
+	}
+	return &User{
+		Email:      *res.Email,
+		AuthDomain: *res.AuthDomain,
+		Admin:      res.GetIsAdmin(),
+		ID:         *res.UserId,
+		ClientID:   res.GetClientId(),
+	}, nil
+}
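+
+// Example (editor's sketch; the scope string is an illustrative Google
+// OAuth scope, and a log import is assumed):
+//
+//	scope := "https://www.googleapis.com/auth/userinfo.email"
+//	u, err := user.CurrentOAuth(c, scope)
+//	if err != nil {
+//		// Not a valid OAuth request, or the token lacks the scope.
+//		return
+//	}
+//	log.Infof(c, "OAuth request from %s", u.Email)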
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+	req := &pb.CheckOAuthSignatureRequest{}
+	res := &pb.CheckOAuthSignatureResponse{}
+
+	err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+	if err != nil {
+		return "", err
+	}
+	return *res.OauthConsumerKey, nil
+}
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 0000000..622b610
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user
+
+import (
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+	Email      string
+	AuthDomain string
+	Admin      bool
+
+	// ID is the unique permanent ID of the user.
+	// It is populated if the Email is associated
+	// with a Google account, or empty otherwise.
+	ID string
+
+	// ClientID is the ID of the pre-registered client so its identity can be verified.
+	// See https://developers.google.com/console/help/#generatingoauth2 for more information.
+	ClientID string
+
+	FederatedIdentity string
+	FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+	if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+		return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+	}
+	if u.FederatedIdentity != "" {
+		return u.FederatedIdentity
+	}
+	return u.Email
+}
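+
+// Editor's note (illustrative): a user with Email "bob@example.com" and
+// AuthDomain "example.com" is displayed as "bob"; without a matching
+// auth domain, the federated identity or full email is returned instead.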
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
+func LoginURL(c context.Context, dest string) (string, error) {
+	return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) {
+	req := &pb.CreateLoginURLRequest{
+		DestinationUrl: proto.String(dest),
+	}
+	if identity != "" {
+		req.FederatedIdentity = proto.String(identity)
+	}
+	res := &pb.CreateLoginURLResponse{}
+	if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
+		return "", err
+	}
+	return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c context.Context, dest string) (string, error) {
+	req := &pb.CreateLogoutURLRequest{
+		DestinationUrl: proto.String(dest),
+	}
+	res := &pb.CreateLogoutURLResponse{}
+	if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil {
+		return "", err
+	}
+	return *res.LogoutUrl, nil
+}
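+
+// Example (editor's sketch; the handler and redirect target are
+// illustrative, with fmt, net/http, and appengine imports assumed):
+//
+//	func welcome(w http.ResponseWriter, r *http.Request) {
+//		c := appengine.NewContext(r)
+//		u := user.Current(c)
+//		if u == nil {
+//			url, _ := user.LoginURL(c, r.URL.String())
+//			fmt.Fprintf(w, `<a href="%s">Sign in</a>`, url)
+//			return
+//		}
+//		url, _ := user.LogoutURL(c, r.URL.String())
+//		fmt.Fprintf(w, `Welcome, %s! <a href="%s">Sign out</a>`, u, url)
+//	}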
+
+func init() {
+	internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go
new file mode 100644
index 0000000..a747ef3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_classic.go
@@ -0,0 +1,35 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package user
+
+import (
+	"appengine/user"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
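+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.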
+func Current(ctx context.Context) *User {
+	u := user.Current(internal.ClassicContextFromContext(ctx))
+	if u == nil {
+		return nil
+	}
+	// Map appengine/user.User to this package's User type.
+	return &User{
+		Email:             u.Email,
+		AuthDomain:        u.AuthDomain,
+		Admin:             u.Admin,
+		ID:                u.ID,
+		FederatedIdentity: u.FederatedIdentity,
+		FederatedProvider: u.FederatedProvider,
+	}
+}
+
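+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.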
+func IsAdmin(ctx context.Context) bool {
+	return user.IsAdmin(internal.ClassicContextFromContext(ctx))
+}
diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go
new file mode 100644
index 0000000..8dc672e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_vm.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c context.Context) *User {
+	h := internal.IncomingHeaders(c)
+	u := &User{
+		Email:             h.Get("X-AppEngine-User-Email"),
+		AuthDomain:        h.Get("X-AppEngine-Auth-Domain"),
+		ID:                h.Get("X-AppEngine-User-Id"),
+		Admin:             h.Get("X-AppEngine-User-Is-Admin") == "1",
+		FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"),
+		FederatedProvider: h.Get("X-AppEngine-Federated-Provider"),
+	}
+	if u.Email == "" && u.FederatedIdentity == "" {
+		return nil
+	}
+	return u
+}
+
+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.
+func IsAdmin(c context.Context) bool {
+	h := internal.IncomingHeaders(c)
+	return h.Get("X-AppEngine-User-Is-Admin") == "1"
+}
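+
+// Example (editor's sketch; the handler is illustrative, with net/http,
+// fmt, and appengine imports assumed): gating a handler on administrator
+// status.
+//
+//	func adminOnly(w http.ResponseWriter, r *http.Request) {
+//		c := appengine.NewContext(r)
+//		if !user.IsAdmin(c) {
+//			http.Error(w, "admins only", http.StatusForbidden)
+//			return
+//		}
+//		fmt.Fprintln(w, "welcome, administrator")
+//	}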