gddo-server: vendor with godep

This is an initial vendoring of gddo-server using godep. Vendoring is needed for reproducible builds and deployments.
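
The vendor tree was presumably generated with a command along the lines of

	godep save github.com/golang/gddo/gddo-server

(that import path is what Godeps/Godeps.json records under "Packages").
godep copies each dependency into vendor/ and pins its exact revision in
Godeps.json, so builds no longer depend on the current state of the
upstream repositories.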

Change-Id: Ib59dd86f905c9d3196d25813d1d97637276edf6e
Reviewed-on: https://go-review.googlesource.com/30695
Reviewed-by: Andrew Gerrand <adg@golang.org>
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 0000000..b79aab2
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,303 @@
+{
+	"ImportPath": "github.com/golang/gddo",
+	"GoVersion": "go1.7",
+	"GodepVersion": "v74",
+	"Packages": [
+		"github.com/golang/gddo/gddo-server"
+	],
+	"Deps": [
+		{
+			"ImportPath": "cloud.google.com/go/compute/metadata",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/internal",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/internal/bundler",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/logging",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/logging/apiv2",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/logging/internal",
+			"Comment": "v0.2.0-10-g5af4269",
+			"Rev": "5af4269f950e91e917bab77f1138139023c868c2"
+		},
+		{
+			"ImportPath": "github.com/bradfitz/gomemcache/memcache",
+			"Comment": "release.r60-41-gfb1f79c",
+			"Rev": "fb1f79c6b65acda83063cbc69f6bba1522558bfc"
+		},
+		{
+			"ImportPath": "github.com/garyburd/redigo/internal",
+			"Comment": "v1.0.0-5-gffa8d46",
+			"Rev": "ffa8d46ada782d81cfda81a0fbd9f45ceae448e8"
+		},
+		{
+			"ImportPath": "github.com/garyburd/redigo/redis",
+			"Comment": "v1.0.0-5-gffa8d46",
+			"Rev": "ffa8d46ada782d81cfda81a0fbd9f45ceae448e8"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/proto",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/any",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/duration",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/empty",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/struct",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/ptypes/wrappers",
+			"Rev": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e"
+		},
+		{
+			"ImportPath": "github.com/golang/snappy",
+			"Rev": "d9eb7a3d35ec988b8585d4a0068e462c27d28380"
+		},
+		{
+			"ImportPath": "github.com/googleapis/gax-go",
+			"Rev": "ed6ab759ab548d1e6e070f53f9d1105d2d8128b0"
+		},
+		{
+			"ImportPath": "github.com/gregjones/httpcache",
+			"Rev": "413781778738c08fdbb98e1dd65f5abffe8832d0"
+		},
+		{
+			"ImportPath": "github.com/gregjones/httpcache/memcache",
+			"Rev": "413781778738c08fdbb98e1dd65f5abffe8832d0"
+		},
+		{
+			"ImportPath": "golang.org/x/net/context",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/context/ctxhttp",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/http2",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/http2/hpack",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/idna",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/internal/timeseries",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/lex/httplex",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/net/trace",
+			"Rev": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2",
+			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2/google",
+			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2/internal",
+			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2/jws",
+			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2/jwt",
+			"Rev": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
+		},
+		{
+			"ImportPath": "google.golang.org/api/internal",
+			"Rev": "3cf64a039723963488f603d140d0aec154fdcd20"
+		},
+		{
+			"ImportPath": "google.golang.org/api/iterator",
+			"Rev": "3cf64a039723963488f603d140d0aec154fdcd20"
+		},
+		{
+			"ImportPath": "google.golang.org/api/option",
+			"Rev": "3cf64a039723963488f603d140d0aec154fdcd20"
+		},
+		{
+			"ImportPath": "google.golang.org/api/transport",
+			"Rev": "3cf64a039723963488f603d140d0aec154fdcd20"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/app_identity",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/base",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/datastore",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/log",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/modules",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/remote_api",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/internal/search",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine/search",
+			"Comment": "v1.0.0-4-g3f4dbbc",
+			"Rev": "3f4dbbc0ec153a39878fd524ece9f39732bd4998"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/label",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/metric",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/monitoredres",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/serviceconfig",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/logging/type",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/logging/v2",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/protobuf",
+			"Rev": "9359a8d303c45e3212571b77610f1cefb0c6f3eb"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/codes",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/credentials",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/credentials/oauth",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/grpclog",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/internal",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/metadata",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/naming",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/peer",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/transport",
+			"Comment": "v1.0.2-24-g2131fed",
+			"Rev": "2131fedea9b3fe419b5c06200a674cdbc7bf986d"
+		}
+	]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 0000000..4cdaa53
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100644
index 0000000..a4c5efd
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 Google Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
new file mode 100644
index 0000000..5c6f3bf
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -0,0 +1,438 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+
+	"cloud.google.com/go/internal"
+)
+
+const (
+	// metadataIP is the documented metadata server IP address.
+	metadataIP = "169.254.169.254"
+
+	// metadataHostEnv is the environment variable specifying the
+	// GCE metadata hostname.  If empty, the default value of
+	// metadataIP ("169.254.169.254") is used instead.
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+				ResponseHeaderTimeout: 2 * time.Second,
+			},
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+			},
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+		host = metadataIP
+	}
+	url := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	res, err := client.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var (
+	onGCEOnce sync.Once
+	onGCE     bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	onGCEOnce.Do(initOnGCE)
+	return onGCE
+}
+
+func initOnGCE() {
+	onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+	// The user explicitly said they're on GCE, so trust them.
+	if os.Getenv(metadataHostEnv) != "" {
+		return true
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	resc := make(chan bool, 2)
+
+	// Try two strategies in parallel.
+	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	go func() {
+		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+		if err != nil {
+			resc <- false
+			return
+		}
+		defer res.Body.Close()
+		resc <- res.Header.Get("Metadata-Flavor") == "Google"
+	}()
+
+	go func() {
+		addrs, err := net.LookupHost("metadata.google.internal")
+		if err != nil || len(addrs) == 0 {
+			resc <- false
+			return
+		}
+		resc <- strsContains(addrs, metadataIP)
+	}()
+
+	tryHarder := systemInfoSuggestsGCE()
+	if tryHarder {
+		res := <-resc
+		if res {
+			// The first strategy succeeded, so let's use it.
+			return true
+		}
+		// Wait for either the DNS or metadata server probe to
+		// contradict the other one and say we are running on
+		// GCE. Give it a lot of time to do so, since the system
+		// info already suggests we're running on a GCE BIOS.
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case res = <-resc:
+			return res
+		case <-timer.C:
+			// Too slow. Who knows what this system is.
+			return false
+		}
+	}
+
+	// There's no hint from the system info that we're running on
+	// GCE, so use the first probe's result as truth, whether it's
+	// true or false. The goal here is to optimize for speed for
+	// users who are NOT running on GCE. We can't assume that
+	// either a DNS lookup or an HTTP request to a blackholed IP
+	// address is fast. Worst case this should return when the
+	// metaClient's Transport.ResponseHeaderTimeout or
+	// Transport.Dial.Timeout fires (in two seconds).
+	return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM.  The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
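
For orientation, here is a minimal sketch of how a caller typically consumes
the metadata package vendored above (hypothetical program; only functions
defined in metadata.go are used, and error handling is kept minimal):

	package main

	import (
		"log"

		"cloud.google.com/go/compute/metadata"
	)

	func main() {
		// OnGCE probes the metadata server and its DNS name to decide
		// whether this process is running on Compute Engine.
		if !metadata.OnGCE() {
			log.Print("not on GCE; skipping metadata lookups")
			return
		}
		// ProjectID caches its result; Zone trims the returned
		// "projects/<num>/zones/<zone>" path down to the zone name.
		projID, err := metadata.ProjectID()
		if err != nil {
			log.Fatal(err)
		}
		zone, err := metadata.Zone()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("running in project %s, zone %s", projID, zone)
	}
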
diff --git a/vendor/cloud.google.com/go/internal/bundler/bundler.go b/vendor/cloud.google.com/go/internal/bundler/bundler.go
new file mode 100644
index 0000000..7f070ca
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/bundler/bundler.go
@@ -0,0 +1,257 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bundler supports bundling (batching) of items. Bundling amortizes an
+// action with fixed costs over multiple items. For example, if an API provides
+// an RPC that accepts a list of items as input, but clients would prefer
+// adding items one at a time, then a Bundler can accept individual items from
+// the client and bundle many of them into a single RPC.
+package bundler
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+	"time"
+)
+
+const (
+	DefaultDelayThreshold       = time.Second
+	DefaultBundleCountThreshold = 10
+	DefaultBundleByteThreshold  = 1e6 // 1M
+	DefaultBufferedByteLimit    = 1e9 // 1G
+)
+
+var (
+	// ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
+	ErrOverflow = errors.New("bundler reached buffered byte limit")
+
+	// ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
+	ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
+)
+
+// A Bundler collects items added to it into a bundle until the bundle
+// exceeds a given size, then calls a user-provided function to handle the bundle.
+type Bundler struct {
+	// Starting from the time that the first message is added to a bundle, once
+	// this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
+	DelayThreshold time.Duration
+
+	// Once a bundle has this many items, handle the bundle. Since only one
+	// item at a time is added to a bundle, no bundle will exceed this
+	// threshold, so it also serves as a limit. The default is
+	// DefaultBundleCountThreshold.
+	BundleCountThreshold int
+
+	// Once the number of bytes in the current bundle reaches this threshold, handle
+	// the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
+	// but does not cap the total size of a bundle.
+	BundleByteThreshold int
+
+	// The maximum size of a bundle, in bytes. Zero means unlimited.
+	BundleByteLimit int
+
+	// The maximum number of bytes that the Bundler will keep in memory before
+	// returning ErrOverflow. The default is DefaultBufferedByteLimit.
+	BufferedByteLimit int
+
+	handler       func(interface{}) // called to handle a bundle
+	itemSliceZero reflect.Value     // nil (zero value) for slice of items
+	donec         chan struct{}     // closed when the Bundler is closed
+	handlec       chan int          // sent to when a bundle is ready for handling
+	timer         *time.Timer       // implements DelayThreshold
+
+	mu            sync.Mutex
+	bufferedSize  int           // total bytes buffered
+	closedBundles []bundle      // bundles waiting to be handled
+	curBundle     bundle        // incoming items added to this bundle
+	calledc       chan struct{} // closed and re-created after handler is called
+}
+
+type bundle struct {
+	items reflect.Value // slice of item type
+	size  int           // size in bytes of all items
+}
+
+// NewBundler creates a new Bundler. When you are finished with a Bundler, call
+// its Close method.
+//
+// itemExample is a value of the type that will be bundled. For example, if you
+// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
+//
+// handler is a function that will be called on each bundle. If itemExample is
+// of type T, the argument to handler is of type []T.
+func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
+	b := &Bundler{
+		DelayThreshold:       DefaultDelayThreshold,
+		BundleCountThreshold: DefaultBundleCountThreshold,
+		BundleByteThreshold:  DefaultBundleByteThreshold,
+		BufferedByteLimit:    DefaultBufferedByteLimit,
+
+		handler:       handler,
+		itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
+		donec:         make(chan struct{}),
+		handlec:       make(chan int, 1),
+		calledc:       make(chan struct{}),
+		timer:         time.NewTimer(1000 * time.Hour), // harmless initial timeout
+	}
+	b.curBundle.items = b.itemSliceZero
+	go b.background()
+	return b
+}
+
+// Add adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+//
+// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
+// the item can never be handled. Add returns ErrOversizedItem in this case.
+//
+// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
+// Add returns ErrOverflow.
+//
+// Add never blocks.
+func (b *Bundler) Add(item interface{}, size int) error {
+	// If this item exceeds the maximum size of a bundle,
+	// we can never send it.
+	if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
+		return ErrOversizedItem
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// If adding this item would exceed our allotted memory
+	// footprint, we can't accept it.
+	if b.bufferedSize+size > b.BufferedByteLimit {
+		return ErrOverflow
+	}
+	// If adding this item to the current bundle would cause it to exceed the
+	// maximum bundle size, close the current bundle and start a new one.
+	if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {
+		b.closeAndHandleBundle()
+	}
+	// Add the item.
+	b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))
+	b.curBundle.size += size
+	b.bufferedSize += size
+	// If this is the first item in the bundle, restart the timer.
+	if b.curBundle.items.Len() == 1 {
+		b.timer.Reset(b.DelayThreshold)
+	}
+	// If the current bundle equals the count threshold, close it.
+	if b.curBundle.items.Len() == b.BundleCountThreshold {
+		b.closeAndHandleBundle()
+	}
+	// If the current bundle equals or exceeds the byte threshold, close it.
+	if b.curBundle.size >= b.BundleByteThreshold {
+		b.closeAndHandleBundle()
+	}
+	return nil
+}
+
+// Flush waits until all items in the Bundler have been handled.
+func (b *Bundler) Flush() {
+	b.mu.Lock()
+	b.closeBundle()
+	// Unconditionally trigger the handling goroutine, to ensure calledc is closed
+	// even if there are no outstanding bundles.
+	select {
+	case b.handlec <- 1:
+	default:
+	}
+	calledc := b.calledc // remember locally, because it may change
+	b.mu.Unlock()
+	<-calledc
+}
+
+// Close calls Flush, then shuts down the Bundler. Close should always be
+// called on a Bundler when it is no longer needed. You must wait for all calls
+// to Add to complete before calling Close. Calling Add concurrently with Close
+// may result in the added items being ignored.
+func (b *Bundler) Close() {
+	b.Flush()
+	b.mu.Lock()
+	b.timer.Stop()
+	b.mu.Unlock()
+	close(b.donec)
+}
+
+func (b *Bundler) closeAndHandleBundle() {
+	if b.closeBundle() {
+		// We have created a closed bundle.
+		// Send to handlec without blocking.
+		select {
+		case b.handlec <- 1:
+		default:
+		}
+	}
+}
+
+// closeBundle finishes the current bundle, adds it to the list of closed
+// bundles and informs the background goroutine that there are bundles ready
+// for processing.
+//
+// This should always be called with b.mu held.
+func (b *Bundler) closeBundle() bool {
+	if b.curBundle.items.Len() == 0 {
+		return false
+	}
+	b.closedBundles = append(b.closedBundles, b.curBundle)
+	b.curBundle.items = b.itemSliceZero
+	b.curBundle.size = 0
+	return true
+}
+
+// background runs in a separate goroutine, waiting for events and handling
+// bundles.
+func (b *Bundler) background() {
+	done := false
+	for {
+		timedOut := false
+		// Wait for something to happen.
+		select {
+		case <-b.handlec:
+		case <-b.donec:
+			done = true
+		case <-b.timer.C:
+			timedOut = true
+		}
+		// Handle closed bundles.
+		b.mu.Lock()
+		if timedOut {
+			b.closeBundle()
+		}
+		buns := b.closedBundles
+		b.closedBundles = nil
+		// Closing calledc means we've sent all bundles. We need
+		// a new channel for the next set of bundles, which may start
+		// accumulating as soon as we release the lock.
+		calledc := b.calledc
+		b.calledc = make(chan struct{})
+		b.mu.Unlock()
+		for i, bun := range buns {
+			b.handler(bun.items.Interface())
+			// Drop the bundle's items, reducing our memory footprint.
+			buns[i].items = reflect.Value{} // buns[i] because bun is a copy
+			// Note immediately that we have more space, so Adds that occur
+			// during this loop will have a chance of succeeding.
+			b.mu.Lock()
+			b.bufferedSize -= bun.size
+			b.mu.Unlock()
+		}
+		// Signal that we've sent all outstanding bundles.
+		close(calledc)
+		if done {
+			break
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go
new file mode 100644
index 0000000..8e0c8f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/cloud.go
@@ -0,0 +1,64 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const userAgent = "gcloud-golang/0.1"
+
+// Transport is an http.RoundTripper that appends Google Cloud client's
+// user-agent to the original request's user-agent header.
+type Transport struct {
+	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+	// Do User-Agent some other way.
+
+	// Base is the actual http.RoundTripper
+	// requests will use. It must not be nil.
+	Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = cloneRequest(req)
+	ua := req.Header.Get("User-Agent")
+	if ua == "" {
+		ua = userAgent
+	} else {
+		ua = fmt.Sprintf("%s %s", ua, userAgent)
+	}
+	req.Header.Set("User-Agent", ua)
+	return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md
new file mode 100644
index 0000000..d2d9a17
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/README.md
@@ -0,0 +1,11 @@
+Auto-generated logging v2 clients
+=================================
+
+This package includes auto-generated clients for the logging v2 API.
+
+Use the handwritten logging client (in the parent directory,
+cloud.google.com/go/logging) in preference to this.
+
+This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.
+
+
diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
new file mode 100644
index 0000000..80c6edc
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
@@ -0,0 +1,281 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"time"
+
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+var (
+	configParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+	configSinkPathTemplate   = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}")
+)
+
+// ConfigCallOptions contains the retry settings for each method of ConfigClient.
+type ConfigCallOptions struct {
+	ListSinks  []gax.CallOption
+	GetSink    []gax.CallOption
+	CreateSink []gax.CallOption
+	UpdateSink []gax.CallOption
+	DeleteSink []gax.CallOption
+}
+
+func defaultConfigClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("logging.googleapis.com:443"),
+		option.WithScopes(
+			"https://www.googleapis.com/auth/cloud-platform",
+			"https://www.googleapis.com/auth/cloud-platform.read-only",
+			"https://www.googleapis.com/auth/logging.admin",
+			"https://www.googleapis.com/auth/logging.read",
+			"https://www.googleapis.com/auth/logging.write",
+		),
+	}
+}
+
+func defaultConfigCallOptions() *ConfigCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        1000 * time.Millisecond,
+					Multiplier: 1.2,
+				})
+			}),
+		},
+	}
+	return &ConfigCallOptions{
+		ListSinks:  retry[[2]string{"default", "idempotent"}],
+		GetSink:    retry[[2]string{"default", "idempotent"}],
+		CreateSink: retry[[2]string{"default", "non_idempotent"}],
+		UpdateSink: retry[[2]string{"default", "non_idempotent"}],
+		DeleteSink: retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// ConfigClient is a client for interacting with Stackdriver Logging API.
+type ConfigClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	configClient loggingpb.ConfigServiceV2Client
+
+	// The call options for this service.
+	CallOptions *ConfigCallOptions
+
+	// The metadata to be sent with each request.
+	metadata map[string][]string
+}
+
+// NewConfigClient creates a new config service v2 client.
+//
+// Service for configuring sinks used to export log entries outside Stackdriver
+// Logging.
+func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &ConfigClient{
+		conn:        conn,
+		CallOptions: defaultConfigCallOptions(),
+
+		configClient: loggingpb.NewConfigServiceV2Client(conn),
+	}
+	c.SetGoogleClientInfo("gax", gax.Version)
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *ConfigClient) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *ConfigClient) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *ConfigClient) SetGoogleClientInfo(name, version string) {
+	c.metadata = map[string][]string{
+		"x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())},
+	}
+}
+
+// ConfigParentPath returns the path for the parent resource.
+func ConfigParentPath(project string) string {
+	path, err := configParentPathTemplate.Render(map[string]string{
+		"project": project,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// ConfigSinkPath returns the path for the sink resource.
+func ConfigSinkPath(project, sink string) string {
+	path, err := configSinkPathTemplate.Render(map[string]string{
+		"project": project,
+		"sink":    sink,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// ListSinks lists sinks.
+func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	it := &LogSinkIterator{}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		var resp *loggingpb.ListSinksResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context) error {
+			var err error
+			resp, err = c.configClient.ListSinks(ctx, req)
+			return err
+		}, c.CallOptions.ListSinks...)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, resp.Sinks...)
+		return resp.NextPageToken, nil
+	}
+	bufLen := func() int { return len(it.items) }
+	takeBuf := func() interface{} {
+		b := it.items
+		it.items = nil
+		return b
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)
+	return it
+}
+
+// GetSink gets a sink.
+func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogSink
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.configClient.GetSink(ctx, req)
+		return err
+	}, c.CallOptions.GetSink...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateSink creates a sink.
+func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogSink
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.configClient.CreateSink(ctx, req)
+		return err
+	}, c.CallOptions.CreateSink...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateSink updates or creates a sink.
+func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogSink
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.configClient.UpdateSink(ctx, req)
+		return err
+	}, c.CallOptions.UpdateSink...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteSink deletes a sink.
+func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		_, err = c.configClient.DeleteSink(ctx, req)
+		return err
+	}, c.CallOptions.DeleteSink...)
+	return err
+}
+
+// LogSinkIterator manages a stream of *loggingpb.LogSink.
+type LogSinkIterator struct {
+	items    []*loggingpb.LogSink
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogSinkIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
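
As a rough illustration of the generated client surface above, a hypothetical
caller might list a project's sinks roughly as follows (the Parent field on
ListSinksRequest is assumed from the vendored v2 logging protos; the project
name is made up):

	package main

	import (
		"fmt"
		"log"

		logging "cloud.google.com/go/logging/apiv2"
		"golang.org/x/net/context"
		"google.golang.org/api/iterator"
		loggingpb "google.golang.org/genproto/googleapis/logging/v2"
	)

	func main() {
		ctx := context.Background()
		c, err := logging.NewConfigClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// ListSinks returns a LogSinkIterator; Next reports iterator.Done
		// once the last page has been consumed.
		it := c.ListSinks(ctx, &loggingpb.ListSinksRequest{
			Parent: logging.ConfigParentPath("my-project"),
		})
		for {
			sink, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(sink.Name)
		}
	}
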
diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go
new file mode 100644
index 0000000..8b76719
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package logging is an experimental, auto-generated package for the
+// logging API.
+//
+// The Google Cloud Logging API lets you write log entries and manage your
+// logs, log sinks and logs-based metrics.
+package logging
+
+const gapicNameVersion = "gapic/0.1.0"
diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
new file mode 100644
index 0000000..add8683
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
@@ -0,0 +1,322 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"time"
+
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+var (
+	loggingParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+	loggingLogPathTemplate    = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	DeleteLog                        []gax.CallOption
+	WriteLogEntries                  []gax.CallOption
+	ListLogEntries                   []gax.CallOption
+	ListMonitoredResourceDescriptors []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("logging.googleapis.com:443"),
+		option.WithScopes(
+			"https://www.googleapis.com/auth/cloud-platform",
+			"https://www.googleapis.com/auth/cloud-platform.read-only",
+			"https://www.googleapis.com/auth/logging.admin",
+			"https://www.googleapis.com/auth/logging.read",
+			"https://www.googleapis.com/auth/logging.write",
+		),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        1000 * time.Millisecond,
+					Multiplier: 1.2,
+				})
+			}),
+		},
+		{"list", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        1000 * time.Millisecond,
+					Multiplier: 1.2,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		DeleteLog:                        retry[[2]string{"default", "idempotent"}],
+		WriteLogEntries:                  retry[[2]string{"default", "non_idempotent"}],
+		ListLogEntries:                   retry[[2]string{"list", "idempotent"}],
+		ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// Client is a client for interacting with Stackdriver Logging API.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client loggingpb.LoggingServiceV2Client
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The metadata to be sent with each request.
+	metadata map[string][]string
+}
+
+// NewClient creates a new logging service v2 client.
+//
+// Service for ingesting and querying logs.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: loggingpb.NewLoggingServiceV2Client(conn),
+	}
+	c.SetGoogleClientInfo("gax", gax.Version)
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) SetGoogleClientInfo(name, version string) {
+	c.metadata = map[string][]string{
+		"x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())},
+	}
+}
+
+// LoggingParentPath returns the path for the parent resource.
+func LoggingParentPath(project string) string {
+	path, err := loggingParentPathTemplate.Render(map[string]string{
+		"project": project,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// LoggingLogPath returns the path for the log resource.
+func LoggingLogPath(project, log string) string {
+	path, err := loggingLogPathTemplate.Render(map[string]string{
+		"project": project,
+		"log":     log,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// DeleteLog deletes a log and all its log entries.
+// The log will reappear if it receives new entries.
+func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		_, err = c.client.DeleteLog(ctx, req)
+		return err
+	}, c.CallOptions.DeleteLog...)
+	return err
+}
+
+// WriteLogEntries writes log entries to Stackdriver Logging.  All log entries are
+// written by this method.
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.WriteLogEntriesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.client.WriteLogEntries(ctx, req)
+		return err
+	}, c.CallOptions.WriteLogEntries...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListLogEntries lists log entries.  Use this method to retrieve log entries from Cloud
+// Logging.  For ways to export log entries, see
+// [Exporting Logs](/logging/docs/export).
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	it := &LogEntryIterator{}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		var resp *loggingpb.ListLogEntriesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context) error {
+			var err error
+			resp, err = c.client.ListLogEntries(ctx, req)
+			return err
+		}, c.CallOptions.ListLogEntries...)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, resp.Entries...)
+		return resp.NextPageToken, nil
+	}
+	bufLen := func() int { return len(it.items) }
+	takeBuf := func() interface{} {
+		b := it.items
+		it.items = nil
+		return b
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)
+	return it
+}
+
+// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging.
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	it := &MonitoredResourceDescriptorIterator{}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		var resp *loggingpb.ListMonitoredResourceDescriptorsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context) error {
+			var err error
+			resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req)
+			return err
+		}, c.CallOptions.ListMonitoredResourceDescriptors...)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, resp.ResourceDescriptors...)
+		return resp.NextPageToken, nil
+	}
+	bufLen := func() int { return len(it.items) }
+	takeBuf := func() interface{} {
+		b := it.items
+		it.items = nil
+		return b
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)
+	return it
+}
+
+// LogEntryIterator manages a stream of *loggingpb.LogEntry.
+type LogEntryIterator struct {
+	items    []*loggingpb.LogEntry
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogEntryIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+	items    []*monitoredrespb.MonitoredResourceDescriptor
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
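
Similarly, a hedged sketch of paging through log entries with Client.ListLogEntries and LogEntryIterator; the project ID and filter are illustrative, and the request field names assume the vendored loggingpb revision:

	it := client.ListLogEntries(ctx, &loggingpb.ListLogEntriesRequest{
		ProjectIds: []string{"my-project"},
		Filter:     "severity>=ERROR",
	})
	for {
		entry, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		_ = entry // inspect the *loggingpb.LogEntry
	}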
diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
new file mode 100644
index 0000000..6c972ea
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
@@ -0,0 +1,280 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"time"
+
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+var (
+	metricsParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+	metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}")
+)
+
+// MetricsCallOptions contains the retry settings for each method of MetricsClient.
+type MetricsCallOptions struct {
+	ListLogMetrics  []gax.CallOption
+	GetLogMetric    []gax.CallOption
+	CreateLogMetric []gax.CallOption
+	UpdateLogMetric []gax.CallOption
+	DeleteLogMetric []gax.CallOption
+}
+
+func defaultMetricsClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("logging.googleapis.com:443"),
+		option.WithScopes(
+			"https://www.googleapis.com/auth/cloud-platform",
+			"https://www.googleapis.com/auth/cloud-platform.read-only",
+			"https://www.googleapis.com/auth/logging.admin",
+			"https://www.googleapis.com/auth/logging.read",
+			"https://www.googleapis.com/auth/logging.write",
+		),
+	}
+}
+
+func defaultMetricsCallOptions() *MetricsCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        1000 * time.Millisecond,
+					Multiplier: 1.2,
+				})
+			}),
+		},
+	}
+	return &MetricsCallOptions{
+		ListLogMetrics:  retry[[2]string{"default", "idempotent"}],
+		GetLogMetric:    retry[[2]string{"default", "idempotent"}],
+		CreateLogMetric: retry[[2]string{"default", "non_idempotent"}],
+		UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}],
+		DeleteLogMetric: retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// MetricsClient is a client for interacting with Stackdriver Logging API.
+type MetricsClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	metricsClient loggingpb.MetricsServiceV2Client
+
+	// The call options for this service.
+	CallOptions *MetricsCallOptions
+
+	// The metadata to be sent with each request.
+	metadata map[string][]string
+}
+
+// NewMetricsClient creates a new metrics service v2 client.
+//
+// Service for configuring logs-based metrics.
+func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &MetricsClient{
+		conn:        conn,
+		CallOptions: defaultMetricsCallOptions(),
+
+		metricsClient: loggingpb.NewMetricsServiceV2Client(conn),
+	}
+	c.SetGoogleClientInfo("gax", gax.Version)
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *MetricsClient) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricsClient) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricsClient) SetGoogleClientInfo(name, version string) {
+	c.metadata = map[string][]string{
+		"x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())},
+	}
+}
+
+// MetricsParentPath returns the path for the parent resource.
+func MetricsParentPath(project string) string {
+	path, err := metricsParentPathTemplate.Render(map[string]string{
+		"project": project,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// MetricsMetricPath returns the path for the metric resource.
+func MetricsMetricPath(project, metric string) string {
+	path, err := metricsMetricPathTemplate.Render(map[string]string{
+		"project": project,
+		"metric":  metric,
+	})
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// ListLogMetrics lists logs-based metrics.
+func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	it := &LogMetricIterator{}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		var resp *loggingpb.ListLogMetricsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context) error {
+			var err error
+			resp, err = c.metricsClient.ListLogMetrics(ctx, req)
+			return err
+		}, c.CallOptions.ListLogMetrics...)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, resp.Metrics...)
+		return resp.NextPageToken, nil
+	}
+	bufLen := func() int { return len(it.items) }
+	takeBuf := func() interface{} {
+		b := it.items
+		it.items = nil
+		return b
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)
+	return it
+}
+
+// GetLogMetric gets a logs-based metric.
+func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogMetric
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.metricsClient.GetLogMetric(ctx, req)
+		return err
+	}, c.CallOptions.GetLogMetric...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateLogMetric creates a logs-based metric.
+func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogMetric
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.metricsClient.CreateLogMetric(ctx, req)
+		return err
+	}, c.CallOptions.CreateLogMetric...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateLogMetric creates or updates a logs-based metric.
+func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	var resp *loggingpb.LogMetric
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.metricsClient.UpdateLogMetric(ctx, req)
+		return err
+	}, c.CallOptions.UpdateLogMetric...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteLogMetric deletes a logs-based metric.
+func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error {
+	md, _ := metadata.FromContext(ctx)
+	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		_, err = c.metricsClient.DeleteLogMetric(ctx, req)
+		return err
+	}, c.CallOptions.DeleteLogMetric...)
+	return err
+}
+
+// LogMetricIterator manages a stream of *loggingpb.LogMetric.
+type LogMetricIterator struct {
+	items    []*loggingpb.LogMetric
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogMetricIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
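
A sketch of creating a logs-based metric with the MetricsClient above; mc, ctx, and all names are placeholder assumptions:

	m, err := mc.CreateLogMetric(ctx, &loggingpb.CreateLogMetricRequest{
		Parent: "projects/my-project",
		Metric: &loggingpb.LogMetric{
			Name:        "error-count",
			Description: "Count of entries at severity ERROR or above",
			Filter:      "severity>=ERROR",
		},
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = m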
diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go
new file mode 100644
index 0000000..6580dce
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/doc.go
@@ -0,0 +1,89 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package logging contains a Stackdriver Logging client suitable for writing logs.
+For reading logs, and working with sinks, metrics and monitored resources,
+see package cloud.google.com/go/logging/logadmin.
+
+This client uses Logging API v2.
+See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API.
+
+This package is experimental and subject to API changes.
+
+
+Creating a Client
+
+Use a Client to interact with the Stackdriver Logging API.
+
+	// Create a Client
+	ctx := context.Background()
+	client, err := logging.NewClient(ctx, "my-project")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+
+Basic Usage
+
+For most use-cases, you'll want to add log entries to a buffer to be periodically
+flushed (automatically and asynchronously) to the Stackdriver Logging service.
+
+	// Initialize a logger
+	lg := client.Logger("my-log")
+
+	// Add entry to log buffer
+	lg.Log(logging.Entry{Payload: "something happened!"})
+
+
+Closing your Client
+
+You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service.
+
+	// Close the client when finished.
+	err = client.Close()
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+
+Synchronous Logging
+
+For critical errors, you may want to send your log entries immediately.
+LogSync is slow and will block until the log entry has been sent, so it is
+not recommended for basic use.
+
+	lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"})
+
+
+The Standard Logger Interface
+
+You may want to use a standard log.Logger in your program.
+
+	// stdlg is a *log.Logger
+	stdlg := lg.StandardLogger(logging.Info)
+	stdlg.Println("some info")
+
+
+Log Levels
+
+An Entry may be assigned one of several severity levels.
+
+	logging.Entry{
+		Payload: "something terrible happened!",
+		Severity: logging.Critical,
+	}
+
+*/
+package logging
diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go
new file mode 100644
index 0000000..7d8ece0
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/internal/common.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	ProdAddr = "logging.googleapis.com:443"
+	Version  = "0.2.0"
+)
+
+func LogPath(parent, logID string) string {
+	logID = strings.Replace(logID, "/", "%2F", -1)
+	return fmt.Sprintf("%s/logs/%s", parent, logID)
+}
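
To illustrate the escaping LogPath performs (hypothetical inputs):

	internal.LogPath("projects/my-project", "app/errors")
	// returns "projects/my-project/logs/app%2Ferrors"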
diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go
new file mode 100644
index 0000000..48e0bbe
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logging.go
@@ -0,0 +1,665 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// API/gRPC features intentionally missing from this client:
+// - You cannot have the server pick the time of the entry. This client
+//   always sends a time.
+// - There is no way to provide a protocol buffer payload.
+// - No support for the "partial success" feature when writing log entries.
+
+// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded.
+// These features are missing now, but will likely be added:
+// - There is no way to specify CallOptions.
+
+package logging
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/internal/bundler"
+	vkit "cloud.google.com/go/logging/apiv2"
+	"cloud.google.com/go/logging/internal"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	structpb "github.com/golang/protobuf/ptypes/struct"
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
+	logtypepb "google.golang.org/genproto/googleapis/logging/type"
+	logpb "google.golang.org/genproto/googleapis/logging/v2"
+)
+
+const (
+	// Scope for reading from the logging service.
+	ReadScope = "https://www.googleapis.com/auth/logging.read"
+
+	// Scope for writing to the logging service.
+	WriteScope = "https://www.googleapis.com/auth/logging.write"
+
+	// Scope for administrative actions on the logging service.
+	AdminScope = "https://www.googleapis.com/auth/logging.admin"
+)
+
+const (
+	// defaultErrorCapacity is the capacity of the channel used to deliver
+	// errors to the OnError function.
+	defaultErrorCapacity = 10
+
+	// DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption.
+	DefaultDelayThreshold = time.Second
+
+	// DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption.
+	DefaultEntryCountThreshold = 10
+
+	// DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption.
+	DefaultEntryByteThreshold = 1 << 20 // 1MiB
+
+	// DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption.
+	DefaultBufferedByteLimit = 1 << 30 // 1GiB
+)
+
+// For testing:
+var now = time.Now
+
+// ErrOverflow signals that the number of buffered entries for a Logger
+// exceeds its BufferLimit.
+var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
+
+// Client is a Logging client. A Client is associated with a single Cloud project.
+type Client struct {
+	client    *vkit.Client // client for the logging service
+	projectID string
+	errc      chan error     // should be buffered to minimize dropped errors
+	donec     chan struct{}  // closed on Client.Close to close Logger bundlers
+	loggers   sync.WaitGroup // so we can wait for loggers to close
+	closed    bool
+
+	// OnError is called when an error occurs in a call to Log or Flush. The
+	// error may be due to an invalid Entry, an overflow because BufferLimit
+	// was reached (in which case the error will be ErrOverflow) or an error
+	// communicating with the logging service. OnError is called with errors
+	// from all Loggers. It is never called concurrently. OnError is expected
+	// to return quickly; if errors occur while OnError is running, some may
+	// not be reported. The default behavior is to call log.Printf.
+	//
+	// This field should be set only once, before any method of Client is called.
+	OnError func(err error)
+}
+
+// NewClient returns a new logging client associated with the provided project ID.
+//
+// By default NewClient uses WriteScope. To use a different scope, call
+// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+	// Check for '/' in project ID to reserve the ability to support various owning resources,
+	// in the form "{Collection}/{Name}", for instance "organizations/my-org".
+	if strings.ContainsRune(projectID, '/') {
+		return nil, errors.New("logging: project ID contains '/'")
+	}
+	opts = append([]option.ClientOption{
+		option.WithEndpoint(internal.ProdAddr),
+		option.WithScopes(WriteScope),
+	}, opts...)
+	c, err := vkit.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	c.SetGoogleClientInfo("logging", internal.Version)
+	client := &Client{
+		client:    c,
+		projectID: projectID,
+		errc:      make(chan error, defaultErrorCapacity), // create a small buffer for errors
+		donec:     make(chan struct{}),
+		OnError:   func(e error) { log.Printf("logging client: %v", e) },
+	}
+	// Deliver errors to the user's OnError function from a single goroutine,
+	// so OnError is never called concurrently.
+	go func() {
+		for err := range client.errc {
+			// This reference to OnError is memory-safe if the user sets OnError before
+			// calling any client methods. The reference happens before the first read from
+			// client.errc, which happens before the first write to client.errc, which
+			// happens before any call, which happens before the user sets OnError.
+			if fn := client.OnError; fn != nil {
+				fn(err)
+			} else {
+				log.Printf("logging (project ID %q): %v", projectID, err)
+			}
+		}
+	}()
+	return client, nil
+}
+
+// parent returns the string used in many RPCs to denote the parent resource of the log.
+func (c *Client) parent() string {
+	return "projects/" + c.projectID
+}
+
+var unixZeroTimestamp *tspb.Timestamp
+
+func init() {
+	var err error
+	unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0))
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Ping reports whether the client's connection to the logging service and the
+// authentication configuration are valid. To accomplish this, Ping writes a
+// log entry "ping" to a log named "ping".
+func (c *Client) Ping(ctx context.Context) error {
+	ent := &logpb.LogEntry{
+		Payload:   &logpb.LogEntry_TextPayload{"ping"},
+		Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
+		InsertId:  "ping",            // necessary for the service to dedup these entries.
+	}
+	_, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+		LogName:  internal.LogPath(c.parent(), "ping"),
+		Resource: &mrpb.MonitoredResource{Type: "global"},
+		Entries:  []*logpb.LogEntry{ent},
+	})
+	return err
+}
+
+// A Logger is used to write log messages to a single log. It can be configured
+// with a log ID, common monitored resource, and a set of common labels.
+type Logger struct {
+	client     *Client
+	logName    string // "projects/{projectID}/logs/{logID}"
+	stdLoggers map[Severity]*log.Logger
+	bundler    *bundler.Bundler
+
+	// Options
+	commonResource *mrpb.MonitoredResource
+	commonLabels   map[string]string
+}
+
+// A LoggerOption is a configuration option for a Logger.
+type LoggerOption interface {
+	set(*Logger)
+}
+
+// CommonResource sets the monitored resource associated with all log entries
+// written from a Logger. If not provided, a resource of type "global" is used.
+// This value can be overridden by setting an Entry's Resource field.
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
+
+type commonResource struct{ *mrpb.MonitoredResource }
+
+func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
+
+// CommonLabels are labels that apply to all log entries written from a Logger,
+// so that you don't have to repeat them in each log entry's Labels field. If
+// any of the log entries contains a (key, value) with the same key that is in
+// CommonLabels, then the entry's (key, value) overrides the one in
+// CommonLabels.
+func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) }
+
+type commonLabels map[string]string
+
+func (c commonLabels) set(l *Logger) { l.commonLabels = c }
+
+// DelayThreshold is the maximum amount of time that an entry should remain
+// buffered in memory before a call to the logging service is triggered. Larger
+// values of DelayThreshold will generally result in fewer calls to the logging
+// service, while increasing the risk that log entries will be lost if the
+// process crashes.
+// The default is DefaultDelayThreshold.
+func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) }
+
+type delayThreshold time.Duration
+
+func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) }
+
+// EntryCountThreshold is the maximum number of entries that will be buffered
+// in memory before a call to the logging service is triggered. Larger values
+// will generally result in fewer calls to the logging service, while
+// increasing both memory consumption and the risk that log entries will be
+// lost if the process crashes.
+// The default is DefaultEntryCountThreshold.
+func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) }
+
+type entryCountThreshold int
+
+func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) }
+
+// EntryByteThreshold is the maximum number of bytes of entries that will be
+// buffered in memory before a call to the logging service is triggered. See
+// EntryCountThreshold for a discussion of the tradeoffs involved in setting
+// this option.
+// The default is DefaultEntryByteThreshold.
+func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) }
+
+type entryByteThreshold int
+
+func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) }
+
+// EntryByteLimit is the maximum number of bytes of entries that will be sent
+// in a single call to the logging service. This option limits the size of a
+// single RPC payload, to account for network or service issues with large
+// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has
+// no effect.
+// The default is zero, meaning there is no limit.
+func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }
+
+type entryByteLimit int
+
+func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) }
+
+// BufferedByteLimit is the maximum number of bytes that the Logger will keep
+// in memory before returning ErrOverflow. This option limits the total memory
+// consumption of the Logger (but note that each Logger has its own, separate
+// limit). It is possible to reach BufferedByteLimit even if it is larger than
+// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter
+// two options may be enqueued (and hence occupying memory) while new log
+// entries are being added.
+// The default is DefaultBufferedByteLimit.
+func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) }
+
+type bufferedByteLimit int
+
+func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) }
+
+// Logger returns a Logger that will write entries with the given log ID, such as
+// "syslog". A log ID must be less than 512 characters long and can only
+// include the following characters: upper and lower case alphanumeric
+// characters: [A-Za-z0-9]; and punctuation characters: forward-slash,
+// underscore, hyphen, and period.
+func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger {
+	l := &Logger{
+		client:         c,
+		logName:        internal.LogPath(c.parent(), logID),
+		commonResource: &mrpb.MonitoredResource{Type: "global"},
+	}
+	// TODO(jba): determine the right context for the bundle handler.
+	ctx := context.TODO()
+	l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) {
+		l.writeLogEntries(ctx, entries.([]*logpb.LogEntry))
+	})
+	l.bundler.DelayThreshold = DefaultDelayThreshold
+	l.bundler.BundleCountThreshold = DefaultEntryCountThreshold
+	l.bundler.BundleByteThreshold = DefaultEntryByteThreshold
+	l.bundler.BufferedByteLimit = DefaultBufferedByteLimit
+	for _, opt := range opts {
+		opt.set(l)
+	}
+
+	l.stdLoggers = map[Severity]*log.Logger{}
+	for s := range severityName {
+		l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0)
+	}
+	c.loggers.Add(1)
+	go func() {
+		defer c.loggers.Done()
+		<-c.donec
+		l.bundler.Close()
+	}()
+	return l
+}
+
+type severityWriter struct {
+	l *Logger
+	s Severity
+}
+
+func (w severityWriter) Write(p []byte) (n int, err error) {
+	w.l.Log(Entry{
+		Severity: w.s,
+		Payload:  string(p),
+	})
+	return len(p), nil
+}
+
+// Close closes the client.
+func (c *Client) Close() error {
+	if c.closed {
+		return nil
+	}
+	close(c.donec)   // close Logger bundlers
+	c.loggers.Wait() // wait for all bundlers to flush and close
+	// Now there can be no more errors.
+	close(c.errc) // terminate error goroutine
+	// Return only the first error. Since all clients share an underlying connection,
+	// Closes after the first always report a "connection is closing" error.
+	err := c.client.Close()
+	c.closed = true
+	return err
+}
+
+// Severity is the severity of the event described in a log entry. These
+// guideline severity levels are ordered, with numerically smaller levels
+// treated as less severe than numerically larger levels.
+type Severity int
+
+const (
+	// Default means the log entry has no assigned severity level.
+	Default = Severity(logtypepb.LogSeverity_DEFAULT)
+	// Debug means debug or trace information.
+	Debug = Severity(logtypepb.LogSeverity_DEBUG)
+	// Info means routine information, such as ongoing status or performance.
+	Info = Severity(logtypepb.LogSeverity_INFO)
+	// Notice means normal but significant events, such as start up, shut down, or configuration.
+	Notice = Severity(logtypepb.LogSeverity_NOTICE)
+	// Warning means events that might cause problems.
+	Warning = Severity(logtypepb.LogSeverity_WARNING)
+	// Error means events that are likely to cause problems.
+	Error = Severity(logtypepb.LogSeverity_ERROR)
+	// Critical means events that cause more severe problems or brief outages.
+	Critical = Severity(logtypepb.LogSeverity_CRITICAL)
+	// Alert means a person must take an action immediately.
+	Alert = Severity(logtypepb.LogSeverity_ALERT)
+	// Emergency means one or more systems are unusable.
+	Emergency = Severity(logtypepb.LogSeverity_EMERGENCY)
+)
+
+var severityName = map[Severity]string{
+	Default:   "Default",
+	Debug:     "Debug",
+	Info:      "Info",
+	Notice:    "Notice",
+	Warning:   "Warning",
+	Error:     "Error",
+	Critical:  "Critical",
+	Alert:     "Alert",
+	Emergency: "Emergency",
+}
+
+// String converts a severity level to a string.
+func (v Severity) String() string {
+	// same as proto.EnumName
+	s, ok := severityName[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// ParseSeverity returns the Severity whose name equals s, ignoring case. It
+// returns Default if no Severity matches.
+func ParseSeverity(s string) Severity {
+	sl := strings.ToLower(s)
+	for sev, name := range severityName {
+		if strings.ToLower(name) == sl {
+			return sev
+		}
+	}
+	return Default
+}
+
+// Entry is a log entry.
+// See https://cloud.google.com/logging/docs/view/logs_index for more about entries.
+type Entry struct {
+	// Timestamp is the time of the entry. If zero, the current time is used.
+	Timestamp time.Time
+
+	// Severity is the entry's severity level.
+	// The zero value is Default.
+	Severity Severity
+
+	// Payload must be either a string or something that
+	// marshals via the encoding/json package to a JSON object
+	// (and not any other type of JSON value).
+	Payload interface{}
+
+	// Labels optionally specifies key/value labels for the log entry.
+	// The Logger.Log method takes ownership of this map. See Logger.CommonLabels
+	// for more about labels.
+	Labels map[string]string
+
+	// InsertID is a unique ID for the log entry. If you provide this field,
+	// the logging service considers other log entries in the same log with the
+	// same ID as duplicates which can be removed. If omitted, the logging
+	// service will generate a unique ID for this log entry. Note that because
+	// this client retries RPCs automatically, it is possible (though unlikely)
+	// that an Entry without an InsertID will be written more than once.
+	InsertID string
+
+	// HTTPRequest optionally specifies metadata about the HTTP request
+	// associated with this log entry, if applicable.
+	HTTPRequest *HTTPRequest
+
+	// Operation optionally provides information about an operation associated
+	// with the log entry, if applicable.
+	Operation *logpb.LogEntryOperation
+
+	// LogName is the full log name, in the form
+	// "projects/{ProjectID}/logs/{LogID}". It is set by the client when
+	// reading entries. It is an error to set it when writing entries.
+	LogName string
+
+	// Resource is the monitored resource associated with the entry. It is set
+	// by the client when reading entries. It is an error to set it when
+	// writing entries.
+	Resource *mrpb.MonitoredResource
+}
+
+// HTTPRequest contains an http.Request as well as additional
+// information about the request and its response.
+type HTTPRequest struct {
+	// Request is the http.Request passed to the handler.
+	Request *http.Request
+
+	// RequestSize is the size of the HTTP request message in bytes, including
+	// the request headers and the request body.
+	RequestSize int64
+
+	// Status is the response code indicating the status of the response.
+	// Examples: 200, 404.
+	Status int
+
+	// ResponseSize is the size of the HTTP response message sent back to the client, in bytes,
+	// including the response headers and the response body.
+	ResponseSize int64
+
+	// RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the
+	// HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329".
+	RemoteIP string
+
+	// CacheHit reports whether an entity was served from cache (with or without
+	// validation).
+	CacheHit bool
+
+	// CacheValidatedWithOriginServer reports whether the response was
+	// validated with the origin server before being served from cache. This
+	// field is only meaningful if CacheHit is true.
+	CacheValidatedWithOriginServer bool
+}
+
+func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
+	if r == nil {
+		return nil
+	}
+	if r.Request == nil {
+		panic("HTTPRequest must have a non-nil Request")
+	}
+	u := *r.Request.URL
+	u.Fragment = ""
+	return &logtypepb.HttpRequest{
+		RequestMethod:                  r.Request.Method,
+		RequestUrl:                     u.String(),
+		RequestSize:                    r.RequestSize,
+		Status:                         int32(r.Status),
+		ResponseSize:                   r.ResponseSize,
+		UserAgent:                      r.Request.UserAgent(),
+		RemoteIp:                       r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr?
+		Referer:                        r.Request.Referer(),
+		CacheHit:                       r.CacheHit,
+		CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer,
+	}
+}
+
+// toProtoStruct converts v, which must marshal into a JSON object,
+// into a Google Struct proto.
+func toProtoStruct(v interface{}) (*structpb.Struct, error) {
+	// v is a Go struct that supports JSON marshalling. We want a Struct
+	// protobuf. Some day we may have a more direct way to get there, but right
+	// now the only way is to marshal the Go struct to JSON, unmarshal into a
+	// map, and then build the Struct proto from the map.
+	jb, err := json.Marshal(v)
+	if err != nil {
+		return nil, fmt.Errorf("logging: json.Marshal: %v", err)
+	}
+	var m map[string]interface{}
+	err = json.Unmarshal(jb, &m)
+	if err != nil {
+		return nil, fmt.Errorf("logging: json.Unmarshal: %v", err)
+	}
+	return jsonMapToProtoStruct(m), nil
+}
+
+func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct {
+	fields := map[string]*structpb.Value{}
+	for k, v := range m {
+		fields[k] = jsonValueToStructValue(v)
+	}
+	return &structpb.Struct{Fields: fields}
+}
+
+func jsonValueToStructValue(v interface{}) *structpb.Value {
+	switch x := v.(type) {
+	case bool:
+		return &structpb.Value{Kind: &structpb.Value_BoolValue{x}}
+	case float64:
+		return &structpb.Value{Kind: &structpb.Value_NumberValue{x}}
+	case string:
+		return &structpb.Value{Kind: &structpb.Value_StringValue{x}}
+	case nil:
+		return &structpb.Value{Kind: &structpb.Value_NullValue{}}
+	case map[string]interface{}:
+		return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}}
+	case []interface{}:
+		var vals []*structpb.Value
+		for _, e := range x {
+			vals = append(vals, jsonValueToStructValue(e))
+		}
+		return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}}
+	default:
+		panic(fmt.Sprintf("bad type %T for JSON value", v))
+	}
+}
+
+// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow
+// and will block, it is intended primarily for debugging or critical errors.
+// Prefer Log for most uses.
+// TODO(jba): come up with a better name (LogNow?) or eliminate.
+func (l *Logger) LogSync(ctx context.Context, e Entry) error {
+	ent, err := toLogEntry(e)
+	if err != nil {
+		return err
+	}
+	_, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+		LogName:  l.logName,
+		Resource: l.commonResource,
+		Labels:   l.commonLabels,
+		Entries:  []*logpb.LogEntry{ent},
+	})
+	return err
+}
+
+// Log buffers the Entry for output to the logging service. It never blocks.
+func (l *Logger) Log(e Entry) {
+	ent, err := toLogEntry(e)
+	if err != nil {
+		l.error(err)
+		return
+	}
+	if err := l.bundler.Add(ent, proto.Size(ent)); err != nil {
+		l.error(err)
+	}
+}
+
+// Flush blocks until all currently buffered log entries are sent.
+func (l *Logger) Flush() {
+	l.bundler.Flush()
+}
+
+func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) {
+	req := &logpb.WriteLogEntriesRequest{
+		LogName:  l.logName,
+		Resource: l.commonResource,
+		Labels:   l.commonLabels,
+		Entries:  entries,
+	}
+	_, err := l.client.client.WriteLogEntries(ctx, req)
+	if err != nil {
+		l.error(err)
+	}
+}
+
+// error puts the error on the client's error channel
+// without blocking.
+func (l *Logger) error(err error) {
+	select {
+	case l.client.errc <- err:
+	default:
+	}
+}
+
+// StandardLogger returns a *log.Logger for the provided severity.
+//
+// This method is cheap. A single log.Logger is pre-allocated for each
+// severity level in each Logger. Callers may mutate the returned log.Logger
+// (for example by calling SetFlags or SetPrefix).
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
+
+func trunc32(i int) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+func toLogEntry(e Entry) (*logpb.LogEntry, error) {
+	if e.LogName != "" {
+		return nil, errors.New("logging: Entry.LogName should not be set when writing")
+	}
+	t := e.Timestamp
+	if t.IsZero() {
+		t = now()
+	}
+	ts, err := ptypes.TimestampProto(t)
+	if err != nil {
+		return nil, err
+	}
+	ent := &logpb.LogEntry{
+		Timestamp:   ts,
+		Severity:    logtypepb.LogSeverity(e.Severity),
+		InsertId:    e.InsertID,
+		HttpRequest: fromHTTPRequest(e.HTTPRequest),
+		Operation:   e.Operation,
+		Labels:      e.Labels,
+	}
+
+	switch p := e.Payload.(type) {
+	case string:
+		ent.Payload = &logpb.LogEntry_TextPayload{p}
+	default:
+		s, err := toProtoStruct(p)
+		if err != nil {
+			return nil, err
+		}
+		ent.Payload = &logpb.LogEntry_JsonPayload{s}
+	}
+	return ent, nil
+}
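
A minimal sketch of the buffered path implemented above: build a Logger with options, log a struct payload (converted to a JSON payload by toLogEntry and toProtoStruct), then flush. The client variable and all names are placeholders, not part of the vendored code:

	lg := client.Logger("my-app-log",
		logging.CommonLabels(map[string]string{"env": "prod"}),
		logging.EntryCountThreshold(100),
	)
	lg.Log(logging.Entry{
		Severity: logging.Error,
		Payload:  struct{ Op, Err string }{Op: "fetch", Err: "timeout"},
	})
	lg.Flush()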
diff --git a/vendor/github.com/bradfitz/gomemcache/LICENSE b/vendor/github.com/bradfitz/gomemcache/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
new file mode 100644
index 0000000..7b5442d
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -0,0 +1,666 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package memcache provides a client for the memcached cache server.
+package memcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Similar to:
+// http://code.google.com/appengine/docs/go/memcache/reference.html
+
+var (
+	// ErrCacheMiss means that a Get failed because the item wasn't present.
+	ErrCacheMiss = errors.New("memcache: cache miss")
+
+	// ErrCASConflict means that a CompareAndSwap call failed due to the
+	// cached value being modified between the Get and the CompareAndSwap.
+	// If the cached value was simply evicted rather than replaced,
+	// ErrNotStored will be returned instead.
+	ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+
+	// ErrNotStored means that a conditional write operation (i.e. Add or
+	// CompareAndSwap) failed because the condition was not satisfied.
+	ErrNotStored = errors.New("memcache: item not stored")
+
+	// ErrServerError means that a server error occurred.
+	ErrServerError = errors.New("memcache: server error")
+
+	// ErrNoStats means that no statistics were available.
+	ErrNoStats = errors.New("memcache: no statistics available")
+
+	// ErrMalformedKey is returned when an invalid key is used.
+	// Keys must be at maximum 250 bytes long, ASCII, and not
+	// contain whitespace or control characters.
+	ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
+
+	// ErrNoServers is returned when no servers are configured or available.
+	ErrNoServers = errors.New("memcache: no servers configured or available")
+)
+
+// DefaultTimeout is the default socket read/write timeout.
+const DefaultTimeout = 100 * time.Millisecond
+
+const (
+	buffered            = 8 // arbitrary buffered channel size, for readability
+	maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable?
+)
+
+// resumableError returns true if err is only a protocol-level cache error.
+// This is used to determine whether a server connection should be
+// re-used. If an error occurs, by default we don't reuse the
+// connection, unless it was just a cache error.
+func resumableError(err error) bool {
+	switch err {
+	case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
+		return true
+	}
+	return false
+}
+
+func legalKey(key string) bool {
+	if len(key) > 250 {
+		return false
+	}
+	for i := 0; i < len(key); i++ {
+		if key[i] <= ' ' || key[i] > 0x7e {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	crlf            = []byte("\r\n")
+	space           = []byte(" ")
+	resultOK        = []byte("OK\r\n")
+	resultStored    = []byte("STORED\r\n")
+	resultNotStored = []byte("NOT_STORED\r\n")
+	resultExists    = []byte("EXISTS\r\n")
+	resultNotFound  = []byte("NOT_FOUND\r\n")
+	resultDeleted   = []byte("DELETED\r\n")
+	resultEnd       = []byte("END\r\n")
+	resultOk        = []byte("OK\r\n")
+	resultTouched   = []byte("TOUCHED\r\n")
+
+	resultClientErrorPrefix = []byte("CLIENT_ERROR ")
+)
+
+// New returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
+func New(server ...string) *Client {
+	ss := new(ServerList)
+	ss.SetServers(server...)
+	return NewFromSelector(ss)
+}
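+
+// A minimal caller-side sketch of the API above, assuming a memcached server
+// reachable at localhost:11211 (a placeholder address):
+//
+//  c := memcache.New("localhost:11211")
+//  err := c.Set(&memcache.Item{Key: "greeting", Value: []byte("hello")})
+//  if err != nil {
+//      // handle error
+//  }
+//  it, err := c.Get("greeting")
+//  if err != nil {
+//      // ErrCacheMiss means the key was not present
+//  } else {
+//      fmt.Printf("%s\n", it.Value)
+//  }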
+
+// NewFromSelector returns a new Client using the provided ServerSelector.
+func NewFromSelector(ss ServerSelector) *Client {
+	return &Client{selector: ss}
+}
+
+// Client is a memcache client.
+// It is safe for unlocked use by multiple concurrent goroutines.
+type Client struct {
+	// Timeout specifies the socket read/write timeout.
+	// If zero, DefaultTimeout is used.
+	Timeout time.Duration
+
+	selector ServerSelector
+
+	lk       sync.Mutex
+	freeconn map[string][]*conn
+}
+
+// Item is an item to be retrieved from or stored in a memcached server.
+type Item struct {
+	// Key is the Item's key (250 bytes maximum).
+	Key string
+
+	// Value is the Item's value.
+	Value []byte
+
+	// Flags are server-opaque flags whose semantics are entirely
+	// up to the app.
+	Flags uint32
+
+	// Expiration is the cache expiration time, in seconds: either a relative
+	// time from now (up to 1 month), or an absolute Unix epoch time.
+	// Zero means the Item has no expiration time.
+	Expiration int32
+
+	// Compare and swap ID.
+	casid uint64
+}
+
+// conn is a connection to a server.
+type conn struct {
+	nc   net.Conn
+	rw   *bufio.ReadWriter
+	addr net.Addr
+	c    *Client
+}
+
+// release returns this connection to the client's free pool.
+func (cn *conn) release() {
+	cn.c.putFreeConn(cn.addr, cn)
+}
+
+func (cn *conn) extendDeadline() {
+	cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
+}
+
+// condRelease releases this connection if the error pointed to by err
+// is nil (not an error) or is only a protocol level error (e.g. a
+// cache miss).  The purpose is to not recycle TCP connections that
+// are bad.
+func (cn *conn) condRelease(err *error) {
+	if *err == nil || resumableError(*err) {
+		cn.release()
+	} else {
+		cn.nc.Close()
+	}
+}
+
+func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
+	c.lk.Lock()
+	defer c.lk.Unlock()
+	if c.freeconn == nil {
+		c.freeconn = make(map[string][]*conn)
+	}
+	freelist := c.freeconn[addr.String()]
+	if len(freelist) >= maxIdleConnsPerAddr {
+		cn.nc.Close()
+		return
+	}
+	c.freeconn[addr.String()] = append(freelist, cn)
+}
+
+func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
+	c.lk.Lock()
+	defer c.lk.Unlock()
+	if c.freeconn == nil {
+		return nil, false
+	}
+	freelist, ok := c.freeconn[addr.String()]
+	if !ok || len(freelist) == 0 {
+		return nil, false
+	}
+	cn = freelist[len(freelist)-1]
+	c.freeconn[addr.String()] = freelist[:len(freelist)-1]
+	return cn, true
+}
+
+func (c *Client) netTimeout() time.Duration {
+	if c.Timeout != 0 {
+		return c.Timeout
+	}
+	return DefaultTimeout
+}
+
+// ConnectTimeoutError is the error type used when it takes
+// too long to connect to the desired host. This level of
+// detail can generally be ignored.
+type ConnectTimeoutError struct {
+	Addr net.Addr
+}
+
+func (cte *ConnectTimeoutError) Error() string {
+	return "memcache: connect timeout to " + cte.Addr.String()
+}
+
+func (c *Client) dial(addr net.Addr) (net.Conn, error) {
+	type connError struct {
+		cn  net.Conn
+		err error
+	}
+
+	nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
+	if err == nil {
+		return nc, nil
+	}
+
+	if ne, ok := err.(net.Error); ok && ne.Timeout() {
+		return nil, &ConnectTimeoutError{addr}
+	}
+
+	return nil, err
+}
+
+func (c *Client) getConn(addr net.Addr) (*conn, error) {
+	cn, ok := c.getFreeConn(addr)
+	if ok {
+		cn.extendDeadline()
+		return cn, nil
+	}
+	nc, err := c.dial(addr)
+	if err != nil {
+		return nil, err
+	}
+	cn = &conn{
+		nc:   nc,
+		addr: addr,
+		rw:   bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
+		c:    c,
+	}
+	cn.extendDeadline()
+	return cn, nil
+}
+
+func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
+	addr, err := c.selector.PickServer(item.Key)
+	if err != nil {
+		return err
+	}
+	cn, err := c.getConn(addr)
+	if err != nil {
+		return err
+	}
+	defer cn.condRelease(&err)
+	if err = fn(c, cn.rw, item); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) FlushAll() error {
+	return c.selector.Each(c.flushAllFromAddr)
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
+func (c *Client) Get(key string) (item *Item, err error) {
+	err = c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
+	})
+	if err == nil && item == nil {
+		err = ErrCacheMiss
+	}
+	return
+}
+
+// Touch updates the expiry for the given key. The seconds parameter is either
+// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+// into the future at which time the item will expire. ErrCacheMiss is returned if the
+// key is not in the cache. The key must be at most 250 bytes in length.
+func (c *Client) Touch(key string, seconds int32) (err error) {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.touchFromAddr(addr, []string{key}, seconds)
+	})
+}
+
+func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
+	if !legalKey(key) {
+		return ErrMalformedKey
+	}
+	addr, err := c.selector.PickServer(key)
+	if err != nil {
+		return err
+	}
+	return fn(addr)
+}
+
+func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+	cn, err := c.getConn(addr)
+	if err != nil {
+		return err
+	}
+	defer cn.condRelease(&err)
+	return fn(cn.rw)
+}
+
+func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.withAddrRw(addr, fn)
+	})
+}
+
+func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		if err := parseGetResponse(rw.Reader, cb); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// flushAllFromAddr sends the flush_all command to the given addr.
+func (c *Client) flushAllFromAddr(addr net.Addr) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		line, err := rw.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultOk):
+			break
+		default:
+			return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
+		}
+		return nil
+	})
+}
+
+func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		for _, key := range keys {
+			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
+				return err
+			}
+			if err := rw.Flush(); err != nil {
+				return err
+			}
+			line, err := rw.ReadSlice('\n')
+			if err != nil {
+				return err
+			}
+			switch {
+			case bytes.Equal(line, resultTouched):
+				break
+			case bytes.Equal(line, resultNotFound):
+				return ErrCacheMiss
+			default:
+				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
+			}
+		}
+		return nil
+	})
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to
+// items may have fewer elements than the input slice, due to memcache
+// cache misses. Each key must be at most 250 bytes in length.
+// If no error is returned, the returned map will also be non-nil.
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
+	var lk sync.Mutex
+	m := make(map[string]*Item)
+	addItemToMap := func(it *Item) {
+		lk.Lock()
+		defer lk.Unlock()
+		m[it.Key] = it
+	}
+
+	keyMap := make(map[net.Addr][]string)
+	for _, key := range keys {
+		if !legalKey(key) {
+			return nil, ErrMalformedKey
+		}
+		addr, err := c.selector.PickServer(key)
+		if err != nil {
+			return nil, err
+		}
+		keyMap[addr] = append(keyMap[addr], key)
+	}
+
+	ch := make(chan error, buffered)
+	for addr, keys := range keyMap {
+		go func(addr net.Addr, keys []string) {
+			ch <- c.getFromAddr(addr, keys, addItemToMap)
+		}(addr, keys)
+	}
+
+	var err error
+	for _ = range keyMap {
+		if ge := <-ch; ge != nil {
+			err = ge
+		}
+	}
+	return m, err
+}
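+
+// A caller-side sketch of a batched read; the key names are placeholders:
+//
+//  items, err := c.GetMulti([]string{"alpha", "beta", "gamma"})
+//  if err != nil {
+//      // handle error
+//  }
+//  // Keys that missed are simply absent from the returned map.
+//  for key, it := range items {
+//      fmt.Printf("%s = %s\n", key, it.Value)
+//  }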
+
+// parseGetResponse reads a GET response from r and calls cb for each
+// read and allocated Item
+func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+	for {
+		line, err := r.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		if bytes.Equal(line, resultEnd) {
+			return nil
+		}
+		it := new(Item)
+		size, err := scanGetResponseLine(line, it)
+		if err != nil {
+			return err
+		}
+		it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
+		if err != nil {
+			return err
+		}
+		if !bytes.HasSuffix(it.Value, crlf) {
+			return fmt.Errorf("memcache: corrupt get result read")
+		}
+		it.Value = it.Value[:size]
+		cb(it)
+	}
+}
+
+// scanGetResponseLine populates it and returns the declared size of the item.
+// It does not read the bytes of the item.
+func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
+	pattern := "VALUE %s %d %d %d\r\n"
+	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
+	if bytes.Count(line, space) == 3 {
+		pattern = "VALUE %s %d %d\r\n"
+		dest = dest[:3]
+	}
+	n, err := fmt.Sscanf(string(line), pattern, dest...)
+	if err != nil || n != len(dest) {
+		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
+	}
+	return size, nil
+}
+
+// Set writes the given item, unconditionally.
+func (c *Client) Set(item *Item) error {
+	return c.onItem(item, (*Client).set)
+}
+
+func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "set", item)
+}
+
+// Add writes the given item, if no value already exists for its
+// key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Add(item *Item) error {
+	return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key
+func (c *Client) Replace(item *Item) error {
+	return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified nor evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
+func (c *Client) CompareAndSwap(item *Item) error {
+	return c.onItem(item, (*Client).cas)
+}
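+
+// A sketch of the optimistic Get/modify/CompareAndSwap cycle described above;
+// the key name and the reaction to each error are illustrative only:
+//
+//  it, err := c.Get("profile:42")
+//  if err != nil {
+//      // handle error (ErrCacheMiss if the key is absent)
+//  }
+//  it.Value = append(it.Value, '!')
+//  switch c.CompareAndSwap(it) {
+//  case nil:
+//      // stored
+//  case memcache.ErrCASConflict:
+//      // another writer got there first; re-Get and retry
+//  case memcache.ErrNotStored:
+//      // the item was evicted between the Get and the CompareAndSwap
+//  default:
+//      // handle other errors
+//  }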
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+	if !legalKey(item.Key) {
+		return ErrMalformedKey
+	}
+	var err error
+	if verb == "cas" {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+	} else {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+	}
+	if err != nil {
+		return err
+	}
+	if _, err = rw.Write(item.Value); err != nil {
+		return err
+	}
+	if _, err := rw.Write(crlf); err != nil {
+		return err
+	}
+	if err := rw.Flush(); err != nil {
+		return err
+	}
+	line, err := rw.ReadSlice('\n')
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultStored):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+	_, err := fmt.Fprintf(rw, format, args...)
+	if err != nil {
+		return nil, err
+	}
+	if err := rw.Flush(); err != nil {
+		return nil, err
+	}
+	line, err := rw.ReadSlice('\n')
+	return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+	line, err := writeReadLine(rw, format, args...)
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultOK):
+		return nil
+	case bytes.Equal(line, expect):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+	})
+}
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+	})
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("decr", key, delta)
+}
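+
+// A sketch of the counter helpers above. The key must already hold an ASCII
+// decimal value, so it is seeded with Set first; the key name is a placeholder:
+//
+//  err := c.Set(&memcache.Item{Key: "hits", Value: []byte("0")})
+//  if err != nil {
+//      // handle error
+//  }
+//  n, err := c.Increment("hits", 1)
+//  if err != nil {
+//      // ErrCacheMiss if the key is absent
+//  }
+//  fmt.Println(n) // 1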
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+	var val uint64
+	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultNotFound):
+			return ErrCacheMiss
+		case bytes.HasPrefix(line, resultClientErrorPrefix):
+			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+			return errors.New("memcache: client error: " + string(errMsg))
+		}
+		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return val, err
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
new file mode 100644
index 0000000..10b04d3
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package memcache
+
+import (
+	"hash/crc32"
+	"net"
+	"strings"
+	"sync"
+)
+
+// ServerSelector is the interface that selects a memcache server
+// as a function of the item's key.
+//
+// All ServerSelector implementations must be safe for concurrent use
+// by multiple goroutines.
+type ServerSelector interface {
+	// PickServer returns the server address that a given item
+	// should be sharded onto.
+	PickServer(key string) (net.Addr, error)
+	Each(func(net.Addr) error) error
+}
+
+// ServerList is a simple ServerSelector. Its zero value is usable.
+type ServerList struct {
+	mu    sync.RWMutex
+	addrs []net.Addr
+}
+
+// SetServers changes a ServerList's set of servers at runtime and is
+// safe for concurrent use by multiple goroutines.
+//
+// Each server is given equal weight. A server is given more weight
+// if it's listed multiple times.
+//
+// SetServers returns an error if any of the server names fail to
+// resolve. No attempt is made to connect to the server. If any error
+// is returned, no changes are made to the ServerList.
+func (ss *ServerList) SetServers(servers ...string) error {
+	naddr := make([]net.Addr, len(servers))
+	for i, server := range servers {
+		if strings.Contains(server, "/") {
+			addr, err := net.ResolveUnixAddr("unix", server)
+			if err != nil {
+				return err
+			}
+			naddr[i] = addr
+		} else {
+			tcpaddr, err := net.ResolveTCPAddr("tcp", server)
+			if err != nil {
+				return err
+			}
+			naddr[i] = tcpaddr
+		}
+	}
+
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	ss.addrs = naddr
+	return nil
+}
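+
+// A sketch of wiring a ServerList into a client by hand, equivalent to what
+// New does internally; the addresses are placeholders:
+//
+//  var ss memcache.ServerList
+//  if err := ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211"); err != nil {
+//      // handle resolution error; the server list is left unchanged
+//  }
+//  c := memcache.NewFromSelector(&ss)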
+
+// Each iterates over each server, calling the given function.
+func (ss *ServerList) Each(f func(net.Addr) error) error {
+	ss.mu.RLock()
+	defer ss.mu.RUnlock()
+	for _, a := range ss.addrs {
+		if err := f(a); nil != err {
+			return err
+		}
+	}
+	return nil
+}
+
+// keyBufPool returns []byte buffers for use by PickServer's call to
+// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
+// copies, which at least are bounded in size and small)
+var keyBufPool = sync.Pool{
+	New: func() interface{} {
+		b := make([]byte, 256)
+		return &b
+	},
+}
+
+func (ss *ServerList) PickServer(key string) (net.Addr, error) {
+	ss.mu.RLock()
+	defer ss.mu.RUnlock()
+	if len(ss.addrs) == 0 {
+		return nil, ErrNoServers
+	}
+	if len(ss.addrs) == 1 {
+		return ss.addrs[0], nil
+	}
+	bufp := keyBufPool.Get().(*[]byte)
+	n := copy(*bufp, key)
+	cs := crc32.ChecksumIEEE((*bufp)[:n])
+	keyBufPool.Put(bufp)
+
+	return ss.addrs[cs%uint32(len(ss.addrs))], nil
+}
diff --git a/vendor/github.com/garyburd/redigo/LICENSE b/vendor/github.com/garyburd/redigo/LICENSE
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/garyburd/redigo/internal/commandinfo.go
new file mode 100644
index 0000000..dbc60fc
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/internal/commandinfo.go
@@ -0,0 +1,54 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package internal
+
+import (
+	"strings"
+)
+
+const (
+	WatchState = 1 << iota
+	MultiState
+	SubscribeState
+	MonitorState
+)
+
+type CommandInfo struct {
+	Set, Clear int
+}
+
+var commandInfos = map[string]CommandInfo{
+	"WATCH":      {Set: WatchState},
+	"UNWATCH":    {Clear: WatchState},
+	"MULTI":      {Set: MultiState},
+	"EXEC":       {Clear: WatchState | MultiState},
+	"DISCARD":    {Clear: WatchState | MultiState},
+	"PSUBSCRIBE": {Set: SubscribeState},
+	"SUBSCRIBE":  {Set: SubscribeState},
+	"MONITOR":    {Set: MonitorState},
+}
+
+func init() {
+	for n, ci := range commandInfos {
+		commandInfos[strings.ToLower(n)] = ci
+	}
+}
+
+func LookupCommandInfo(commandName string) CommandInfo {
+	if ci, ok := commandInfos[commandName]; ok {
+		return ci
+	}
+	return commandInfos[strings.ToUpper(commandName)]
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/garyburd/redigo/redis/conn.go
new file mode 100644
index 0000000..ed358c6
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/conn.go
@@ -0,0 +1,570 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"regexp"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// conn is the low-level implementation of Conn
+type conn struct {
+
+	// Shared
+	mu      sync.Mutex
+	pending int
+	err     error
+	conn    net.Conn
+
+	// Read
+	readTimeout time.Duration
+	br          *bufio.Reader
+
+	// Write
+	writeTimeout time.Duration
+	bw           *bufio.Writer
+
+	// Scratch space for formatting argument length.
+	// '*' or '$', length, "\r\n"
+	lenScratch [32]byte
+
+	// Scratch space for formatting integers and floats.
+	numScratch [40]byte
+}
+
+// DialTimeout acts like Dial but takes timeouts for establishing the
+// connection to the server, writing a command and reading a reply.
+//
+// Deprecated: Use Dial with options instead.
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
+	return Dial(network, address,
+		DialConnectTimeout(connectTimeout),
+		DialReadTimeout(readTimeout),
+		DialWriteTimeout(writeTimeout))
+}
+
+// DialOption specifies an option for dialing a Redis server.
+type DialOption struct {
+	f func(*dialOptions)
+}
+
+type dialOptions struct {
+	readTimeout  time.Duration
+	writeTimeout time.Duration
+	dial         func(network, addr string) (net.Conn, error)
+	db           int
+	password     string
+}
+
+// DialReadTimeout specifies the timeout for reading a single command reply.
+func DialReadTimeout(d time.Duration) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.readTimeout = d
+	}}
+}
+
+// DialWriteTimeout specifies the timeout for writing a single command.
+func DialWriteTimeout(d time.Duration) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.writeTimeout = d
+	}}
+}
+
+// DialConnectTimeout specifies the timeout for connecting to the Redis server.
+func DialConnectTimeout(d time.Duration) DialOption {
+	return DialOption{func(do *dialOptions) {
+		dialer := net.Dialer{Timeout: d}
+		do.dial = dialer.Dial
+	}}
+}
+
+// DialNetDial specifies a custom dial function for creating TCP
+// connections. If this option is left out, then net.Dial is
+// used. DialNetDial overrides DialConnectTimeout.
+func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.dial = dial
+	}}
+}
+
+// DialDatabase specifies the database to select when dialing a connection.
+func DialDatabase(db int) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.db = db
+	}}
+}
+
+// DialPassword specifies the password to use when connecting to
+// the Redis server.
+func DialPassword(password string) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.password = password
+	}}
+}
+
+// Dial connects to the Redis server at the given network and
+// address using the specified options.
+func Dial(network, address string, options ...DialOption) (Conn, error) {
+	do := dialOptions{
+		dial: net.Dial,
+	}
+	for _, option := range options {
+		option.f(&do)
+	}
+
+	netConn, err := do.dial(network, address)
+	if err != nil {
+		return nil, err
+	}
+	c := &conn{
+		conn:         netConn,
+		bw:           bufio.NewWriter(netConn),
+		br:           bufio.NewReader(netConn),
+		readTimeout:  do.readTimeout,
+		writeTimeout: do.writeTimeout,
+	}
+
+	if do.password != "" {
+		if _, err := c.Do("AUTH", do.password); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+	}
+
+	if do.db != 0 {
+		if _, err := c.Do("SELECT", do.db); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
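+
+// A caller-side sketch of Dial with options; the address, timeouts and
+// database number are placeholders:
+//
+//  c, err := redis.Dial("tcp", "localhost:6379",
+//      redis.DialConnectTimeout(5*time.Second),
+//      redis.DialReadTimeout(time.Second),
+//      redis.DialWriteTimeout(time.Second),
+//      redis.DialDatabase(1))
+//  if err != nil {
+//      // handle error
+//  }
+//  defer c.Close()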
+
+var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
+func DialURL(rawurl string, options ...DialOption) (Conn, error) {
+	u, err := url.Parse(rawurl)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Scheme != "redis" {
+		return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
+	}
+
+	// As per the IANA draft spec, the host defaults to localhost and
+	// the port defaults to 6379.
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		// assume port is missing
+		host = u.Host
+		port = "6379"
+	}
+	if host == "" {
+		host = "localhost"
+	}
+	address := net.JoinHostPort(host, port)
+
+	if u.User != nil {
+		password, isSet := u.User.Password()
+		if isSet {
+			options = append(options, DialPassword(password))
+		}
+	}
+
+	match := pathDBRegexp.FindStringSubmatch(u.Path)
+	if len(match) == 2 {
+		db := 0
+		if len(match[1]) > 0 {
+			db, err = strconv.Atoi(match[1])
+			if err != nil {
+				return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+			}
+		}
+		if db != 0 {
+			options = append(options, DialDatabase(db))
+		}
+	} else if u.Path != "" {
+		return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+	}
+
+	return Dial("tcp", address, options...)
+}
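+
+// A sketch of the URL form handled above; credentials, host and database
+// index are placeholders. The password is taken from the userinfo section
+// and the trailing path segment selects the database:
+//
+//  c, err := redis.DialURL("redis://user:secret@localhost:6379/2")
+//  if err != nil {
+//      // handle error
+//  }
+//  defer c.Close()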
+
+// NewConn returns a new Redigo connection for the given net connection.
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
+	return &conn{
+		conn:         netConn,
+		bw:           bufio.NewWriter(netConn),
+		br:           bufio.NewReader(netConn),
+		readTimeout:  readTimeout,
+		writeTimeout: writeTimeout,
+	}
+}
+
+func (c *conn) Close() error {
+	c.mu.Lock()
+	err := c.err
+	if c.err == nil {
+		c.err = errors.New("redigo: closed")
+		err = c.conn.Close()
+	}
+	c.mu.Unlock()
+	return err
+}
+
+func (c *conn) fatal(err error) error {
+	c.mu.Lock()
+	if c.err == nil {
+		c.err = err
+		// Close connection to force errors on subsequent calls and to unblock
+		// other reader or writer.
+		c.conn.Close()
+	}
+	c.mu.Unlock()
+	return err
+}
+
+func (c *conn) Err() error {
+	c.mu.Lock()
+	err := c.err
+	c.mu.Unlock()
+	return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+	c.lenScratch[len(c.lenScratch)-1] = '\n'
+	c.lenScratch[len(c.lenScratch)-2] = '\r'
+	i := len(c.lenScratch) - 3
+	for {
+		c.lenScratch[i] = byte('0' + n%10)
+		i -= 1
+		n = n / 10
+		if n == 0 {
+			break
+		}
+	}
+	c.lenScratch[i] = prefix
+	_, err := c.bw.Write(c.lenScratch[i:])
+	return err
+}
+
+func (c *conn) writeString(s string) error {
+	c.writeLen('$', len(s))
+	c.bw.WriteString(s)
+	_, err := c.bw.WriteString("\r\n")
+	return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+	c.writeLen('$', len(p))
+	c.bw.Write(p)
+	_, err := c.bw.WriteString("\r\n")
+	return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+	return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+	return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
+	c.writeLen('*', 1+len(args))
+	err = c.writeString(cmd)
+	for _, arg := range args {
+		if err != nil {
+			break
+		}
+		switch arg := arg.(type) {
+		case string:
+			err = c.writeString(arg)
+		case []byte:
+			err = c.writeBytes(arg)
+		case int:
+			err = c.writeInt64(int64(arg))
+		case int64:
+			err = c.writeInt64(arg)
+		case float64:
+			err = c.writeFloat64(arg)
+		case bool:
+			if arg {
+				err = c.writeString("1")
+			} else {
+				err = c.writeString("0")
+			}
+		case nil:
+			err = c.writeString("")
+		default:
+			var buf bytes.Buffer
+			fmt.Fprint(&buf, arg)
+			err = c.writeBytes(buf.Bytes())
+		}
+	}
+	return err
+}
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+	return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+func (c *conn) readLine() ([]byte, error) {
+	p, err := c.br.ReadSlice('\n')
+	if err == bufio.ErrBufferFull {
+		return nil, protocolError("long response line")
+	}
+	if err != nil {
+		return nil, err
+	}
+	i := len(p) - 2
+	if i < 0 || p[i] != '\r' {
+		return nil, protocolError("bad response line terminator")
+	}
+	return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+	if len(p) == 0 {
+		return -1, protocolError("malformed length")
+	}
+
+	if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+		// handle $-1 and *-1 null replies.
+		return -1, nil
+	}
+
+	var n int
+	for _, b := range p {
+		n *= 10
+		if b < '0' || b > '9' {
+			return -1, protocolError("illegal bytes in length")
+		}
+		n += int(b - '0')
+	}
+
+	return n, nil
+}
+
+// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) {
+	if len(p) == 0 {
+		return 0, protocolError("malformed integer")
+	}
+
+	var negate bool
+	if p[0] == '-' {
+		negate = true
+		p = p[1:]
+		if len(p) == 0 {
+			return 0, protocolError("malformed integer")
+		}
+	}
+
+	var n int64
+	for _, b := range p {
+		n *= 10
+		if b < '0' || b > '9' {
+			return 0, protocolError("illegal bytes in length")
+		}
+		n += int64(b - '0')
+	}
+
+	if negate {
+		n = -n
+	}
+	return n, nil
+}
+
+var (
+	okReply   interface{} = "OK"
+	pongReply interface{} = "PONG"
+)
+
+func (c *conn) readReply() (interface{}, error) {
+	line, err := c.readLine()
+	if err != nil {
+		return nil, err
+	}
+	if len(line) == 0 {
+		return nil, protocolError("short response line")
+	}
+	switch line[0] {
+	case '+':
+		switch {
+		case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
+			// Avoid allocation for frequent "+OK" response.
+			return okReply, nil
+		case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
+			// Avoid allocation in PING command benchmarks :)
+			return pongReply, nil
+		default:
+			return string(line[1:]), nil
+		}
+	case '-':
+		return Error(string(line[1:])), nil
+	case ':':
+		return parseInt(line[1:])
+	case '$':
+		n, err := parseLen(line[1:])
+		if n < 0 || err != nil {
+			return nil, err
+		}
+		p := make([]byte, n)
+		_, err = io.ReadFull(c.br, p)
+		if err != nil {
+			return nil, err
+		}
+		if line, err := c.readLine(); err != nil {
+			return nil, err
+		} else if len(line) != 0 {
+			return nil, protocolError("bad bulk string format")
+		}
+		return p, nil
+	case '*':
+		n, err := parseLen(line[1:])
+		if n < 0 || err != nil {
+			return nil, err
+		}
+		r := make([]interface{}, n)
+		for i := range r {
+			r[i], err = c.readReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		return r, nil
+	}
+	return nil, protocolError("unexpected response line")
+}
+
+func (c *conn) Send(cmd string, args ...interface{}) error {
+	c.mu.Lock()
+	c.pending += 1
+	c.mu.Unlock()
+	if c.writeTimeout != 0 {
+		c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+	}
+	if err := c.writeCommand(cmd, args); err != nil {
+		return c.fatal(err)
+	}
+	return nil
+}
+
+func (c *conn) Flush() error {
+	if c.writeTimeout != 0 {
+		c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+	}
+	if err := c.bw.Flush(); err != nil {
+		return c.fatal(err)
+	}
+	return nil
+}
+
+func (c *conn) Receive() (reply interface{}, err error) {
+	if c.readTimeout != 0 {
+		c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
+	}
+	if reply, err = c.readReply(); err != nil {
+		return nil, c.fatal(err)
+	}
+	// When using pub/sub, the number of receives can be greater than the
+	// number of sends. To enable normal use of the connection after
+	// unsubscribing from all channels, we do not decrement pending to a
+	// negative value.
+	//
+	// The pending field is decremented after the reply is read to handle the
+	// case where Receive is called before Send.
+	c.mu.Lock()
+	if c.pending > 0 {
+		c.pending -= 1
+	}
+	c.mu.Unlock()
+	if err, ok := reply.(Error); ok {
+		return nil, err
+	}
+	return
+}
+
+func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
+	c.mu.Lock()
+	pending := c.pending
+	c.pending = 0
+	c.mu.Unlock()
+
+	if cmd == "" && pending == 0 {
+		return nil, nil
+	}
+
+	if c.writeTimeout != 0 {
+		c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+	}
+
+	if cmd != "" {
+		if err := c.writeCommand(cmd, args); err != nil {
+			return nil, c.fatal(err)
+		}
+	}
+
+	if err := c.bw.Flush(); err != nil {
+		return nil, c.fatal(err)
+	}
+
+	if c.readTimeout != 0 {
+		c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
+	}
+
+	if cmd == "" {
+		reply := make([]interface{}, pending)
+		for i := range reply {
+			r, e := c.readReply()
+			if e != nil {
+				return nil, c.fatal(e)
+			}
+			reply[i] = r
+		}
+		return reply, nil
+	}
+
+	var err error
+	var reply interface{}
+	for i := 0; i <= pending; i++ {
+		var e error
+		if reply, e = c.readReply(); e != nil {
+			return nil, c.fatal(e)
+		}
+		if e, ok := reply.(Error); ok && err == nil {
+			err = e
+		}
+	}
+	return reply, err
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 0000000..e6fecca
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,168 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+//  Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+//  n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to binary strings for transmission
+// to the server as follows:
+//
+//  Go Type                 Conversion
+//  []byte                  Sent as is
+//  string                  Sent as is
+//  int, int64              strconv.FormatInt(v)
+//  float64                 strconv.FormatFloat(v, 'g', -1, 64)
+//  bool                    true -> "1", false -> "0"
+//  nil                     ""
+//  all other types         fmt.Print(v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+//  Redis type              Go type
+//  error                   redis.Error
+//  integer                 int64
+//  simple string           string
+//  bulk string             []byte or nil if value not present.
+//  array                   []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+//  Send(commandName string, args ...interface{}) error
+//  Flush() error
+//  Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+//  c.Send("SET", "foo", "bar")
+//  c.Send("GET", "foo")
+//  c.Flush()
+//  c.Receive() // reply from SET
+//  v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+//  c.Send("MULTI")
+//  c.Send("INCR", "foo")
+//  c.Send("INCR", "bar")
+//  r, err := c.Do("EXEC")
+//  fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+//  c.Send("SUBSCRIBE", "example")
+//  c.Flush()
+//  for {
+//      reply, err := c.Receive()
+//      if err != nil {
+//          return err
+//      }
+//      // process pushed message
+//  }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+//  psc := redis.PubSubConn{c}
+//  psc.Subscribe("example")
+//  for {
+//      switch v := psc.Receive().(type) {
+//      case redis.Message:
+//          fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+//      case redis.Subscription:
+//          fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+//      case error:
+//          return v
+//      }
+//  }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error.  If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+//  exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+//  if err != nil {
+//      // handle error return from c.Do or type conversion error.
+//  }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+//  var value1 int
+//  var value2 string
+//  reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+//  if err != nil {
+//      // handle error
+//  }
+//  if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+//      // handle error
+//  }
+package redis
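
The package documentation above already shows the individual pieces; as a quick orientation, here is a minimal, hedged sketch that ties Dial, Do and a reply helper together. The server address and key name are illustrative, not part of the vendored code.

	package main

	import (
		"fmt"
		"log"

		"github.com/garyburd/redigo/redis"
	)

	func main() {
		// Dial a connection and close it when done.
		c, err := redis.Dial("tcp", "localhost:6379") // illustrative address
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// Execute a command with Do and convert the reply with a helper.
		if _, err := c.Do("SET", "greeting", "hello"); err != nil {
			log.Fatal(err)
		}
		s, err := redis.String(c.Do("GET", "greeting"))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(s) // prints "hello"
	}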
diff --git a/vendor/github.com/garyburd/redigo/redis/log.go b/vendor/github.com/garyburd/redigo/redis/log.go
new file mode 100644
index 0000000..129b86d
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/log.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+	if prefix != "" {
+		prefix = prefix + "."
+	}
+	return &loggingConn{conn, logger, prefix}
+}
+
+type loggingConn struct {
+	Conn
+	logger *log.Logger
+	prefix string
+}
+
+func (c *loggingConn) Close() error {
+	err := c.Conn.Close()
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
+	c.logger.Output(2, buf.String())
+	return err
+}
+
+func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
+	const chop = 32
+	switch v := v.(type) {
+	case []byte:
+		if len(v) > chop {
+			fmt.Fprintf(buf, "%q...", v[:chop])
+		} else {
+			fmt.Fprintf(buf, "%q", v)
+		}
+	case string:
+		if len(v) > chop {
+			fmt.Fprintf(buf, "%q...", v[:chop])
+		} else {
+			fmt.Fprintf(buf, "%q", v)
+		}
+	case []interface{}:
+		if len(v) == 0 {
+			buf.WriteString("[]")
+		} else {
+			sep := "["
+			fin := "]"
+			if len(v) > chop {
+				v = v[:chop]
+				fin = "...]"
+			}
+			for _, vv := range v {
+				buf.WriteString(sep)
+				c.printValue(buf, vv)
+				sep = ", "
+			}
+			buf.WriteString(fin)
+		}
+	default:
+		fmt.Fprint(buf, v)
+	}
+}
+
+func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
+	if method != "Receive" {
+		buf.WriteString(commandName)
+		for _, arg := range args {
+			buf.WriteString(", ")
+			c.printValue(&buf, arg)
+		}
+	}
+	buf.WriteString(") -> (")
+	if method != "Send" {
+		c.printValue(&buf, reply)
+		buf.WriteString(", ")
+	}
+	fmt.Fprintf(&buf, "%v)", err)
+	c.logger.Output(3, buf.String())
+}
+
+func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
+	reply, err := c.Conn.Do(commandName, args...)
+	c.print("Do", commandName, args, reply, err)
+	return reply, err
+}
+
+func (c *loggingConn) Send(commandName string, args ...interface{}) error {
+	err := c.Conn.Send(commandName, args...)
+	c.print("Send", commandName, args, nil, err)
+	return err
+}
+
+func (c *loggingConn) Receive() (interface{}, error) {
+	reply, err := c.Conn.Receive()
+	c.print("Receive", "", nil, reply, err)
+	return reply, err
+}
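
NewLoggingConn above simply wraps an existing Conn. A small hedged sketch of how it might be used; the logger destination and the "redis" prefix are illustrative assumptions:

	package main

	import (
		"log"
		"os"

		"github.com/garyburd/redigo/redis"
	)

	func main() {
		c, err := redis.Dial("tcp", "localhost:6379") // illustrative address
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// Every Do, Send, Receive and Close call on lc is written to the
		// logger, prefixed with "redis.".
		lc := redis.NewLoggingConn(c, log.New(os.Stderr, "", log.LstdFlags), "redis")

		if _, err := lc.Do("PING"); err != nil {
			log.Fatal(err)
		}
	}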
diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go
new file mode 100644
index 0000000..3d23360
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pool.go
@@ -0,0 +1,397 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"bytes"
+	"container/list"
+	"crypto/rand"
+	"crypto/sha1"
+	"errors"
+	"io"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/garyburd/redigo/internal"
+)
+
+var nowFunc = time.Now // for testing
+
+// ErrPoolExhausted is returned from a pool connection method (Do, Send,
+// Receive, Flush, Err) when the maximum number of database connections in the
+// pool has been reached.
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+	errPoolClosed = errors.New("redigo: connection pool closed")
+	errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a global variable. The pool configuration used here
+// is an example, not a recommendation.
+//
+//  func newPool(server, password string) *redis.Pool {
+//      return &redis.Pool{
+//          MaxIdle: 3,
+//          IdleTimeout: 240 * time.Second,
+//          Dial: func () (redis.Conn, error) {
+//              c, err := redis.Dial("tcp", server)
+//              if err != nil {
+//                  return nil, err
+//              }
+//              if _, err := c.Do("AUTH", password); err != nil {
+//                  c.Close()
+//                  return nil, err
+//              }
+//              return c, err
+//          },
+//          TestOnBorrow: func(c redis.Conn, t time.Time) error {
+//              if time.Since(t) < time.Minute {
+//                  return nil
+//              }
+//              _, err := c.Do("PING")
+//              return err
+//          },
+//      }
+//  }
+//
+//  var (
+//      pool *redis.Pool
+//      redisServer = flag.String("redisServer", ":6379", "")
+//      redisPassword = flag.String("redisPassword", "", "")
+//  )
+//
+//  func main() {
+//      flag.Parse()
+//      pool = newPool(*redisServer, *redisPassword)
+//      ...
+//  }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+//  func serveHome(w http.ResponseWriter, r *http.Request) {
+//      conn := pool.Get()
+//      defer conn.Close()
+//      ....
+//  }
+//
+type Pool struct {
+
+	// Dial is an application supplied function for creating and configuring a
+	// connection.
+	//
+	// The connection returned from Dial must not be in a special state
+	// (subscribed to pubsub channel, transaction started, ...).
+	Dial func() (Conn, error)
+
+	// TestOnBorrow is an optional application supplied function for checking
+	// the health of an idle connection before the connection is used again by
+	// the application. Argument t is the time that the connection was returned
+	// to the pool. If the function returns an error, then the connection is
+	// closed.
+	TestOnBorrow func(c Conn, t time.Time) error
+
+	// Maximum number of idle connections in the pool.
+	MaxIdle int
+
+	// Maximum number of connections allocated by the pool at a given time.
+	// When zero, there is no limit on the number of connections in the pool.
+	MaxActive int
+
+	// Close connections after remaining idle for this duration. If the value
+	// is zero, then idle connections are not closed. Applications should set
+	// the timeout to a value less than the server's timeout.
+	IdleTimeout time.Duration
+
+	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
+	// for a connection to be returned to the pool before returning.
+	Wait bool
+
+	// mu protects fields defined below.
+	mu     sync.Mutex
+	cond   *sync.Cond
+	closed bool
+	active int
+
+	// Stack of idleConn with most recently used at the front.
+	idle list.List
+}
+
+type idleConn struct {
+	c Conn
+	t time.Time
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+	return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+	c, err := p.get()
+	if err != nil {
+		return errorConnection{err}
+	}
+	return &pooledConnection{p: p, c: c}
+}
+
+// ActiveCount returns the number of active connections in the pool.
+func (p *Pool) ActiveCount() int {
+	p.mu.Lock()
+	active := p.active
+	p.mu.Unlock()
+	return active
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+	p.mu.Lock()
+	idle := p.idle
+	p.idle.Init()
+	p.closed = true
+	p.active -= idle.Len()
+	if p.cond != nil {
+		p.cond.Broadcast()
+	}
+	p.mu.Unlock()
+	for e := idle.Front(); e != nil; e = e.Next() {
+		e.Value.(idleConn).c.Close()
+	}
+	return nil
+}
+
+// release decrements the active count and signals waiters. The caller must
+// hold p.mu during the call.
+func (p *Pool) release() {
+	p.active -= 1
+	if p.cond != nil {
+		p.cond.Signal()
+	}
+}
+
+// get prunes stale connections and returns a connection from the idle list or
+// creates a new connection.
+func (p *Pool) get() (Conn, error) {
+	p.mu.Lock()
+
+	// Prune stale connections.
+
+	if timeout := p.IdleTimeout; timeout > 0 {
+		for i, n := 0, p.idle.Len(); i < n; i++ {
+			e := p.idle.Back()
+			if e == nil {
+				break
+			}
+			ic := e.Value.(idleConn)
+			if ic.t.Add(timeout).After(nowFunc()) {
+				break
+			}
+			p.idle.Remove(e)
+			p.release()
+			p.mu.Unlock()
+			ic.c.Close()
+			p.mu.Lock()
+		}
+	}
+
+	for {
+
+		// Get idle connection.
+
+		for i, n := 0, p.idle.Len(); i < n; i++ {
+			e := p.idle.Front()
+			if e == nil {
+				break
+			}
+			ic := e.Value.(idleConn)
+			p.idle.Remove(e)
+			test := p.TestOnBorrow
+			p.mu.Unlock()
+			if test == nil || test(ic.c, ic.t) == nil {
+				return ic.c, nil
+			}
+			ic.c.Close()
+			p.mu.Lock()
+			p.release()
+		}
+
+		// Check for pool closed before dialing a new connection.
+
+		if p.closed {
+			p.mu.Unlock()
+			return nil, errors.New("redigo: get on closed pool")
+		}
+
+		// Dial new connection if under limit.
+
+		if p.MaxActive == 0 || p.active < p.MaxActive {
+			dial := p.Dial
+			p.active += 1
+			p.mu.Unlock()
+			c, err := dial()
+			if err != nil {
+				p.mu.Lock()
+				p.release()
+				p.mu.Unlock()
+				c = nil
+			}
+			return c, err
+		}
+
+		if !p.Wait {
+			p.mu.Unlock()
+			return nil, ErrPoolExhausted
+		}
+
+		if p.cond == nil {
+			p.cond = sync.NewCond(&p.mu)
+		}
+		p.cond.Wait()
+	}
+}
+
+func (p *Pool) put(c Conn, forceClose bool) error {
+	err := c.Err()
+	p.mu.Lock()
+	if !p.closed && err == nil && !forceClose {
+		p.idle.PushFront(idleConn{t: nowFunc(), c: c})
+		if p.idle.Len() > p.MaxIdle {
+			c = p.idle.Remove(p.idle.Back()).(idleConn).c
+		} else {
+			c = nil
+		}
+	}
+
+	if c == nil {
+		if p.cond != nil {
+			p.cond.Signal()
+		}
+		p.mu.Unlock()
+		return nil
+	}
+
+	p.release()
+	p.mu.Unlock()
+	return c.Close()
+}
+
+type pooledConnection struct {
+	p     *Pool
+	c     Conn
+	state int
+}
+
+var (
+	sentinel     []byte
+	sentinelOnce sync.Once
+)
+
+func initSentinel() {
+	p := make([]byte, 64)
+	if _, err := rand.Read(p); err == nil {
+		sentinel = p
+	} else {
+		h := sha1.New()
+		io.WriteString(h, "Oops, rand failed. Use time instead.")
+		io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
+		sentinel = h.Sum(nil)
+	}
+}
+
+func (pc *pooledConnection) Close() error {
+	c := pc.c
+	if _, ok := c.(errorConnection); ok {
+		return nil
+	}
+	pc.c = errorConnection{errConnClosed}
+
+	if pc.state&internal.MultiState != 0 {
+		c.Send("DISCARD")
+		pc.state &^= (internal.MultiState | internal.WatchState)
+	} else if pc.state&internal.WatchState != 0 {
+		c.Send("UNWATCH")
+		pc.state &^= internal.WatchState
+	}
+	if pc.state&internal.SubscribeState != 0 {
+		c.Send("UNSUBSCRIBE")
+		c.Send("PUNSUBSCRIBE")
+		// To detect the end of the message stream, ask the server to echo
+		// a sentinel value and read until we see that value.
+		sentinelOnce.Do(initSentinel)
+		c.Send("ECHO", sentinel)
+		c.Flush()
+		for {
+			p, err := c.Receive()
+			if err != nil {
+				break
+			}
+			if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
+				pc.state &^= internal.SubscribeState
+				break
+			}
+		}
+	}
+	c.Do("")
+	pc.p.put(c, pc.state != 0)
+	return nil
+}
+
+func (pc *pooledConnection) Err() error {
+	return pc.c.Err()
+}
+
+func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+	ci := internal.LookupCommandInfo(commandName)
+	pc.state = (pc.state | ci.Set) &^ ci.Clear
+	return pc.c.Do(commandName, args...)
+}
+
+func (pc *pooledConnection) Send(commandName string, args ...interface{}) error {
+	ci := internal.LookupCommandInfo(commandName)
+	pc.state = (pc.state | ci.Set) &^ ci.Clear
+	return pc.c.Send(commandName, args...)
+}
+
+func (pc *pooledConnection) Flush() error {
+	return pc.c.Flush()
+}
+
+func (pc *pooledConnection) Receive() (reply interface{}, err error) {
+	return pc.c.Receive()
+}
+
+type errorConnection struct{ err error }
+
+func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
+func (ec errorConnection) Send(string, ...interface{}) error              { return ec.err }
+func (ec errorConnection) Err() error                                     { return ec.err }
+func (ec errorConnection) Close() error                                   { return ec.err }
+func (ec errorConnection) Flush() error                                   { return ec.err }
+func (ec errorConnection) Receive() (interface{}, error)                  { return nil, ec.err }
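
The Pool example in the comment above covers Dial and TestOnBorrow; the sketch below focuses on MaxActive and Wait, and on the fact that Get always returns a usable value whose error surfaces on first use. The address and limits are illustrative assumptions:

	package main

	import (
		"log"
		"time"

		"github.com/garyburd/redigo/redis"
	)

	func main() {
		pool := &redis.Pool{
			MaxIdle:     3,
			MaxActive:   10,   // never more than 10 connections at once
			Wait:        true, // Get blocks instead of returning ErrPoolExhausted
			IdleTimeout: 240 * time.Second,
			Dial: func() (redis.Conn, error) {
				return redis.Dial("tcp", "localhost:6379")
			},
		}
		defer pool.Close()

		c := pool.Get() // always returns a connection; a dial error is deferred
		defer c.Close() // returns the connection to the pool
		if _, err := c.Do("PING"); err != nil {
			log.Fatal(err) // a dial or command error shows up here
		}
	}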
diff --git a/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/garyburd/redigo/redis/pubsub.go
new file mode 100644
index 0000000..c0ecce8
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pubsub.go
@@ -0,0 +1,144 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import "errors"
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+
+	// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
+	Kind string
+
+	// The channel that was changed.
+	Channel string
+
+	// The current number of subscriptions for the connection.
+	Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+
+	// The originating channel.
+	Channel string
+
+	// The message data.
+	Data []byte
+}
+
+// PMessage represents a pmessage notification.
+type PMessage struct {
+
+	// The matched pattern.
+	Pattern string
+
+	// The originating channel.
+	Channel string
+
+	// The message data.
+	Data []byte
+}
+
+// Pong represents a pubsub pong notification.
+type Pong struct {
+	Data string
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+	Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+	return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+	c.Conn.Send("SUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+	c.Conn.Send("PSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+	c.Conn.Send("UNSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+	c.Conn.Send("PUNSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// Ping sends a PING to the server with the specified data.
+func (c PubSubConn) Ping(data string) error {
+	c.Conn.Send("PING", data)
+	return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, PMessage, Pong
+// or error. The return value is intended to be used directly in a type switch
+// as illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+	reply, err := Values(c.Conn.Receive())
+	if err != nil {
+		return err
+	}
+
+	var kind string
+	reply, err = Scan(reply, &kind)
+	if err != nil {
+		return err
+	}
+
+	switch kind {
+	case "message":
+		var m Message
+		if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+			return err
+		}
+		return m
+	case "pmessage":
+		var pm PMessage
+		if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
+			return err
+		}
+		return pm
+	case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+		s := Subscription{Kind: kind}
+		if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+			return err
+		}
+		return s
+	case "pong":
+		var p Pong
+		if _, err := Scan(reply, &p.Data); err != nil {
+			return err
+		}
+		return p
+	}
+	return errors.New("redigo: unknown pubsub notification")
+}
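
The doc.go example earlier shows Message and Subscription; this hedged sketch covers the remaining notification types handled by Receive. The pattern name is an illustrative assumption:

	import (
		"fmt"

		"github.com/garyburd/redigo/redis"
	)

	// listen subscribes to a pattern and prints each notification type.
	func listen(c redis.Conn) error {
		psc := redis.PubSubConn{Conn: c}
		if err := psc.PSubscribe("news.*"); err != nil {
			return err
		}
		for {
			switch v := psc.Receive().(type) {
			case redis.PMessage:
				fmt.Printf("%s (%s): %s\n", v.Channel, v.Pattern, v.Data)
			case redis.Pong:
				fmt.Println("pong:", v.Data)
			case redis.Subscription:
				fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
			case error:
				return v
			}
		}
	}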
diff --git a/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/garyburd/redigo/redis/redis.go
new file mode 100644
index 0000000..c90a48e
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/redis.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+	// Close closes the connection.
+	Close() error
+
+	// Err returns a non-nil value if the connection is broken. The returned
+	// value is either the first non-nil value returned from the underlying
+	// network connection or a protocol parsing error. Applications should
+	// close broken connections.
+	Err() error
+
+	// Do sends a command to the server and returns the received reply.
+	Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+	// Send writes the command to the client's output buffer.
+	Send(commandName string, args ...interface{}) error
+
+	// Flush flushes the output buffer to the Redis server.
+	Flush() error
+
+	// Receive receives a single reply from the Redis server.
+	Receive() (reply interface{}, err error)
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/garyburd/redigo/redis/reply.go
new file mode 100644
index 0000000..5789614
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/reply.go
@@ -0,0 +1,393 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+//  Reply type    Result
+//  integer       int(reply), nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Int(reply interface{}, err error) (int, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		x := int(reply)
+		if int64(x) != reply {
+			return 0, strconv.ErrRange
+		}
+		return x, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 0)
+		return int(n), err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64 bit integer. If err
+// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts
+// the reply to an int64 as follows:
+//
+//  Reply type    Result
+//  integer       reply, nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Int64(reply interface{}, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to an unsigned 64 bit
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+//  Reply type    Result
+//  integer       reply, nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		if reply < 0 {
+			return 0, errNegativeInt
+		}
+		return uint64(reply), nil
+	case []byte:
+		n, err := strconv.ParseUint(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64 bit float. If err
+// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to a float64 as follows:
+//
+//  Reply type    Result
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Float64(reply interface{}, err error) (float64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		n, err := strconv.ParseFloat(string(reply), 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+//  Reply type      Result
+//  bulk string     string(reply), nil
+//  simple string   reply, nil
+//  nil             "",  ErrNil
+//  other           "",  error
+func String(reply interface{}, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return string(reply), nil
+	case string:
+		return reply, nil
+	case nil:
+		return "", ErrNil
+	case Error:
+		return "", reply
+	}
+	return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+//  Reply type      Result
+//  bulk string     reply, nil
+//  simple string   []byte(reply), nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return reply, nil
+	case string:
+		return []byte(reply), nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to boolean as follows:
+//
+//  Reply type      Result
+//  integer         value != 0, nil
+//  bulk string     strconv.ParseBool(reply)
+//  nil             false, ErrNil
+//  other           false, error
+func Bool(reply interface{}, err error) (bool, error) {
+	if err != nil {
+		return false, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply != 0, nil
+	case []byte:
+		return strconv.ParseBool(string(reply))
+	case nil:
+		return false, ErrNil
+	case Error:
+		return false, reply
+	}
+	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+//  Reply type      Result
+//  array           reply, nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		return reply, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		result := make([]string, len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			p, ok := reply[i].([]byte)
+			if !ok {
+				return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
+			}
+			result[i] = string(p)
+		}
+		return result, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		result := make([][]byte, len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			p, ok := reply[i].([]byte)
+			if !ok {
+				return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i])
+			}
+			result[i] = p
+		}
+		return result, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply)
+}
+
+// Ints is a helper that converts an array command reply to a []int. If
+// err is not equal to nil, then Ints returns nil, err.
+func Ints(reply interface{}, err error) ([]int, error) {
+	var ints []int
+	values, err := Values(reply, err)
+	if err != nil {
+		return ints, err
+	}
+	if err := ScanSlice(values, &ints); err != nil {
+		return ints, err
+	}
+	return ints, nil
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
+func StringMap(result interface{}, err error) (map[string]string, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: StringMap expects even number of values result")
+	}
+	m := make(map[string]string, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, okKey := values[i].([]byte)
+		value, okValue := values[i+1].([]byte)
+		if !okKey || !okValue {
+			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+		}
+		m[string(key)] = string(value)
+	}
+	return m, nil
+}
+
+// IntMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func IntMap(result interface{}, err error) (map[string]int, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: IntMap expects even number of values result")
+	}
+	m := make(map[string]int, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, ok := values[i].([]byte)
+		if !ok {
+			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+		}
+		value, err := Int(values[i+1], nil)
+		if err != nil {
+			return nil, err
+		}
+		m[string(key)] = value
+	}
+	return m, nil
+}
+
+// Int64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int64. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func Int64Map(result interface{}, err error) (map[string]int64, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: Int64Map expects even number of values result")
+	}
+	m := make(map[string]int64, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, ok := values[i].([]byte)
+		if !ok {
+			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+		}
+		value, err := Int64(values[i+1], nil)
+		if err != nil {
+			return nil, err
+		}
+		m[string(key)] = value
+	}
+	return m, nil
+}
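
The map helpers above pair with hash and config replies; a short hedged sketch using HGETALL, where the key and field names are illustrative:

	import (
		"fmt"

		"github.com/garyburd/redigo/redis"
	)

	// hashToMap reads a Redis hash into a Go map with the StringMap helper.
	func hashToMap(c redis.Conn, key string) (map[string]string, error) {
		m, err := redis.StringMap(c.Do("HGETALL", key))
		if err != nil {
			return nil, err
		}
		fmt.Println(m) // e.g. map[balance:10 name:alice]
		return m, nil
	}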
diff --git a/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/garyburd/redigo/redis/scan.go
new file mode 100644
index 0000000..962e94b
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/scan.go
@@ -0,0 +1,555 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+func ensureLen(d reflect.Value, n int) {
+	if n > d.Cap() {
+		d.Set(reflect.MakeSlice(d.Type(), n, n))
+	} else {
+		d.SetLen(n)
+	}
+}
+
+func cannotConvert(d reflect.Value, s interface{}) error {
+	var sname string
+	switch s.(type) {
+	case string:
+		sname = "Redis simple string"
+	case Error:
+		sname = "Redis error"
+	case int64:
+		sname = "Redis integer"
+	case []byte:
+		sname = "Redis bulk string"
+	case []interface{}:
+		sname = "Redis array"
+	default:
+		sname = reflect.TypeOf(s).String()
+	}
+	return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
+}
+
+func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Float32, reflect.Float64:
+		var x float64
+		x, err = strconv.ParseFloat(string(s), d.Type().Bits())
+		d.SetFloat(x)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var x int64
+		x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
+		d.SetInt(x)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		var x uint64
+		x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
+		d.SetUint(x)
+	case reflect.Bool:
+		var x bool
+		x, err = strconv.ParseBool(string(s))
+		d.SetBool(x)
+	case reflect.String:
+		d.SetString(string(s))
+	case reflect.Slice:
+		if d.Type().Elem().Kind() != reflect.Uint8 {
+			err = cannotConvert(d, s)
+		} else {
+			d.SetBytes(s)
+		}
+	default:
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+func convertAssignInt(d reflect.Value, s int64) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		d.SetInt(s)
+		if d.Int() != s {
+			err = strconv.ErrRange
+			d.SetInt(0)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if s < 0 {
+			err = strconv.ErrRange
+		} else {
+			x := uint64(s)
+			d.SetUint(x)
+			if d.Uint() != x {
+				err = strconv.ErrRange
+				d.SetUint(0)
+			}
+		}
+	case reflect.Bool:
+		d.SetBool(s != 0)
+	default:
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+	switch s := s.(type) {
+	case []byte:
+		err = convertAssignBulkString(d, s)
+	case int64:
+		err = convertAssignInt(d, s)
+	default:
+		err = cannotConvert(d, s)
+	}
+	return err
+}
+
+func convertAssignArray(d reflect.Value, s []interface{}) error {
+	if d.Type().Kind() != reflect.Slice {
+		return cannotConvert(d, s)
+	}
+	ensureLen(d, len(s))
+	for i := 0; i < len(s); i++ {
+		if err := convertAssignValue(d.Index(i), s[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func convertAssign(d interface{}, s interface{}) (err error) {
+	// Handle the most common destination types using type switches and
+	// fall back to reflection for all other types.
+	switch s := s.(type) {
+	case nil:
+		// ignore
+	case []byte:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		case *int:
+			*d, err = strconv.Atoi(string(s))
+		case *bool:
+			*d, err = strconv.ParseBool(string(s))
+		case *[]byte:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignBulkString(d.Elem(), s)
+			}
+		}
+	case int64:
+		switch d := d.(type) {
+		case *int:
+			x := int(s)
+			if int64(x) != s {
+				err = strconv.ErrRange
+				x = 0
+			}
+			*d = x
+		case *bool:
+			*d = s != 0
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignInt(d.Elem(), s)
+			}
+		}
+	case string:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		default:
+			err = cannotConvert(reflect.ValueOf(d), s)
+		}
+	case []interface{}:
+		switch d := d.(type) {
+		case *[]interface{}:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignArray(d.Elem(), s)
+			}
+		}
+	case Error:
+		err = s
+	default:
+		err = cannotConvert(reflect.ValueOf(d), s)
+	}
+	return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+	if len(src) < len(dest) {
+		return nil, errors.New("redigo.Scan: array short")
+	}
+	var err error
+	for i, d := range dest {
+		err = convertAssign(d, src[i])
+		if err != nil {
+			err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
+			break
+		}
+	}
+	return src[len(dest):], err
+}
+
+type fieldSpec struct {
+	name      string
+	index     []int
+	omitEmpty bool
+}
+
+type structSpec struct {
+	m map[string]*fieldSpec
+	l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+	return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		switch {
+		case f.PkgPath != "" && !f.Anonymous:
+			// Ignore unexported fields.
+		case f.Anonymous:
+			// TODO: Handle pointers. Requires change to decoder and
+			// protection against infinite recursion.
+			if f.Type.Kind() == reflect.Struct {
+				compileStructSpec(f.Type, depth, append(index, i), ss)
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+			p := strings.Split(tag, ",")
+			if len(p) > 0 {
+				if p[0] == "-" {
+					continue
+				}
+				if len(p[0]) > 0 {
+					fs.name = p[0]
+				}
+				for _, s := range p[1:] {
+					switch s {
+					case "omitempty":
+						fs.omitEmpty = true
+					default:
+						panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j += 1
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	structSpecMutex  sync.RWMutex
+	structSpecCache  = make(map[reflect.Type]*structSpec)
+	defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// 'redis' field tag to override the name:
+//
+//      Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+		}
+	}
+	return nil
+}
+
+var (
+	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the dest
+// slice must be integer, float, boolean, string, struct or pointer to struct
+// values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanSliceValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Slice {
+		return errScanSliceValue
+	}
+
+	isPtr := false
+	t := d.Type().Elem()
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+		isPtr = true
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Struct {
+		ensureLen(d, len(src))
+		for i, s := range src {
+			if s == nil {
+				continue
+			}
+			if err := convertAssignValue(d.Index(i), s); err != nil {
+				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
+			}
+		}
+		return nil
+	}
+
+	ss := structSpecForType(t)
+	fss := ss.l
+	if len(fieldNames) > 0 {
+		fss = make([]*fieldSpec, len(fieldNames))
+		for i, name := range fieldNames {
+			fss[i] = ss.m[name]
+			if fss[i] == nil {
+				return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
+			}
+		}
+	}
+
+	if len(fss) == 0 {
+		return errors.New("redigo.ScanSlice: no struct fields")
+	}
+
+	n := len(src) / len(fss)
+	if n*len(fss) != len(src) {
+		return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
+	}
+
+	ensureLen(d, n)
+	for i := 0; i < n; i++ {
+		d := d.Index(i)
+		if isPtr {
+			if d.IsNil() {
+				d.Set(reflect.New(t))
+			}
+			d = d.Elem()
+		}
+		for j, fs := range fss {
+			s := src[i*len(fss)+j]
+			if s == nil {
+				continue
+			}
+			if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
+			}
+		}
+	}
+	return nil
+}
+
+// Args is a helper for constructing command arguments from structured values.
+type Args []interface{}
+
+// Add returns the result of appending value to args.
+func (args Args) Add(value ...interface{}) Args {
+	return append(args, value...)
+}
+
+// AddFlat returns the result of appending the flattened value of v to args.
+//
+// Maps are flattened by appending the alternating keys and map values to args.
+//
+// Slices are flattened by appending the slice elements to args.
+//
+// Structs are flattened by appending the alternating names and values of
+// exported fields to args. If v is a nil struct pointer, then nothing is
+// appended. The 'redis' field tag overrides struct field names. See ScanStruct
+// for more information on the use of the 'redis' field tag.
+//
+// Other types are appended to args as is.
+func (args Args) AddFlat(v interface{}) Args {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Struct:
+		args = flattenStruct(args, rv)
+	case reflect.Slice:
+		for i := 0; i < rv.Len(); i++ {
+			args = append(args, rv.Index(i).Interface())
+		}
+	case reflect.Map:
+		for _, k := range rv.MapKeys() {
+			args = append(args, k.Interface(), rv.MapIndex(k).Interface())
+		}
+	case reflect.Ptr:
+		if rv.Type().Elem().Kind() == reflect.Struct {
+			if !rv.IsNil() {
+				args = flattenStruct(args, rv.Elem())
+			}
+		} else {
+			args = append(args, v)
+		}
+	default:
+		args = append(args, v)
+	}
+	return args
+}
+
+func flattenStruct(args Args, v reflect.Value) Args {
+	ss := structSpecForType(v.Type())
+	for _, fs := range ss.l {
+		fv := v.FieldByIndex(fs.index)
+		if fs.omitEmpty {
+			var empty = false
+			switch fv.Kind() {
+			case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+				empty = fv.Len() == 0
+			case reflect.Bool:
+				empty = !fv.Bool()
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				empty = fv.Int() == 0
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				empty = fv.Uint() == 0
+			case reflect.Float32, reflect.Float64:
+				empty = fv.Float() == 0
+			case reflect.Interface, reflect.Ptr:
+				empty = fv.IsNil()
+			}
+			if empty {
+				continue
+			}
+		}
+		args = append(args, fs.name, fv.Interface())
+	}
+	return args
+}
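
ScanStruct and Args.AddFlat above are designed to round-trip structs through hash commands; a hedged sketch, assuming an illustrative Account type and key:

	import "github.com/garyburd/redigo/redis"

	type Account struct {
		Name    string `redis:"name"`
		Balance int64  `redis:"balance"`
		Notes   string `redis:"notes,omitempty"`
	}

	// saveAndLoad writes a struct to a hash with AddFlat and reads it back
	// with ScanStruct.
	func saveAndLoad(c redis.Conn, key string, a *Account) (*Account, error) {
		// HMSET <key> name <Name> balance <Balance> [notes <Notes>]
		if _, err := c.Do("HMSET", redis.Args{}.Add(key).AddFlat(a)...); err != nil {
			return nil, err
		}
		values, err := redis.Values(c.Do("HGETALL", key))
		if err != nil {
			return nil, err
		}
		var out Account
		if err := redis.ScanStruct(values, &out); err != nil {
			return nil, err
		}
		return &out, nil
	}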
diff --git a/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/garyburd/redigo/redis/script.go
new file mode 100644
index 0000000..78605a9
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/script.go
@@ -0,0 +1,86 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"io"
+	"strings"
+)
+
+// Script encapsulates the source, hash and key count for a Lua script. See
+// http://redis.io/commands/eval for information on scripts in Redis.
+type Script struct {
+	keyCount int
+	src      string
+	hash     string
+}
+
+// NewScript returns a new script object. If keyCount is greater than or equal
+// to zero, then the count is automatically inserted in the EVAL command
+// argument list. If keyCount is less than zero, then the application supplies
+// the count as the first value in the keysAndArgs argument to the Do, Send and
+// SendHash methods.
+func NewScript(keyCount int, src string) *Script {
+	h := sha1.New()
+	io.WriteString(h, src)
+	return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
+}
+
+func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
+	var args []interface{}
+	if s.keyCount < 0 {
+		args = make([]interface{}, 1+len(keysAndArgs))
+		args[0] = spec
+		copy(args[1:], keysAndArgs)
+	} else {
+		args = make([]interface{}, 2+len(keysAndArgs))
+		args[0] = spec
+		args[1] = s.keyCount
+		copy(args[2:], keysAndArgs)
+	}
+	return args
+}
+
+// Do evaluates the script. Under the covers, Do optimistically evaluates the
+// script using the EVALSHA command. If the command fails because the script is
+// not loaded, then Do evaluates the script using the EVAL command (thus
+// causing the script to load).
+func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
+	v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
+	if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
+		v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
+	}
+	return v, err
+}
+
+// SendHash evaluates the script without waiting for the reply. The script is
+// evaluated with the EVALSHA command. The application must ensure that the
+// script is loaded by a previous call to Send, Do or Load methods.
+func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
+	return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
+}
+
+// Send evaluates the script without waiting for the reply.
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
+	return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
+}
+
+// Load loads the script without evaluating it.
+func (s *Script) Load(c Conn) error {
+	_, err := c.Do("SCRIPT", "LOAD", s.src)
+	return err
+}
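
A minimal sketch of using the Script type above; the Lua source and key are illustrative, and the EVALSHA-then-EVAL fallback described in the Do comment happens transparently:

	import (
		"fmt"
		"log"

		"github.com/garyburd/redigo/redis"
	)

	// getSet is declared with one key: it returns the old value and sets a new one.
	var getSet = redis.NewScript(1, `
		local old = redis.call("GET", KEYS[1])
		redis.call("SET", KEYS[1], ARGV[1])
		return old
	`)

	func swap(c redis.Conn, key, value string) {
		old, err := redis.String(getSet.Do(c, key, value))
		if err != nil && err != redis.ErrNil {
			log.Fatal(err) // redis.ErrNil just means the key did not exist yet
		}
		fmt.Println("previous value:", old)
	}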
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors.  All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..e2e0651
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+	go install
+
+test: install generate-test-pbs
+	go test
+
+
+generate-test-pbs:
+	make install
+	make -C testdata
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+	make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e392575
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+	in := reflect.ValueOf(pb)
+	if in.IsNil() {
+		return pb
+	}
+
+	out := reflect.New(in.Type().Elem())
+	// out is empty so a merge is a deep copy.
+	mergeStruct(out.Elem(), in.Elem())
+	return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		// Explicit test prior to mergeStruct so that mistyped nils will fail
+		panic("proto: type mismatch")
+	}
+	if in.IsNil() {
+		// Merging nil into non-nil is a quiet no-op
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := extendable(in.Addr().Interface()); ok {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..04dcb88
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,874 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
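+// For example, the value 300 is encoded as the two bytes 0xAC 0x02.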
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	// x, n already 0
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	// x, err already 0
+
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
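+// Under this encoding the values 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4 respectively.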
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+	// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+	oi := o.index
+
+	err := o.skip(t, tag, wire)
+	if err != nil {
+		return err
+	}
+
+	if !unrecField.IsValid() {
+		return nil
+	}
+
+	ptr := structPointer_Bytes(base, unrecField)
+
+	// Add the skipped field to struct field
+	obuf := o.buf
+
+	o.buf = *ptr
+	o.EncodeVarint(uint64(tag<<3 | wire))
+	*ptr = append(o.buf, obuf[oi:o.index]...)
+
+	o.buf = obuf
+
+	return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+	var u uint64
+	var err error
+
+	switch wire {
+	case WireVarint:
+		_, err = o.DecodeVarint()
+	case WireFixed64:
+		_, err = o.DecodeFixed64()
+	case WireBytes:
+		_, err = o.DecodeRawBytes(false)
+	case WireFixed32:
+		_, err = o.DecodeFixed32()
+	case WireStartGroup:
+		for {
+			u, err = o.DecodeVarint()
+			if err != nil {
+				break
+			}
+			fwire := int(u & 0x7)
+			if fwire == WireEndGroup {
+				break
+			}
+			ftag := int(u >> 3)
+			err = o.skip(t, ftag, fwire)
+			if err != nil {
+				break
+			}
+		}
+	default:
+		err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+	}
+	return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The method should reset the receiver before
+// decoding starts.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	typ, base, err := getbase(pb)
+	if err != nil {
+		return err
+	}
+	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(Unmarshaler); ok {
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	typ, base, err := getbase(pb)
+	if err != nil {
+		return err
+	}
+
+	err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+	if collectStats {
+		stats.Decode++
+	}
+
+	return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+	var state errorState
+	required, reqFields := prop.reqCount, uint64(0)
+
+	var err error
+	for err == nil && o.index < len(o.buf) {
+		oi := o.index
+		var u uint64
+		u, err = o.DecodeVarint()
+		if err != nil {
+			break
+		}
+		wire := int(u & 0x7)
+		if wire == WireEndGroup {
+			if is_group {
+				if required > 0 {
+					// Not enough information to determine the exact field.
+					// (See below.)
+					return &RequiredNotSetError{"{Unknown}"}
+				}
+				return nil // input is satisfied
+			}
+			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+		}
+		tag := int(u >> 3)
+		if tag <= 0 {
+			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+		}
+		fieldnum, ok := prop.decoderTags.get(tag)
+		if !ok {
+			// Maybe it's an extension?
+			if prop.extendable {
+				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+					if err = o.skip(st, tag, wire); err == nil {
+						extmap := e.extensionsWrite()
+						ext := extmap[int32(tag)] // may be missing
+						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+						extmap[int32(tag)] = ext
+					}
+					continue
+				}
+			}
+			// Maybe it's a oneof?
+			if prop.oneofUnmarshaler != nil {
+				m := structPointer_Interface(base, st).(Message)
+				// First return value indicates whether tag is a oneof field.
+				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+				if err == ErrInternalBadWireType {
+					// Map the error to something more descriptive.
+					// Do the formatting here to save generated code space.
+					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+				}
+				if ok {
+					continue
+				}
+			}
+			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+			continue
+		}
+		p := prop.Prop[fieldnum]
+
+		if p.dec == nil {
+			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+			continue
+		}
+		dec := p.dec
+		if wire != WireStartGroup && wire != p.WireType {
+			if wire == WireBytes && p.packedDec != nil {
+				// a packable field
+				dec = p.packedDec
+			} else {
+				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+				continue
+			}
+		}
+		decErr := dec(o, p, base)
+		if decErr != nil && !state.shouldContinue(decErr, p) {
+			err = decErr
+		}
+		if err == nil && p.Required {
+			// Successfully decoded a required field.
+			if tag <= 64 {
+				// use bitmap for fields 1-64 to catch field reuse.
+				var mask uint64 = 1 << uint64(tag-1)
+				if reqFields&mask == 0 {
+					// new required field
+					reqFields |= mask
+					required--
+				}
+			} else {
+				// This is imprecise. It can be fooled by a required field
+				// with a tag > 64 that is encoded twice; that's very rare.
+				// A fully correct implementation would require allocating
+				// a data structure, which we would like to avoid.
+				required--
+			}
+		}
+	}
+	if err == nil {
+		if is_group {
+			return io.ErrUnexpectedEOF
+		}
+		if state.err != nil {
+			return state.err
+		}
+		if required > 0 {
+			// Not enough information to determine the exact field. If we use extra
+			// CPU, we could determine the field only if the missing required field
+			// has a tag <= 64 and we check reqFields.
+			return &RequiredNotSetError{"{Unknown}"}
+		}
+	}
+	return err
+}
+
+// Individual type decoders
+// For each,
+//	u is the decoded value,
+//	v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+	boolPoolSize   = 16
+	uint32PoolSize = 8
+	uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	if len(o.bools) == 0 {
+		o.bools = make([]bool, boolPoolSize)
+	}
+	o.bools[0] = u != 0
+	*structPointer_Bool(base, p.field) = &o.bools[0]
+	o.bools = o.bools[1:]
+	return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	*structPointer_BoolVal(base, p.field) = u != 0
+	return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+	return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+	return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word64_Set(structPointer_Word64(base, p.field), o, u)
+	return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+	return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	*structPointer_String(base, p.field) = &s
+	return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	*structPointer_StringVal(base, p.field) = s
+	return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return err
+	}
+	*structPointer_Bytes(base, p.field) = b
+	return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	v := structPointer_BoolSlice(base, p.field)
+	*v = append(*v, u != 0)
+	return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+	v := structPointer_BoolSlice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded bools
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+
+	y := *v
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		y = append(y, u != 0)
+	}
+
+	*v = y
+	return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	structPointer_Word32Slice(base, p.field).Append(uint32(u))
+	return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Slice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded int32s
+
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		v.Append(uint32(u))
+	}
+	return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+
+	structPointer_Word64Slice(base, p.field).Append(u)
+	return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64Slice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded int64s
+
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		v.Append(u)
+	}
+	return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	v := structPointer_StringSlice(base, p.field)
+	*v = append(*v, s)
+	return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return err
+	}
+	v := structPointer_BytesSlice(base, p.field)
+	*v = append(*v, b)
+	return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+	raw, err := o.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	oi := o.index       // index at the end of this map entry
+	o.index -= len(raw) // move buffer back to start of map entry
+
+	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+	if mptr.Elem().IsNil() {
+		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+	}
+	v := mptr.Elem() // map[K]V
+
+	// Prepare addressable doubly-indirect placeholders for the key and value types.
+	// See enc_new_map for why.
+	keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+	keybase := toStructPointer(keyptr.Addr())                  // **K
+
+	var valbase structPointer
+	var valptr reflect.Value
+	switch p.mtype.Elem().Kind() {
+	case reflect.Slice:
+		// []byte
+		var dummy []byte
+		valptr = reflect.ValueOf(&dummy)  // *[]byte
+		valbase = toStructPointer(valptr) // *[]byte
+	case reflect.Ptr:
+		// message; valptr is **Msg; need to allocate the intermediate pointer
+		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+		valptr.Set(reflect.New(valptr.Type().Elem()))
+		valbase = toStructPointer(valptr)
+	default:
+		// everything else
+		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+		valbase = toStructPointer(valptr.Addr())                   // **V
+	}
+
+	// Decode.
+	// This parses a restricted wire format, namely the encoding of a message
+	// with two fields. See enc_new_map for the format.
+	for o.index < oi {
+		// tagcode for key and value properties are always a single byte
+		// because they have tags 1 and 2.
+		tagcode := o.buf[o.index]
+		o.index++
+		switch tagcode {
+		case p.mkeyprop.tagcode[0]:
+			if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+				return err
+			}
+		case p.mvalprop.tagcode[0]:
+			if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+				return err
+			}
+		default:
+			// TODO: Should we silently skip this instead?
+			return fmt.Errorf("proto: bad map data tag %d", raw[0])
+		}
+	}
+	keyelem, valelem := keyptr.Elem(), valptr.Elem()
+	if !keyelem.IsValid() {
+		keyelem = reflect.Zero(p.mtype.Key())
+	}
+	if !valelem.IsValid() {
+		valelem = reflect.Zero(p.mtype.Elem())
+	}
+
+	v.SetMapIndex(keyelem, valelem)
+	return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+	bas := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(bas) {
+		// allocate new nested message
+		bas = toStructPointer(reflect.New(p.stype))
+		structPointer_SetStructPointer(base, p.field, bas)
+	}
+	return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+	raw, e := o.DecodeRawBytes(false)
+	if e != nil {
+		return e
+	}
+
+	bas := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(bas) {
+		// allocate new nested message
+		bas = toStructPointer(reflect.New(p.stype))
+		structPointer_SetStructPointer(base, p.field, bas)
+	}
+
+	// If the object can unmarshal itself, let it.
+	if p.isUnmarshaler {
+		iv := structPointer_Interface(bas, p.stype)
+		return iv.(Unmarshaler).Unmarshal(raw)
+	}
+
+	obuf := o.buf
+	oi := o.index
+	o.buf = raw
+	o.index = 0
+
+	err = o.unmarshalType(p.stype, p.sprop, false, bas)
+	o.buf = obuf
+	o.index = oi
+
+	return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+	v := reflect.New(p.stype)
+	bas := toStructPointer(v)
+	structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+	if is_group {
+		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+		return err
+	}
+
+	raw, err := o.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+
+	// If the object can unmarshal itself, let it.
+	if p.isUnmarshaler {
+		iv := v.Interface()
+		return iv.(Unmarshaler).Unmarshal(raw)
+	}
+
+	obuf := o.buf
+	oi := o.index
+	o.buf = raw
+	o.index = 0
+
+	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+	o.buf = obuf
+	o.index = oi
+
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..68b9b30
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1355 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+	field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
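+// A 64-bit value needs at most ceil(64/7) = 10 bytes in varint form.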
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+func sizeFixed64(x uint64) int {
+	return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+func sizeFixed32(x uint64) int {
+	return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+	return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+func sizeRawBytes(b []byte) int {
+	return sizeVarint(uint64(len(b))) +
+		len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+func sizeStringBytes(s string) int {
+	return sizeVarint(uint64(len(s))) +
+		len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+	// Can the object marshal itself?
+	if m, ok := pb.(Marshaler); ok {
+		return m.Marshal()
+	}
+	p := NewBuffer(nil)
+	err := p.Marshal(pb)
+	if p.buf == nil && err == nil {
+		// Return a non-nil slice on success.
+		return []byte{}, nil
+	}
+	return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return ErrNil
+	}
+	if err == nil {
+		var state errorState
+		err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+	}
+	return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+	// Can the object marshal itself?
+	if m, ok := pb.(Marshaler); ok {
+		data, err := m.Marshal()
+		p.buf = append(p.buf, data...)
+		return err
+	}
+
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return ErrNil
+	}
+	if err == nil {
+		err = p.enc_struct(GetProperties(t.Elem()), base)
+	}
+
+	if collectStats {
+		(stats).Encode++ // Parens are to work around a goimports bug.
+	}
+
+	if len(p.buf) > maxMarshalSize {
+		return ErrTooLarge
+	}
+	return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+	// Can the object marshal itself?  If so, Size is slow.
+	// TODO: add Size to Marshaler, or add a Sizer interface.
+	if m, ok := pb.(Marshaler); ok {
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return 0
+	}
+	if err == nil {
+		n = size_struct(GetProperties(t.Elem()), base)
+	}
+
+	if collectStats {
+		(stats).Size++ // Parens are to work around a goimports bug.
+	}
+
+	return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+	v := *structPointer_Bool(base, p.field)
+	if v == nil {
+		return ErrNil
+	}
+	x := 0
+	if *v {
+		x = 1
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+	v := *structPointer_BoolVal(base, p.field)
+	if !v {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, 1)
+	return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+	v := *structPointer_Bool(base, p.field)
+	if v == nil {
+		return 0
+	}
+	return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+	v := *structPointer_BoolVal(base, p.field)
+	if !v && !p.oneof {
+		return 0
+	}
+	return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return ErrNil
+	}
+	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Val(base, p.field)
+	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return 0
+	}
+	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32Val(base, p.field)
+	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return ErrNil
+	}
+	x := word32_Get(v)
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Val(base, p.field)
+	x := word32Val_Get(v)
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return 0
+	}
+	x := word32_Get(v)
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32Val(base, p.field)
+	x := word32Val_Get(v)
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64(base, p.field)
+	if word64_IsNil(v) {
+		return ErrNil
+	}
+	x := word64_Get(v)
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, x)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64Val(base, p.field)
+	x := word64Val_Get(v)
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, x)
+	return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word64(base, p.field)
+	if word64_IsNil(v) {
+		return 0
+	}
+	x := word64_Get(v)
+	n += len(p.tagcode)
+	n += p.valSize(x)
+	return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word64Val(base, p.field)
+	x := word64Val_Get(v)
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(x)
+	return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+	v := *structPointer_String(base, p.field)
+	if v == nil {
+		return ErrNil
+	}
+	x := *v
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeStringBytes(x)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+	v := *structPointer_StringVal(base, p.field)
+	if v == "" {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeStringBytes(v)
+	return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+	v := *structPointer_String(base, p.field)
+	if v == nil {
+		return 0
+	}
+	x := *v
+	n += len(p.tagcode)
+	n += sizeStringBytes(x)
+	return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+	v := *structPointer_StringVal(base, p.field)
+	if v == "" && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeStringBytes(v)
+	return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+	var state errorState
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+
+	// Can the object marshal itself?
+	if p.isMarshaler {
+		m := structPointer_Interface(structp, p.stype).(Marshaler)
+		data, err := m.Marshal()
+		if err != nil && !state.shouldContinue(err, nil) {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+		return state.err
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+
+	// Can the object marshal itself?
+	if p.isMarshaler {
+		m := structPointer_Interface(structp, p.stype).(Marshaler)
+		data, _ := m.Marshal()
+		n0 := len(p.tagcode)
+		n1 := sizeRawBytes(data)
+		return n0 + n1
+	}
+
+	n0 := len(p.tagcode)
+	n1 := size_struct(p.sprop, structp)
+	n2 := sizeVarint(uint64(n1)) // size of encoded length
+	return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+	var state errorState
+	b := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(b) {
+		return ErrNil
+	}
+
+	o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+	err := o.enc_struct(p.sprop, b)
+	if err != nil && !state.shouldContinue(err, nil) {
+		return err
+	}
+	o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+	b := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(b) {
+		return 0
+	}
+
+	n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+	n += size_struct(p.sprop, b)
+	n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return ErrNil
+	}
+	for _, x := range s {
+		o.buf = append(o.buf, p.tagcode...)
+		v := uint64(0)
+		if x {
+			v = 1
+		}
+		p.valEnc(o, v)
+	}
+	return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return 0
+	}
+	return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+	for _, x := range s {
+		v := uint64(0)
+		if x {
+			v = 1
+		}
+		p.valEnc(o, v)
+	}
+	return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(l))
+	n += l // each bool takes exactly one byte
+	return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+	s := *structPointer_Bytes(base, p.field)
+	if s == nil {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(s)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+	s := *structPointer_Bytes(base, p.field)
+	if len(s) == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(s)
+	return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+	s := *structPointer_Bytes(base, p.field)
+	if s == nil && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeRawBytes(s)
+	return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+	s := *structPointer_Bytes(base, p.field)
+	if len(s) == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeRawBytes(s)
+	return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		p.valEnc(o, uint64(x))
+	}
+	return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		n += p.valSize(uint64(x))
+	}
+	return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		p.valEnc(buf, uint64(x))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		bufSize += p.valSize(uint64(x))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		x := s.Index(i)
+		p.valEnc(o, uint64(x))
+	}
+	return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		x := s.Index(i)
+		n += p.valSize(uint64(x))
+	}
+	return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		p.valEnc(buf, uint64(s.Index(i)))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		bufSize += p.valSize(uint64(s.Index(i)))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		p.valEnc(o, s.Index(i))
+	}
+	return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		n += p.valSize(s.Index(i))
+	}
+	return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		p.valEnc(buf, s.Index(i))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		bufSize += p.valSize(s.Index(i))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+	ss := *structPointer_BytesSlice(base, p.field)
+	l := len(ss)
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(ss[i])
+	}
+	return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+	ss := *structPointer_BytesSlice(base, p.field)
+	l := len(ss)
+	if l == 0 {
+		return 0
+	}
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		n += sizeRawBytes(ss[i])
+	}
+	return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+	ss := *structPointer_StringSlice(base, p.field)
+	l := len(ss)
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeStringBytes(ss[i])
+	}
+	return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+	ss := *structPointer_StringSlice(base, p.field)
+	l := len(ss)
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		n += sizeStringBytes(ss[i])
+	}
+	return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+	var state errorState
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	for i := 0; i < l; i++ {
+		structp := s.Index(i)
+		if structPointer_IsNil(structp) {
+			return errRepeatedHasNil
+		}
+
+		// Can the object marshal itself?
+		if p.isMarshaler {
+			m := structPointer_Interface(structp, p.stype).(Marshaler)
+			data, err := m.Marshal()
+			if err != nil && !state.shouldContinue(err, nil) {
+				return err
+			}
+			o.buf = append(o.buf, p.tagcode...)
+			o.EncodeRawBytes(data)
+			continue
+		}
+
+		o.buf = append(o.buf, p.tagcode...)
+		err := o.enc_len_struct(p.sprop, structp, &state)
+		if err != nil && !state.shouldContinue(err, nil) {
+			if err == ErrNil {
+				return errRepeatedHasNil
+			}
+			return err
+		}
+	}
+	return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		structp := s.Index(i)
+		if structPointer_IsNil(structp) {
+			return // return the size up to this point
+		}
+
+		// Can the object marshal itself?
+		if p.isMarshaler {
+			m := structPointer_Interface(structp, p.stype).(Marshaler)
+			data, _ := m.Marshal()
+			n += sizeRawBytes(data)
+			continue
+		}
+
+		n0 := size_struct(p.sprop, structp)
+		n1 := sizeVarint(uint64(n0)) // size of encoded length
+		n += n0 + n1
+	}
+	return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+	var state errorState
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return errRepeatedHasNil
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+		err := o.enc_struct(p.sprop, b)
+
+		if err != nil && !state.shouldContinue(err, nil) {
+			if err == ErrNil {
+				return errRepeatedHasNil
+			}
+			return err
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	}
+	return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return // return size up to this point
+		}
+
+		n += size_struct(p.sprop, b)
+	}
+	return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+	if err := encodeExtensions(exts); err != nil {
+		return err
+	}
+	v, _ := exts.extensionsRead()
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+	// Fast-path for common cases: zero or one extensions.
+	if len(v) <= 1 {
+		for _, e := range v {
+			o.buf = append(o.buf, e.enc...)
+		}
+		return nil
+	}
+
+	// Sort keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(v))
+	for k := range v {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		o.buf = append(o.buf, v[int32(k)].enc...)
+	}
+	return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+	var state errorState // XXX: or do we need to plumb this through?
+
+	/*
+		A map defined as
+			map<key_type, value_type> map_field = N;
+		is encoded in the same way as
+			message MapFieldEntry {
+				key_type key = 1;
+				value_type value = 2;
+			}
+			repeated MapFieldEntry map_field = N;
+	*/
+
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+	if v.Len() == 0 {
+		return nil
+	}
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	enc := func() error {
+		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+			return err
+		}
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+			return err
+		}
+		return nil
+	}
+
+	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		o.buf = append(o.buf, p.tagcode...)
+		if err := o.enc_len_thing(enc, &state); err != nil {
+			return err
+		}
+	}
+	return nil
+}
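+
+// For illustration, a field `map<int32, string> m = 1;` holding {2: "ab"}
+// encodes exactly like one repeated MapFieldEntry message: field tag 0x0a,
+// entry length 0x06, then the entry bytes 0x08 0x02 (key) and
+// 0x12 0x02 0x61 0x62 (value).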
+
+func size_new_map(p *Properties, base structPointer) int {
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	n := 0
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		// Tag codes for key and val are the responsibility of the sub-sizer.
+		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+		valsize := p.mvalprop.size(p.mvalprop, valbase)
+		entry := keysize + valsize
+		// Add on tag code and length of map entry itself.
+		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+	}
+	return n
+}
+
+// mapEncodeScratch returns addressable scratch reflect.Values for the map's
+// key and value types, along with structPointers suitable for passing to an
+// encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+	// Prepare addressable doubly-indirect placeholders for the key and value types.
+	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+	keycopy = reflect.New(mapType.Key()).Elem()                 // addressable K
+	keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+	keyptr.Set(keycopy.Addr())                                  //
+	keybase = toStructPointer(keyptr.Addr())                    // **K
+
+	// Value types are more varied and require special handling.
+	switch mapType.Elem().Kind() {
+	case reflect.Slice:
+		// []byte
+		var dummy []byte
+		valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+		valbase = toStructPointer(valcopy.Addr())
+	case reflect.Ptr:
+		// message; the generated field type is map[K]*Msg (so V is *Msg),
+		// so we only need one level of indirection.
+		valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+		valbase = toStructPointer(valcopy.Addr())
+	default:
+		// everything else
+		valcopy = reflect.New(mapType.Elem()).Elem()                // addressable V
+		valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+		valptr.Set(valcopy.Addr())                                  //
+		valbase = toStructPointer(valptr.Addr())                    // **V
+	}
+	return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+	var state errorState
+	// Encode fields in tag order so that decoders may use optimizations
+	// that depend on the ordering.
+	// https://developers.google.com/protocol-buffers/docs/encoding#order
+	for _, i := range prop.order {
+		p := prop.Prop[i]
+		if p.enc != nil {
+			err := p.enc(o, p, base)
+			if err != nil {
+				if err == ErrNil {
+					if p.Required && state.err == nil {
+						state.err = &RequiredNotSetError{p.Name}
+					}
+				} else if err == errRepeatedHasNil {
+					// Give more context to nil values in repeated fields.
+					return errors.New("repeated field " + p.OrigName + " has nil element")
+				} else if !state.shouldContinue(err, p) {
+					return err
+				}
+			}
+			if len(o.buf) > maxMarshalSize {
+				return ErrTooLarge
+			}
+		}
+	}
+
+	// Do oneof fields.
+	if prop.oneofMarshaler != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		if err := prop.oneofMarshaler(m, o); err == ErrNil {
+			return errOneofHasNil
+		} else if err != nil {
+			return err
+		}
+	}
+
+	// Add unrecognized fields at the end.
+	if prop.unrecField.IsValid() {
+		v := *structPointer_Bytes(base, prop.unrecField)
+		if len(o.buf)+len(v) > maxMarshalSize {
+			return ErrTooLarge
+		}
+		if len(v) > 0 {
+			o.buf = append(o.buf, v...)
+		}
+	}
+
+	return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+	for _, i := range prop.order {
+		p := prop.Prop[i]
+		if p.size != nil {
+			n += p.size(p, base)
+		}
+	}
+
+	// Add unrecognized fields at the end.
+	if prop.unrecField.IsValid() {
+		v := *structPointer_Bytes(base, prop.unrecField)
+		n += len(v)
+	}
+
+	// Factor in any oneof fields.
+	if prop.oneofSizer != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		n += prop.oneofSizer(m)
+	}
+
+	return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+	return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+	iLen := len(o.buf)
+	o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+	iMsg := len(o.buf)
+	err := enc()
+	if err != nil && !state.shouldContinue(err, nil) {
+		return err
+	}
+	lMsg := len(o.buf) - iMsg
+	lLen := sizeVarint(uint64(lMsg))
+	switch x := lLen - (iMsg - iLen); {
+	case x > 0: // actual length is x bytes larger than the space we reserved
+		// Move msg x bytes right.
+		o.buf = append(o.buf, zeroes[:x]...)
+		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+	case x < 0: // actual length is -x bytes smaller than the space we reserved
+		// Move msg -x bytes left.
+		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+		o.buf = o.buf[:len(o.buf)+x] // x is negative
+	}
+	// Encode the length in the reserved space.
+	o.buf = o.buf[:iLen]
+	o.EncodeVarint(uint64(lMsg))
+	o.buf = o.buf[:len(o.buf)+lMsg]
+	return state.err
+}
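+
+// For example, if the nested encoding turns out to be 300 bytes long, its
+// length varint needs only 2 bytes rather than the 4 reserved above, so
+// x = -2 and the message is shifted 2 bytes to the left before the length
+// is written.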
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+	err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+	// Ignore unset required fields.
+	reqNotSet, ok := err.(*RequiredNotSetError)
+	if !ok {
+		return false
+	}
+	if s.err == nil {
+		if prop != nil {
+			err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+		}
+		s.err = err
+	}
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..2ed1cf5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
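+
+// A minimal sketch of the rules above, assuming a generated message such as
+// the Test example in the package documentation (imported here as pb):
+//
+//	a := &pb.Test{Label: proto.String("hi"), Reps: []int64{1, 2}}
+//	b := &pb.Test{Label: proto.String("hi"), Reps: []int64{1, 2}}
+//	proto.Equal(a, b) // true: same type, corresponding fields equal
+//	b.Type = proto.Int32(7)
+//	proto.Equal(a, b) // false: Type is set in b but unset in a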
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			b1, ok := f1.Interface().(raw)
+			if ok {
+				b2 := f2.Interface().(raw)
+				// RawMessage
+				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+					return false
+				}
+				continue
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	if !bytes.Equal(u1, u2) {
+		return false
+	}
+
+	return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			continue
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..6b9b363
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,586 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+	if ep, ok := p.(extendableProto); ok {
+		return ep, ok
+	}
+	if ep, ok := p.(extendableProtoV1); ok {
+		return extensionAdapter{ep}, ok
+	}
+	return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension,
+	// only desc and value are set. When the message is marshaled,
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, ok := extendable(base)
+	if !ok {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return nil // fast path
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+	for k, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		et := reflect.TypeOf(e.desc.ExtensionType)
+		props := extensionProperties(e.desc)
+
+		p := NewBuffer(nil)
+		// If e.value has type T, the encoder expects a *struct{ X T }.
+		// Pass a *T with a zero field and hope it all works out.
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(e.value))
+		if err := props.enc(p, props, toStructPointer(x)); err != nil {
+			return err
+		}
+		e.enc = p.buf
+		m[k] = e
+	}
+	return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		et := reflect.TypeOf(e.desc.ExtensionType)
+		props := extensionProperties(e.desc)
+
+		// If e.value has type T, the encoder expects a *struct{ X T }.
+		// Pass a *T with a zero field and hope it all works out.
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(e.value))
+		n += props.size(props, toStructPointer(x))
+	}
+	return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, ok := extendable(pb)
+	if !ok {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok = extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value, it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return nil, err
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
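+
+// A minimal usage sketch, assuming a hypothetical registered extension
+// descriptor pb.E_Foo whose ExtensionType is *string:
+//
+//	v, err := proto.GetExtension(msg, pb.E_Foo)
+//	switch {
+//	case err == proto.ErrMissingExtension:
+//		// extension not set and no default declared
+//	case err == nil:
+//		s := *(v.(*string)) // the set value, or the declared default
+//		_ = s
+//	}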
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for the extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr; we can return sf.value directly.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	o := NewBuffer(b)
+
+	t := reflect.TypeOf(extension.ExtensionType)
+
+	props := extensionProperties(extension)
+
+	// t is a pointer to a struct, a pointer to a basic type, or a slice.
+	// Allocate a "field" to hold the pointer/slice itself and pass the
+	// address of that field to props.dec, which interprets it as a
+	// *struct{ x t } with a single zero field.
+	value := reflect.New(t).Elem()
+
+	for {
+		// Discard wire type and field number varint. It isn't needed.
+		if _, err := o.DecodeVarint(); err != nil {
+			return nil, err
+		}
+
+		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+			return nil, err
+		}
+
+		if o.index >= len(o.buf) {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, ok := extendable(pb)
+	if !ok {
+		return errors.New("proto: not an extendable proto")
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return errors.New("proto: bad extension value type")
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
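+
+// Note that a typed nil is rejected rather than silently dropped; for a
+// hypothetical message-valued extension pb.E_Bar with ExtensionType *pb.Nested:
+//
+//	var nested *pb.Nested
+//	err := proto.SetExtension(msg, pb.E_Bar, nested) // returns an error: nil value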
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..ac4ddbc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Getters are only generated for message and oneof fields.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders.  Useful for tuning the library itself.
+type Stats struct {
+	Emalloc uint64 // mallocs in encode
+	Dmalloc uint64 // mallocs in decode
+	Encode  uint64 // number of encodes
+	Decode  uint64 // number of decodes
+	Chit    uint64 // number of cache hits
+	Cmiss   uint64 // number of cache misses
+	Size    uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	// pools of basic types to amortize allocation.
+	bools   []bool
+	uint32s []uint32
+	uint64s []uint64
+
+	// extra pools, only used with pointer_reflect.go
+	int32s   []int32
+	int64s   []int64
+	float32s []float32
+	float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
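+
+// A minimal reuse sketch (msg1 and msg2 stand for any generated messages):
+//
+//	buf := proto.NewBuffer(nil)
+//	if err := buf.Marshal(msg1); err != nil {
+//		// handle the error
+//	}
+//	wire := buf.Bytes() // contents valid until the next Reset/Marshal
+//	_ = wire
+//	buf.Reset()
+//	_ = buf.Marshal(msg2) // reuses the same backing storage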
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
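+
+// For example, with the FOO_value map from the package documentation, both
+// representations decode to 17:
+//
+//	UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form
+//	UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric form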
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
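+
+// A small sketch, using the Test example from the package documentation
+// (optional int32 type = 2 [default=77]):
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t) // t.Type was nil and now points at 77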
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage,
+	// which records its scalar fields (with any proto-declared default
+	// values) and the indices of its nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{
+		vs: vs,
+		// default Less function: textual comparison
+		less: func(a, b reflect.Value) bool {
+			return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+		},
+	}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+	// numeric keys are sorted numerically.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
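In-package sketch of how the machinery above is used (the sample map is illustrative): the encoder collects the keys, sorts them with mapKeys, and then emits entries in that order.

m := map[int32]string{10: "c", 2: "a", 7: "b"}
keys := reflect.ValueOf(m).MapKeys()
sort.Sort(mapKeys(keys)) // int32 keys compare numerically
for _, k := range keys {
	fmt.Println(k.Int()) // 2, 7, 10: a deterministic encoding order
}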
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..fd982de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
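For concreteness, marshaling a one-item messageSet (an in-package sketch; the type ID 12345 and the payload are made up) produces exactly the group-framed layout sketched in the comment above:

ms := &messageSet{Item: []*_MessageSet_Item{{
	TypeId:  Int32(12345),
	Message: []byte("hi"),
}}}
b, _ := Marshal(ms)
// b == 0x0b                  field 1, start group
//      0x10 0xb9 0x60        field 2, type_id = 12345 (varint)
//      0x1a 0x02 0x68 0x69   field 3, message = "hi"
//      0x0c                  field 1, end group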
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	if ms.find(pb) != nil {
+		return true
+	}
+	return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
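A tiny in-package sketch of what skipVarint strips (the bytes are illustrative): the leading varint, here the two-byte encoding of 300, is dropped and the rest of the buffer is returned.

buf := []byte{0xac, 0x02, 0x08, 0x01} // varint 300, then two more bytes
rest := skipVarint(buf)
// rest == []byte{0x08, 0x01}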
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		if err := encodeExtensions(exts); err != nil {
+			return nil, err
+		}
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		if err := encodeExtensionsMap(exts); err != nil {
+			return nil, err
+		}
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+
+	// Sort extension IDs to provide a deterministic encoding.
+	// See also enc_map in encode.go.
+	ids := make([]int, 0, len(m))
+	for id := range m {
+		ids = append(ids, int(id))
+	}
+	sort.Ints(ids)
+
+	ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+	for _, id := range ids {
+		e := m[int32(id)]
+		// Remove the wire type and field number varint, as well as the length varint.
+		msg := skipVarint(skipVarint(e.enc))
+
+		ms.Item = append(ms.Item, &_MessageSet_Item{
+			TypeId:  Int32(int32(id)),
+			Message: msg,
+		})
+	}
+	return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		if i > 0 {
+			b.WriteByte(',')
+		}
+
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+			continue
+		}
+		fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+		x := ext.value
+		if x == nil {
+			x = reflect.New(msd.t.Elem()).Interface()
+			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+				return nil, err
+			}
+		}
+		d, err := json.Marshal(x)
+		if err != nil {
+			return nil, err
+		}
+		b.Write(d)
+	}
+	b.WriteByte('}')
+	return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+	// Common-case fast path.
+	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+		return nil
+	}
+
+	// This is fairly tricky, and it's not clear that it is needed.
+	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+	t    reflect.Type // pointer to struct
+	name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+	messageSetMap[fieldNum] = messageSetDesc{
+		t:    reflect.TypeOf(m),
+		name: name,
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..fb512e2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"math"
+	"reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+	v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+	return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+	return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+	return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+	// Special case: an extension map entry with a value of type T
+	// passes a *T to the struct-handling code with a zero field,
+	// expecting that it will be treated as equivalent to *struct{ X T },
+	// which has the same memory layout. We have to handle that case
+	// specially, because reflect will panic if we call FieldByIndex on a
+	// non-struct.
+	if f == nil {
+		return p.v.Elem()
+	}
+
+	return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+	return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+	return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+	v reflect.Value
+}
+
+func (p structPointerSlice) Len() int                  { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+	p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+	int32Type   = reflect.TypeOf(int32(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	float32Type = reflect.TypeOf(float32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+	v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+	return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int32Type:
+		if len(o.int32s) == 0 {
+			o.int32s = make([]int32, uint32PoolSize)
+		}
+		o.int32s[0] = int32(x)
+		p.v.Set(reflect.ValueOf(&o.int32s[0]))
+		o.int32s = o.int32s[1:]
+		return
+	case uint32Type:
+		if len(o.uint32s) == 0 {
+			o.uint32s = make([]uint32, uint32PoolSize)
+		}
+		o.uint32s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+		o.uint32s = o.uint32s[1:]
+		return
+	case float32Type:
+		if len(o.float32s) == 0 {
+			o.float32s = make([]float32, uint32PoolSize)
+		}
+		o.float32s[0] = math.Float32frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float32s[0]))
+		o.float32s = o.float32s[1:]
+		return
+	}
+
+	// must be enum
+	p.v.Set(reflect.New(t))
+	p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+	elem := p.v.Elem()
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+	v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	switch p.v.Type() {
+	case int32Type:
+		p.v.SetInt(int64(x))
+		return
+	case uint32Type:
+		p.v.SetUint(uint64(x))
+		return
+	case float32Type:
+		p.v.SetFloat(float64(math.Float32frombits(x)))
+		return
+	}
+
+	// must be enum
+	p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+	elem := p.v
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+	v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+	n, m := p.v.Len(), p.v.Cap()
+	if n < m {
+		p.v.SetLen(n + 1)
+	} else {
+		t := p.v.Type().Elem()
+		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+	}
+	elem := p.v.Index(n)
+	switch elem.Kind() {
+	case reflect.Int32:
+		elem.SetInt(int64(int32(x)))
+	case reflect.Uint32:
+		elem.SetUint(uint64(x))
+	case reflect.Float32:
+		elem.SetFloat(float64(math.Float32frombits(x)))
+	}
+}
+
+func (p word32Slice) Len() int {
+	return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+	elem := p.v.Index(i)
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+	return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+	v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int64Type:
+		if len(o.int64s) == 0 {
+			o.int64s = make([]int64, uint64PoolSize)
+		}
+		o.int64s[0] = int64(x)
+		p.v.Set(reflect.ValueOf(&o.int64s[0]))
+		o.int64s = o.int64s[1:]
+		return
+	case uint64Type:
+		if len(o.uint64s) == 0 {
+			o.uint64s = make([]uint64, uint64PoolSize)
+		}
+		o.uint64s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+		o.uint64s = o.uint64s[1:]
+		return
+	case float64Type:
+		if len(o.float64s) == 0 {
+			o.float64s = make([]float64, uint64PoolSize)
+		}
+		o.float64s[0] = math.Float64frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float64s[0]))
+		o.float64s = o.float64s[1:]
+		return
+	}
+	panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+	return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+	elem := p.v.Elem()
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+	return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+	v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+	switch p.v.Type() {
+	case int64Type:
+		p.v.SetInt(int64(x))
+		return
+	case uint64Type:
+		p.v.SetUint(x)
+		return
+	case float64Type:
+		p.v.SetFloat(math.Float64frombits(x))
+		return
+	}
+	panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+	elem := p.v
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+	return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+	v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+	n, m := p.v.Len(), p.v.Cap()
+	if n < m {
+		p.v.SetLen(n + 1)
+	} else {
+		t := p.v.Type().Elem()
+		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+	}
+	elem := p.v.Index(n)
+	switch elem.Kind() {
+	case reflect.Int64:
+		elem.SetInt(int64(int64(x)))
+	case reflect.Uint64:
+		elem.SetUint(uint64(x))
+	case reflect.Float64:
+		elem.SetFloat(float64(math.Float64frombits(x)))
+	}
+}
+
+func (p word64Slice) Len() int {
+	return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+	elem := p.v.Index(i)
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return uint64(elem.Uint())
+	case reflect.Float64:
+		return math.Float64bits(float64(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+	return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..6b5567d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+//	type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+	return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+	return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != ^field(0)
+}
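In-package sketch of what a field value holds under this representation (the pair struct is illustrative): it is simply the byte offset reported by reflect.

type pair struct{ A, B int32 }
sf, _ := reflect.TypeOf(pair{}).FieldByName("B")
f := toField(&sf)
// f == field(4): B starts four bytes into the struct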
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int                  { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+	return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	if len(o.uint32s) == 0 {
+		o.uint32s = make([]uint32, uint32PoolSize)
+	}
+	o.uint32s[0] = x
+	*p = &o.uint32s[0]
+	o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+	return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
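Putting the pieces together, an in-package sketch (the msg struct is illustrative) of reading an optional numeric field through these accessors:

type msg struct{ Count *int32 }
m := &msg{Count: Int32(7)}
sp := toStructPointer(reflect.ValueOf(m))
sf, _ := reflect.TypeOf(msg{}).FieldByName("Count")
w := structPointer_Word32(sp, toField(&sf))
// word32_IsNil(w) == false, word32_Get(w) == 7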
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	*p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+	return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
+func (v *word32Slice) Len() int           { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+	if len(o.uint64s) == 0 {
+		o.uint64s = make([]uint64, uint64PoolSize)
+	}
+	o.uint64s[0] = x
+	*p = &o.uint64s[0]
+	o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+	return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+	return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+	*p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+	return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
+func (v *word64Slice) Len() int           { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..ec2289c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines describing the per-field properties used to encode and decode
+ * the protocol buffer wire format.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
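Every field on the wire is preceded by a key varint that packs the field number together with one of these constants (a small in-package sketch; field number 1 is arbitrary):

key := uint64(1)<<3 | WireBytes // field 1, length-delimited
fmt.Printf("% x\n", EncodeVarint(key))
// prints: 0a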
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
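In-package sketch of the two storage paths (the tag numbers are arbitrary): tags below tagMapFastLimit are indexed directly in the slice, larger ones fall back to the map.

var tm tagMap
tm.put(3, 0)     // small tag: stored in fastTags[3]
tm.put(20000, 1) // large tag: stored in slowTags[20000]
fi, ok := tm.get(3)
fmt.Println(fi, ok) // 0 true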
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+	unrecField       field          // field id of the XXX_unrecognized []byte field
+	extendable       bool           // is this an extendable proto
+
+	oneofMarshaler   oneofMarshaler
+	oneofUnmarshaler oneofUnmarshaler
+	oneofSizer       oneofSizer
+	stype            reflect.Type
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+	def_uint64 uint64
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeVarint
+		p.valDec = (*Buffer).DecodeVarint
+		p.valSize = sizeVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+		p.valEnc = (*Buffer).EncodeFixed32
+		p.valDec = (*Buffer).DecodeFixed32
+		p.valSize = sizeFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+		p.valEnc = (*Buffer).EncodeFixed64
+		p.valDec = (*Buffer).DecodeFixed64
+		p.valSize = sizeFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag32
+		p.valDec = (*Buffer).DecodeZigzag32
+		p.valSize = sizeZigzag32
+	case "zigzag64":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag64
+		p.valDec = (*Buffer).DecodeZigzag64
+		p.valSize = sizeZigzag64
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break
+			}
+		}
+	}
+}
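Because Properties and Parse are exported, the tag syntax can be inspected directly (a minimal sketch; the sample tag string is made up and the package is assumed to be imported as proto):

var p proto.Properties
p.Parse("bytes,3,opt,name=host,def=example.com")
fmt.Println(p.Tag, p.Wire, p.OrigName, p.HasDefault, p.Default)
// 3 bytes host true example.com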
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	p.enc = nil
+	p.dec = nil
+	p.size = nil
+
+	switch t1 := typ; t1.Kind() {
+	default:
+		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+	// proto3 scalar types
+
+	case reflect.Bool:
+		p.enc = (*Buffer).enc_proto3_bool
+		p.dec = (*Buffer).dec_proto3_bool
+		p.size = size_proto3_bool
+	case reflect.Int32:
+		p.enc = (*Buffer).enc_proto3_int32
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_int32
+	case reflect.Uint32:
+		p.enc = (*Buffer).enc_proto3_uint32
+		p.dec = (*Buffer).dec_proto3_int32 // can reuse
+		p.size = size_proto3_uint32
+	case reflect.Int64, reflect.Uint64:
+		p.enc = (*Buffer).enc_proto3_int64
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.Float32:
+		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_uint32
+	case reflect.Float64:
+		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.String:
+		p.enc = (*Buffer).enc_proto3_string
+		p.dec = (*Buffer).dec_proto3_string
+		p.size = size_proto3_string
+
+	case reflect.Ptr:
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+			break
+		case reflect.Bool:
+			p.enc = (*Buffer).enc_bool
+			p.dec = (*Buffer).dec_bool
+			p.size = size_bool
+		case reflect.Int32:
+			p.enc = (*Buffer).enc_int32
+			p.dec = (*Buffer).dec_int32
+			p.size = size_int32
+		case reflect.Uint32:
+			p.enc = (*Buffer).enc_uint32
+			p.dec = (*Buffer).dec_int32 // can reuse
+			p.size = size_uint32
+		case reflect.Int64, reflect.Uint64:
+			p.enc = (*Buffer).enc_int64
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.Float32:
+			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+			p.dec = (*Buffer).dec_int32
+			p.size = size_uint32
+		case reflect.Float64:
+			p.enc = (*Buffer).enc_int64 // can just treat them as bits
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.String:
+			p.enc = (*Buffer).enc_string
+			p.dec = (*Buffer).dec_string
+			p.size = size_string
+		case reflect.Struct:
+			p.stype = t1.Elem()
+			p.isMarshaler = isMarshaler(t1)
+			p.isUnmarshaler = isUnmarshaler(t1)
+			if p.Wire == "bytes" {
+				p.enc = (*Buffer).enc_struct_message
+				p.dec = (*Buffer).dec_struct_message
+				p.size = size_struct_message
+			} else {
+				p.enc = (*Buffer).enc_struct_group
+				p.dec = (*Buffer).dec_struct_group
+				p.size = size_struct_group
+			}
+		}
+
+	case reflect.Slice:
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			logNoSliceEnc(t1, t2)
+			break
+		case reflect.Bool:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_bool
+				p.size = size_slice_packed_bool
+			} else {
+				p.enc = (*Buffer).enc_slice_bool
+				p.size = size_slice_bool
+			}
+			p.dec = (*Buffer).dec_slice_bool
+			p.packedDec = (*Buffer).dec_slice_packed_bool
+		case reflect.Int32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int32
+				p.size = size_slice_packed_int32
+			} else {
+				p.enc = (*Buffer).enc_slice_int32
+				p.size = size_slice_int32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Uint32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_uint32
+				p.size = size_slice_packed_uint32
+			} else {
+				p.enc = (*Buffer).enc_slice_uint32
+				p.size = size_slice_uint32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Int64, reflect.Uint64:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int64
+				p.size = size_slice_packed_int64
+			} else {
+				p.enc = (*Buffer).enc_slice_int64
+				p.size = size_slice_int64
+			}
+			p.dec = (*Buffer).dec_slice_int64
+			p.packedDec = (*Buffer).dec_slice_packed_int64
+		case reflect.Uint8:
+			p.dec = (*Buffer).dec_slice_byte
+			if p.proto3 {
+				p.enc = (*Buffer).enc_proto3_slice_byte
+				p.size = size_proto3_slice_byte
+			} else {
+				p.enc = (*Buffer).enc_slice_byte
+				p.size = size_slice_byte
+			}
+		case reflect.Float32, reflect.Float64:
+			switch t2.Bits() {
+			case 32:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_uint32
+					p.size = size_slice_packed_uint32
+				} else {
+					p.enc = (*Buffer).enc_slice_uint32
+					p.size = size_slice_uint32
+				}
+				p.dec = (*Buffer).dec_slice_int32
+				p.packedDec = (*Buffer).dec_slice_packed_int32
+			case 64:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_int64
+					p.size = size_slice_packed_int64
+				} else {
+					p.enc = (*Buffer).enc_slice_int64
+					p.size = size_slice_int64
+				}
+				p.dec = (*Buffer).dec_slice_int64
+				p.packedDec = (*Buffer).dec_slice_packed_int64
+			default:
+				logNoSliceEnc(t1, t2)
+				break
+			}
+		case reflect.String:
+			p.enc = (*Buffer).enc_slice_string
+			p.dec = (*Buffer).dec_slice_string
+			p.size = size_slice_string
+		case reflect.Ptr:
+			switch t3 := t2.Elem(); t3.Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+				break
+			case reflect.Struct:
+				p.stype = t2.Elem()
+				p.isMarshaler = isMarshaler(t2)
+				p.isUnmarshaler = isUnmarshaler(t2)
+				if p.Wire == "bytes" {
+					p.enc = (*Buffer).enc_slice_struct_message
+					p.dec = (*Buffer).dec_slice_struct_message
+					p.size = size_slice_struct_message
+				} else {
+					p.enc = (*Buffer).enc_slice_struct_group
+					p.dec = (*Buffer).dec_slice_struct_group
+					p.size = size_slice_struct_group
+				}
+			}
+		case reflect.Slice:
+			switch t2.Elem().Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+				break
+			case reflect.Uint8:
+				p.enc = (*Buffer).enc_slice_slice_byte
+				p.dec = (*Buffer).dec_slice_slice_byte
+				p.size = size_slice_slice_byte
+			}
+		}
+
+	case reflect.Map:
+		p.enc = (*Buffer).enc_new_map
+		p.dec = (*Buffer).dec_new_map
+		p.size = size_new_map
+
+		p.mtype = t1
+		p.mkeyprop = &Properties{}
+		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.mvalprop = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	// precalculate tag code
+	wire := p.WireType
+	if p.Packed {
+		wire = WireBytes
+	}
+	x := uint32(p.Tag)<<3 | uint32(wire)
+	i := 0
+	for i = 0; x > 127; i++ {
+		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	p.tagbuf[i] = uint8(x)
+	p.tagcode = p.tagbuf[0 : i+1]
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isMarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isMarshaler")
+	}
+	return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isUnmarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isUnmarshaler")
+	}
+	return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if f != nil {
+		p.field = toField(f)
+	}
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return prop
+	}
+	if collectStats {
+		stats.Cmiss++
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+		reflect.PtrTo(t).Implements(extendableProtoV1Type)
+	prop.unrecField = invalidField
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		if f.Name == "XXX_InternalExtensions" { // special case
+			p.enc = (*Buffer).enc_exts
+			p.dec = nil // not needed
+			p.size = size_exts
+		} else if f.Name == "XXX_extensions" { // special case
+			p.enc = (*Buffer).enc_map
+			p.dec = nil // not needed
+			p.size = size_map
+		} else if f.Name == "XXX_unrecognized" { // special case
+			prop.unrecField = toField(&f)
+		}
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	type oneofMessage interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+		var oots []interface{}
+		prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+		prop.stype = t
+
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+	if len(x) != 1 {
+		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+		return nil
+	}
+	prop := GetProperties(t)
+	return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+	if pb == nil {
+		err = ErrNil
+		return
+	}
+	// get the reflect type of the pointer to the struct.
+	t = reflect.TypeOf(pb)
+	// get the address of the struct.
+	value := reflect.ValueOf(pb)
+	b = toStructPointer(value)
+	return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypes    = make(map[string]reflect.Type)
+	revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypes[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..965876b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	gtNewline       = []byte(">\n")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+	Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		if b, ok := fv.Interface().(raw); ok {
+			if err := writeRaw(w, b.Bytes()); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, ok := extendable(pv.Interface()); ok {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+	if err := w.WriteByte('<'); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	w.indent()
+	if err := writeUnknownStruct(w, b); err != nil {
+		return err
+	}
+	w.unindent()
+	if err := w.WriteByte('>'); err != nil {
+		return err
+	}
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else if err := tm.writeStruct(w, v); err != nil {
+			return err
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..4fd0531
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,891 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+	errBadHex  = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		base := 8
+		ss := s[:2]
+		s = s[2:]
+		if r == 'x' || r == 'X' {
+			base = 16
+		} else {
+			ss = string(r) + ss
+		}
+		i, err := strconv.ParseUint(ss, base, 8)
+		if err != nil {
+			return "", "", err
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'u', 'U':
+		n := 4
+		if r == 'U' {
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+		}
+
+		bs := make([]byte, n/2)
+		for i := 0; i < n; i += 2 {
+			a, ok1 := unhex(s[i])
+			b, ok2 := unhex(s[i+1])
+			if !ok1 || !ok2 {
+				return "", "", errBadHex
+			}
+			bs[i/2] = a<<4 | b
+		}
+		s = s[n:]
+		return string(bs), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+	switch {
+	case '0' <= b && b <= '9':
+		return b - '0', true
+	case 'a' <= b && b <= 'f':
+		return b - 'a' + 10, true
+	case 'A' <= b && b <= 'F':
+		return b - 'A' + 10, true
+	}
+	return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			sv.Field(oop.Field).Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.mkeyprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.mvalprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		err := um.UnmarshalText([]byte(s))
+		return err
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+		return pe
+	}
+	return nil
+}
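
As a rough sketch of how the parser above is driven, proto.UnmarshalText can be pointed at any generated message; the vendored google.protobuf.Duration type is used here purely because it is a small message included in this same change:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/golang/protobuf/proto"
    	durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
    	// Scalar fields are parsed by the reflect.Int64/Int32 cases of
    	// readAny; field names are the proto names ("seconds", "nanos").
    	var d durpb.Duration
    	if err := proto.UnmarshalText("seconds: 3 nanos: 500000000", &d); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(d.Seconds, d.Nanos) // 3 500000000
    }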
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..89e07ae
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,136 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding message
+// type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type
+// of the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is reports whether the given Any value contains a message of the same type as pb.
+func Is(any *any.Any, pb proto.Message) bool {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return false
+	}
+
+	return aname == proto.MessageName(pb)
+}
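
A minimal sketch of the API defined above, packing the vendored Duration message into an Any and unpacking it both into a known type and into a DynamicAny:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/golang/protobuf/ptypes"
    	durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
    	// MarshalAny sets the type URL to
    	// "type.googleapis.com/google.protobuf.Duration".
    	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Is checks the contained type without unpacking the value.
    	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

    	// Unpack into a known message type.
    	var d durpb.Duration
    	if err := ptypes.UnmarshalAny(a, &d); err != nil {
    		log.Fatal(err)
    	}

    	// Or let DynamicAny allocate the right type at run time; the
    	// type must still be linked into the binary (see Empty above).
    	var dyn ptypes.DynamicAny
    	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
    }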
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..f2c6906
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,155 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+// DO NOT EDIT!
+
+/*
+Package any is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/any/any.proto
+
+It has these top-level messages:
+	Any
+*/
+package any
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name whose content describes the type of the
+	// serialized protocol buffer message.
+	//
+	// For URLs which use the scheme `http`, `https`, or no scheme, the
+	// following restrictions and interpretations apply:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * The last segment of the URL's path must represent the fully
+	//   qualified name of the type (as in `path/google.protobuf.Duration`).
+	//   The name should be in a canonical form (e.g., leading "." is
+	//   not accepted).
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *Any) Reset()                    { *m = Any{} }
+func (m *Any) String() string            { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()               {}
+func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Any) XXX_WellKnownType() string   { return "Any" }
+
+func init() {
+	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 187 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
+	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
+	0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
+	0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
+	0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
+	0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
+	0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
+	0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
+	0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
+	0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
+	0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..81dcf46
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,140 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * The last segment of the URL's path must represent the fully
+  //   qualified name of the type (as in `path/google.protobuf.Duration`).
+  //   The name should be in a canonical form (e.g., leading "." is
+  //   not accepted).
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..65cb0f8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
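
A short sketch of the round trip between time.Duration and the generated durpb.Duration using the two converters above:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/golang/protobuf/ptypes"
    )

    func main() {
    	// time.Duration -> durpb.Duration
    	p := ptypes.DurationProto(90 * time.Second)
    	fmt.Println(p.Seconds, p.Nanos) // 90 0

    	// durpb.Duration -> time.Duration, with the range and sign
    	// checks performed by validateDuration.
    	d, err := ptypes.Duration(p)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(d) // 1m30s
    }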
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..5697483
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,114 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+// DO NOT EDIT!
+
+/*
+Package duration is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/duration/duration.proto
+
+It has these top-level messages:
+	Duration
+*/
+package duration
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset()                    { *m = Duration{} }
+func (m *Duration) String() string            { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()               {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Duration) XXX_WellKnownType() string   { return "Duration" }
+
+func init() {
+	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() {
+	proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 189 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
+	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
+	0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
+	0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
+	0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
+	0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
+	0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
+	0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13,
+	0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
+	0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
+	0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..96c1796
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,98 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive.
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..46c765a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,69 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+// DO NOT EDIT!
+
+/*
+Package empty is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/empty/empty.proto
+
+It has these top-level messages:
+	Empty
+*/
+package empty
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+type Empty struct {
+}
+
+func (m *Empty) Reset()                    { *m = Empty{} }
+func (m *Empty) String() string            { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()               {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Empty) XXX_WellKnownType() string   { return "Empty" }
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() {
+	proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 150 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
+	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd,
+	0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54,
+	0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e,
+	0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2,
+	0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92,
+	0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e,
+	0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb,
+	0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..37f4cd1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,53 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
new file mode 100755
index 0000000..2a5b4e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh
@@ -0,0 +1,66 @@
+#!/bin/bash -e
+#
+# This script fetches and rebuilds the "well-known types" protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+# You also need Go and Git installed.
+
+PKG=github.com/golang/protobuf/ptypes
+UPSTREAM=https://github.com/google/protobuf
+UPSTREAM_SUBDIR=src/google/protobuf
+PROTO_FILES='
+  any.proto
+  duration.proto
+  empty.proto
+  struct.proto
+  timestamp.proto
+  wrappers.proto
+'
+
+function die() {
+  echo 1>&2 $*
+  exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go git protoc protoc-gen-go; do
+  q=$(which $tool) || die "didn't find $tool"
+  echo 1>&2 "$tool: $q"
+done
+
+tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
+trap 'rm -rf $tmpdir' EXIT
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+echo 1>&2 "fetching latest protos... "
+git clone -q $UPSTREAM $tmpdir
+# Pass 1: build mapping from upstream filename to our filename.
+declare -A filename_map
+for f in $(cd $PKG && find * -name '*.proto'); do
+  echo -n 1>&2 "looking for latest version of $f... "
+  up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
+  echo 1>&2 $up
+  if [ $(echo $up | wc -w) != "1" ]; then
+    die "not exactly one match"
+  fi
+  filename_map[$up]=$f
+done
+# Pass 2: copy files
+for up in "${!filename_map[@]}"; do
+  f=${filename_map[$up]}
+  shortname=$(basename $f | sed 's,\.proto$,,')
+  cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
+done
+
+# Run protoc once per package.
+for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
+  echo 1>&2 "* $dir"
+  protoc --go_out=. $dir/*.proto
+done
+echo 1>&2 "All OK"
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..197042e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,382 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/struct/struct.proto
+// DO NOT EDIT!
+
+/*
+Package structpb is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/struct/struct.proto
+
+It has these top-level messages:
+	Struct
+	Value
+	ListValue
+*/
+package structpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+	0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+	"NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+	return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (NullValue) XXX_WellKnownType() string       { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+	// Unordered map of dynamically typed values.
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Struct) Reset()                    { *m = Struct{} }
+func (m *Struct) String() string            { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage()               {}
+func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Struct) XXX_WellKnownType() string   { return "Struct" }
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of these
+// variants; the absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	// The kind of value.
+	//
+	// Types that are valid to be assigned to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (m *Value) Reset()                    { *m = Value{} }
+func (m *Value) String() string            { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()               {}
+func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*Value) XXX_WellKnownType() string   { return "Value" }
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
+}
+type Value_NumberValue struct {
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
+}
+type Value_StringValue struct {
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
+}
+type Value_BoolValue struct {
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
+}
+type Value_StructValue struct {
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
+}
+type Value_ListValue struct {
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind()   {}
+func (*Value_NumberValue) isValue_Kind() {}
+func (*Value_StringValue) isValue_Kind() {}
+func (*Value_BoolValue) isValue_Kind()   {}
+func (*Value_StructValue) isValue_Kind() {}
+func (*Value_ListValue) isValue_Kind()   {}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+	if x, ok := m.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+	if x, ok := m.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (m *Value) GetStringValue() string {
+	if x, ok := m.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+	if x, ok := m.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+	if x, ok := m.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+	if x, ok := m.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+}
+
+func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Value)
+	// kind
+	switch x := m.Kind.(type) {
+	case *Value_NullValue:
+		b.EncodeVarint(1<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.NullValue))
+	case *Value_NumberValue:
+		b.EncodeVarint(2<<3 | proto.WireFixed64)
+		b.EncodeFixed64(math.Float64bits(x.NumberValue))
+	case *Value_StringValue:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.StringValue)
+	case *Value_BoolValue:
+		t := uint64(0)
+		if x.BoolValue {
+			t = 1
+		}
+		b.EncodeVarint(4<<3 | proto.WireVarint)
+		b.EncodeVarint(t)
+	case *Value_StructValue:
+		b.EncodeVarint(5<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.StructValue); err != nil {
+			return err
+		}
+	case *Value_ListValue:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ListValue); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("Value.Kind has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Value)
+	switch tag {
+	case 1: // kind.null_value
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Kind = &Value_NullValue{NullValue(x)}
+		return true, err
+	case 2: // kind.number_value
+		if wire != proto.WireFixed64 {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeFixed64()
+		m.Kind = &Value_NumberValue{math.Float64frombits(x)}
+		return true, err
+	case 3: // kind.string_value
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Kind = &Value_StringValue{x}
+		return true, err
+	case 4: // kind.bool_value
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Kind = &Value_BoolValue{x != 0}
+		return true, err
+	case 5: // kind.struct_value
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Struct)
+		err := b.DecodeMessage(msg)
+		m.Kind = &Value_StructValue{msg}
+		return true, err
+	case 6: // kind.list_value
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(ListValue)
+		err := b.DecodeMessage(msg)
+		m.Kind = &Value_ListValue{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Value_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Value)
+	// kind
+	switch x := m.Kind.(type) {
+	case *Value_NullValue:
+		n += proto.SizeVarint(1<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.NullValue))
+	case *Value_NumberValue:
+		n += proto.SizeVarint(2<<3 | proto.WireFixed64)
+		n += 8
+	case *Value_StringValue:
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.StringValue)))
+		n += len(x.StringValue)
+	case *Value_BoolValue:
+		n += proto.SizeVarint(4<<3 | proto.WireVarint)
+		n += 1
+	case *Value_StructValue:
+		s := proto.Size(x.StructValue)
+		n += proto.SizeVarint(5<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Value_ListValue:
+		s := proto.Size(x.ListValue)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+	// Repeated field of dynamically typed values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+}
+
+func (m *ListValue) Reset()                    { *m = ListValue{} }
+func (m *ListValue) String() string            { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage()               {}
+func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*ListValue) XXX_WellKnownType() string   { return "ListValue" }
+
+func (m *ListValue) GetValues() []*Value {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+}
+
+func init() {
+	proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 416 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+	0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09,
+	0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52,
+	0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x8a, 0x47, 0x8f,
+	0xfe, 0x42, 0x99, 0x99, 0x24, 0x4a, 0x4b, 0xc1, 0xd3, 0xf4, 0xbd, 0xf9, 0xde, 0x37, 0xef, 0xbd,
+	0x06, 0x9e, 0x97, 0x15, 0xff, 0x2c, 0x32, 0x3f, 0x67, 0x9b, 0xa0, 0x64, 0x64, 0x41, 0xcb, 0xa0,
+	0x6e, 0x18, 0x67, 0x99, 0x58, 0x05, 0x35, 0xdf, 0xd6, 0x45, 0x1b, 0xb4, 0xbc, 0x11, 0x39, 0xef,
+	0x0e, 0x5f, 0xdd, 0xe2, 0x3b, 0x25, 0x63, 0x25, 0x29, 0xfc, 0x9e, 0x9d, 0x7e, 0x47, 0x60, 0xbd,
+	0x57, 0x04, 0x0e, 0xc1, 0x5a, 0x55, 0x05, 0x59, 0xb6, 0x13, 0xe4, 0x9a, 0x9e, 0x33, 0x3b, 0xf3,
+	0x77, 0x60, 0x5f, 0x83, 0xfe, 0x1b, 0x45, 0x9d, 0x53, 0xde, 0x6c, 0xd3, 0xae, 0xe4, 0xf4, 0x1d,
+	0x38, 0xff, 0xa4, 0xf1, 0x09, 0x98, 0xeb, 0x62, 0x3b, 0x41, 0x2e, 0xf2, 0xec, 0x54, 0xfe, 0xc4,
+	0x4f, 0x60, 0xfc, 0x65, 0x41, 0x44, 0x31, 0x31, 0x5c, 0xe4, 0x39, 0xb3, 0x7b, 0x7b, 0xf2, 0x1b,
+	0x79, 0x9b, 0x6a, 0xe8, 0xa5, 0xf1, 0x02, 0x4d, 0x7f, 0x1b, 0x30, 0x56, 0x49, 0x1c, 0x02, 0x50,
+	0x41, 0xc8, 0x5c, 0x0b, 0xa4, 0xf4, 0x78, 0x76, 0xba, 0x27, 0xb8, 0x12, 0x84, 0x28, 0xfe, 0x72,
+	0x94, 0xda, 0xb4, 0x0f, 0xf0, 0x19, 0xdc, 0xa6, 0x62, 0x93, 0x15, 0xcd, 0xfc, 0xef, 0xfb, 0xe8,
+	0x72, 0x94, 0x3a, 0x3a, 0x3b, 0x40, 0x2d, 0x6f, 0x2a, 0x5a, 0x76, 0x90, 0x29, 0x1b, 0x97, 0x90,
+	0xce, 0x6a, 0xe8, 0x11, 0x40, 0xc6, 0x58, 0xdf, 0xc6, 0x91, 0x8b, 0xbc, 0x5b, 0xf2, 0x29, 0x99,
+	0xd3, 0xc0, 0x2b, 0x65, 0x11, 0x39, 0xef, 0x90, 0xb1, 0x1a, 0xf5, 0xfe, 0x81, 0x3d, 0x76, 0x7a,
+	0x91, 0xf3, 0x61, 0x4a, 0x52, 0xb5, 0x7d, 0xad, 0xa5, 0x6a, 0xf7, 0xa7, 0x8c, 0xab, 0x96, 0x0f,
+	0x53, 0x92, 0x3e, 0x88, 0x2c, 0x38, 0x5a, 0x57, 0x74, 0x39, 0x0d, 0xc1, 0x1e, 0x08, 0xec, 0x83,
+	0xa5, 0x64, 0xfd, 0x3f, 0x7a, 0x68, 0xe9, 0x1d, 0xf5, 0xf8, 0x01, 0xd8, 0xc3, 0x12, 0xf1, 0x31,
+	0xc0, 0xd5, 0x75, 0x1c, 0xcf, 0x6f, 0x5e, 0xc7, 0xd7, 0xe7, 0x27, 0xa3, 0xe8, 0x1b, 0x82, 0xbb,
+	0x39, 0xdb, 0xec, 0x2a, 0x22, 0x47, 0x4f, 0x93, 0xc8, 0x38, 0x41, 0x9f, 0x9e, 0xfe, 0xef, 0x87,
+	0x19, 0xea, 0xa3, 0xce, 0x7e, 0x20, 0xf4, 0xd3, 0x30, 0x2f, 0x92, 0xe8, 0x97, 0xf1, 0xf0, 0x42,
+	0xcb, 0x93, 0xbe, 0xbf, 0x8f, 0x05, 0x21, 0x6f, 0x29, 0xfb, 0x4a, 0x3f, 0xc8, 0xca, 0xcc, 0x52,
+	0xaa, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00,
+}
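
A small sketch of building a Struct by hand, showing how each field value is one of the generated oneof wrappers (Value_StringValue, Value_NumberValue, ...):

    package main

    import (
    	"fmt"

    	structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
    	s := &structpb.Struct{
    		Fields: map[string]*structpb.Value{
    			"name":  {Kind: &structpb.Value_StringValue{StringValue: "gddo"}},
    			"stars": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
    			"tags": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{
    				Values: []*structpb.Value{
    					{Kind: &structpb.Value_BoolValue{BoolValue: true}},
    				},
    			}}},
    		},
    	}
    	fmt.Println(s.GetFields()["name"].GetStringValue()) // gddo
    }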
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..beeba81
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
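
A minimal usage sketch of the Go API generated from this proto (the structpb
package vendored above; the field and oneof wrapper names are those emitted by
protoc-gen-go for this revision, and the values used are purely illustrative):

	package main

	import (
		"fmt"

		structpb "github.com/golang/protobuf/ptypes/struct"
	)

	func main() {
		// A Struct is a map from field names to dynamically typed Values;
		// each Value carries exactly one member of the "kind" oneof.
		s := &structpb.Struct{
			Fields: map[string]*structpb.Value{
				"name":  {Kind: &structpb.Value_StringValue{StringValue: "gddo"}},
				"stars": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
			},
		}
		fmt.Println(s)
	}
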
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..1b36576
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,125 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &tspb.Timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
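
A minimal round-trip sketch using the helpers defined above (Timestamp,
TimestampProto and TimestampString are the functions from this file; only the
standard library is assumed beyond that):

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/golang/protobuf/ptypes"
	)

	func main() {
		// time.Time -> google.protobuf.Timestamp.
		ts, err := ptypes.TimestampProto(time.Now())
		if err != nil {
			log.Fatal(err)
		}

		// google.protobuf.Timestamp -> time.Time (always in UTC).
		t, err := ptypes.Timestamp(ts)
		if err != nil {
			log.Fatal(err)
		}

		fmt.Println(t, ptypes.TimestampString(ts))
	}
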
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..ffcc515
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,127 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+// DO NOT EDIT!
+
+/*
+Package timestamp is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+It has these top-level messages:
+	Timestamp
+*/
+package timestamp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     now = time.time()
+//     seconds = int(now)
+//     nanos = int((now - seconds) * 10**9)
+//     timestamp = Timestamp(seconds=seconds, nanos=nanos)
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset()                    { *m = Timestamp{} }
+func (m *Timestamp) String() string            { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()               {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Timestamp) XXX_WellKnownType() string   { return "Timestamp" }
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() {
+	proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 194 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
+	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
+	0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
+	0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
+	0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
+	0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
+	0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27,
+	0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1,
+	0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03,
+	0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92,
+	0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01,
+	0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..7992a85
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,111 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     now = time.time()
+//     seconds = int(now)
+//     nanos = int((now - seconds) * 10**9)
+//     timestamp = Timestamp(seconds=seconds, nanos=nanos)
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..5e52a81
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+// DO NOT EDIT!
+
+/*
+Package wrappers is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+
+It has these top-level messages:
+	DoubleValue
+	FloatValue
+	Int64Value
+	UInt64Value
+	Int32Value
+	UInt32Value
+	BoolValue
+	StringValue
+	BytesValue
+*/
+package wrappers
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *DoubleValue) Reset()                    { *m = DoubleValue{} }
+func (m *DoubleValue) String() string            { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage()               {}
+func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*DoubleValue) XXX_WellKnownType() string   { return "DoubleValue" }
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *FloatValue) Reset()                    { *m = FloatValue{} }
+func (m *FloatValue) String() string            { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage()               {}
+func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*FloatValue) XXX_WellKnownType() string   { return "FloatValue" }
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int64Value) Reset()                    { *m = Int64Value{} }
+func (m *Int64Value) String() string            { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage()               {}
+func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Int64Value) XXX_WellKnownType() string   { return "Int64Value" }
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt64Value) Reset()                    { *m = UInt64Value{} }
+func (m *UInt64Value) String() string            { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage()               {}
+func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*UInt64Value) XXX_WellKnownType() string   { return "UInt64Value" }
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int32Value) Reset()                    { *m = Int32Value{} }
+func (m *Int32Value) String() string            { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage()               {}
+func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*Int32Value) XXX_WellKnownType() string   { return "Int32Value" }
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt32Value) Reset()                    { *m = UInt32Value{} }
+func (m *UInt32Value) String() string            { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage()               {}
+func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*UInt32Value) XXX_WellKnownType() string   { return "UInt32Value" }
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *BoolValue) Reset()                    { *m = BoolValue{} }
+func (m *BoolValue) String() string            { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage()               {}
+func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*BoolValue) XXX_WellKnownType() string   { return "BoolValue" }
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *StringValue) Reset()                    { *m = StringValue{} }
+func (m *StringValue) String() string            { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage()               {}
+func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*StringValue) XXX_WellKnownType() string   { return "StringValue" }
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *BytesValue) Reset()                    { *m = BytesValue{} }
+func (m *BytesValue) String() string            { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage()               {}
+func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*BytesValue) XXX_WellKnownType() string   { return "BytesValue" }
+
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() {
+	proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 260 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
+	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f,
+	0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3,
+	0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3,
+	0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d,
+	0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26,
+	0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87,
+	0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4,
+	0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1,
+	0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18,
+	0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x7a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75,
+	0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0x5b, 0xc0,
+	0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
+	0xd1, 0x01, 0x50, 0xd5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20,
+	0x5d, 0x49, 0x6c, 0x60, 0x63, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b,
+	0x1c, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..4828ad9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,119 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
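
A short sketch of why these wrappers matter in Go: a message field declared as
google.protobuf.BoolValue is generated as *wrappers.BoolValue, so an unset
field (nil) can be distinguished from an explicit false, which a plain proto3
bool cannot express. The enclosing message is hypothetical; only the wrappers
package vendored above is assumed:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/ptypes/wrappers"
	)

	func main() {
		var unset *wrappers.BoolValue                 // field never set
		explicit := &wrappers.BoolValue{Value: false} // field explicitly false

		fmt.Println(unset == nil)   // true
		fmt.Println(explicit.Value) // false
	}
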
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
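
A usage sketch for the package: Decode and NewReader come from the file above,
while Encode and NewWriter live in the package's encode half, assumed here to be
vendored at the same revision (the Godeps entry pulls in the whole package):

	package main

	import (
		"bytes"
		"fmt"
		"io/ioutil"
		"log"

		"github.com/golang/snappy"
	)

	func main() {
		// Block format: Encode/Decode operate on whole in-memory buffers.
		block := snappy.Encode(nil, []byte("hello, snappy block format"))
		decoded, err := snappy.Decode(nil, block)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", decoded)

		// Stream (framing) format: Writer/Reader wrap plain io.Writer/io.Reader.
		var buf bytes.Buffer
		w := snappy.NewWriter(&buf)
		if _, err := w.Write([]byte("hello, snappy stream format")); err != nil {
			log.Fatal(err)
		}
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}
		out, err := ioutil.ReadAll(snappy.NewReader(&buf))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", out)
	}
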
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
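
The pattern-expansion argument in the slowForwardCopy commentary above is easiest to see in pure Go. The sketch below is illustrative only and not part of this diff: it shows why the decoder's overlapping copies must run forwards, byte by byte, when the offset is smaller than the length, which a memmove-style copy would not reproduce.

package main

import "fmt"

// repeatPattern shows the forward, overlapping copy implied by a copy tag
// with offset 2 and length 6: the two seed bytes are replicated.
func repeatPattern() []byte {
	dst := []byte{'a', 'b', 0, 0, 0, 0, 0, 0}
	d, offset, length := 2, 2, 6
	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset] // byte by byte, front to back
	}
	return dst
}

func main() {
	// Prints "abababab"; the built-in copy (memmove semantics) would give
	// "abab" followed by zero bytes instead.
	fmt.Printf("%s\n", repeatPattern())
}
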
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+		// the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		for end := d + length; d != end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
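
For orientation, here is a minimal round trip through the block format using the package's exported API. This is a usage sketch, not part of the vendored files, and it uses the upstream import path, since vendored packages are only importable from within this repository.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("gddo "), 1000)
	enc := snappy.Encode(nil, src) // block format: varint length + chunks
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(src), len(enc), bytes.Equal(dec, src)) // 5000, something smaller, true
}
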
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8749689
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
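
As a quick sanity check of the bound just derived (illustrative only): for the maximum block size of 65536 bytes, 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, which is exactly the maxEncodedLenOfMaxBlockSize constant hard-coded in snappy.go later in this diff.
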
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
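
A sketch of how the buffered Writer is typically used with the framing format (illustrative only; the file name and payload are hypothetical):

package main

import (
	"log"
	"os"

	"github.com/golang/snappy"
)

func writeCompressed(path string, payload []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	w := snappy.NewBufferedWriter(f) // framing format; buffered, so Close is required
	if _, err := w.Write(payload); err != nil {
		w.Close()
		return err
	}
	return w.Close() // Close performs a final Flush
}

func main() {
	if err := writeCompressed("demo.sz", []byte("hello, snappy")); err != nil {
		log.Fatal(err)
	}
}
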
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
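
To make the 64±4 reasoning in emitCopy concrete (illustrative only, assuming an offset below 2048): a copy of length 67 is emitted as a length-60 tagCopy2 (3 bytes) plus a length-7 tagCopy1 (2 bytes), 5 bytes in total; emitting 64 first would leave a length-3 remainder, which cannot be a tagCopy1 (minimum length 4) and would need a 3-byte tagCopy2 instead. A copy of length 70 takes the length-64 branch first and finishes with a length-6 tagCopy1, also 5 bytes.
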
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..c7f445f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
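
Reading the wire-format description above against these constants, as a worked example: the byte 0xF0 has l = 0 and m = 60, i.e. a literal whose length is given by the next 1 byte; the byte 0xFE has l = 2 and m = 63, i.e. a tagCopy2 of length 1 + 63 = 64 with a 2-byte little-endian offset following. That 0xFE is exactly the byte emitCopy writes (63<<2 | tagCopy2) for its "length 64 copy, encoded as 3 bytes", and the byte the amd64 assembly stores with MOVB $0xfe.
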
diff --git a/vendor/github.com/googleapis/gax-go/.gitignore b/vendor/github.com/googleapis/gax-go/.gitignore
new file mode 100644
index 0000000..289bf1e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/.gitignore
@@ -0,0 +1 @@
+*.cover
diff --git a/vendor/github.com/googleapis/gax-go/.travis.yml b/vendor/github.com/googleapis/gax-go/.travis.yml
new file mode 100644
index 0000000..6db28b6
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/.travis.yml
@@ -0,0 +1,15 @@
+sudo: false
+language: go
+go:
+  - 1.6
+  - 1.7
+before_install:
+  - go get golang.org/x/tools/cmd/cover
+  - go get golang.org/x/tools/cmd/goimports
+script:
+  - gofmt -l .
+  - goimports -l .
+  - go tool vet .
+  - go test -coverprofile=coverage.txt -covermode=atomic
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
new file mode 100644
index 0000000..2827b7d
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+Want to contribute? Great! First, read this page (including the small print at the end).
+
+### Before you contribute
+Before we can use your code, you must sign the
+[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
+(CLA), which you can do online. The CLA is necessary mainly because you own the
+copyright to your changes, even after your contribution becomes part of our
+codebase, so we need your permission to use and distribute your code. We also
+need to be sure of various other things—for instance that you'll tell us if you
+know that your code infringes on other people's patents. You don't have to sign
+the CLA until after you've submitted your code for review and a member has
+approved it, but you must do it before we can put your code into our codebase.
+Before you start working on a larger contribution, you should get in touch with
+us first through the issue tracker with your idea so that we can help out and
+possibly guide you. Coordinating up front makes it much easier to avoid
+frustration later on.
+
+### Code reviews
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose.
+
+### The small print
+Contributions made by corporations are covered by a different agreement than
+the one above, the
+[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE
new file mode 100644
index 0000000..6d16b65
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md
new file mode 100644
index 0000000..38ebdcf
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/README.md
@@ -0,0 +1,11 @@
+Google API Extensions for Go
+============================
+
+[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go)
+
+Google API Extensions for Go (gax-go) is a set of modules which aids the
+development of APIs for clients and servers based on `gRPC` and Google API
+conventions.
+
+This project is currently experimental and not supported.
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
new file mode 100644
index 0000000..4ba1cdf
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -0,0 +1,136 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+	// Resolve applies the option by modifying cs.
+	Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
+	Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+	s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+	return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+	return &boRetryer{
+		backoff: bo,
+		codes:   append([]codes.Code(nil), cc...),
+	}
+}
+
+type boRetryer struct {
+	backoff Backoff
+	codes   []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+	c := grpc.Code(err)
+	for _, rc := range r.codes {
+		if c == rc {
+			return r.backoff.Pause(), true
+		}
+	}
+	return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+	// Initial is the initial value of the retry envelope, defaults to 1 second.
+	Initial time.Duration
+
+	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
+	Max time.Duration
+
+	// Multiplier is the factor by which the retry envelope increases.
+	// It should be greater than 1 and defaults to 2.
+	Multiplier float64
+
+	// cur is the current retry envelope
+	cur time.Duration
+}
+
+func (bo *Backoff) Pause() time.Duration {
+	if bo.Initial == 0 {
+		bo.Initial = time.Second
+	}
+	if bo.cur == 0 {
+		bo.cur = bo.Initial
+	}
+	if bo.Max == 0 {
+		bo.Max = 30 * time.Second
+	}
+	if bo.Multiplier < 1 {
+		bo.Multiplier = 2
+	}
+	d := time.Duration(rand.Int63n(int64(bo.cur)))
+	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
+	if bo.cur > bo.Max {
+		bo.cur = bo.Max
+	}
+	return d
+}
+
+type CallSettings struct {
+	// Retry returns a Retryer to be used to control retry logic of a method call.
+	// If Retry is nil or the returned Retryer is nil, the call will not be retried.
+	Retry func() Retryer
+}
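
A sketch of how these pieces compose in a client (illustrative only, not part of this diff; the retry code and backoff values are made up for the example):

package main

import (
	"fmt"
	"time"

	"github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

// retryOnUnavailable builds a CallOption that retries Unavailable errors with
// exponential backoff, the way a generated API client might configure a call.
func retryOnUnavailable() gax.CallOption {
	return gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 1.3,
		})
	})
}

func main() {
	var cs gax.CallSettings
	retryOnUnavailable().Resolve(&cs)
	fmt.Println(cs.Retry != nil) // true: the option installed a Retryer factory
}
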
diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go
new file mode 100644
index 0000000..c7e4ce9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/gax.go
@@ -0,0 +1,32 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+const Version = "0.1.0"
diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go
new file mode 100644
index 0000000..644c677
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/invoke.go
@@ -0,0 +1,91 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// A user defined call stub.
+type APICall func(context.Context) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+	var settings CallSettings
+	for _, opt := range opts {
+		opt.Resolve(&settings)
+	}
+	return invoke(ctx, call, settings, timeSleeper{})
+}
+
+type sleeper interface {
+	// Sleep sleeps for duration d or until ctx.Done() closes, whichever happens first.
+	// If ctx.Done() closes, Sleep returns ctx.Err(), otherwise it returns nil.
+	Sleep(ctx context.Context, d time.Duration) error
+}
+
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+	var retryer Retryer
+	for {
+		err := call(ctx)
+		if err == nil {
+			return nil
+		}
+		if settings.Retry == nil {
+			return err
+		}
+		if retryer == nil {
+			if r := settings.Retry(); r != nil {
+				retryer = r
+			} else {
+				return err
+			}
+		}
+		if d, ok := retryer.Retry(err); !ok {
+			return err
+		} else if err = sp.Sleep(ctx, d); err != nil {
+			return err
+		}
+	}
+}
+
+type timeSleeper struct{}
+
+func (s timeSleeper) Sleep(ctx context.Context, d time.Duration) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-time.After(d):
+		return nil
+	}
+}
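Invoke is what drives the policy defined in call_option.go: it runs the APICall once, asks the Retryer whether and how long to wait after a failure, and stops as soon as the context is done. A hedged sketch of a caller, where doRPC is a hypothetical stand-in for a real stub call:

	package example

	import (
		"golang.org/x/net/context"
		"google.golang.org/grpc/codes"

		"github.com/googleapis/gax-go"
	)

	// fetchWithRetry performs one logical call, retrying Unavailable responses.
	// A zero-valued Backoff relies on the defaults filled in by Pause.
	func fetchWithRetry(ctx context.Context, doRPC func(context.Context) error) error {
		opt := gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{})
		})
		return gax.Invoke(ctx, func(ctx context.Context) error {
			return doRPC(ctx)
		}, opt)
	}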
diff --git a/vendor/github.com/googleapis/gax-go/path_template.go b/vendor/github.com/googleapis/gax-go/path_template.go
new file mode 100644
index 0000000..41bda94
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/path_template.go
@@ -0,0 +1,176 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type matcher interface {
+	match([]string) (int, error)
+	String() string
+}
+
+type segment struct {
+	matcher
+	name string
+}
+
+type labelMatcher string
+
+func (ls labelMatcher) match(segments []string) (int, error) {
+	if len(segments) == 0 {
+		return 0, fmt.Errorf("expected %s but no more segments found", ls)
+	}
+	if segments[0] != string(ls) {
+		return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
+	}
+	return 1, nil
+}
+
+func (ls labelMatcher) String() string {
+	return string(ls)
+}
+
+type wildcardMatcher int
+
+func (wm wildcardMatcher) match(segments []string) (int, error) {
+	if len(segments) == 0 {
+		return 0, errors.New("no more segments found")
+	}
+	return 1, nil
+}
+
+func (wm wildcardMatcher) String() string {
+	return "*"
+}
+
+type pathWildcardMatcher int
+
+func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
+	length := len(segments) - int(pwm)
+	if length <= 0 {
+		return 0, errors.New("not enough segments are supplied for path wildcard")
+	}
+	return length, nil
+}
+
+func (pwm pathWildcardMatcher) String() string {
+	return "**"
+}
+
+type ParseError struct {
+	Pos      int
+	Template string
+	Message  string
+}
+
+func (pe ParseError) Error() string {
+	return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
+}
+
+// PathTemplate manages the template to build and match with paths used
+// by API services. It holds a template and variable names in it, and
+// it can extract matched patterns from a path string or build a path
+// string from a binding.
+//
+// See http.proto in github.com/googleapis/googleapis/ for the details of
+// the template syntax.
+type PathTemplate struct {
+	segments []segment
+}
+
+// NewPathTemplate parses a path template, and returns a PathTemplate
+// instance if successful.
+func NewPathTemplate(template string) (*PathTemplate, error) {
+	return parsePathTemplate(template)
+}
+
+// MustCompilePathTemplate is like NewPathTemplate but panics if the
+// expression cannot be parsed. It simplifies safe initialization of
+// global variables holding compiled regular expressions.
+func MustCompilePathTemplate(template string) *PathTemplate {
+	pt, err := NewPathTemplate(template)
+	if err != nil {
+		panic(err)
+	}
+	return pt
+}
+
+// Match attempts to match the given path with the template, and returns
+// the mapping of the variable name to the matched pattern string.
+func (pt *PathTemplate) Match(path string) (map[string]string, error) {
+	paths := strings.Split(path, "/")
+	values := map[string]string{}
+	for _, segment := range pt.segments {
+		length, err := segment.match(paths)
+		if err != nil {
+			return nil, err
+		}
+		if segment.name != "" {
+			value := strings.Join(paths[:length], "/")
+			if oldValue, ok := values[segment.name]; ok {
+				values[segment.name] = oldValue + "/" + value
+			} else {
+				values[segment.name] = value
+			}
+		}
+		paths = paths[length:]
+	}
+	if len(paths) != 0 {
+		return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
+	}
+	return values, nil
+}
+
+// Render creates a path string from its template and the binding from
+// the variable name to the value.
+func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
+	result := make([]string, 0, len(pt.segments))
+	var lastVariableName string
+	for _, segment := range pt.segments {
+		name := segment.name
+		if lastVariableName != "" && name == lastVariableName {
+			continue
+		}
+		lastVariableName = name
+		if name == "" {
+			result = append(result, segment.String())
+		} else if value, ok := binding[name]; ok {
+			result = append(result, value)
+		} else {
+			return "", fmt.Errorf("%s is not found", name)
+		}
+	}
+	built := strings.Join(result, "/")
+	return built, nil
+}
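Match and Render are inverses over the same compiled template: Match extracts variable bindings from an existing path, Render substitutes bindings back in. A short sketch; the template and resource names below are made up for illustration:

	package example

	import "github.com/googleapis/gax-go"

	// logTemplate describes a hypothetical resource-name layout.
	var logTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")

	// logPath renders a concrete name, e.g. "projects/p1/logs/syslog".
	func logPath(project, log string) string {
		p, err := logTemplate.Render(map[string]string{
			"project": project,
			"log":     log,
		})
		if err != nil {
			panic(err)
		}
		return p
	}

	// parseLogPath recovers the bindings, e.g. {"project": "p1", "log": "syslog"}.
	func parseLogPath(path string) (map[string]string, error) {
		return logTemplate.Match(path)
	}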
diff --git a/vendor/github.com/googleapis/gax-go/path_template_parser.go b/vendor/github.com/googleapis/gax-go/path_template_parser.go
new file mode 100644
index 0000000..79c8e75
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/path_template_parser.go
@@ -0,0 +1,227 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// This parser follows the syntax of path templates, from
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
+// The differences are that there is no custom verb, we allow the initial slash
+// to be absent, and that we are not strict as
+// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
+// literals.
+
+type pathTemplateParser struct {
+	r                *strings.Reader
+	runeCount        int             // the number of the current rune in the original string
+	nextVar          int             // the number to use for the next unnamed variable
+	seenName         map[string]bool // names we've seen already
+	seenPathWildcard bool            // have we seen "**" already?
+}
+
+func parsePathTemplate(template string) (pt *PathTemplate, err error) {
+	p := &pathTemplateParser{
+		r:        strings.NewReader(template),
+		seenName: map[string]bool{},
+	}
+
+	// Handle panics with strings like errors.
+	// See pathTemplateParser.error, below.
+	defer func() {
+		if x := recover(); x != nil {
+			errmsg, ok := x.(errString)
+			if !ok {
+				panic(x)
+			}
+			pt = nil
+			err = ParseError{p.runeCount, template, string(errmsg)}
+		}
+	}()
+
+	segs := p.template()
+	// If there is a path wildcard, set its length. We can't do this
+	// until we know how many segments we've got all together.
+	for i, seg := range segs {
+		if _, ok := seg.matcher.(pathWildcardMatcher); ok {
+			segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
+			break
+		}
+	}
+	return &PathTemplate{segments: segs}, nil
+
+}
+
+// Used to indicate errors "thrown" by this parser. We don't use string because
+// many parts of the standard library panic with strings.
+type errString string
+
+// Terminates parsing immediately with an error.
+func (p *pathTemplateParser) error(msg string) {
+	panic(errString(msg))
+}
+
+// Template = [ "/" ] Segments
+func (p *pathTemplateParser) template() []segment {
+	var segs []segment
+	if p.consume('/') {
+		// Initial '/' needs an initial empty matcher.
+		segs = append(segs, segment{matcher: labelMatcher("")})
+	}
+	return append(segs, p.segments("")...)
+}
+
+// Segments = Segment { "/" Segment }
+func (p *pathTemplateParser) segments(name string) []segment {
+	var segs []segment
+	for {
+		subsegs := p.segment(name)
+		segs = append(segs, subsegs...)
+		if !p.consume('/') {
+			break
+		}
+	}
+	return segs
+}
+
+// Segment  = "*" | "**" | LITERAL | Variable
+func (p *pathTemplateParser) segment(name string) []segment {
+	if p.consume('*') {
+		if name == "" {
+			name = fmt.Sprintf("$%d", p.nextVar)
+			p.nextVar++
+		}
+		if p.consume('*') {
+			if p.seenPathWildcard {
+				p.error("multiple '**' disallowed")
+			}
+			p.seenPathWildcard = true
+			// We'll change 0 to the right number at the end.
+			return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
+		}
+		return []segment{{name: name, matcher: wildcardMatcher(0)}}
+	}
+	if p.consume('{') {
+		if name != "" {
+			p.error("recursive named bindings are not allowed")
+		}
+		return p.variable()
+	}
+	return []segment{{name: name, matcher: labelMatcher(p.literal())}}
+}
+
+// Variable = "{" FieldPath [ "=" Segments ] "}"
+// "{" is already consumed.
+func (p *pathTemplateParser) variable() []segment {
+	// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
+	name := p.literal()
+	if p.seenName[name] {
+		p.error(name + " appears multiple times")
+	}
+	p.seenName[name] = true
+	var segs []segment
+	if p.consume('=') {
+		segs = p.segments(name)
+	} else {
+		// "{var}" is equivalent to "{var=*}"
+		segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
+	}
+	if !p.consume('}') {
+		p.error("expected '}'")
+	}
+	return segs
+}
+
+// A literal is any sequence of characters other than a few special ones.
+// The list of stop characters is not quite the same as in the template RFC.
+func (p *pathTemplateParser) literal() string {
+	lit := p.consumeUntil("/*}{=")
+	if lit == "" {
+		p.error("empty literal")
+	}
+	return lit
+}
+
+// Read runes until EOF or one of the runes in stopRunes is encountered.
+// If the latter, unread the stop rune. Return the accumulated runes as a string.
+func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
+	var runes []rune
+	for {
+		r, ok := p.readRune()
+		if !ok {
+			break
+		}
+		if strings.IndexRune(stopRunes, r) >= 0 {
+			p.unreadRune()
+			break
+		}
+		runes = append(runes, r)
+	}
+	return string(runes)
+}
+
+// If the next rune is r, consume it and return true.
+// Otherwise, leave the input unchanged and return false.
+func (p *pathTemplateParser) consume(r rune) bool {
+	rr, ok := p.readRune()
+	if !ok {
+		return false
+	}
+	if r == rr {
+		return true
+	}
+	p.unreadRune()
+	return false
+}
+
+// Read the next rune from the input. Return it.
+// The second return value is false at EOF.
+func (p *pathTemplateParser) readRune() (rune, bool) {
+	r, _, err := p.r.ReadRune()
+	if err == io.EOF {
+		return r, false
+	}
+	if err != nil {
+		p.error(err.Error())
+	}
+	p.runeCount++
+	return r, true
+}
+
+// Put the last rune that was read back on the input.
+func (p *pathTemplateParser) unreadRune() {
+	if err := p.r.UnreadRune(); err != nil {
+		p.error(err.Error())
+	}
+	p.runeCount--
+}
diff --git a/vendor/github.com/gregjones/httpcache/.gitignore b/vendor/github.com/gregjones/httpcache/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml
new file mode 100644
index 0000000..e34612c
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+go:
+  - 1.6
+  - 1.7
+  - tip
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
new file mode 100644
index 0000000..81316be
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt
@@ -0,0 +1,7 @@
+Copyright © 2012 Greg Jones (greg.jones@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
new file mode 100644
index 0000000..ccd0172
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/README.md
@@ -0,0 +1,25 @@
+httpcache
+=========
+
+[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache)
+
+A Transport for Go's http.Client that will cache responses according to the HTTP RFC
+
+Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
+
+It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
+
+**Documentation:** http://godoc.org/github.com/gregjones/httpcache
+
+**License:** MIT (see LICENSE.txt)
+
+Cache backends
+--------------
+
+- The built-in 'memory' cache stores responses in an in-memory map.
+- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
+- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
+- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
+- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
+- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 0000000..69842a7
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,594 @@
+// Package httpcache provides a http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	stale = iota
+	fresh
+	transparent
+	// XFromCache is the header added to responses that are returned from the cache
+	XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	return req.URL.String()
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+	mu    sync.RWMutex
+	items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
+// onEOFReader executes a function on reader EOF or close
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests allowing servers to return 304 / Not Modified
+type Transport struct {
+	// The RoundTripper interface actually used to make requests
+	// If nil, http.DefaultTransport is used
+	Transport http.RoundTripper
+	Cache     Cache
+	// If true, responses returned from the cache will be given an extra header, X-From-Cache
+	MarkCachedResponses bool
+	// guards modReq
+	mu sync.RWMutex
+	// Mapping of original request => cloned
+	modReq map[*http.Request]*http.Request
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+	return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
+func (t *Transport) Client() *http.Client {
+	return &http.Client{Transport: t}
+}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
+// setModReq maintains a mapping between original requests and their associated cloned requests
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+	t.mu.Unlock()
+}
+
+// RoundTrip takes a Request and returns a Response
+//
+// If there is a fresh Response already in cache, then it will be returned without connecting to
+// the server.
+//
+// If there is a stale Response, then any validators it contains will be set on the new request
+// to give the server a chance to respond with NotModified. If this happens, then the cached Response
+// will be returned.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+	cacheKey := cacheKey(req)
+	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
+	var cachedResp *http.Response
+	if cacheable {
+		cachedResp, err = CachedResponse(t.Cache, req)
+	} else {
+		// Need to invalidate an existing value
+		t.Cache.Delete(cacheKey)
+	}
+
+	transport := t.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	if cacheable && cachedResp != nil && err == nil {
+		if t.MarkCachedResponses {
+			cachedResp.Header.Set(XFromCache, "1")
+		}
+
+		if varyMatches(cachedResp, req) {
+			// Can only use cached value if the new request doesn't Vary significantly
+			freshness := getFreshness(cachedResp.Header, req.Header)
+			if freshness == fresh {
+				return cachedResp, nil
+			}
+
+			if freshness == stale {
+				var req2 *http.Request
+				// Add validators if caller hasn't already done so
+				etag := cachedResp.Header.Get("etag")
+				if etag != "" && req.Header.Get("etag") == "" {
+					req2 = cloneRequest(req)
+					req2.Header.Set("if-none-match", etag)
+				}
+				lastModified := cachedResp.Header.Get("last-modified")
+				if lastModified != "" && req.Header.Get("last-modified") == "" {
+					if req2 == nil {
+						req2 = cloneRequest(req)
+					}
+					req2.Header.Set("if-modified-since", lastModified)
+				}
+				if req2 != nil {
+					// Associate original request with cloned request so we can refer to
+					// it in CancelRequest()
+					t.setModReq(req, req2)
+					req = req2
+					defer func() {
+						// Release req/clone mapping on error
+						if err != nil {
+							t.setModReq(req, nil)
+						}
+						if resp != nil {
+							// Release req/clone mapping on body close/EOF
+							resp.Body = &onEOFReader{
+								rc: resp.Body,
+								fn: func() { t.setModReq(req, nil) },
+							}
+						}
+					}()
+				}
+			}
+		}
+
+		resp, err = transport.RoundTrip(req)
+		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
+			// Replace the 304 response with the one from cache, but update with some new headers
+			endToEndHeaders := getEndToEndHeaders(resp.Header)
+			for _, header := range endToEndHeaders {
+				cachedResp.Header[header] = resp.Header[header]
+			}
+			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
+			cachedResp.StatusCode = http.StatusOK
+
+			resp = cachedResp
+		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
+			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
+			// In case of transport failure and stale-if-error activated, returns cached content
+			// when available
+			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
+			cachedResp.StatusCode = http.StatusOK
+			return cachedResp, nil
+		} else {
+			if err != nil || resp.StatusCode != http.StatusOK {
+				t.Cache.Delete(cacheKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		reqCacheControl := parseCacheControl(req.Header)
+		if _, ok := reqCacheControl["only-if-cached"]; ok {
+			resp = newGatewayTimeoutResponse(req)
+		} else {
+			resp, err = transport.RoundTrip(req)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
+		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
+			varyKey = http.CanonicalHeaderKey(varyKey)
+			fakeHeader := "X-Varied-" + varyKey
+			reqValue := req.Header.Get(varyKey)
+			if reqValue != "" {
+				resp.Header.Set(fakeHeader, reqValue)
+			}
+		}
+		respBytes, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			t.Cache.Set(cacheKey, respBytes)
+		}
+	} else {
+		t.Cache.Delete(cacheKey)
+	}
+	return resp, nil
+}
+
+// CancelRequest calls CancelRequest on the underlying transport if implemented or
+// logs a warning otherwise.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	tr, ok := t.Transport.(canceler)
+	if !ok {
+		log.Printf("httpcache: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", t.Transport)
+		return
+	}
+
+	t.mu.RLock()
+	if modReq, ok := t.modReq[req]; ok {
+		t.mu.RUnlock()
+		t.mu.Lock()
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		tr.CancelRequest(modReq)
+	} else {
+		t.mu.RUnlock()
+		tr.CancelRequest(req)
+	}
+}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, smax-age isn't used.
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		//  the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
+		// its expiration time by no more than the specified number of seconds.
+		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
+		//
+		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
+// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
+		// return-value available here.
+		if maxstale == "" {
+			return fresh
+		}
+		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge - maxstaleDuration)
+		}
+	}
+
+	if lifetime > currentAge {
+		return fresh
+	}
+
+	return stale
+}
+
+// Returns true if either the request or the response includes the stale-if-error
+// cache control extension: https://tools.ietf.org/html/rfc5861
+func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+
+	var err error
+	lifetime := time.Duration(-1)
+
+	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+
+	if lifetime >= 0 {
+		date, err := Date(respHeaders)
+		if err != nil {
+			return false
+		}
+		currentAge := clock.since(date)
+		if lifetime > currentAge {
+			return true
+		}
+	}
+
+	return false
+}
+
+func getEndToEndHeaders(respHeaders http.Header) []string {
+	// These headers are always hop-by-hop
+	hopByHopHeaders := map[string]struct{}{
+		"Connection":          struct{}{},
+		"Keep-Alive":          struct{}{},
+		"Proxy-Authenticate":  struct{}{},
+		"Proxy-Authorization": struct{}{},
+		"Te":                struct{}{},
+		"Trailers":          struct{}{},
+		"Transfer-Encoding": struct{}{},
+		"Upgrade":           struct{}{},
+	}
+
+	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
+		// any header listed in connection, if present, is also considered hop-by-hop
+		if strings.Trim(extra, " ") != "" {
+			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
+		}
+	}
+	endToEndHeaders := []string{}
+	for respHeader, _ := range respHeaders {
+		if _, ok := hopByHopHeaders[respHeader]; !ok {
+			endToEndHeaders = append(endToEndHeaders, respHeader)
+		}
+	}
+	return endToEndHeaders
+}
+
+func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
+	if _, ok := respCacheControl["no-store"]; ok {
+		return false
+	}
+	if _, ok := reqCacheControl["no-store"]; ok {
+		return false
+	}
+	return true
+}
+
+func newGatewayTimeoutResponse(req *http.Request) *http.Response {
+	var braw bytes.Buffer
+	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
+	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
+	if err != nil {
+		panic(err)
+	}
+	return resp
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+type cacheControl map[string]string
+
+func parseCacheControl(headers http.Header) cacheControl {
+	cc := cacheControl{}
+	ccHeader := headers.Get("Cache-Control")
+	for _, part := range strings.Split(ccHeader, ",") {
+		part = strings.Trim(part, " ")
+		if part == "" {
+			continue
+		}
+		if strings.ContainsRune(part, '=') {
+			keyval := strings.Split(part, "=")
+			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
+		} else {
+			cc[part] = ""
+		}
+	}
+	return cc
+}
+
+// headerAllCommaSepValues returns all comma-separated values (each
+// with whitespace trimmed) for header name in headers. According to
+// Section 4.2 of the HTTP/1.1 spec
+// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
+// values from multiple occurrences of a header should be concatenated, if
+// the header's value is a comma-separated list.
+func headerAllCommaSepValues(headers http.Header, name string) []string {
+	var vals []string
+	for _, val := range headers[http.CanonicalHeaderKey(name)] {
+		fields := strings.Split(val, ",")
+		for i, f := range fields {
+			fields[i] = strings.TrimSpace(f)
+		}
+		vals = append(vals, fields...)
+	}
+	return vals
+}
+
+// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
+func NewMemoryCacheTransport() *Transport {
+	c := NewMemoryCache()
+	t := NewTransport(c)
+	return t
+}
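A consumer normally wires the Transport into an http.Client and, if interested, checks the X-From-Cache header on replies. A minimal sketch using the in-memory cache (illustrative only, not code taken from gddo-server):

	package example

	import (
		"net/http"

		"github.com/gregjones/httpcache"
	)

	// newCachingClient returns an *http.Client backed by the in-memory cache.
	// NewMemoryCacheTransport enables MarkCachedResponses, so replays from the
	// cache carry the X-From-Cache header.
	func newCachingClient() *http.Client {
		return httpcache.NewMemoryCacheTransport().Client()
	}

	// wasCached reports whether resp came from the cache rather than the network.
	func wasCached(resp *http.Response) bool {
		return resp.Header.Get(httpcache.XFromCache) == "1"
	}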
diff --git a/vendor/github.com/gregjones/httpcache/memcache/appengine.go b/vendor/github.com/gregjones/httpcache/memcache/appengine.go
new file mode 100644
index 0000000..e68d9bc
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/memcache/appengine.go
@@ -0,0 +1,61 @@
+// +build appengine
+
+// Package memcache provides an implementation of httpcache.Cache that uses App
+// Engine's memcache package to store cached responses.
+//
+// When not built for Google App Engine, this package will provide an
+// implementation that connects to a specified memcached server.  See the
+// memcache.go file in this package for details.
+package memcache
+
+import (
+	"appengine"
+	"appengine/memcache"
+)
+
+// Cache is an implementation of httpcache.Cache that caches responses in App
+// Engine's memcache.
+type Cache struct {
+	appengine.Context
+}
+
+// cacheKey modifies an httpcache key for use in memcache.  Specifically, it
+// prefixes keys to avoid collision with other data stored in memcache.
+func cacheKey(key string) string {
+	return "httpcache:" + key
+}
+
+// Get returns the response corresponding to key if present.
+func (c *Cache) Get(key string) (resp []byte, ok bool) {
+	item, err := memcache.Get(c.Context, cacheKey(key))
+	if err != nil {
+		if err != memcache.ErrCacheMiss {
+			c.Context.Errorf("error getting cached response: %v", err)
+		}
+		return nil, false
+	}
+	return item.Value, true
+}
+
+// Set saves a response to the cache as key.
+func (c *Cache) Set(key string, resp []byte) {
+	item := &memcache.Item{
+		Key:   cacheKey(key),
+		Value: resp,
+	}
+	if err := memcache.Set(c.Context, item); err != nil {
+		c.Context.Errorf("error caching response: %v", err)
+	}
+}
+
+// Delete removes the response with key from the cache.
+func (c *Cache) Delete(key string) {
+	if err := memcache.Delete(c.Context, cacheKey(key)); err != nil {
+		c.Context.Errorf("error deleting cached response: %v", err)
+	}
+}
+
+// New returns a new Cache for the given context.
+func New(ctx appengine.Context) *Cache {
+	return &Cache{ctx}
+}
diff --git a/vendor/github.com/gregjones/httpcache/memcache/memcache.go b/vendor/github.com/gregjones/httpcache/memcache/memcache.go
new file mode 100644
index 0000000..462f0e5
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/memcache/memcache.go
@@ -0,0 +1,60 @@
+// +build !appengine
+
+// Package memcache provides an implementation of httpcache.Cache that uses
+// gomemcache to store cached responses.
+//
+// When built for Google App Engine, this package will provide an
+// implementation that uses App Engine's memcache service.  See the
+// appengine.go file in this package for details.
+package memcache
+
+import (
+	"github.com/bradfitz/gomemcache/memcache"
+)
+
+// Cache is an implementation of httpcache.Cache that caches responses in a
+// memcache server.
+type Cache struct {
+	*memcache.Client
+}
+
+// cacheKey modifies an httpcache key for use in memcache.  Specifically, it
+// prefixes keys to avoid collision with other data stored in memcache.
+func cacheKey(key string) string {
+	return "httpcache:" + key
+}
+
+// Get returns the response corresponding to key if present.
+func (c *Cache) Get(key string) (resp []byte, ok bool) {
+	item, err := c.Client.Get(cacheKey(key))
+	if err != nil {
+		return nil, false
+	}
+	return item.Value, true
+}
+
+// Set saves a response to the cache as key.
+func (c *Cache) Set(key string, resp []byte) {
+	item := &memcache.Item{
+		Key:   cacheKey(key),
+		Value: resp,
+	}
+	c.Client.Set(item)
+}
+
+// Delete removes the response with key from the cache.
+func (c *Cache) Delete(key string) {
+	c.Client.Delete(cacheKey(key))
+}
+
+// New returns a new Cache using the provided memcache server(s) with equal
+// weight. If a server is listed multiple times, it gets a proportional amount
+// of weight.
+func New(server ...string) *Cache {
+	return NewWithClient(memcache.New(server...))
+}
+
+// NewWithClient returns a new Cache with the given memcache client.
+func NewWithClient(client *memcache.Client) *Cache {
+	return &Cache{client}
+}
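Because *Cache implements Get, Set, and Delete, it satisfies httpcache.Cache and plugs directly into httpcache.NewTransport. A sketch; the memcached address is a placeholder:

	package example

	import (
		"net/http"

		"github.com/gregjones/httpcache"
		"github.com/gregjones/httpcache/memcache"
	)

	// newMemcachedHTTPClient caches responses in an external memcached server.
	// The address is a placeholder, not a value used by gddo-server.
	func newMemcachedHTTPClient() *http.Client {
		cache := memcache.New("127.0.0.1:11211")
		return httpcache.NewTransport(cache).Client()
	}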
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..ea1a7cd
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,156 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context.  The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it.  The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it.  Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled.  Deadline returns ok==false when no deadline is
+	// set.  Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled.  Done may return nil if this context can
+	// never be canceled.  Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed.  Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed.  No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key.  Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context.  Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value.  A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stores using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts.  It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline.  It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context.  Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).  TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000..22eabff
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+	resp, err := client.Do(req.WithContext(ctx))
+	// If we got an error, and the context has been canceled,
+	// the context's error is probably more useful.
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+		default:
+		}
+	}
+	return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
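This go1.7 variant delegates cancelation to req.WithContext, so a caller only has to supply a context that expires. A sketch; context.WithTimeout lives in the full x/net/context package, of which only a portion appears in the hunk above:

	package example

	import (
		"net/http"
		"time"

		"golang.org/x/net/context"
		"golang.org/x/net/context/ctxhttp"
	)

	// getWithTimeout issues a GET that is abandoned once the timeout elapses.
	// A nil client makes ctxhttp fall back to http.DefaultClient.
	func getWithTimeout(url string, timeout time.Duration) (*http.Response, error) {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		return ctxhttp.Get(ctx, nil, url)
	}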
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
new file mode 100644
index 0000000..7564b20
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package ctxhttp
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+	testHookContextDoneBeforeHeaders = nop
+	testHookDoReturned               = nop
+	testHookDidBodyClose             = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// TODO(djd): Respect any existing value of req.Cancel.
+	cancel := make(chan struct{})
+	req.Cancel = cancel
+
+	type responseAndError struct {
+		resp *http.Response
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	// Make local copies of test hooks closed over by goroutines below.
+	// Prevents data races in tests.
+	testHookDoReturned := testHookDoReturned
+	testHookDidBodyClose := testHookDidBodyClose
+
+	go func() {
+		resp, err := client.Do(req)
+		testHookDoReturned()
+		result <- responseAndError{resp, err}
+	}()
+
+	var resp *http.Response
+
+	select {
+	case <-ctx.Done():
+		testHookContextDoneBeforeHeaders()
+		close(cancel)
+		// Clean up after the goroutine calling client.Do:
+		go func() {
+			if r := <-result; r.resp != nil {
+				testHookDidBodyClose()
+				r.resp.Body.Close()
+			}
+		}()
+		return nil, ctx.Err()
+	case r := <-result:
+		var err error
+		resp, err = r.resp, r.err
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	c := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			close(cancel)
+		case <-c:
+			// The response's Body is closed.
+		}
+	}()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+	return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+	io.ReadCloser
+	notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+	n, err := r.ReadCloser.Read(p)
+	if err != nil && r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return n, err
+}
+
+func (r *notifyingReader) Close() error {
+	err := r.ReadCloser.Close()
+	if r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return err
+}
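The pre-Go-1.7 Do above aborts an in-flight request by closing a channel assigned to req.Cancel. A stand-alone sketch of that same technique follows; cancelableGet is an invented helper name and the URL is illustrative.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// cancelableGet issues a GET and closes req.Cancel if timeout elapses first,
// mirroring the cancellation mechanism used by the pre-Go-1.7 Do above.
func cancelableGet(url string, timeout time.Duration) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	cancel := make(chan struct{})
	req.Cancel = cancel

	timer := time.AfterFunc(timeout, func() { close(cancel) })
	defer timer.Stop() // a fuller version would also avoid canceling a finished request

	return http.DefaultClient.Do(req)
}

func main() {
	resp, err := cancelableGet("https://example.com/", 2*time.Second)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}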
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 0000000..f8cda19
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+var (
+	todo       = context.TODO()
+	background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	ctx, f := context.WithCancel(parent)
+	return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d.  If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent.  The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	ctx, f := context.WithDeadline(parent, deadline)
+	return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}
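As a usage note for the WithValue guidance above, a small sketch of the unexported-key pattern for request-scoped data; the key type and helper functions are invented for the example.

package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// key is unexported so other packages cannot collide with values stored here.
type key int

const requestIDKey key = 0

func withRequestID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, requestIDKey, id)
}

func requestID(ctx context.Context) (string, bool) {
	id, ok := ctx.Value(requestIDKey).(string)
	return id, ok
}

func main() {
	ctx := withRequestID(context.Background(), "abc123")
	if id, ok := requestID(ctx); ok {
		fmt.Println("request ID:", id)
	}
}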
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 0000000..5a30aca
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline.  It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case background:
+		return "context.Background"
+	case todo:
+		return "context.TODO"
+	}
+	return "unknown empty Context"
+}
+
+var (
+	background = new(emptyCtx)
+	todo       = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	c := newCancelCtx(parent)
+	propagateCancel(parent, c)
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+	return &cancelCtx{
+		Context: parent,
+		done:    make(chan struct{}),
+	}
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+	if parent.Done() == nil {
+		return // parent is never canceled
+	}
+	if p, ok := parentCancelCtx(parent); ok {
+		p.mu.Lock()
+		if p.err != nil {
+			// parent has already been canceled
+			child.cancel(false, p.err)
+		} else {
+			if p.children == nil {
+				p.children = make(map[canceler]bool)
+			}
+			p.children[child] = true
+		}
+		p.mu.Unlock()
+	} else {
+		go func() {
+			select {
+			case <-parent.Done():
+				child.cancel(false, parent.Err())
+			case <-child.Done():
+			}
+		}()
+	}
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx.  This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+	for {
+		switch c := parent.(type) {
+		case *cancelCtx:
+			return c, true
+		case *timerCtx:
+			return c.cancelCtx, true
+		case *valueCtx:
+			parent = c.Context
+		default:
+			return nil, false
+		}
+	}
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+	p, ok := parentCancelCtx(parent)
+	if !ok {
+		return
+	}
+	p.mu.Lock()
+	if p.children != nil {
+		delete(p.children, child)
+	}
+	p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly.  The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+	cancel(removeFromParent bool, err error)
+	Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled.  When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+	Context
+
+	done chan struct{} // closed by the first cancel call.
+
+	mu       sync.Mutex
+	children map[canceler]bool // set to nil by the first cancel call
+	err      error             // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+	return c.done
+}
+
+func (c *cancelCtx) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *cancelCtx) String() string {
+	return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+	if err == nil {
+		panic("context: internal error: missing cancel error")
+	}
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return // already canceled
+	}
+	c.err = err
+	close(c.done)
+	for child := range c.children {
+		// NOTE: acquiring the child's lock while holding parent's lock.
+		child.cancel(false, err)
+	}
+	c.children = nil
+	c.mu.Unlock()
+
+	if removeFromParent {
+		removeChild(c.Context, c)
+	}
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d.  If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent.  The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+		// The current deadline is already sooner than the new one.
+		return WithCancel(parent)
+	}
+	c := &timerCtx{
+		cancelCtx: newCancelCtx(parent),
+		deadline:  deadline,
+	}
+	propagateCancel(parent, c)
+	d := deadline.Sub(time.Now())
+	if d <= 0 {
+		c.cancel(true, DeadlineExceeded) // deadline has already passed
+		return c, func() { c.cancel(true, Canceled) }
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err == nil {
+		c.timer = time.AfterFunc(d, func() {
+			c.cancel(true, DeadlineExceeded)
+		})
+	}
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline.  It embeds a cancelCtx to
+// implement Done and Err.  It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+	*cancelCtx
+	timer *time.Timer // Under cancelCtx.mu.
+
+	deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+	return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+	c.cancelCtx.cancel(false, err)
+	if removeFromParent {
+		// Remove this timerCtx from its parent cancelCtx's children.
+		removeChild(c.cancelCtx.Context, c)
+	}
+	c.mu.Lock()
+	if c.timer != nil {
+		c.timer.Stop()
+		c.timer = nil
+	}
+	c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair.  It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}
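A short illustration of the behavior propagateCancel implements: canceling a parent closes every derived child's Done channel. This example uses only the package's public API.

package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	child, cancelChild := context.WithCancel(parent)
	defer cancelChild()

	cancelParent()

	<-child.Done()                     // unblocks because the parent was canceled
	fmt.Println("child:", child.Err()) // context canceled
}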
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore
new file mode 100644
index 0000000..190f122
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000..53fc525
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image are found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+       autotools-dev libtool pkg-config zlib1g-dev \
+       libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+       automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+        autoconf automake autotools-dev \
+        libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+        libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+        cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000..55fd826
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 0000000..360d5aa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use.  It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs:         https://golang.org/issue/new?title=x/net/http2:+
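For orientation, a hedged sketch of wiring the package into a TLS server through its ConfigureServer entry point (an exported function of this package, not shown in this excerpt); cert.pem and key.pem are placeholder file names.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "served over %s\n", r.Proto) // "HTTP/2.0" for h2 clients
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	// Explicitly enable HTTP/2 on this server; recent net/http also does this
	// automatically for TLS servers.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}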
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..b139412
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,256 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+	"crypto/tls"
+	"net/http"
+	"sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+	MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+	ClientConnPool
+	closeIdleConnections()
+}
+
+var (
+	_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+	_ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+	t *Transport
+
+	mu sync.Mutex // TODO: maybe switch to RWMutex
+	// TODO: add support for sharing conns based on cert names
+	// (e.g. share conn for googleapis.com and appspot.com)
+	conns        map[string][]*ClientConn // key is host:port
+	dialing      map[string]*dialCall     // currently in-flight dials
+	keys         map[*ClientConn][]string
+	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+	return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+	dialOnMiss   = true
+	noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+	if isConnectionCloseRequest(req) && dialOnMiss {
+		// It gets its own connection.
+		const singleUse = true
+		cc, err := p.t.dialClientConn(addr, singleUse)
+		if err != nil {
+			return nil, err
+		}
+		return cc, nil
+	}
+	p.mu.Lock()
+	for _, cc := range p.conns[addr] {
+		if cc.CanTakeNewRequest() {
+			p.mu.Unlock()
+			return cc, nil
+		}
+	}
+	if !dialOnMiss {
+		p.mu.Unlock()
+		return nil, ErrNoCachedConn
+	}
+	call := p.getStartDialLocked(addr)
+	p.mu.Unlock()
+	<-call.done
+	return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+	p    *clientConnPool
+	done chan struct{} // closed when done
+	res  *ClientConn   // valid after done is closed
+	err  error         // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+	if call, ok := p.dialing[addr]; ok {
+		// A dial is already in-flight. Don't start another.
+		return call
+	}
+	call := &dialCall{p: p, done: make(chan struct{})}
+	if p.dialing == nil {
+		p.dialing = make(map[string]*dialCall)
+	}
+	p.dialing[addr] = call
+	go call.dial(addr)
+	return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+	const singleUse = false // shared conn
+	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
+	close(c.done)
+
+	c.p.mu.Lock()
+	delete(c.p.dialing, addr)
+	if c.err == nil {
+		c.p.addConnLocked(addr, c.res)
+	}
+	c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The returned used value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+	p.mu.Lock()
+	for _, cc := range p.conns[key] {
+		if cc.CanTakeNewRequest() {
+			p.mu.Unlock()
+			return false, nil
+		}
+	}
+	call, dup := p.addConnCalls[key]
+	if !dup {
+		if p.addConnCalls == nil {
+			p.addConnCalls = make(map[string]*addConnCall)
+		}
+		call = &addConnCall{
+			p:    p,
+			done: make(chan struct{}),
+		}
+		p.addConnCalls[key] = call
+		go call.run(t, key, c)
+	}
+	p.mu.Unlock()
+
+	<-call.done
+	if call.err != nil {
+		return false, call.err
+	}
+	return !dup, nil
+}
+
+type addConnCall struct {
+	p    *clientConnPool
+	done chan struct{} // closed when done
+	err  error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+	cc, err := t.NewClientConn(tc)
+
+	p := c.p
+	p.mu.Lock()
+	if err != nil {
+		c.err = err
+	} else {
+		p.addConnLocked(key, cc)
+	}
+	delete(p.addConnCalls, key)
+	p.mu.Unlock()
+	close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+	p.mu.Lock()
+	p.addConnLocked(key, cc)
+	p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+	for _, v := range p.conns[key] {
+		if v == cc {
+			return
+		}
+	}
+	if p.conns == nil {
+		p.conns = make(map[string][]*ClientConn)
+	}
+	if p.keys == nil {
+		p.keys = make(map[*ClientConn][]string)
+	}
+	p.conns[key] = append(p.conns[key], cc)
+	p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	for _, key := range p.keys[cc] {
+		vv, ok := p.conns[key]
+		if !ok {
+			continue
+		}
+		newList := filterOutClientConn(vv, cc)
+		if len(newList) > 0 {
+			p.conns[key] = newList
+		} else {
+			delete(p.conns, key)
+		}
+	}
+	delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// TODO: don't close a cc if it was just added to the pool
+	// milliseconds ago and has never been used. There's currently
+	// a small race window with the HTTP/1 Transport's integration
+	// where it can add an idle conn just before using it, and
+	// somebody else can concurrently call CloseIdleConns and
+	// break some caller's RoundTrip.
+	for _, vv := range p.conns {
+		for _, cc := range vv {
+			cc.closeIfIdle()
+		}
+	}
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+	out := in[:0]
+	for _, v := range in {
+		if v != exclude {
+			out = append(out, v)
+		}
+	}
+	// If we filtered it out, zero out the last item to prevent
+	// the GC from seeing it.
+	if len(in) != len(out) {
+		in[len(in)-1] = nil
+	}
+	return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials.  We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+	return p.getClientConn(req, addr, noDialOnMiss)
+}
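The dialCall and addConnCall machinery above coalesces concurrent work per key, much like a singleflight group. A stripped-down sketch of that pattern follows; the group, call, and Do names are invented for the example and are not part of the vendored code.

package main

import (
	"fmt"
	"sync"
)

type call struct {
	done chan struct{} // closed when the work finishes
	val  string
	err  error
}

type group struct {
	mu    sync.Mutex
	calls map[string]*call // in-flight work, keyed like the pool's host:port
}

// Do runs fn once per key at a time; concurrent callers for the same key wait
// for the first call's result instead of starting their own.
func (g *group) Do(key string, fn func() (string, error)) (string, error) {
	g.mu.Lock()
	if c, ok := g.calls[key]; ok {
		g.mu.Unlock()
		<-c.done
		return c.val, c.err
	}
	c := &call{done: make(chan struct{})}
	if g.calls == nil {
		g.calls = make(map[string]*call)
	}
	g.calls[key] = c
	g.mu.Unlock()

	c.val, c.err = fn()

	g.mu.Lock()
	delete(g.calls, key)
	g.mu.Unlock()
	close(c.done)

	return c.val, c.err
}

func main() {
	var g group
	v, _ := g.Do("example.com:443", func() (string, error) { return "conn#1", nil })
	fmt.Println(v)
}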
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 0000000..4f720f5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+	connPool := new(clientConnPool)
+	t2 := &Transport{
+		ConnPool: noDialClientConnPool{connPool},
+		t1:       t1,
+	}
+	connPool.t = t2
+	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+		return nil, err
+	}
+	if t1.TLSClientConfig == nil {
+		t1.TLSClientConfig = new(tls.Config)
+	}
+	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+	}
+	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+	}
+	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+		addr := authorityAddr("https", authority)
+		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+			go c.Close()
+			return erringRoundTripper{err}
+		} else if !used {
+			// Turns out we don't need this c.
+			// For example, two goroutines made requests to the same host
+			// at the same time, both kicking off TCP dials. (since protocol
+			// was unknown)
+			go c.Close()
+		}
+		return t2
+	}
+	if m := t1.TLSNextProto; len(m) == 0 {
+		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+			"h2": upgradeFn,
+		}
+	} else {
+		m["h2"] = upgradeFn
+	}
+	return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			err = fmt.Errorf("%v", e)
+		}
+	}()
+	t.RegisterProtocol("https", rt)
+	return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	res, err := rt.t.RoundTrip(req)
+	if err == ErrNoCachedConn {
+		return nil, http.ErrSkipAltProtocol
+	}
+	return res, err
+}
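A hedged usage sketch for the configureTransport path above, via the package's exported ConfigureTransport wrapper; the URL is illustrative and the negotiated protocol depends on the server.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Upgrade an existing http.Transport in place so its HTTPS requests can
	// negotiate "h2" and be routed through the HTTP/2 Transport.
	t := &http.Transport{}
	if err := http2.ConfigureTransport(t); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the server speaks h2
}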
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..20fd762
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,130 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"errors"
+	"fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+	ErrCodeNo                 ErrCode = 0x0
+	ErrCodeProtocol           ErrCode = 0x1
+	ErrCodeInternal           ErrCode = 0x2
+	ErrCodeFlowControl        ErrCode = 0x3
+	ErrCodeSettingsTimeout    ErrCode = 0x4
+	ErrCodeStreamClosed       ErrCode = 0x5
+	ErrCodeFrameSize          ErrCode = 0x6
+	ErrCodeRefusedStream      ErrCode = 0x7
+	ErrCodeCancel             ErrCode = 0x8
+	ErrCodeCompression        ErrCode = 0x9
+	ErrCodeConnect            ErrCode = 0xa
+	ErrCodeEnhanceYourCalm    ErrCode = 0xb
+	ErrCodeInadequateSecurity ErrCode = 0xc
+	ErrCodeHTTP11Required     ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+	ErrCodeNo:                 "NO_ERROR",
+	ErrCodeProtocol:           "PROTOCOL_ERROR",
+	ErrCodeInternal:           "INTERNAL_ERROR",
+	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
+	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
+	ErrCodeStreamClosed:       "STREAM_CLOSED",
+	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
+	ErrCodeRefusedStream:      "REFUSED_STREAM",
+	ErrCodeCancel:             "CANCEL",
+	ErrCodeCompression:        "COMPRESSION_ERROR",
+	ErrCodeConnect:            "CONNECT_ERROR",
+	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
+	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+	if s, ok := errCodeName[e]; ok {
+		return s
+	}
+	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+	StreamID uint32
+	Code     ErrCode
+	Cause    error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+	return StreamError{StreamID: id, Code: code}
+}
+
+func (e StreamError) Error() string {
+	if e.Cause != nil {
+		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+	}
+	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError wraps an ErrCode with a reason string explaining why the
+// connection error occurred.
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+	Code   ErrCode
+	Reason string
+}
+
+func (e connError) Error() string {
+	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+	return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+	return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+	return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+	return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
+)
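A tiny illustrative example of how these exported error and code types render; the values are arbitrary.

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	fmt.Println(http2.ErrCode(0x1))                              // PROTOCOL_ERROR
	fmt.Println(http2.ConnectionError(http2.ErrCodeFlowControl)) // connection error: FLOW_CONTROL_ERROR
	fmt.Println(http2.StreamError{StreamID: 3, Code: http2.ErrCodeRefusedStream})
	// stream error: stream ID 3; REFUSED_STREAM
}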
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go
new file mode 100644
index 0000000..47da0f0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/fixed_buffer.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"errors"
+)
+
+// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type fixedBuffer struct {
+	buf  []byte
+	r, w int
+}
+
+var (
+	errReadEmpty = errors.New("read from empty fixedBuffer")
+	errWriteFull = errors.New("write on full fixedBuffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *fixedBuffer) Read(p []byte) (n int, err error) {
+	if b.r == b.w {
+		return 0, errReadEmpty
+	}
+	n = copy(p, b.buf[b.r:b.w])
+	b.r += n
+	if b.r == b.w {
+		b.r = 0
+		b.w = 0
+	}
+	return n, nil
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *fixedBuffer) Len() int {
+	return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *fixedBuffer) Write(p []byte) (n int, err error) {
+	// Slide existing data to beginning.
+	if b.r > 0 && len(p) > len(b.buf)-b.w {
+		copy(b.buf, b.buf[b.r:b.w])
+		b.w -= b.r
+		b.r = 0
+	}
+
+	// Write new data.
+	n = copy(b.buf[b.w:], p)
+	b.w += n
+	if n < len(p) {
+		err = errWriteFull
+	}
+	return n, err
+}
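fixedBuffer is unexported, so as a reference here is a re-statement of its technique under invented names: a fixed-size buffer with read/write offsets that slides unread bytes to the front on write instead of allocating.

package main

import "fmt"

type sliding struct {
	buf  []byte
	r, w int
}

func (b *sliding) Write(p []byte) int {
	// Reclaim already-read space before writing, exactly like fixedBuffer.Write.
	if b.r > 0 && len(p) > len(b.buf)-b.w {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}
	n := copy(b.buf[b.w:], p)
	b.w += n
	return n
}

func (b *sliding) Read(p []byte) int {
	n := copy(p, b.buf[b.r:b.w])
	b.r += n
	if b.r == b.w {
		b.r, b.w = 0, 0
	}
	return n
}

func main() {
	b := &sliding{buf: make([]byte, 8)}
	out := make([]byte, 4)

	b.Write([]byte("abcdef"))
	n := b.Read(out)
	fmt.Printf("%s\n", out[:n]) // abcd

	b.Write([]byte("ghij")) // "ef" slides to the front, then "ghij" follows
	n = b.Read(out)
	fmt.Printf("%s\n", out[:n]) // efgh
}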
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..957de25
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+	// n is the number of DATA bytes we're allowed to send.
+	// A flow is kept both on a conn and a per-stream.
+	n int32
+
+	// conn points to the shared connection-level flow that is
+	// shared by all streams on that conn. It is nil for the flow
+	// that's on the conn directly.
+	conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+	n := f.n
+	if f.conn != nil && f.conn.n < n {
+		n = f.conn.n
+	}
+	return n
+}
+
+func (f *flow) take(n int32) {
+	if n > f.available() {
+		panic("internal error: took too much")
+	}
+	f.n -= n
+	if f.conn != nil {
+		f.conn.n -= n
+	}
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+	remain := (1<<31 - 1) - f.n
+	if n > remain {
+		return false
+	}
+	f.n += n
+	return true
+}
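A small model of the accounting above: a stream may send at most the minimum of its own window and the shared connection window, and take debits both. The window name is invented for the sketch.

package main

import "fmt"

type window struct {
	n    int32
	conn *window // nil for the connection-level window itself
}

func (w *window) available() int32 {
	n := w.n
	if w.conn != nil && w.conn.n < n {
		n = w.conn.n
	}
	return n
}

func (w *window) take(n int32) {
	w.n -= n
	if w.conn != nil {
		w.conn.n -= n
	}
}

func main() {
	conn := &window{n: 100}
	stream := &window{n: 65535, conn: conn}

	fmt.Println(stream.available()) // 100: capped by the connection window
	stream.take(60)
	fmt.Println(stream.available()) // 40: both windows were debited
}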
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..b0c79b0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1539 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/lex/httplex"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+	FrameData         FrameType = 0x0
+	FrameHeaders      FrameType = 0x1
+	FramePriority     FrameType = 0x2
+	FrameRSTStream    FrameType = 0x3
+	FrameSettings     FrameType = 0x4
+	FramePushPromise  FrameType = 0x5
+	FramePing         FrameType = 0x6
+	FrameGoAway       FrameType = 0x7
+	FrameWindowUpdate FrameType = 0x8
+	FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+	FrameData:         "DATA",
+	FrameHeaders:      "HEADERS",
+	FramePriority:     "PRIORITY",
+	FrameRSTStream:    "RST_STREAM",
+	FrameSettings:     "SETTINGS",
+	FramePushPromise:  "PUSH_PROMISE",
+	FramePing:         "PING",
+	FrameGoAway:       "GOAWAY",
+	FrameWindowUpdate: "WINDOW_UPDATE",
+	FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+	if s, ok := frameName[t]; ok {
+		return s
+	}
+	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+	return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+	// Data Frame
+	FlagDataEndStream Flags = 0x1
+	FlagDataPadded    Flags = 0x8
+
+	// Headers Frame
+	FlagHeadersEndStream  Flags = 0x1
+	FlagHeadersEndHeaders Flags = 0x4
+	FlagHeadersPadded     Flags = 0x8
+	FlagHeadersPriority   Flags = 0x20
+
+	// Settings Frame
+	FlagSettingsAck Flags = 0x1
+
+	// Ping Frame
+	FlagPingAck Flags = 0x1
+
+	// Continuation Frame
+	FlagContinuationEndHeaders Flags = 0x4
+
+	FlagPushPromiseEndHeaders Flags = 0x4
+	FlagPushPromisePadded     Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+	FrameData: {
+		FlagDataEndStream: "END_STREAM",
+		FlagDataPadded:    "PADDED",
+	},
+	FrameHeaders: {
+		FlagHeadersEndStream:  "END_STREAM",
+		FlagHeadersEndHeaders: "END_HEADERS",
+		FlagHeadersPadded:     "PADDED",
+		FlagHeadersPriority:   "PRIORITY",
+	},
+	FrameSettings: {
+		FlagSettingsAck: "ACK",
+	},
+	FramePing: {
+		FlagPingAck: "ACK",
+	},
+	FrameContinuation: {
+		FlagContinuationEndHeaders: "END_HEADERS",
+	},
+	FramePushPromise: {
+		FlagPushPromiseEndHeaders: "END_HEADERS",
+		FlagPushPromisePadded:     "PADDED",
+	},
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+	FrameData:         parseDataFrame,
+	FrameHeaders:      parseHeadersFrame,
+	FramePriority:     parsePriorityFrame,
+	FrameRSTStream:    parseRSTStreamFrame,
+	FrameSettings:     parseSettingsFrame,
+	FramePushPromise:  parsePushPromise,
+	FramePing:         parsePingFrame,
+	FrameGoAway:       parseGoAwayFrame,
+	FrameWindowUpdate: parseWindowUpdateFrame,
+	FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+	if f := frameParsers[t]; f != nil {
+		return f
+	}
+	return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+	valid bool // caller can access []byte fields in the Frame
+
+	// Type is the 1 byte frame type. There are ten standard frame
+	// types, but extension frame types may be written by WriteRawFrame
+	// and will be returned by ReadFrame (as UnknownFrame).
+	Type FrameType
+
+	// Flags are the 1 byte of 8 potential bit flags per frame.
+	// They are specific to the frame type.
+	Flags Flags
+
+	// Length is the length of the frame, not including the 9 byte header.
+	// The maximum size is one byte less than 16MB (uint24), but only
+	// frames up to 16KB are allowed without peer agreement.
+	Length uint32
+
+	// StreamID is which stream this frame is for. Certain frames
+	// are not stream-specific, in which case this field is 0.
+	StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("[FrameHeader ")
+	h.writeDebug(&buf)
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+	buf.WriteString(h.Type.String())
+	if h.Flags != 0 {
+		buf.WriteString(" flags=")
+		set := 0
+		for i := uint8(0); i < 8; i++ {
+			if h.Flags&(1<<i) == 0 {
+				continue
+			}
+			set++
+			if set > 1 {
+				buf.WriteByte('|')
+			}
+			name := flagName[h.Type][Flags(1<<i)]
+			if name != "" {
+				buf.WriteString(name)
+			} else {
+				fmt.Fprintf(buf, "0x%x", 1<<i)
+			}
+		}
+	}
+	if h.StreamID != 0 {
+		fmt.Fprintf(buf, " stream=%d", h.StreamID)
+	}
+	fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+	if !h.valid {
+		panic("Frame accessor called on non-owned Frame")
+	}
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, frameHeaderLen)
+		return &buf
+	},
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+	bufp := fhBytes.Get().(*[]byte)
+	defer fhBytes.Put(bufp)
+	return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+	_, err := io.ReadFull(r, buf[:frameHeaderLen])
+	if err != nil {
+		return FrameHeader{}, err
+	}
+	return FrameHeader{
+		Length:   (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+		Type:     FrameType(buf[3]),
+		Flags:    Flags(buf[4]),
+		StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+		valid:    true,
+	}, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+	Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+	invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+	r         io.Reader
+	lastFrame Frame
+	errDetail error
+
+	// lastHeaderStream is non-zero if the last frame was an
+	// unfinished HEADERS/CONTINUATION.
+	lastHeaderStream uint32
+
+	maxReadSize uint32
+	headerBuf   [frameHeaderLen]byte
+
+	// TODO: let getReadBuf be configurable, and use a less memory-pinning
+	// allocator in server.go to minimize memory pinned for many idle conns.
+	// Will probably also need to make frame invalidation have a hook too.
+	getReadBuf func(size uint32) []byte
+	readBuf    []byte // cache for default getReadBuf
+
+	maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+	w    io.Writer
+	wbuf []byte
+
+	// AllowIllegalWrites permits the Framer's Write methods to
+	// write frames that do not conform to the HTTP/2 spec. This
+	// permits using the Framer to test other HTTP/2
+	// implementations' conformance to the spec.
+	// If false, the Write methods will prefer to return an error
+	// rather than comply.
+	AllowIllegalWrites bool
+
+	// AllowIllegalReads permits the Framer's ReadFrame method
+	// to return non-compliant frames or frame orders.
+	// This is for testing and permits using the Framer to test
+	// other HTTP/2 implementations' conformance to the spec.
+	// It is not compatible with ReadMetaHeaders.
+	AllowIllegalReads bool
+
+	// ReadMetaHeaders if non-nil causes ReadFrame to merge
+	// HEADERS and CONTINUATION frames together and return
+	// MetaHeadersFrame instead.
+	ReadMetaHeaders *hpack.Decoder
+
+	// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+	// It's used only if ReadMetaHeaders is set; 0 means a sane default
+	// (currently 16MB).
+	// If the limit is hit, MetaHeadersFrame.Truncated is set true.
+	MaxHeaderListSize uint32
+
+	// TODO: track which type of frame & with which flags was sent
+	// last.  Then return an error (unless AllowIllegalWrites) if
+	// we're in the middle of a header block and a
+	// non-Continuation or Continuation on a different stream is
+	// attempted to be written.
+
+	logReads bool
+
+	debugFramer    *Framer // only use for logging written writes
+	debugFramerBuf *bytes.Buffer
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+	if fr.MaxHeaderListSize == 0 {
+		return 16 << 20 // sane default, per docs
+	}
+	return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+	// Write the FrameHeader.
+	f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in by endWrite
+		0,
+		0,
+		byte(ftype),
+		byte(flags),
+		byte(streamID>>24),
+		byte(streamID>>16),
+		byte(streamID>>8),
+		byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+	// Now that we know the final size, fill in the FrameHeader in
+	// the space previously reserved for it. Abuse append.
+	length := len(f.wbuf) - frameHeaderLen
+	if length >= (1 << 24) {
+		return ErrFrameTooLarge
+	}
+	_ = append(f.wbuf[:0],
+		byte(length>>16),
+		byte(length>>8),
+		byte(length))
+	if logFrameWrites {
+		f.logWrite()
+	}
+
+	n, err := f.w.Write(f.wbuf)
+	if err == nil && n != len(f.wbuf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func (f *Framer) logWrite() {
+	if f.debugFramer == nil {
+		f.debugFramerBuf = new(bytes.Buffer)
+		f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+		f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+		// Let us read anything, even if we accidentally wrote it
+		// in the wrong order:
+		f.debugFramer.AllowIllegalReads = true
+	}
+	f.debugFramerBuf.Write(f.wbuf)
+	fr, err := f.debugFramer.ReadFrame()
+	if err != nil {
+		log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+		return
+	}
+	log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+	f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+	minMaxFrameSize = 1 << 14
+	maxFrameSize    = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+	fr := &Framer{
+		w:        w,
+		r:        r,
+		logReads: logFrameReads,
+	}
+	fr.getReadBuf = func(size uint32) []byte {
+		if cap(fr.readBuf) >= int(size) {
+			return fr.readBuf[:size]
+		}
+		fr.readBuf = make([]byte, size)
+		return fr.readBuf
+	}
+	fr.SetMaxReadFrameSize(maxFrameSize)
+	return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+	if v > maxFrameSize {
+		v = maxFrameSize
+	}
+	fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+	return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+	if _, ok := err.(StreamError); ok {
+		return false
+	}
+	return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+	fr.errDetail = nil
+	if fr.lastFrame != nil {
+		fr.lastFrame.invalidate()
+	}
+	fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+	if err != nil {
+		return nil, err
+	}
+	if fh.Length > fr.maxReadSize {
+		return nil, ErrFrameTooLarge
+	}
+	payload := fr.getReadBuf(fh.Length)
+	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		return nil, err
+	}
+	f, err := typeFrameParser(fh.Type)(fh, payload)
+	if err != nil {
+		if ce, ok := err.(connError); ok {
+			return nil, fr.connError(ce.Code, ce.Reason)
+		}
+		return nil, err
+	}
+	if err := fr.checkFrameOrder(f); err != nil {
+		return nil, err
+	}
+	if fr.logReads {
+		log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+	}
+	if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+		return fr.readMetaFrame(f.(*HeadersFrame))
+	}
+	return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+	fr.errDetail = errors.New(reason)
+	return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+	last := fr.lastFrame
+	fr.lastFrame = f
+	if fr.AllowIllegalReads {
+		return nil
+	}
+
+	fh := f.Header()
+	if fr.lastHeaderStream != 0 {
+		if fh.Type != FrameContinuation {
+			return fr.connError(ErrCodeProtocol,
+				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+					fh.Type, fh.StreamID,
+					last.Header().Type, fr.lastHeaderStream))
+		}
+		if fh.StreamID != fr.lastHeaderStream {
+			return fr.connError(ErrCodeProtocol,
+				fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+					fh.StreamID, fr.lastHeaderStream))
+		}
+	} else if fh.Type == FrameContinuation {
+		return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+	}
+
+	switch fh.Type {
+	case FrameHeaders, FrameContinuation:
+		if fh.Flags.Has(FlagHeadersEndHeaders) {
+			fr.lastHeaderStream = 0
+		} else {
+			fr.lastHeaderStream = fh.StreamID
+		}
+	}
+
+	return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+	FrameHeader
+	data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+	f.checkValid()
+	return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		// DATA frames MUST be associated with a stream. If a
+		// DATA frame is received whose stream identifier
+		// field is 0x0, the recipient MUST respond with a
+		// connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR.
+		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+	}
+	f := &DataFrame{
+		FrameHeader: fh,
+	}
+	var padSize byte
+	if fh.Flags.Has(FlagDataPadded) {
+		var err error
+		payload, padSize, err = readByte(payload)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if int(padSize) > len(payload) {
+		// If the length of the padding is greater than the
+		// length of the frame payload, the recipient MUST
+		// treat this as a connection error.
+		// Filed: https://github.com/http2/http2-spec/issues/610
+		return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+	}
+	f.data = payload[:len(payload)-int(padSize)]
+	return f, nil
+}
+
+var (
+	errStreamID    = errors.New("invalid stream ID")
+	errDepStreamID = errors.New("invalid dependent stream ID")
+	errPadLength   = errors.New("pad length too large")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+	return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+	return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+	return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	if len(pad) > 255 {
+		return errPadLength
+	}
+	var flags Flags
+	if endStream {
+		flags |= FlagDataEndStream
+	}
+	if pad != nil {
+		flags |= FlagDataPadded
+	}
+	f.startWrite(FrameData, flags, streamID)
+	if pad != nil {
+		f.wbuf = append(f.wbuf, byte(len(pad)))
+	}
+	f.wbuf = append(f.wbuf, data...)
+	f.wbuf = append(f.wbuf, pad...)
+	return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+	FrameHeader
+	p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+		// When this (ACK 0x1) bit is set, the payload of the
+		// SETTINGS frame MUST be empty.  Receipt of a
+		// SETTINGS frame with the ACK flag set and a length
+		// field value other than 0 MUST be treated as a
+		// connection error (Section 5.4.1) of type
+		// FRAME_SIZE_ERROR.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		// SETTINGS frames always apply to a connection,
+		// never a single stream.  The stream identifier for a
+		// SETTINGS frame MUST be zero (0x0).  If an endpoint
+		// receives a SETTINGS frame whose stream identifier
+		// field is anything other than 0x0, the endpoint MUST
+		// respond with a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p)%6 != 0 {
+		// Expecting a whole number of 6-byte settings.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	f := &SettingsFrame{FrameHeader: fh, p: p}
+	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		// Values above the maximum flow control window size of 2^31 - 1 MUST
+		// be treated as a connection error (Section 5.4.1) of type
+		// FLOW_CONTROL_ERROR.
+		return nil, ConnectionError(ErrCodeFlowControl)
+	}
+	return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+	return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+		if settingID == s {
+			return binary.BigEndian.Uint32(buf[2:6]), true
+		}
+		buf = buf[6:]
+	}
+	return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		if err := fn(Setting{
+			SettingID(binary.BigEndian.Uint16(buf[:2])),
+			binary.BigEndian.Uint32(buf[2:6]),
+		}); err != nil {
+			return err
+		}
+		buf = buf[6:]
+	}
+	return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+	f.startWrite(FrameSettings, 0, 0)
+	for _, s := range settings {
+		f.writeUint16(uint16(s.ID))
+		f.writeUint32(s.Val)
+	}
+	return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+	f.startWrite(FrameSettings, FlagSettingsAck, 0)
+	return f.endWrite()
+}
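A sketch of the usual SETTINGS exchange at connection start, assuming fr is the same *Framer as in the earlier sketch; the Setting IDs shown are defined elsewhere in this package:

	// Advertise our preferences, then acknowledge the peer's SETTINGS frame.
	err := fr.WriteSettings(
		Setting{ID: SettingMaxConcurrentStreams, Val: 250},
		Setting{ID: SettingInitialWindowSize, Val: 1 << 20},
	)
	if err == nil {
		err = fr.WriteSettingsAck()
	}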
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+	FrameHeader
+	Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if len(payload) != 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	f := &PingFrame{FrameHeader: fh}
+	copy(f.Data[:], payload)
+	return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+	var flags Flags
+	if ack {
+		flags = FlagPingAck
+	}
+	f.startWrite(FramePing, flags, 0)
+	f.writeBytes(data[:])
+	return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+	FrameHeader
+	LastStreamID uint32
+	ErrCode      ErrCode
+	debugData    []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+	f.checkValid()
+	return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p) < 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	return &GoAwayFrame{
+		FrameHeader:  fh,
+		LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+		ErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),
+		debugData:    p[8:],
+	}, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+	f.startWrite(FrameGoAway, 0, 0)
+	f.writeUint32(maxStreamID & (1<<31 - 1))
+	f.writeUint32(uint32(code))
+	f.writeBytes(debugData)
+	return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+	FrameHeader
+	p []byte
+}
+
+// Payload returns the frame's payload (after the header).  It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+	f.checkValid()
+	return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+	return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+	FrameHeader
+	Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+	if inc == 0 {
+		// A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+		// increment of 0 as a stream error (Section 5.4.2) of
+		// type PROTOCOL_ERROR; errors on the connection flow
+		// control window MUST be treated as a connection
+		// error (Section 5.4.1).
+		if fh.StreamID == 0 {
+			return nil, ConnectionError(ErrCodeProtocol)
+		}
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
+	}
+	return &WindowUpdateFrame{
+		FrameHeader: fh,
+		Increment:   inc,
+	}, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+		return errors.New("illegal window increment value")
+	}
+	f.startWrite(FrameWindowUpdate, 0, streamID)
+	f.writeUint32(incr)
+	return f.endWrite()
+}
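A sketch of replenishing flow control after consuming data, assuming fr as above; stream ID 0 applies the update to the connection as a whole:

	// Give the peer another 64 KiB of connection-level window.
	if err := fr.WriteWindowUpdate(0, 64<<10); err != nil {
		log.Fatal(err)
	}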
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+	FrameHeader
+
+	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
+	Priority PriorityParam
+
+	headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+	hf := &HeadersFrame{
+		FrameHeader: fh,
+	}
+	if fh.StreamID == 0 {
+		// HEADERS frames MUST be associated with a stream.  If a HEADERS frame
+		// is received whose stream identifier field is 0x0, the recipient MUST
+		// respond with a connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR.
+		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+	}
+	var padLength uint8
+	if fh.Flags.Has(FlagHeadersPadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+	if fh.Flags.Has(FlagHeadersPriority) {
+		var v uint32
+		p, v, err = readUint32(p)
+		if err != nil {
+			return nil, err
+		}
+		hf.Priority.StreamDep = v & 0x7fffffff
+		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+		p, hf.Priority.Weight, err = readByte(p)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if len(p)-int(padLength) <= 0 {
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
+	}
+	hf.headerFragBuf = p[:len(p)-int(padLength)]
+	return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndStream indicates that the header block is the last that
+	// the endpoint will send for the identified stream. Setting
+	// this flag causes the stream to enter one of "half closed"
+	// states.
+	EndStream bool
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+
+	// Priority, if non-zero, includes stream priority information
+	// in the HEADERS frame.
+	Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagHeadersPadded
+	}
+	if p.EndStream {
+		flags |= FlagHeadersEndStream
+	}
+	if p.EndHeaders {
+		flags |= FlagHeadersEndHeaders
+	}
+	if !p.Priority.IsZero() {
+		flags |= FlagHeadersPriority
+	}
+	f.startWrite(FrameHeaders, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !p.Priority.IsZero() {
+		v := p.Priority.StreamDep
+		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+			return errDepStreamID
+		}
+		if p.Priority.Exclusive {
+			v |= 1 << 31
+		}
+		f.writeUint32(v)
+		f.writeByte(p.Priority.Weight)
+	}
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
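BlockFragment must already be HPACK-encoded. A sketch pairing WriteHeaders with the hpack Encoder vendored later in this change, assuming fr as above:

	var hbuf bytes.Buffer
	henc := hpack.NewEncoder(&hbuf)
	henc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	henc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	henc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
	err := fr.WriteHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true, // no request body follows
		EndHeaders:    true, // fits in one frame, no CONTINUATION needed
	})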
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+	FrameHeader
+	PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+	// StreamDep is a 31-bit stream identifier for the
+	// stream that this stream depends on. Zero means no
+	// dependency.
+	StreamDep uint32
+
+	// Exclusive is whether the dependency is exclusive.
+	Exclusive bool
+
+	// Weight is the stream's zero-indexed weight. It should be
+	// set together with StreamDep, or neither should be set.  Per
+	// the spec, "Add one to the value to obtain a weight between
+	// 1 and 256."
+	Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+	return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+	}
+	if len(payload) != 5 {
+		return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+	}
+	v := binary.BigEndian.Uint32(payload[:4])
+	streamID := v & 0x7fffffff // mask off high bit
+	return &PriorityFrame{
+		FrameHeader: fh,
+		PriorityParam: PriorityParam{
+			Weight:    payload[4],
+			StreamDep: streamID,
+			Exclusive: streamID != v, // was high bit set?
+		},
+	}, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	if !validStreamIDOrZero(p.StreamDep) {
+		return errDepStreamID
+	}
+	f.startWrite(FramePriority, 0, streamID)
+	v := p.StreamDep
+	if p.Exclusive {
+		v |= 1 << 31
+	}
+	f.writeUint32(v)
+	f.writeByte(p.Weight)
+	return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+	FrameHeader
+	ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID == 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.startWrite(FrameRSTStream, 0, streamID)
+	f.writeUint32(uint32(code))
+	return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+	FrameHeader
+	headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+	}
+	return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if endHeaders {
+		flags |= FlagContinuationEndHeaders
+	}
+	f.startWrite(FrameContinuation, flags, streamID)
+	f.wbuf = append(f.wbuf, headerBlockFragment...)
+	return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+	FrameHeader
+	PromiseID     uint32
+	headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+	pp := &PushPromiseFrame{
+		FrameHeader: fh,
+	}
+	if pp.StreamID == 0 {
+		// PUSH_PROMISE frames MUST be associated with an existing,
+		// peer-initiated stream. The stream identifier of a
+		// PUSH_PROMISE frame indicates the stream it is associated
+		// with. If the stream identifier field specifies the value
+		// 0x0, a recipient MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	// The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+	var padLength uint8
+	if fh.Flags.Has(FlagPushPromisePadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+
+	p, pp.PromiseID, err = readUint32(p)
+	if err != nil {
+		return
+	}
+	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+	if int(padLength) > len(p) {
+		// like the DATA frame, error out if padding is longer than the body.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	pp.headerFragBuf = p[:len(p)-int(padLength)]
+	return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+
+	// PromiseID is the required Stream ID that this
+	// PUSH_PROMISE frame reserves for the pushed response.
+	PromiseID uint32
+
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagPushPromisePadded
+	}
+	if p.EndHeaders {
+		flags |= FlagPushPromiseEndHeaders
+	}
+	f.startWrite(FramePushPromise, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.writeUint32(p.PromiseID)
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
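A sketch of reserving a pushed stream, assuming hbuf holds an HPACK-encoded header block for the promised request (as in the WriteHeaders sketch) and that stream 2 is an unused server-initiated stream ID:

	err := fr.WritePushPromise(PushPromiseParam{
		StreamID:      1, // the client-initiated stream this promise rides on
		PromiseID:     2, // the server-initiated stream being reserved
		BlockFragment: hbuf.Bytes(),
		EndHeaders:    true,
	})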
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+	f.startWrite(t, flags, streamID)
+	f.writeBytes(payload)
+	return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+	if len(p) == 0 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+	if len(p) < 4 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+	StreamEnded() bool
+}
+
+type headersEnder interface {
+	HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+	headersEnder
+	HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+	*HeadersFrame
+
+	// Fields are the fields contained in the HEADERS and
+	// CONTINUATION frames. The underlying slice is owned by the
+	// Framer and must not be retained after the next call to
+	// ReadFrame.
+	//
+	// Fields are guaranteed to be in the correct http2 order and
+	// not have unknown pseudo header fields or invalid header
+	// field names or values. Required pseudo header fields may be
+	// missing, however. Use the MetaHeadersFrame.PseudoValue and
+	// PseudoFields accessor methods to access pseudo headers.
+	Fields []hpack.HeaderField
+
+	// Truncated is whether the max header list size limit was hit
+	// and Fields is incomplete. The hpack decoder state is still
+	// valid, however.
+	Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+	for _, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return ""
+		}
+		if hf.Name[1:] == pseudo {
+			return hf.Value
+		}
+	}
+	return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+	for i, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return mh.Fields[i:]
+		}
+	}
+	return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+	for i, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return mh.Fields[:i]
+		}
+	}
+	return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+	var isRequest, isResponse bool
+	pf := mh.PseudoFields()
+	for i, hf := range pf {
+		switch hf.Name {
+		case ":method", ":path", ":scheme", ":authority":
+			isRequest = true
+		case ":status":
+			isResponse = true
+		default:
+			return pseudoHeaderError(hf.Name)
+		}
+		// Check for duplicates.
+		// This would be a bad algorithm, but N is 4.
+		// And this doesn't allocate.
+		for _, hf2 := range pf[:i] {
+			if hf.Name == hf2.Name {
+				return duplicatePseudoHeaderError(hf.Name)
+			}
+		}
+	}
+	if isRequest && isResponse {
+		return errMixPseudoHeaderTypes
+	}
+	return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+	v := fr.maxHeaderListSize()
+	if uint32(int(v)) == v {
+		return int(v)
+	}
+	// They had a crazy big number for MaxHeaderBytes anyway,
+	// so give them unlimited header lengths:
+	return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges their header block fragments into the provided hf, and
+// returns a MetaHeadersFrame with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+	if fr.AllowIllegalReads {
+		return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+	}
+	mh := &MetaHeadersFrame{
+		HeadersFrame: hf,
+	}
+	var remainSize = fr.maxHeaderListSize()
+	var sawRegular bool
+
+	var invalid error // pseudo header field errors
+	hdec := fr.ReadMetaHeaders
+	hdec.SetEmitEnabled(true)
+	hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+		if VerboseLogs && logFrameReads {
+			log.Printf("http2: decoded hpack field %+v", hf)
+		}
+		if !httplex.ValidHeaderFieldValue(hf.Value) {
+			invalid = headerFieldValueError(hf.Value)
+		}
+		isPseudo := strings.HasPrefix(hf.Name, ":")
+		if isPseudo {
+			if sawRegular {
+				invalid = errPseudoAfterRegular
+			}
+		} else {
+			sawRegular = true
+			if !validWireHeaderFieldName(hf.Name) {
+				invalid = headerFieldNameError(hf.Name)
+			}
+		}
+
+		if invalid != nil {
+			hdec.SetEmitEnabled(false)
+			return
+		}
+
+		size := hf.Size()
+		if size > remainSize {
+			hdec.SetEmitEnabled(false)
+			mh.Truncated = true
+			return
+		}
+		remainSize -= size
+
+		mh.Fields = append(mh.Fields, hf)
+	})
+	// Lose reference to MetaHeadersFrame:
+	defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+	var hc headersOrContinuation = hf
+	for {
+		frag := hc.HeaderBlockFragment()
+		if _, err := hdec.Write(frag); err != nil {
+			return nil, ConnectionError(ErrCodeCompression)
+		}
+
+		if hc.HeadersEnded() {
+			break
+		}
+		if f, err := fr.ReadFrame(); err != nil {
+			return nil, err
+		} else {
+			hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+		}
+	}
+
+	mh.HeadersFrame.headerFragBuf = nil
+	mh.HeadersFrame.invalidate()
+
+	if err := hdec.Close(); err != nil {
+		return nil, ConnectionError(ErrCodeCompression)
+	}
+	if invalid != nil {
+		fr.errDetail = invalid
+		if VerboseLogs {
+			log.Printf("http2: invalid header: %v", invalid)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+	}
+	if err := mh.checkPseudos(); err != nil {
+		fr.errDetail = err
+		if VerboseLogs {
+			log.Printf("http2: invalid pseudo headers: %v", err)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+	}
+	return mh, nil
+}
+
+func summarizeFrame(f Frame) string {
+	var buf bytes.Buffer
+	f.Header().writeDebug(&buf)
+	switch f := f.(type) {
+	case *SettingsFrame:
+		n := 0
+		f.ForeachSetting(func(s Setting) error {
+			n++
+			if n == 1 {
+				buf.WriteString(", settings:")
+			}
+			fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+			return nil
+		})
+		if n > 0 {
+			buf.Truncate(buf.Len() - 1) // remove trailing comma
+		}
+	case *DataFrame:
+		data := f.Data()
+		const max = 256
+		if len(data) > max {
+			data = data[:max]
+		}
+		fmt.Fprintf(&buf, " data=%q", data)
+		if len(f.Data()) > max {
+			fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+		}
+	case *WindowUpdateFrame:
+		if f.StreamID == 0 {
+			buf.WriteString(" (conn)")
+		}
+		fmt.Fprintf(&buf, " incr=%v", f.Increment)
+	case *PingFrame:
+		fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+	case *GoAwayFrame:
+		fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+			f.LastStreamID, f.ErrCode, f.debugData)
+	case *RSTStreamFrame:
+		fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+	}
+	return buf.String()
+}
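A sketch of the read side, assuming fr as above; setting ReadMetaHeaders makes ReadFrame coalesce HEADERS plus CONTINUATION frames into a single MetaHeadersFrame:

	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			break
		}
		switch f := f.(type) {
		case *MetaHeadersFrame:
			fmt.Println("path:", f.PseudoValue("path"))
		case *DataFrame:
			fmt.Printf("%d bytes on stream %d\n", len(f.Data()), f.StreamID)
		case *PingFrame:
			fr.WritePing(true, f.Data) // echo the ping as an ack
		}
	}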
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 0000000..2b72855
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+	return t1.ExpectContinueTimeout
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+	switch cipher {
+	case tls.TLS_RSA_WITH_RC4_128_SHA,
+		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+		// Reject cipher suites from Appendix A.
+		// "This list includes those cipher suites that do not
+		// offer an ephemeral key exchange and those that are
+		// based on the TLS null, stream or block cipher type"
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 0000000..47b7fae
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+type contextContext interface {
+	context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+	ctx, cancel = context.WithCancel(context.Background())
+	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+	if hs := opts.baseConfig(); hs != nil {
+		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+	}
+	return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+	return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+	return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func (t *Transport) idleConnTimeout() time.Duration {
+	if t.t1 != nil {
+		return t.t1.IdleConnTimeout
+	}
+	return 0
+}
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+	trace := httptrace.ContextClientTrace(req.Context())
+	if trace == nil || trace.GotConn == nil {
+		return
+	}
+	ci := httptrace.GotConnInfo{Conn: cc.tconn}
+	cc.mu.Lock()
+	ci.Reused = cc.nextStreamID > 1
+	ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+	if ci.WasIdle && !cc.lastActive.IsZero() {
+		ci.IdleTime = time.Now().Sub(cc.lastActive)
+	}
+	cc.mu.Unlock()
+
+	trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+	if trace != nil && trace.WroteHeaders != nil {
+		trace.WroteHeaders()
+	}
+}
+
+func traceGot100Continue(trace *clientTrace) {
+	if trace != nil && trace.Got100Continue != nil {
+		trace.Got100Continue()
+	}
+}
+
+func traceWait100Continue(trace *clientTrace) {
+	if trace != nil && trace.Wait100Continue != nil {
+		trace.Wait100Continue()
+	}
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+	if trace != nil && trace.WroteRequest != nil {
+		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+	}
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+	if trace != nil && trace.GotFirstResponseByte != nil {
+		trace.GotFirstResponseByte()
+	}
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+	trace := httptrace.ContextClientTrace(req.Context())
+	return (*clientTrace)(trace)
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+	return cc.ping(ctx)
+}
diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go
new file mode 100644
index 0000000..b4c52ec
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17_not18.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,!go1.8
+
+package http2
+
+import "crypto/tls"
+
+// temporary copy of Go 1.7's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
new file mode 100644
index 0000000..c2ae167
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+	if !DebugGoroutines {
+		return 0
+	}
+	return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"net/http"
+	"strings"
+)
+
+var (
+	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+	for _, v := range []string{
+		"accept",
+		"accept-charset",
+		"accept-encoding",
+		"accept-language",
+		"accept-ranges",
+		"age",
+		"access-control-allow-origin",
+		"allow",
+		"authorization",
+		"cache-control",
+		"content-disposition",
+		"content-encoding",
+		"content-language",
+		"content-length",
+		"content-location",
+		"content-range",
+		"content-type",
+		"cookie",
+		"date",
+		"etag",
+		"expect",
+		"expires",
+		"from",
+		"host",
+		"if-match",
+		"if-modified-since",
+		"if-none-match",
+		"if-unmodified-since",
+		"last-modified",
+		"link",
+		"location",
+		"max-forwards",
+		"proxy-authenticate",
+		"proxy-authorization",
+		"range",
+		"referer",
+		"refresh",
+		"retry-after",
+		"server",
+		"set-cookie",
+		"strict-transport-security",
+		"trailer",
+		"transfer-encoding",
+		"user-agent",
+		"vary",
+		"via",
+		"www-authenticate",
+	} {
+		chk := http.CanonicalHeaderKey(v)
+		commonLowerHeader[chk] = v
+		commonCanonHeader[v] = chk
+	}
+}
+
+func lowerHeader(v string) string {
+	if s, ok := commonLowerHeader[v]; ok {
+		return s
+	}
+	return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..f9bb033
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"io"
+)
+
+const (
+	uint32Max              = ^uint32(0)
+	initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+	dynTab dynamicTable
+	// minSize is the minimum table size set by
+	// SetMaxDynamicTableSize after the previous Header Table Size
+	// Update.
+	minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. This will protect the encoder from too large
+	// size.
+	maxSizeLimit uint32
+	// tableSizeUpdate indicates whether "Header Table Size
+	// Update" is required.
+	tableSizeUpdate bool
+	w               io.Writer
+	buf             []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary.  If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	for idx, hf := range staticTable {
+		if !constantTimeStringCompare(hf.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(idx + 1)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(hf.Value, f.Value) {
+			continue
+		}
+		i = uint64(idx + 1)
+		nameValueMatch = true
+		return
+	}
+
+	j, nameValueMatch := e.dynTab.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		i = j + uint64(len(staticTable))
+	}
+	return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the default dynamic header table size described in
+// the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
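For example, the prefix-coded integer from RFC 7541 Appendix C.1.2: encoding 1337 with a 5-bit prefix emits the prefix maximum 31 (0x1f), then the remainder 1306 in 7-bit groups:

	b := appendVarInt(nil, 5, 1337)
	fmt.Printf("% x\n", b) // prints "1f 9a 0a"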
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when that produces a
+// strictly shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+	if sensitive {
+		return 0x10
+	}
+	if indexing {
+		return 0x40
+	}
+	return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..135b9f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+	Err error
+}
+
+func (de DecodingError) Error() string {
+	return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+	return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+	Name, Value string
+
+	// Sensitive means that this header field should never be
+	// indexed.
+	Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+	return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+	var suffix string
+	if hf.Sensitive {
+		suffix = " (sensitive)"
+	}
+	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+	// "The size of the dynamic table is the sum of the size of
+	// its entries.  The size of an entry is the sum of its name's
+	// length in octets (as defined in Section 5.2), its value's
+	// length in octets (see Section 5.2), plus 32.  The size of
+	// an entry is calculated using the length of the name and
+	// value without any Huffman encoding applied."
+
+	// This can overflow if somebody makes a large HeaderField
+	// Name and/or Value by hand, but we don't care, because that
+	// won't happen on the wire because the encoding doesn't allow
+	// it.
+	return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+	dynTab dynamicTable
+	emit   func(f HeaderField)
+
+	emitEnabled bool // whether calls to emit are enabled
+	maxStrLen   int  // 0 means unlimited
+
+	// buf is the unparsed buffer. It's only written to
+	// saveBuf if it was truncated in the middle of a header
+	// block. Because it's usually not owned, we can only
+	// process it under Write.
+	buf []byte // not owned; only valid during Write
+
+	// saveBuf is previous data passed to Write which we weren't able
+	// to fully parse before. Unlike buf, we own this data.
+	saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+	d := &Decoder{
+		emit:        emitFunc,
+		emitEnabled: true,
+	}
+	d.dynTab.allowedMaxSize = maxDynamicTableSize
+	d.dynTab.setMaxSize(maxDynamicTableSize)
+	return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+	d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+	d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+	d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+	d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+	// ents is the FIFO described at
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	// The newest (lowest index) is appended at the end, and items are
+	// evicted from the front.
+	ents           []HeaderField
+	size           uint32
+	maxSize        uint32 // current maxSize
+	allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+	dt.maxSize = v
+	dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+//
+// Then make add increment the size. maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+	dt.ents = append(dt.ents, f)
+	dt.size += f.Size()
+	dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+	base := dt.ents // keep base pointer of slice
+	for dt.size > dt.maxSize {
+		dt.size -= dt.ents[0].Size()
+		dt.ents = dt.ents[1:]
+	}
+
+	// Shift slice contents down if we evicted things.
+	if len(dt.ents) != len(base) {
+		copy(base, dt.ents)
+		dt.ents = base[:len(dt.ents)]
+	}
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
+func constantTimeStringCompare(a, b string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	c := byte(0)
+
+	for i := 0; i < len(a); i++ {
+		c |= a[i] ^ b[i]
+	}
+
+	return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+	l := len(dt.ents)
+	for j := l - 1; j >= 0; j-- {
+		ent := dt.ents[j]
+		if !constantTimeStringCompare(ent.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(l - j)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(ent.Value, f.Value) {
+			continue
+		}
+		i = uint64(l - j)
+		nameValueMatch = true
+		return
+	}
+	return
+}
+
+func (d *Decoder) maxTableIndex() int {
+	return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+	if i < 1 {
+		return
+	}
+	if i > uint64(d.maxTableIndex()) {
+		return
+	}
+	if i <= uint64(len(staticTable)) {
+		return staticTable[i-1], true
+	}
+	dents := d.dynTab.ents
+	return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// Decode decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+	var hf []HeaderField
+	saveFunc := d.emit
+	defer func() { d.emit = saveFunc }()
+	d.emit = func(f HeaderField) { hf = append(hf, f) }
+	if _, err := d.Write(p); err != nil {
+		return nil, err
+	}
+	if err := d.Close(); err != nil {
+		return nil, err
+	}
+	return hf, nil
+}
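From a caller's perspective, a minimal decoding sketch, reusing the hbuf header block produced by the encoding sketch earlier in this diff:

	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(hbuf.Bytes())
	if err == nil {
		for _, f := range fields {
			fmt.Printf("%s: %s\n", f.Name, f.Value)
		}
	}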
+
+func (d *Decoder) Close() error {
+	if d.saveBuf.Len() > 0 {
+		d.saveBuf.Reset()
+		return DecodingError{errors.New("truncated headers")}
+	}
+	return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		// Prevent state machine CPU attacks (making us redo
+		// work up to the point of finding out we don't have
+		// enough data)
+		return
+	}
+	// Only copy the data if we have to. Optimistically assume
+	// that p will contain a complete header block.
+	if d.saveBuf.Len() == 0 {
+		d.buf = p
+	} else {
+		d.saveBuf.Write(p)
+		d.buf = d.saveBuf.Bytes()
+		d.saveBuf.Reset()
+	}
+
+	for len(d.buf) > 0 {
+		err = d.parseHeaderFieldRepr()
+		if err == errNeedMore {
+			// Extra paranoia, making sure saveBuf won't
+			// get too large.  All the varint and string
+			// reading code earlier should already catch
+			// overlong things and return ErrStringLength,
+			// but keep this as a last resort.
+			const varIntOverhead = 8 // conservative
+			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+				return 0, ErrStringLength
+			}
+			d.saveBuf.Write(d.buf)
+			return len(p), nil
+		}
+		if err != nil {
+			break
+		}
+	}
+	return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+	indexedTrue indexType = iota
+	indexedFalse
+	indexedNever
+)
+
+func (v indexType) indexed() bool   { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+	b := d.buf[0]
+	switch {
+	case b&128 != 0:
+		// Indexed representation.
+		// High bit set?
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b10xxxxxx: top two bits are 10
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
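+
+// Worked example (illustrative, not part of the decoder itself): with a
+// 5-bit prefix, RFC 7541 Appendix C.1.2 encodes the value 1337 as the
+// bytes 0x1f 0x9a 0x0a, so:
+//
+//	i, remain, err := readVarInt(5, []byte{0x1f, 0x9a, 0x0a})
+//
+// yields i == 1337 with remain empty and err == nil.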
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
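+
+// Illustrative sketch of how the pieces above fit together from a
+// caller's side, using the exported hpack API (NewDecoder, Write); the
+// headerBlockFragment byte slice below is a placeholder:
+//
+//	var fields []HeaderField
+//	dec := NewDecoder(4096, func(f HeaderField) { fields = append(fields, f) })
+//	if _, err := dec.Write(headerBlockFragment); err != nil {
+//		// DecodingError, ErrStringLength, etc.
+//	}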
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..8850e39
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return 0, err
+	}
+	return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
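+
+// Worked example (illustrative): RFC 7541 Appendix C.4.1 Huffman-encodes
+// "www.example.com" as the 12 bytes f1 e3 c2 e5 f2 3a 6b a0 ab 90 f4 ff, so
+//
+//	s, err := HuffmanDecodeToString([]byte{
+//		0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff,
+//	})
+//
+// returns s == "www.example.com" and err == nil.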
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+	n := rootHuffmanNode
+	// cur is the bit buffer that has not been fed into n.
+	// cbits is the number of low order bits in cur that are valid.
+	// sbits is the number of bits of the symbol prefix being decoded.
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to the nearest byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
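+
+// Worked example (illustrative): the per-symbol code lengths for
+// "www.example.com" sum to 7+7+7+6+5+7+5+6+6+6+5+6+5+5+6 = 89 bits, so
+// HuffmanEncodeLength("www.example.com") returns (89+7)/8 = 12 bytes,
+// matching the encoded form in RFC 7541 Appendix C.4.1.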
+
+// appendByteToHuffmanCode appends the Huffman code for c to dst and
+// returns the extended buffer and the remaining (unused) bits in the
+// last element. The appending is not byte aligned; the remaining bits
+// in the last element of dst are given by rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..b9283a0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+func pair(name, value string) HeaderField {
+	return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = [...]HeaderField{
+	pair(":authority", ""), // index 1 (1-based)
+	pair(":method", "GET"),
+	pair(":method", "POST"),
+	pair(":path", "/"),
+	pair(":path", "/index.html"),
+	pair(":scheme", "http"),
+	pair(":scheme", "https"),
+	pair(":status", "200"),
+	pair(":status", "204"),
+	pair(":status", "206"),
+	pair(":status", "304"),
+	pair(":status", "400"),
+	pair(":status", "404"),
+	pair(":status", "500"),
+	pair("accept-charset", ""),
+	pair("accept-encoding", "gzip, deflate"),
+	pair("accept-language", ""),
+	pair("accept-ranges", ""),
+	pair("accept", ""),
+	pair("access-control-allow-origin", ""),
+	pair("age", ""),
+	pair("allow", ""),
+	pair("authorization", ""),
+	pair("cache-control", ""),
+	pair("content-disposition", ""),
+	pair("content-encoding", ""),
+	pair("content-language", ""),
+	pair("content-length", ""),
+	pair("content-location", ""),
+	pair("content-range", ""),
+	pair("content-type", ""),
+	pair("cookie", ""),
+	pair("date", ""),
+	pair("etag", ""),
+	pair("expect", ""),
+	pair("expires", ""),
+	pair("from", ""),
+	pair("host", ""),
+	pair("if-match", ""),
+	pair("if-modified-since", ""),
+	pair("if-none-match", ""),
+	pair("if-range", ""),
+	pair("if-unmodified-since", ""),
+	pair("last-modified", ""),
+	pair("link", ""),
+	pair("location", ""),
+	pair("max-forwards", ""),
+	pair("proxy-authenticate", ""),
+	pair("proxy-authorization", ""),
+	pair("range", ""),
+	pair("referer", ""),
+	pair("refresh", ""),
+	pair("retry-after", ""),
+	pair("server", ""),
+	pair("set-cookie", ""),
+	pair("strict-transport-security", ""),
+	pair("transfer-encoding", ""),
+	pair("user-agent", ""),
+	pair("vary", ""),
+	pair("via", ""),
+	pair("www-authenticate", ""),
+}
+
+var huffmanCodes = [256]uint32{
+	0x1ff8,
+	0x7fffd8,
+	0xfffffe2,
+	0xfffffe3,
+	0xfffffe4,
+	0xfffffe5,
+	0xfffffe6,
+	0xfffffe7,
+	0xfffffe8,
+	0xffffea,
+	0x3ffffffc,
+	0xfffffe9,
+	0xfffffea,
+	0x3ffffffd,
+	0xfffffeb,
+	0xfffffec,
+	0xfffffed,
+	0xfffffee,
+	0xfffffef,
+	0xffffff0,
+	0xffffff1,
+	0xffffff2,
+	0x3ffffffe,
+	0xffffff3,
+	0xffffff4,
+	0xffffff5,
+	0xffffff6,
+	0xffffff7,
+	0xffffff8,
+	0xffffff9,
+	0xffffffa,
+	0xffffffb,
+	0x14,
+	0x3f8,
+	0x3f9,
+	0xffa,
+	0x1ff9,
+	0x15,
+	0xf8,
+	0x7fa,
+	0x3fa,
+	0x3fb,
+	0xf9,
+	0x7fb,
+	0xfa,
+	0x16,
+	0x17,
+	0x18,
+	0x0,
+	0x1,
+	0x2,
+	0x19,
+	0x1a,
+	0x1b,
+	0x1c,
+	0x1d,
+	0x1e,
+	0x1f,
+	0x5c,
+	0xfb,
+	0x7ffc,
+	0x20,
+	0xffb,
+	0x3fc,
+	0x1ffa,
+	0x21,
+	0x5d,
+	0x5e,
+	0x5f,
+	0x60,
+	0x61,
+	0x62,
+	0x63,
+	0x64,
+	0x65,
+	0x66,
+	0x67,
+	0x68,
+	0x69,
+	0x6a,
+	0x6b,
+	0x6c,
+	0x6d,
+	0x6e,
+	0x6f,
+	0x70,
+	0x71,
+	0x72,
+	0xfc,
+	0x73,
+	0xfd,
+	0x1ffb,
+	0x7fff0,
+	0x1ffc,
+	0x3ffc,
+	0x22,
+	0x7ffd,
+	0x3,
+	0x23,
+	0x4,
+	0x24,
+	0x5,
+	0x25,
+	0x26,
+	0x27,
+	0x6,
+	0x74,
+	0x75,
+	0x28,
+	0x29,
+	0x2a,
+	0x7,
+	0x2b,
+	0x76,
+	0x2c,
+	0x8,
+	0x9,
+	0x2d,
+	0x77,
+	0x78,
+	0x79,
+	0x7a,
+	0x7b,
+	0x7ffe,
+	0x7fc,
+	0x3ffd,
+	0x1ffd,
+	0xffffffc,
+	0xfffe6,
+	0x3fffd2,
+	0xfffe7,
+	0xfffe8,
+	0x3fffd3,
+	0x3fffd4,
+	0x3fffd5,
+	0x7fffd9,
+	0x3fffd6,
+	0x7fffda,
+	0x7fffdb,
+	0x7fffdc,
+	0x7fffdd,
+	0x7fffde,
+	0xffffeb,
+	0x7fffdf,
+	0xffffec,
+	0xffffed,
+	0x3fffd7,
+	0x7fffe0,
+	0xffffee,
+	0x7fffe1,
+	0x7fffe2,
+	0x7fffe3,
+	0x7fffe4,
+	0x1fffdc,
+	0x3fffd8,
+	0x7fffe5,
+	0x3fffd9,
+	0x7fffe6,
+	0x7fffe7,
+	0xffffef,
+	0x3fffda,
+	0x1fffdd,
+	0xfffe9,
+	0x3fffdb,
+	0x3fffdc,
+	0x7fffe8,
+	0x7fffe9,
+	0x1fffde,
+	0x7fffea,
+	0x3fffdd,
+	0x3fffde,
+	0xfffff0,
+	0x1fffdf,
+	0x3fffdf,
+	0x7fffeb,
+	0x7fffec,
+	0x1fffe0,
+	0x1fffe1,
+	0x3fffe0,
+	0x1fffe2,
+	0x7fffed,
+	0x3fffe1,
+	0x7fffee,
+	0x7fffef,
+	0xfffea,
+	0x3fffe2,
+	0x3fffe3,
+	0x3fffe4,
+	0x7ffff0,
+	0x3fffe5,
+	0x3fffe6,
+	0x7ffff1,
+	0x3ffffe0,
+	0x3ffffe1,
+	0xfffeb,
+	0x7fff1,
+	0x3fffe7,
+	0x7ffff2,
+	0x3fffe8,
+	0x1ffffec,
+	0x3ffffe2,
+	0x3ffffe3,
+	0x3ffffe4,
+	0x7ffffde,
+	0x7ffffdf,
+	0x3ffffe5,
+	0xfffff1,
+	0x1ffffed,
+	0x7fff2,
+	0x1fffe3,
+	0x3ffffe6,
+	0x7ffffe0,
+	0x7ffffe1,
+	0x3ffffe7,
+	0x7ffffe2,
+	0xfffff2,
+	0x1fffe4,
+	0x1fffe5,
+	0x3ffffe8,
+	0x3ffffe9,
+	0xffffffd,
+	0x7ffffe3,
+	0x7ffffe4,
+	0x7ffffe5,
+	0xfffec,
+	0xfffff3,
+	0xfffed,
+	0x1fffe6,
+	0x3fffe9,
+	0x1fffe7,
+	0x1fffe8,
+	0x7ffff3,
+	0x3fffea,
+	0x3fffeb,
+	0x1ffffee,
+	0x1ffffef,
+	0xfffff4,
+	0xfffff5,
+	0x3ffffea,
+	0x7ffff4,
+	0x3ffffeb,
+	0x7ffffe6,
+	0x3ffffec,
+	0x3ffffed,
+	0x7ffffe7,
+	0x7ffffe8,
+	0x7ffffe9,
+	0x7ffffea,
+	0x7ffffeb,
+	0xffffffe,
+	0x7ffffec,
+	0x7ffffed,
+	0x7ffffee,
+	0x7ffffef,
+	0x7fffff0,
+	0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..40b46ae
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,365 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+//
+package http2
+
+import (
+	"bufio"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/lex/httplex"
+)
+
+var (
+	VerboseLogs    bool
+	logFrameWrites bool
+	logFrameReads  bool
+)
+
+func init() {
+	e := os.Getenv("GODEBUG")
+	if strings.Contains(e, "http2debug=1") {
+		VerboseLogs = true
+	}
+	if strings.Contains(e, "http2debug=2") {
+		VerboseLogs = true
+		logFrameWrites = true
+		logFrameReads = true
+	}
+}
+
+const (
+	// ClientPreface is the string that must be sent by new
+	// connections from clients.
+	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+	// SETTINGS_MAX_FRAME_SIZE default
+	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
+	initialMaxFrameSize = 16384
+
+	// NextProtoTLS is the NPN/ALPN protocol negotiated during
+	// HTTP/2's TLS setup.
+	NextProtoTLS = "h2"
+
+	// http://http2.github.io/http2-spec/#SettingValues
+	initialHeaderTableSize = 4096
+
+	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+	defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+	clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+	stateIdle streamState = iota
+	stateOpen
+	stateHalfClosedLocal
+	stateHalfClosedRemote
+	stateResvLocal
+	stateResvRemote
+	stateClosed
+)
+
+var stateName = [...]string{
+	stateIdle:             "Idle",
+	stateOpen:             "Open",
+	stateHalfClosedLocal:  "HalfClosedLocal",
+	stateHalfClosedRemote: "HalfClosedRemote",
+	stateResvLocal:        "ResvLocal",
+	stateResvRemote:       "ResvRemote",
+	stateClosed:           "Closed",
+}
+
+func (st streamState) String() string {
+	return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+	// ID is which setting is being set.
+	// See http://http2.github.io/http2-spec/#SettingValues
+	ID SettingID
+
+	// Val is the value.
+	Val uint32
+}
+
+func (s Setting) String() string {
+	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+	switch s.ID {
+	case SettingEnablePush:
+		if s.Val != 1 && s.Val != 0 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	case SettingInitialWindowSize:
+		if s.Val > 1<<31-1 {
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	case SettingMaxFrameSize:
+		if s.Val < 16384 || s.Val > 1<<24-1 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	}
+	return nil
+}
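+
+// For example (illustrative), Setting{SettingInitialWindowSize, 1 << 31}.Valid()
+// returns ConnectionError(ErrCodeFlowControl) because RFC 7540 6.5.2 caps the
+// initial window size at 2^31-1, while Setting{SettingMaxFrameSize, 1 << 20}.Valid()
+// returns nil.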
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+	SettingHeaderTableSize      SettingID = 0x1
+	SettingEnablePush           SettingID = 0x2
+	SettingMaxConcurrentStreams SettingID = 0x3
+	SettingInitialWindowSize    SettingID = 0x4
+	SettingMaxFrameSize         SettingID = 0x5
+	SettingMaxHeaderListSize    SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
+	SettingEnablePush:           "ENABLE_PUSH",
+	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
+	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
+	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+	if v, ok := settingName[s]; ok {
+		return v
+	}
+	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
+	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderFieldName for the base rules.
+//
+// Further, http2 says:
+//   "Just as in HTTP/1.x, header field names are strings of ASCII
+//   characters that are compared in a case-insensitive
+//   fashion. However, header field names MUST be converted to
+//   lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+	if len(v) == 0 {
+		return false
+	}
+	for _, r := range v {
+		if !httplex.IsTokenRune(r) {
+			return false
+		}
+		if 'A' <= r && r <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
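+
+// For example (illustrative), validWireHeaderFieldName("content-type") is
+// true, while validWireHeaderFieldName("Content-Type") and
+// validWireHeaderFieldName("") are false: uppercase letters and empty
+// names are rejected on the wire.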
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+	for i := 100; i <= 999; i++ {
+		if v := http.StatusText(i); v != "" {
+			httpCodeStringCommon[i] = strconv.Itoa(i)
+		}
+	}
+}
+
+func httpCodeString(code int) string {
+	if s, ok := httpCodeStringCommon[code]; ok {
+		return s
+	}
+	return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and initialized in place, since the zero value of a
+// channel is not usable.
+func (cw *closeWaiter) Init() {
+	*cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+	close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+	<-cw
+}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+	w  io.Writer     // immutable
+	bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+	return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+	New: func() interface{} {
+		// TODO: pick something better? this is a bit under
+		// (3 x typical 1500 byte MTU) at least.
+		return bufio.NewWriterSize(nil, 4<<10)
+	},
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+	if w.bw == nil {
+		bw := bufWriterPool.Get().(*bufio.Writer)
+		bw.Reset(w.w)
+		w.bw = bw
+	}
+	return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+	bw := w.bw
+	if bw == nil {
+		return nil
+	}
+	err := bw.Flush()
+	bw.Reset(nil)
+	bufWriterPool.Put(bw)
+	w.bw = nil
+	return err
+}
+
+func mustUint31(v int32) uint32 {
+	if v < 0 || v > 2147483647 {
+		panic("out of range")
+	}
+	return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
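+
+// For example (illustrative), bodyAllowedForStatus(200) is true, while
+// bodyAllowedForStatus(101), bodyAllowedForStatus(204) and
+// bodyAllowedForStatus(304) are false.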
+
+type httpError struct {
+	msg     string
+	timeout bool
+}
+
+func (e *httpError) Error() string   { return e.msg }
+func (e *httpError) Timeout() bool   { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+	ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+	v []string // owned by sorter
+}
+
+func (s *sorter) Len() int           { return len(s.v) }
+func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+	keys := s.v[:0]
+	for k := range h {
+		keys = append(keys, k)
+	}
+	s.v = keys
+	sort.Sort(s)
+	return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which sorter owns, so
+	// stash it away while we sort the user's buffer.
+	save := s.v
+	s.v = ss
+	sort.Sort(s)
+	s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//     *) a non-empty string starting with '/', but not with "//",
+//     *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
+}
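+
+// For example (illustrative), validPseudoPath("/"), validPseudoPath("/index.html")
+// and validPseudoPath("*") are true, while validPseudoPath("") and
+// validPseudoPath("//host/x") are false.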
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 0000000..efd2e12
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+	return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+	return 0
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+	switch cipher {
+	case tls.TLS_RSA_WITH_RC4_128_SHA,
+		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+		// Reject cipher suites from Appendix A.
+		// "This list includes those cipher suites that do not
+		// offer an ephemeral key exchange and those that are
+		// based on the TLS null, stream or block cipher type"
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 0000000..140434a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,87 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+)
+
+type contextContext interface {
+	Done() <-chan struct{}
+	Err() error
+}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error            { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+	return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+	// Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace)     {}
+func traceWroteHeaders(*clientTrace)          {}
+func traceWroteRequest(*clientTrace, error)   {}
+func traceGot100Continue(trace *clientTrace)  {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+	return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+	return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+	return req
+}
+
+// temporary copy of Go 1.6's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                     c.Rand,
+		Time:                     c.Time,
+		Certificates:             c.Certificates,
+		NameToCertificate:        c.NameToCertificate,
+		GetCertificate:           c.GetCertificate,
+		RootCAs:                  c.RootCAs,
+		NextProtos:               c.NextProtos,
+		ServerName:               c.ServerName,
+		ClientAuth:               c.ClientAuth,
+		ClientCAs:                c.ClientCAs,
+		InsecureSkipVerify:       c.InsecureSkipVerify,
+		CipherSuites:             c.CipherSuites,
+		PreferServerCipherSuites: c.PreferServerCipherSuites,
+		SessionTicketsDisabled:   c.SessionTicketsDisabled,
+		SessionTicketKey:         c.SessionTicketKey,
+		ClientSessionCache:       c.ClientSessionCache,
+		MinVersion:               c.MinVersion,
+		MaxVersion:               c.MaxVersion,
+		CurvePreferences:         c.CurvePreferences,
+	}
+}
+
+func (cc *ClientConn) Ping(ctx contextContext) error {
+	return cc.ping(ctx)
+}
+
+func (t *Transport) idleConnTimeout() time.Duration { return 0 }
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..53b7a1d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,153 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair.  It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+	mu       sync.Mutex
+	c        sync.Cond // c.L lazily initialized to &p.mu
+	b        pipeBuffer
+	err      error         // read error once empty. non-nil means closed.
+	breakErr error         // immediate read error (caller doesn't see rest of b)
+	donec    chan struct{} // closed on error
+	readFn   func()        // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+	Len() int
+	io.Writer
+	io.Reader
+}
+
+func (p *pipe) Len() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into d.
+func (p *pipe) Read(d []byte) (n int, err error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	for {
+		if p.breakErr != nil {
+			return 0, p.breakErr
+		}
+		if p.b.Len() > 0 {
+			return p.b.Read(d)
+		}
+		if p.err != nil {
+			if p.readFn != nil {
+				p.readFn()     // e.g. copy trailers
+				p.readFn = nil // not sticky like p.err
+			}
+			return 0, p.err
+		}
+		p.c.Wait()
+	}
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from d into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	defer p.c.Signal()
+	if p.err != nil {
+		return 0, errClosedPipeWrite
+	}
+	return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+	if err == nil {
+		panic("err must be non-nil")
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	defer p.c.Signal()
+	if *dst != nil {
+		// Already been done.
+		return
+	}
+	p.readFn = fn
+	*dst = err
+	p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+	if p.donec == nil {
+		return
+	}
+	// Close if unclosed. This isn't racy since we always
+	// hold p.mu while closing.
+	select {
+	case <-p.donec:
+	default:
+		close(p.donec)
+	}
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.breakErr != nil {
+		return p.breakErr
+	}
+	return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.donec == nil {
+		p.donec = make(chan struct{})
+		if p.err != nil || p.breakErr != nil {
+			// Already hit an error.
+			p.closeDoneLocked()
+		}
+	}
+	return p.donec
+}
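+
+// Minimal illustrative sketch (not taken from this package's own call
+// sites): any type with Len, Read and Write, such as *bytes.Buffer,
+// satisfies pipeBuffer, so a pipe can be exercised like this:
+//
+//	p := &pipe{b: new(bytes.Buffer)}
+//	p.Write([]byte("body"))         // typically one goroutine
+//	n, _ := p.Read(make([]byte, 4)) // typically another; n == 4
+//	p.CloseWithError(io.EOF)        // later Reads return io.EOF once drained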
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..c986bc1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2305 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable?  or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2/hpack"
+)
+
+const (
+	prefaceTimeout        = 10 * time.Second
+	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize = 4 << 10
+	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+	errClientDisconnected = errors.New("client disconnected")
+	errClosedBody         = errors.New("body closed by handler")
+	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
+	errStreamClosed       = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+	New: func() interface{} {
+		rws := &responseWriterState{}
+		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+		return rws
+	},
+}
+
+// Test hooks.
+var (
+	testHookOnConn        func()
+	testHookGetServerConn func(*serverConn)
+	testHookOnPanicMu     *sync.Mutex // nil except in tests
+	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+	// which may run at a time over all connections.
+	// Negative or zero means no limit.
+	// TODO: implement
+	MaxHandlers int
+
+	// MaxConcurrentStreams optionally specifies the number of
+	// concurrent streams that each client may have open at a
+	// time. This is unrelated to the number of http.Handler goroutines
+	// which may be active globally, which is MaxHandlers.
+	// If zero, MaxConcurrentStreams defaults to at least 100, per
+	// the HTTP/2 spec's recommendations.
+	MaxConcurrentStreams uint32
+
+	// MaxReadFrameSize optionally specifies the largest frame
+	// this server is willing to read. A valid value is between
+	// 16k and 16M, inclusive. If zero or otherwise invalid, a
+	// default value is used.
+	MaxReadFrameSize uint32
+
+	// PermitProhibitedCipherSuites, if true, permits the use of
+	// cipher suites prohibited by the HTTP/2 spec.
+	PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+		return v
+	}
+	return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+	if v := s.MaxConcurrentStreams; v > 0 {
+		return v
+	}
+	return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+	if conf == nil {
+		conf = new(Server)
+	}
+
+	if s.TLSConfig == nil {
+		s.TLSConfig = new(tls.Config)
+	} else if s.TLSConfig.CipherSuites != nil {
+		// If they already provided a CipherSuite list, return
+		// an error if it has a bad order or is missing
+		// ECDHE_RSA_WITH_AES_128_GCM_SHA256.
+		const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+		haveRequired := false
+		sawBad := false
+		for i, cs := range s.TLSConfig.CipherSuites {
+			if cs == requiredCipher {
+				haveRequired = true
+			}
+			if isBadCipher(cs) {
+				sawBad = true
+			} else if sawBad {
+				return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+			}
+		}
+		if !haveRequired {
+			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+		}
+	}
+
+	// Note: not setting MinVersion to tls.VersionTLS12,
+	// as we don't want to interfere with HTTP/1.1 traffic
+	// on the user's server. We enforce TLS 1.2 later once
+	// we accept a connection. Ideally this should be done
+	// during next-proto selection, but using TLS <1.2 with
+	// HTTP/2 is still the client's bug.
+
+	s.TLSConfig.PreferServerCipherSuites = true
+
+	haveNPN := false
+	for _, p := range s.TLSConfig.NextProtos {
+		if p == NextProtoTLS {
+			haveNPN = true
+			break
+		}
+	}
+	if !haveNPN {
+		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+	}
+	// h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+	// to switch to "h2".
+	s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+	if s.TLSNextProto == nil {
+		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+	}
+	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+		if testHookOnConn != nil {
+			testHookOnConn()
+		}
+		conf.ServeConn(c, &ServeConnOpts{
+			Handler:    h,
+			BaseConfig: hs,
+		})
+	}
+	s.TLSNextProto[NextProtoTLS] = protoHandler
+	s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+	return nil
+}
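+
+// Illustrative usage sketch (not part of this file; mux and the
+// certificate paths are placeholders): enabling HTTP/2 on an existing
+// net/http server before it starts serving TLS.
+//
+//	srv := &http.Server{Addr: ":443", Handler: mux}
+//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))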
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+	// BaseConfig optionally sets the base configuration
+	// for values. If nil, defaults are used.
+	BaseConfig *http.Server
+
+	// Handler specifies which handler to use for processing
+	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
+	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+	Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+	if o != nil && o.BaseConfig != nil {
+		return o.BaseConfig
+	}
+	return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+	if o != nil {
+		if o.Handler != nil {
+			return o.Handler
+		}
+		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+			return o.BaseConfig.Handler
+		}
+	}
+	return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+	baseCtx, cancel := serverConnBaseContext(c, opts)
+	defer cancel()
+
+	sc := &serverConn{
+		srv:              s,
+		hs:               opts.baseConfig(),
+		conn:             c,
+		baseCtx:          baseCtx,
+		remoteAddrStr:    c.RemoteAddr().String(),
+		bw:               newBufferedWriter(c),
+		handler:          opts.handler(),
+		streams:          make(map[uint32]*stream),
+		readFrameCh:      make(chan readFrameResult),
+		wantWriteFrameCh: make(chan frameWriteMsg, 8),
+		wroteFrameCh:     make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+		bodyReadCh:       make(chan bodyReadMsg),         // buffering doesn't matter either way
+		doneServing:      make(chan struct{}),
+		advMaxStreams:    s.maxConcurrentStreams(),
+		writeSched: writeScheduler{
+			maxFrameSize: initialMaxFrameSize,
+		},
+		initialWindowSize: initialWindowSize,
+		headerTableSize:   initialHeaderTableSize,
+		serveG:            newGoroutineLock(),
+		pushEnabled:       true,
+	}
+
+	sc.flow.add(initialWindowSize)
+	sc.inflow.add(initialWindowSize)
+	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+	fr := NewFramer(sc.bw, c)
+	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+	fr.MaxHeaderListSize = sc.maxHeaderListSize()
+	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+	sc.framer = fr
+
+	if tc, ok := c.(connectionStater); ok {
+		sc.tlsState = new(tls.ConnectionState)
+		*sc.tlsState = tc.ConnectionState()
+		// 9.2 Use of TLS Features
+		// An implementation of HTTP/2 over TLS MUST use TLS
+		// 1.2 or higher with the restrictions on feature set
+		// and cipher suite described in this section. Due to
+		// implementation limitations, it might not be
+		// possible to fail TLS negotiation. An endpoint MUST
+		// immediately terminate an HTTP/2 connection that
+		// does not meet the TLS requirements described in
+		// this section with a connection error (Section
+		// 5.4.1) of type INADEQUATE_SECURITY.
+		if sc.tlsState.Version < tls.VersionTLS12 {
+			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+			return
+		}
+
+		if sc.tlsState.ServerName == "" {
+			// Client must use SNI, but we don't enforce that anymore,
+			// since it was causing problems when connecting to bare IP
+			// addresses during development.
+			//
+			// TODO: optionally enforce? Or enforce at the time we receive
+			// a new request, and verify that the ServerName matches the :authority?
+			// But that precludes proxy situations, perhaps.
+			//
+			// So for now, do nothing here again.
+		}
+
+		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+			// "Endpoints MAY choose to generate a connection error
+			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+			// the prohibited cipher suites are negotiated."
+			//
+			// We choose that. In my opinion, the spec is weak
+			// here. It also says both parties must support at least
+			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+			// excuses here. If we really must, we could allow an
+			// "AllowInsecureWeakCiphers" option on the server later.
+			// Let's see how it plays out first.
+			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+			return
+		}
+	}
+
+	if hook := testHookGetServerConn; hook != nil {
+		hook(sc)
+	}
+	sc.serve()
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+	// ignoring errors. hanging up anyway.
+	sc.framer.WriteGoAway(0, err, []byte(debug))
+	sc.bw.Flush()
+	sc.conn.Close()
+}
+
+type serverConn struct {
+	// Immutable:
+	srv              *Server
+	hs               *http.Server
+	conn             net.Conn
+	bw               *bufferedWriter // writing to conn
+	handler          http.Handler
+	baseCtx          contextContext
+	framer           *Framer
+	doneServing      chan struct{}         // closed when serverConn.serve ends
+	readFrameCh      chan readFrameResult  // written by serverConn.readFrames
+	wantWriteFrameCh chan frameWriteMsg    // from handlers -> serve
+	wroteFrameCh     chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+	bodyReadCh       chan bodyReadMsg      // from handlers -> serve
+	testHookCh       chan func(int)        // code to run on the serve loop
+	flow             flow                  // conn-wide (not stream-specific) outbound flow control
+	inflow           flow                  // conn-wide inbound flow control
+	tlsState         *tls.ConnectionState  // shared by all handlers, like net/http
+	remoteAddrStr    string
+
+	// Everything following is owned by the serve loop; use serveG.check():
+	serveG                goroutineLock // used to verify funcs are on serve()
+	pushEnabled           bool
+	sawFirstSettings      bool // got the initial SETTINGS frame after the preface
+	needToSendSettingsAck bool
+	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
+	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+	curOpenStreams        uint32 // client's number of open streams
+	maxStreamID           uint32 // max ever seen
+	streams               map[uint32]*stream
+	initialWindowSize     int32
+	headerTableSize       uint32
+	peerMaxHeaderListSize uint32            // zero means unknown (default)
+	canonHeader           map[string]string // http2-lower-case -> Go-Canonical-Case
+	writingFrame          bool              // started write goroutine but haven't heard back on wroteFrameCh
+	needsFrameFlush       bool              // last frame write wasn't a flush
+	writeSched            writeScheduler
+	inGoAway              bool // we've started to or sent GOAWAY
+	needToSendGoAway      bool // we need to schedule a GOAWAY frame write
+	goAwayCode            ErrCode
+	shutdownTimerCh       <-chan time.Time // nil until used
+	shutdownTimer         *time.Timer      // nil until used
+	freeRequestBodyBuf    []byte           // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+
+	// Owned by the writeFrameAsync goroutine:
+	headerWriteBuf bytes.Buffer
+	hpackEncoder   *hpack.Encoder
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+	n := sc.hs.MaxHeaderBytes
+	if n <= 0 {
+		n = http.DefaultMaxHeaderBytes
+	}
+	// http2's count is in a slightly different unit and includes 32 bytes per pair.
+	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+	const perFieldOverhead = 32 // per http2 spec
+	const typicalHeaders = 10   // conservative
+	return uint32(n + typicalHeaders*perFieldOverhead)
+}
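+
+// For example (illustrative), with the net/http default of
+// MaxHeaderBytes == 1<<20 (1048576 bytes), the advertised
+// SETTINGS_MAX_HEADER_LIST_SIZE is 1048576 + 10*32 = 1048896.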
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+	// immutable:
+	sc        *serverConn
+	id        uint32
+	body      *pipe       // non-nil if expecting DATA frames
+	cw        closeWaiter // closed when the stream transitions to the closed state
+	ctx       contextContext
+	cancelCtx func()
+
+	// owned by serverConn's serve loop:
+	bodyBytes        int64   // body bytes seen so far
+	declBodyBytes    int64   // or -1 if undeclared
+	flow             flow    // limits writing from Handler to client
+	inflow           flow    // what the client is allowed to POST/etc to us
+	parent           *stream // or nil
+	numTrailerValues int64
+	weight           uint8
+	state            streamState
+	sentReset        bool // only true once detached from streams map
+	gotReset         bool // only true once detached from streams map
+	gotTrailerHeader bool // HEADER frame for trailers was seen
+	wroteHeaders     bool // whether we wrote headers (not status 100)
+	reqBuf           []byte
+
+	trailer    http.Header // accumulated trailers
+	reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer  { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+	return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+	sc.serveG.check()
+	// http://http2.github.io/http2-spec/#rfc.section.5.1
+	if st, ok := sc.streams[streamID]; ok {
+		return st.state, st
+	}
+	// "The first use of a new stream identifier implicitly closes all
+	// streams in the "idle" state that might have been initiated by
+	// that peer with a lower-valued stream identifier. For example, if
+	// a client sends a HEADERS frame on stream 7 without ever sending a
+	// frame on stream 5, then stream 5 transitions to the "closed"
+	// state when the first frame for stream 7 is sent or received."
+	if streamID <= sc.maxStreamID {
+		return stateClosed, nil
+	}
+	return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+	if sc.hs.ConnState != nil {
+		sc.hs.ConnState(sc.conn, state)
+	}
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+	if VerboseLogs {
+		sc.logf(format, args...)
+	}
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+	if lg := sc.hs.ErrorLog; lg != nil {
+		lg.Printf(format, args...)
+	} else {
+		log.Printf(format, args...)
+	}
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+		return uintptr(rv.Uint())
+	}
+	return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// TODO: remove this string search and be more like the Windows
+	// case below. That might involve modifying the standard library
+	// to return better error types.
+	str := err.Error()
+	if strings.Contains(str, "use of closed network connection") {
+		return true
+	}
+
+	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+	// build tags, so I can't make an http2_windows.go file with
+	// Windows-specific stuff. Fix that and move this, once we
+	// have a way to bundle this into std's net/http somehow.
+	if runtime.GOOS == "windows" {
+		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+				const WSAECONNABORTED = 10053
+				const WSAECONNRESET = 10054
+				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+	if err == nil {
+		return
+	}
+	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+		// Boring, expected errors.
+		sc.vlogf(format, args...)
+	} else {
+		sc.logf(format, args...)
+	}
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+	sc.serveG.check()
+	cv, ok := commonCanonHeader[v]
+	if ok {
+		return cv
+	}
+	cv, ok = sc.canonHeader[v]
+	if ok {
+		return cv
+	}
+	if sc.canonHeader == nil {
+		sc.canonHeader = make(map[string]string)
+	}
+	cv = http.CanonicalHeaderKey(v)
+	sc.canonHeader[v] = cv
+	return cv
+}
+
+type readFrameResult struct {
+	f   Frame // valid until readMore is called
+	err error
+
+	// readMore should be called once the consumer no longer needs or
+	// retains f. After readMore, f is invalid and more frames can be
+	// read.
+	readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+	gate := make(gate)
+	gateDone := gate.Done
+	for {
+		f, err := sc.framer.ReadFrame()
+		select {
+		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
+		case <-sc.doneServing:
+			return
+		}
+		select {
+		case <-gate:
+		case <-sc.doneServing:
+			return
+		}
+		if terminalReadFrameError(err) {
+			return
+		}
+	}
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+	wm  frameWriteMsg // what was written (or attempted)
+	err error         // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+	err := wm.write.writeFrame(sc)
+	sc.wroteFrameCh <- frameWriteResult{wm, err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+	sc.serveG.check()
+	for _, st := range sc.streams {
+		sc.closeStream(st, errClientDisconnected)
+	}
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+	sc.serveG.check()
+	if t := sc.shutdownTimer; t != nil {
+		t.Stop()
+	}
+}
+
+func (sc *serverConn) notePanic() {
+	// Note: this is for serverConn.serve panicking, not http.Handler code.
+	if testHookOnPanicMu != nil {
+		testHookOnPanicMu.Lock()
+		defer testHookOnPanicMu.Unlock()
+	}
+	if testHookOnPanic != nil {
+		if e := recover(); e != nil {
+			if testHookOnPanic(sc, e) {
+				panic(e)
+			}
+		}
+	}
+}
+
+func (sc *serverConn) serve() {
+	sc.serveG.check()
+	defer sc.notePanic()
+	defer sc.conn.Close()
+	defer sc.closeAllStreamsOnConnClose()
+	defer sc.stopShutdownTimer()
+	defer close(sc.doneServing) // unblocks handlers trying to send
+
+	if VerboseLogs {
+		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+	}
+
+	sc.writeFrame(frameWriteMsg{
+		write: writeSettings{
+			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+			{SettingMaxConcurrentStreams, sc.advMaxStreams},
+			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+
+			// TODO: more actual settings, notably
+			// SettingInitialWindowSize, but then we also
+			// want to bump up the conn window size the
+			// same amount here right after the settings
+		},
+	})
+	sc.unackedSettings++
+
+	if err := sc.readPreface(); err != nil {
+		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+		return
+	}
+	// Now that we've got the preface, get us out of the
+	// "StateNew" state.  We can't go directly to idle, though.
+	// Active means we read some data and anticipate a request. We'll
+	// do another Active when we get a HEADERS frame.
+	sc.setConnState(http.StateActive)
+	sc.setConnState(http.StateIdle)
+
+	go sc.readFrames() // closed by defer sc.conn.Close above
+
+	settingsTimer := time.NewTimer(firstSettingsTimeout)
+	loopNum := 0
+	for {
+		loopNum++
+		select {
+		case wm := <-sc.wantWriteFrameCh:
+			sc.writeFrame(wm)
+		case res := <-sc.wroteFrameCh:
+			sc.wroteFrame(res)
+		case res := <-sc.readFrameCh:
+			if !sc.processFrameFromReader(res) {
+				return
+			}
+			res.readMore()
+			if settingsTimer.C != nil {
+				settingsTimer.Stop()
+				settingsTimer.C = nil
+			}
+		case m := <-sc.bodyReadCh:
+			sc.noteBodyRead(m.st, m.n)
+		case <-settingsTimer.C:
+			sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+			return
+		case <-sc.shutdownTimerCh:
+			sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+			return
+		case fn := <-sc.testHookCh:
+			fn(loopNum)
+		}
+	}
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+	errc := make(chan error, 1)
+	go func() {
+		// Read the client preface
+		buf := make([]byte, len(ClientPreface))
+		if _, err := io.ReadFull(sc.conn, buf); err != nil {
+			errc <- err
+		} else if !bytes.Equal(buf, clientPreface) {
+			errc <- fmt.Errorf("bogus greeting %q", buf)
+		} else {
+			errc <- nil
+		}
+	}()
+	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+		return errors.New("timeout waiting for client preface")
+	case err := <-errc:
+		if err == nil {
+			if VerboseLogs {
+				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+			}
+		}
+		return err
+	}
+}
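+
+// For reference, ClientPreface (defined elsewhere in this package) is the
+// 24-byte connection preface "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" that every
+// HTTP/2 client must send first (RFC 7540 Section 3.5); clientPreface is its
+// []byte form compared against above.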
+
+var errChanPool = sync.Pool{
+	New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+	New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+	ch := errChanPool.Get().(chan error)
+	writeArg := writeDataPool.Get().(*writeData)
+	*writeArg = writeData{stream.id, data, endStream}
+	err := sc.writeFrameFromHandler(frameWriteMsg{
+		write:  writeArg,
+		stream: stream,
+		done:   ch,
+	})
+	if err != nil {
+		return err
+	}
+	var frameWriteDone bool // the frame write is done (successfully or not)
+	select {
+	case err = <-ch:
+		frameWriteDone = true
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-stream.cw:
+		// If both ch and stream.cw were ready (as might
+		// happen on the final Write after an http.Handler
+		// ends), prefer the write result. Otherwise this
+		// might just be us successfully closing the stream.
+		// The writeFrameAsync and serve goroutines guarantee
+		// that the ch send will happen before the stream.cw
+		// close.
+		select {
+		case err = <-ch:
+			frameWriteDone = true
+		default:
+			return errStreamClosed
+		}
+	}
+	errChanPool.Put(ch)
+	if frameWriteDone {
+		writeDataPool.Put(writeArg)
+	}
+	return err
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+	sc.serveG.checkNotOn() // NOT
+	select {
+	case sc.wantWriteFrameCh <- wm:
+		return nil
+	case <-sc.doneServing:
+		// Serve loop is gone.
+		// Client has closed their connection to the server.
+		return errClientDisconnected
+	}
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+	sc.serveG.check()
+
+	var ignoreWrite bool
+
+	// Don't send a 100-continue response if we've already sent headers.
+	// See golang.org/issue/14030.
+	switch wm.write.(type) {
+	case *writeResHeaders:
+		wm.stream.wroteHeaders = true
+	case write100ContinueHeadersFrame:
+		if wm.stream.wroteHeaders {
+			ignoreWrite = true
+		}
+	}
+
+	if !ignoreWrite {
+		sc.writeSched.add(wm)
+	}
+	sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world from the info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+	sc.serveG.check()
+	if sc.writingFrame {
+		panic("internal error: can only be writing one frame at a time")
+	}
+
+	st := wm.stream
+	if st != nil {
+		switch st.state {
+		case stateHalfClosedLocal:
+			panic("internal error: attempt to send frame on half-closed-local stream")
+		case stateClosed:
+			if st.sentReset || st.gotReset {
+				// Skip this frame.
+				sc.scheduleFrameWrite()
+				return
+			}
+			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+		}
+	}
+
+	sc.writingFrame = true
+	sc.needsFrameFlush = true
+	go sc.writeFrameAsync(wm)
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+	sc.serveG.check()
+	if !sc.writingFrame {
+		panic("internal error: expected to be already writing a frame")
+	}
+	sc.writingFrame = false
+
+	wm := res.wm
+	st := wm.stream
+
+	closeStream := endsStream(wm.write)
+
+	if _, ok := wm.write.(handlerPanicRST); ok {
+		sc.closeStream(st, errHandlerPanicked)
+	}
+
+	// Reply (if requested) to the blocked ServeHTTP goroutine.
+	if ch := wm.done; ch != nil {
+		select {
+		case ch <- res.err:
+		default:
+			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+		}
+	}
+	wm.write = nil // prevent use (assume it's tainted after wm.done send)
+
+	if closeStream {
+		if st == nil {
+			panic("internal error: expecting non-nil stream")
+		}
+		switch st.state {
+		case stateOpen:
+			// Here we would go to stateHalfClosedLocal in
+			// theory, but since our handler is done and
+			// the net/http package provides no mechanism
+			// for finishing writing to a ResponseWriter
+			// while still reading data (see possible TODO
+			// at top of this file), we go into closed
+			// state here anyway, after telling the peer
+			// we're hanging up on them.
+			st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+			errCancel := streamError(st.id, ErrCodeCancel)
+			sc.resetStream(errCancel)
+		case stateHalfClosedRemote:
+			sc.closeStream(st, errHandlerComplete)
+		}
+	}
+
+	sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+	sc.serveG.check()
+	if sc.writingFrame {
+		return
+	}
+	if sc.needToSendGoAway {
+		sc.needToSendGoAway = false
+		sc.startFrameWrite(frameWriteMsg{
+			write: &writeGoAway{
+				maxStreamID: sc.maxStreamID,
+				code:        sc.goAwayCode,
+			},
+		})
+		return
+	}
+	if sc.needToSendSettingsAck {
+		sc.needToSendSettingsAck = false
+		sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+		return
+	}
+	if !sc.inGoAway {
+		if wm, ok := sc.writeSched.take(); ok {
+			sc.startFrameWrite(wm)
+			return
+		}
+	}
+	if sc.needsFrameFlush {
+		sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+		sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+		return
+	}
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+	sc.serveG.check()
+	if sc.inGoAway {
+		return
+	}
+	if code != ErrCodeNo {
+		sc.shutDownIn(250 * time.Millisecond)
+	} else {
+		// TODO: configurable
+		sc.shutDownIn(1 * time.Second)
+	}
+	sc.inGoAway = true
+	sc.needToSendGoAway = true
+	sc.goAwayCode = code
+	sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+	sc.serveG.check()
+	sc.shutdownTimer = time.NewTimer(d)
+	sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+	sc.serveG.check()
+	sc.writeFrame(frameWriteMsg{write: se})
+	if st, ok := sc.streams[se.StreamID]; ok {
+		st.sentReset = true
+		sc.closeStream(st, se)
+	}
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+	sc.serveG.check()
+	err := res.err
+	if err != nil {
+		if err == ErrFrameTooLarge {
+			sc.goAway(ErrCodeFrameSize)
+			return true // goAway will close the loop
+		}
+		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+		if clientGone {
+			// TODO: could we also get into this state if
+			// the peer does a half close
+			// (e.g. CloseWrite) because they're done
+			// sending frames but they're still wanting
+			// our open replies?  Investigate.
+			// TODO: add CloseWrite to crypto/tls.Conn first
+			// so we have a way to test this? I suppose
+			// just for testing we could have a non-TLS mode.
+			return false
+		}
+	} else {
+		f := res.f
+		if VerboseLogs {
+			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+		}
+		err = sc.processFrame(f)
+		if err == nil {
+			return true
+		}
+	}
+
+	switch ev := err.(type) {
+	case StreamError:
+		sc.resetStream(ev)
+		return true
+	case goAwayFlowError:
+		sc.goAway(ErrCodeFlowControl)
+		return true
+	case ConnectionError:
+		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+		sc.goAway(ErrCode(ev))
+		return true // goAway will handle shutdown
+	default:
+		if res.err != nil {
+			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+		} else {
+			sc.logf("http2: server closing client connection: %v", err)
+		}
+		return false
+	}
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+	sc.serveG.check()
+
+	// First frame received must be SETTINGS.
+	if !sc.sawFirstSettings {
+		if _, ok := f.(*SettingsFrame); !ok {
+			return ConnectionError(ErrCodeProtocol)
+		}
+		sc.sawFirstSettings = true
+	}
+
+	switch f := f.(type) {
+	case *SettingsFrame:
+		return sc.processSettings(f)
+	case *MetaHeadersFrame:
+		return sc.processHeaders(f)
+	case *WindowUpdateFrame:
+		return sc.processWindowUpdate(f)
+	case *PingFrame:
+		return sc.processPing(f)
+	case *DataFrame:
+		return sc.processData(f)
+	case *RSTStreamFrame:
+		return sc.processResetStream(f)
+	case *PriorityFrame:
+		return sc.processPriority(f)
+	case *PushPromiseFrame:
+		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+		return ConnectionError(ErrCodeProtocol)
+	default:
+		sc.vlogf("http2: server ignoring frame: %v", f.Header())
+		return nil
+	}
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+	sc.serveG.check()
+	if f.IsAck() {
+		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
+		// containing this flag."
+		return nil
+	}
+	if f.StreamID != 0 {
+		// "PING frames are not associated with any individual
+		// stream. If a PING frame is received with a stream
+		// identifier field value other than 0x0, the recipient MUST
+		// respond with a connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+	return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+	sc.serveG.check()
+	switch {
+	case f.StreamID != 0: // stream-level flow control
+		st := sc.streams[f.StreamID]
+		if st == nil {
+			// "WINDOW_UPDATE can be sent by a peer that has sent a
+			// frame bearing the END_STREAM flag. This means that a
+			// receiver could receive a WINDOW_UPDATE frame on a "half
+			// closed (remote)" or "closed" stream. A receiver MUST
+			// NOT treat this as an error, see Section 5.1."
+			return nil
+		}
+		if !st.flow.add(int32(f.Increment)) {
+			return streamError(f.StreamID, ErrCodeFlowControl)
+		}
+	default: // connection-level flow control
+		if !sc.flow.add(int32(f.Increment)) {
+			return goAwayFlowError{}
+		}
+	}
+	sc.scheduleFrameWrite()
+	return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+	sc.serveG.check()
+
+	state, st := sc.state(f.StreamID)
+	if state == stateIdle {
+		// 6.4 "RST_STREAM frames MUST NOT be sent for a
+		// stream in the "idle" state. If a RST_STREAM frame
+		// identifying an idle stream is received, the
+		// recipient MUST treat this as a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if st != nil {
+		st.gotReset = true
+		st.cancelCtx()
+		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
+	}
+	return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+	sc.serveG.check()
+	if st.state == stateIdle || st.state == stateClosed {
+		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+	}
+	st.state = stateClosed
+	sc.curOpenStreams--
+	if sc.curOpenStreams == 0 {
+		sc.setConnState(http.StateIdle)
+	}
+	delete(sc.streams, st.id)
+	if p := st.body; p != nil {
+		// Return any buffered unread bytes worth of conn-level flow control.
+		// See golang.org/issue/16481
+		sc.sendWindowUpdate(nil, p.Len())
+
+		p.CloseWithError(err)
+	}
+	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+	sc.writeSched.forgetStream(st.id)
+	if st.reqBuf != nil {
+		// Stash this request body buffer (64k) away for reuse
+		// by a future POST/PUT/etc.
+		//
+		// TODO(bradfitz): share on the server? sync.Pool?
+		// Server requires locks and might hurt contention.
+		// sync.Pool might work, or might be worse, depending
+		// on goroutine CPU migrations. (get and put on
+		// separate CPUs).  Maybe a mix of strategies. But
+		// this is an easy win for now.
+		sc.freeRequestBodyBuf = st.reqBuf
+	}
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+	sc.serveG.check()
+	if f.IsAck() {
+		sc.unackedSettings--
+		if sc.unackedSettings < 0 {
+			// Why is the peer ACKing settings we never sent?
+			// The spec doesn't mention this case, but
+			// hang up on them anyway.
+			return ConnectionError(ErrCodeProtocol)
+		}
+		return nil
+	}
+	if err := f.ForeachSetting(sc.processSetting); err != nil {
+		return err
+	}
+	sc.needToSendSettingsAck = true
+	sc.scheduleFrameWrite()
+	return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+	sc.serveG.check()
+	if err := s.Valid(); err != nil {
+		return err
+	}
+	if VerboseLogs {
+		sc.vlogf("http2: server processing setting %v", s)
+	}
+	switch s.ID {
+	case SettingHeaderTableSize:
+		sc.headerTableSize = s.Val
+		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+	case SettingEnablePush:
+		sc.pushEnabled = s.Val != 0
+	case SettingMaxConcurrentStreams:
+		sc.clientMaxStreams = s.Val
+	case SettingInitialWindowSize:
+		return sc.processSettingInitialWindowSize(s.Val)
+	case SettingMaxFrameSize:
+		sc.writeSched.maxFrameSize = s.Val
+	case SettingMaxHeaderListSize:
+		sc.peerMaxHeaderListSize = s.Val
+	default:
+		// Unknown setting: "An endpoint that receives a SETTINGS
+		// frame with any unknown or unsupported identifier MUST
+		// ignore that setting."
+		if VerboseLogs {
+			sc.vlogf("http2: server ignoring unknown setting %v", s)
+		}
+	}
+	return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+	sc.serveG.check()
+	// Note: val already validated to be within range by
+	// processSetting's Valid call.
+
+	// "A SETTINGS frame can alter the initial flow control window
+	// size for all current streams. When the value of
+	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+	// adjust the size of all stream flow control windows that it
+	// maintains by the difference between the new value and the
+	// old value."
+	old := sc.initialWindowSize
+	sc.initialWindowSize = int32(val)
+	growth := sc.initialWindowSize - old // may be negative
+	for _, st := range sc.streams {
+		if !st.flow.add(growth) {
+			// 6.9.2 Initial Flow Control Window Size
+			// "An endpoint MUST treat a change to
+			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+			// control window to exceed the maximum size as a
+			// connection error (Section 5.4.1) of type
+			// FLOW_CONTROL_ERROR."
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	}
+	return nil
+}
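+
+// Worked example (hypothetical values): if the previous initial window size
+// was 65535 and the peer now advertises 131070, growth is +65535 and every
+// open stream's send window grows by 65535; a new value of 16384 instead
+// yields growth of -49151, shrinking each window (possibly below zero, which
+// is legal per RFC 7540 Section 6.9.2).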
+
+func (sc *serverConn) processData(f *DataFrame) error {
+	sc.serveG.check()
+	data := f.Data()
+
+	// "If a DATA frame is received whose stream is not in "open"
+	// or "half closed (local)" state, the recipient MUST respond
+	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+	id := f.Header().StreamID
+	st, ok := sc.streams[id]
+	if !ok || st.state != stateOpen || st.gotTrailerHeader {
+		// This includes sending a RST_STREAM if the stream is
+		// in stateHalfClosedLocal (which currently means that
+		// the http.Handler returned, so it's done reading &
+		// done writing). Try to stop the client from sending
+		// more DATA.
+
+		// But still enforce their connection-level flow control,
+		// and return any flow control bytes since we're not going
+		// to consume them.
+		if sc.inflow.available() < int32(f.Length) {
+			return streamError(id, ErrCodeFlowControl)
+		}
+		// Deduct the flow control from inflow, since we're
+		// going to immediately add it back in
+		// sendWindowUpdate, which also schedules sending the
+		// frames.
+		sc.inflow.take(int32(f.Length))
+		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+		return streamError(id, ErrCodeStreamClosed)
+	}
+	if st.body == nil {
+		panic("internal error: should have a body in this state")
+	}
+
+	// Sender sending more than they'd declared?
+	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+		return streamError(id, ErrCodeStreamClosed)
+	}
+	if f.Length > 0 {
+		// Check whether the client has flow control quota.
+		if st.inflow.available() < int32(f.Length) {
+			return streamError(id, ErrCodeFlowControl)
+		}
+		st.inflow.take(int32(f.Length))
+
+		if len(data) > 0 {
+			wrote, err := st.body.Write(data)
+			if err != nil {
+				return streamError(id, ErrCodeStreamClosed)
+			}
+			if wrote != len(data) {
+				panic("internal error: bad Writer")
+			}
+			st.bodyBytes += int64(len(data))
+		}
+
+		// Return any padded flow control now, since we won't
+		// refund it later on body reads.
+		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+			sc.sendWindowUpdate32(nil, pad)
+			sc.sendWindowUpdate32(st, pad)
+		}
+	}
+	if f.StreamEnded() {
+		st.endStream()
+	}
+	return nil
+}
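+
+// Note on the padding refund above (illustrative): for a DATA frame whose
+// Length is 1000 but whose payload is only 990 bytes, the 10-byte difference
+// (padding overhead) is immediately returned via WINDOW_UPDATE at both the
+// stream and connection level, because later body reads only refund bytes the
+// handler actually consumed.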
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+	sc := st.sc
+	sc.serveG.check()
+
+	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+			st.declBodyBytes, st.bodyBytes))
+	} else {
+		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+		st.body.CloseWithError(io.EOF)
+	}
+	st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+	for k, vv := range st.trailer {
+		if _, ok := st.reqTrailer[k]; ok {
+			// Only copy it over if it was pre-declared.
+			st.reqTrailer[k] = vv
+		}
+	}
+}
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+	sc.serveG.check()
+	id := f.Header().StreamID
+	if sc.inGoAway {
+		// Ignore.
+		return nil
+	}
+	// http://http2.github.io/http2-spec/#rfc.section.5.1.1
+	// Streams initiated by a client MUST use odd-numbered stream
+	// identifiers. [...] An endpoint that receives an unexpected
+	// stream identifier MUST respond with a connection error
+	// (Section 5.4.1) of type PROTOCOL_ERROR.
+	if id%2 != 1 {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	// A HEADERS frame can be used to create a new stream or
+	// send a trailer for an open one. If we already have a stream
+	// open, let it process its own HEADERS frame (trailers at this
+	// point, if it's valid).
+	st := sc.streams[f.Header().StreamID]
+	if st != nil {
+		return st.processTrailerHeaders(f)
+	}
+
+	// [...] The identifier of a newly established stream MUST be
+	// numerically greater than all streams that the initiating
+	// endpoint has opened or reserved. [...]  An endpoint that
+	// receives an unexpected stream identifier MUST respond with
+	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+	if id <= sc.maxStreamID {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	sc.maxStreamID = id
+
+	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+	st = &stream{
+		sc:        sc,
+		id:        id,
+		state:     stateOpen,
+		ctx:       ctx,
+		cancelCtx: cancelCtx,
+	}
+	if f.StreamEnded() {
+		st.state = stateHalfClosedRemote
+	}
+	st.cw.Init()
+
+	st.flow.conn = &sc.flow // link to conn-level counter
+	st.flow.add(sc.initialWindowSize)
+	st.inflow.conn = &sc.inflow      // link to conn-level counter
+	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+	sc.streams[id] = st
+	if f.HasPriority() {
+		adjustStreamPriority(sc.streams, st.id, f.Priority)
+	}
+	sc.curOpenStreams++
+	if sc.curOpenStreams == 1 {
+		sc.setConnState(http.StateActive)
+	}
+	if sc.curOpenStreams > sc.advMaxStreams {
+		// "Endpoints MUST NOT exceed the limit set by their
+		// peer. An endpoint that receives a HEADERS frame
+		// that causes their advertised concurrent stream
+		// limit to be exceeded MUST treat this as a stream
+		// error (Section 5.4.2) of type PROTOCOL_ERROR or
+		// REFUSED_STREAM."
+		if sc.unackedSettings == 0 {
+			// They should know better.
+			return streamError(st.id, ErrCodeProtocol)
+		}
+		// Assume it's a network race, where they just haven't
+		// received our last SETTINGS update. But actually
+		// this can't happen yet, because we don't yet provide
+		// a way for users to adjust server parameters at
+		// runtime.
+		return streamError(st.id, ErrCodeRefusedStream)
+	}
+
+	rw, req, err := sc.newWriterAndRequest(st, f)
+	if err != nil {
+		return err
+	}
+	st.reqTrailer = req.Trailer
+	if st.reqTrailer != nil {
+		st.trailer = make(http.Header)
+	}
+	st.body = req.Body.(*requestBody).pipe // may be nil
+	st.declBodyBytes = req.ContentLength
+
+	handler := sc.handler.ServeHTTP
+	if f.Truncated {
+		// Their header list was too long. Send a 431 error.
+		handler = handleHeaderListTooLong
+	} else if err := checkValidHTTP2Request(req); err != nil {
+		handler = new400Handler(err)
+	}
+
+	// The net/http package sets the read deadline from the
+	// http.Server.ReadTimeout during the TLS handshake, but then
+	// passes the connection off to us with the deadline already
+	// set. Disarm it here after the request headers are read, similar
+	// to how the http1 server works.
+	// Unlike http1, though, we never re-arm it yet.
+	// TODO(bradfitz): figure out golang.org/issue/14204
+	// (IdleTimeout) and how this relates. Maybe the default
+	// IdleTimeout is ReadTimeout.
+	if sc.hs.ReadTimeout != 0 {
+		sc.conn.SetReadDeadline(time.Time{})
+	}
+
+	go sc.runHandler(rw, req, handler)
+	return nil
+}
+
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+	sc := st.sc
+	sc.serveG.check()
+	if st.gotTrailerHeader {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	st.gotTrailerHeader = true
+	if !f.StreamEnded() {
+		return streamError(st.id, ErrCodeProtocol)
+	}
+
+	if len(f.PseudoFields()) > 0 {
+		return streamError(st.id, ErrCodeProtocol)
+	}
+	if st.trailer != nil {
+		for _, hf := range f.RegularFields() {
+			key := sc.canonicalHeader(hf.Name)
+			if !ValidTrailerHeader(key) {
+				// TODO: send more details to the peer somehow. But http2 has
+				// no way to send debug data at a stream level. Discuss with
+				// HTTP folk.
+				return streamError(st.id, ErrCodeProtocol)
+			}
+			st.trailer[key] = append(st.trailer[key], hf.Value)
+		}
+	}
+	st.endStream()
+	return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+	adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+	return nil
+}
+
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+	st, ok := streams[streamID]
+	if !ok {
+		// TODO: not quite correct (this streamID might
+		// already exist in the dep tree, but be closed), but
+		// close enough for now.
+		return
+	}
+	st.weight = priority.Weight
+	parent := streams[priority.StreamDep] // might be nil
+	if parent == st {
+		// If the client tries to set this stream to be the parent of itself,
+		// ignore it and keep going.
+		return
+	}
+
+	// section 5.3.3: If a stream is made dependent on one of its
+	// own dependencies, the formerly dependent stream is first
+	// moved to be dependent on the reprioritized stream's previous
+	// parent. The moved dependency retains its weight.
+	for piter := parent; piter != nil; piter = piter.parent {
+		if piter == st {
+			parent.parent = st.parent
+			break
+		}
+	}
+	st.parent = parent
+	if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+		for _, openStream := range streams {
+			if openStream != st && openStream.parent == st.parent {
+				openStream.parent = st
+			}
+		}
+	}
+}
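+
+// Illustrative example of the Section 5.3.3 rule above: suppose stream B
+// currently depends on stream A, and the client sends a PRIORITY frame making
+// A depend on B. B is found on the would-be parent chain, so B is first moved
+// to A's former parent, and only then does A become a child of B.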
+
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+
+	method := f.PseudoValue("method")
+	path := f.PseudoValue("path")
+	scheme := f.PseudoValue("scheme")
+	authority := f.PseudoValue("authority")
+
+	isConnect := method == "CONNECT"
+	if isConnect {
+		if path != "" || scheme != "" || authority == "" {
+			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+		}
+	} else if method == "" || path == "" ||
+		(scheme != "https" && scheme != "http") {
+		// See 8.1.2.6 Malformed Requests and Responses:
+		//
+		// Malformed requests or responses that are detected
+		// MUST be treated as a stream error (Section 5.4.2)
+		// of type PROTOCOL_ERROR."
+		//
+		// 8.1.2.3 Request Pseudo-Header Fields
+		// "All HTTP/2 requests MUST include exactly one valid
+		// value for the :method, :scheme, and :path
+		// pseudo-header fields"
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+	}
+
+	bodyOpen := !f.StreamEnded()
+	if method == "HEAD" && bodyOpen {
+		// HEAD requests can't have bodies
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+	}
+	var tlsState *tls.ConnectionState // nil if not scheme https
+
+	if scheme == "https" {
+		tlsState = sc.tlsState
+	}
+
+	header := make(http.Header)
+	for _, hf := range f.RegularFields() {
+		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+	}
+
+	if authority == "" {
+		authority = header.Get("Host")
+	}
+	needsContinue := header.Get("Expect") == "100-continue"
+	if needsContinue {
+		header.Del("Expect")
+	}
+	// Merge Cookie headers into one "; "-delimited value.
+	if cookies := header["Cookie"]; len(cookies) > 1 {
+		header.Set("Cookie", strings.Join(cookies, "; "))
+	}
+
+	// Setup Trailers
+	var trailer http.Header
+	for _, v := range header["Trailer"] {
+		for _, key := range strings.Split(v, ",") {
+			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+			switch key {
+			case "Transfer-Encoding", "Trailer", "Content-Length":
+				// Bogus. (copy of http1 rules)
+				// Ignore.
+			default:
+				if trailer == nil {
+					trailer = make(http.Header)
+				}
+				trailer[key] = nil
+			}
+		}
+	}
+	delete(header, "Trailer")
+
+	body := &requestBody{
+		conn:          sc,
+		stream:        st,
+		needsContinue: needsContinue,
+	}
+	var url_ *url.URL
+	var requestURI string
+	if isConnect {
+		url_ = &url.URL{Host: authority}
+		requestURI = authority // mimic HTTP/1 server behavior
+	} else {
+		var err error
+		url_, err = url.ParseRequestURI(path)
+		if err != nil {
+			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+		}
+		requestURI = path
+	}
+	req := &http.Request{
+		Method:     method,
+		URL:        url_,
+		RemoteAddr: sc.remoteAddrStr,
+		Header:     header,
+		RequestURI: requestURI,
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		ProtoMinor: 0,
+		TLS:        tlsState,
+		Host:       authority,
+		Body:       body,
+		Trailer:    trailer,
+	}
+	req = requestWithContext(req, st.ctx)
+	if bodyOpen {
+		// Disabled, per golang.org/issue/14960:
+		// st.reqBuf = sc.getRequestBodyBuf()
+		// TODO: remove this 64k of garbage per request (again, but without a data race):
+		buf := make([]byte, initialWindowSize)
+
+		body.pipe = &pipe{
+			b: &fixedBuffer{buf: buf},
+		}
+
+		if vv, ok := header["Content-Length"]; ok {
+			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+		} else {
+			req.ContentLength = -1
+		}
+	}
+
+	rws := responseWriterStatePool.Get().(*responseWriterState)
+	bwSave := rws.bw
+	*rws = responseWriterState{} // zero all the fields
+	rws.conn = sc
+	rws.bw = bwSave
+	rws.bw.Reset(chunkWriter{rws})
+	rws.stream = st
+	rws.req = req
+	rws.body = body
+
+	rw := &responseWriter{rws: rws}
+	return rw, req, nil
+}
+
+func (sc *serverConn) getRequestBodyBuf() []byte {
+	sc.serveG.check()
+	if buf := sc.freeRequestBodyBuf; buf != nil {
+		sc.freeRequestBodyBuf = nil
+		return buf
+	}
+	return make([]byte, initialWindowSize)
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	didPanic := true
+	defer func() {
+		rw.rws.stream.cancelCtx()
+		if didPanic {
+			e := recover()
+			// Same as net/http:
+			const size = 64 << 10
+			buf := make([]byte, size)
+			buf = buf[:runtime.Stack(buf, false)]
+			sc.writeFrameFromHandler(frameWriteMsg{
+				write:  handlerPanicRST{rw.rws.stream.id},
+				stream: rw.rws.stream,
+			})
+			sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+			return
+		}
+		rw.handlerDone()
+	}()
+	handler(rw, req)
+	didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+	// 10.5.1 Limits on Header Block Size:
+	// .. "A server that receives a larger header block than it is
+	// willing to handle can send an HTTP 431 (Request Header Fields Too
+	// Large) status code"
+	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+	sc.serveG.checkNotOn() // NOT on
+	var errc chan error
+	if headerData.h != nil {
+		// If there's a header map (which we don't own), we have to block on
+		// waiting for this frame to be written, so that an http.Flush mid-handler
+		// writes out the correct value of keys before a handler later potentially
+		// mutates it.
+		errc = errChanPool.Get().(chan error)
+	}
+	if err := sc.writeFrameFromHandler(frameWriteMsg{
+		write:  headerData,
+		stream: st,
+		done:   errc,
+	}); err != nil {
+		return err
+	}
+	if errc != nil {
+		select {
+		case err := <-errc:
+			errChanPool.Put(errc)
+			return err
+		case <-sc.doneServing:
+			return errClientDisconnected
+		case <-st.cw:
+			return errStreamClosed
+		}
+	}
+	return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+	sc.writeFrameFromHandler(frameWriteMsg{
+		write:  write100ContinueHeadersFrame{st.id},
+		stream: st,
+	})
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+	st *stream
+	n  int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+	sc.serveG.checkNotOn() // NOT on
+	select {
+	case sc.bodyReadCh <- bodyReadMsg{st, n}:
+	case <-sc.doneServing:
+	}
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+	sc.serveG.check()
+	sc.sendWindowUpdate(nil, n) // conn-level
+	if st.state != stateHalfClosedRemote && st.state != stateClosed {
+		// Don't send this WINDOW_UPDATE if the stream is closed
+		// remotely.
+		sc.sendWindowUpdate(st, n)
+	}
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+	sc.serveG.check()
+	// "The legal range for the increment to the flow control
+	// window is 1 to 2^31-1 (2,147,483,647) octets."
+	// A Go Read call on 64-bit machines could in theory read
+	// a larger Read than this. Very unlikely, but we handle it here
+	// rather than elsewhere for now.
+	const maxUint31 = 1<<31 - 1
+	for n >= maxUint31 {
+		sc.sendWindowUpdate32(st, maxUint31)
+		n -= maxUint31
+	}
+	sc.sendWindowUpdate32(st, int32(n))
+}
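+
+// Worked example (hypothetical): a single Read of 5,000,000,000 bytes would
+// be split here into WINDOW_UPDATE increments of 2147483647, 2147483647, and
+// 705032706, since each increment must fit in 31 bits.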
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+	sc.serveG.check()
+	if n == 0 {
+		return
+	}
+	if n < 0 {
+		panic("negative update")
+	}
+	var streamID uint32
+	if st != nil {
+		streamID = st.id
+	}
+	sc.writeFrame(frameWriteMsg{
+		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
+		stream: st,
+	})
+	var ok bool
+	if st == nil {
+		ok = sc.inflow.add(n)
+	} else {
+		ok = st.inflow.add(n)
+	}
+	if !ok {
+		panic("internal error; sent too many window updates without decrements?")
+	}
+}
+
+type requestBody struct {
+	stream        *stream
+	conn          *serverConn
+	closed        bool
+	pipe          *pipe // non-nil if we have a HTTP entity message body
+	needsContinue bool  // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+	if b.pipe != nil {
+		b.pipe.BreakWithError(errClosedBody)
+	}
+	b.closed = true
+	return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+	if b.needsContinue {
+		b.needsContinue = false
+		b.conn.write100ContinueHeaders(b.stream)
+	}
+	if b.pipe == nil {
+		return 0, io.EOF
+	}
+	n, err = b.pipe.Read(p)
+	if n > 0 {
+		b.conn.noteBodyReadFromHandler(b.stream, n)
+	}
+	return
+}
+
+// responseWriter is the http.ResponseWriter implementation.  It's
+// intentionally small (1 pointer wide) to minimize garbage.  The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+	rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+	_ http.CloseNotifier = (*responseWriter)(nil)
+	_ http.Flusher       = (*responseWriter)(nil)
+	_ stringWriter       = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+	// immutable within a request:
+	stream *stream
+	req    *http.Request
+	body   *requestBody // to close at end of request, if DATA frames didn't
+	conn   *serverConn
+
+	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+	// mutated by http.Handler goroutine:
+	handlerHeader http.Header // nil until called
+	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
+	trailers      []string    // set in writeChunk
+	status        int         // status code passed to WriteHeader
+	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+	sentHeader    bool        // have we sent the header frame?
+	handlerDone   bool        // handler has finished
+
+	sentContentLen int64 // non-zero if handler set a Content-Length header
+	wroteBytes     int64
+
+	closeNotifierMu sync.Mutex // guards closeNotifierCh
+	closeNotifierCh chan bool  // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+	k = http.CanonicalHeaderKey(k)
+	if !ValidTrailerHeader(k) {
+		// Forbidden by RFC 2616 14.40.
+		rws.conn.logf("ignoring invalid trailer %q", k)
+		return
+	}
+	if !strSliceContains(rws.trailers, k) {
+		rws.trailers = append(rws.trailers, k)
+	}
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+	if !rws.wroteHeader {
+		rws.writeHeader(200)
+	}
+
+	isHeadResp := rws.req.Method == "HEAD"
+	if !rws.sentHeader {
+		rws.sentHeader = true
+		var ctype, clen string
+		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+			rws.snapHeader.Del("Content-Length")
+			clen64, err := strconv.ParseInt(clen, 10, 64)
+			if err == nil && clen64 >= 0 {
+				rws.sentContentLen = clen64
+			} else {
+				clen = ""
+			}
+		}
+		if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+			clen = strconv.Itoa(len(p))
+		}
+		_, hasContentType := rws.snapHeader["Content-Type"]
+		if !hasContentType && bodyAllowedForStatus(rws.status) {
+			ctype = http.DetectContentType(p)
+		}
+		var date string
+		if _, ok := rws.snapHeader["Date"]; !ok {
+			// TODO(bradfitz): be faster here, like net/http? measure.
+			date = time.Now().UTC().Format(http.TimeFormat)
+		}
+
+		for _, v := range rws.snapHeader["Trailer"] {
+			foreachHeaderElement(v, rws.declareTrailer)
+		}
+
+		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:      rws.stream.id,
+			httpResCode:   rws.status,
+			h:             rws.snapHeader,
+			endStream:     endStream,
+			contentType:   ctype,
+			contentLength: clen,
+			date:          date,
+		})
+		if err != nil {
+			return 0, err
+		}
+		if endStream {
+			return 0, nil
+		}
+	}
+	if isHeadResp {
+		return len(p), nil
+	}
+	if len(p) == 0 && !rws.handlerDone {
+		return 0, nil
+	}
+
+	if rws.handlerDone {
+		rws.promoteUndeclaredTrailers()
+	}
+
+	endStream := rws.handlerDone && !rws.hasTrailers()
+	if len(p) > 0 || endStream {
+		// only send a 0 byte DATA frame if we're ending the stream.
+		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+			return 0, err
+		}
+	}
+
+	if rws.handlerDone && rws.hasTrailers() {
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:  rws.stream.id,
+			h:         rws.handlerHeader,
+			trailers:  rws.trailers,
+			endStream: true,
+		})
+		return len(p), err
+	}
+	return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//    https://golang.org/pkg/net/http/#ResponseWriter
+//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
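+
+// A minimal usage sketch of the prefix mechanism (the "Checksum" name and its
+// value are hypothetical):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		io.WriteString(w, "hello")
+//		// This key is promoted to a "Checksum" trailer after the handler returns:
+//		w.Header().Set("Trailer:Checksum", "abc123")
+//	}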
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers.  When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+	for k, vv := range rws.handlerHeader {
+		if !strings.HasPrefix(k, TrailerPrefix) {
+			continue
+		}
+		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+		rws.declareTrailer(trailerKey)
+		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+	}
+
+	if len(rws.trailers) > 1 {
+		sorter := sorterPool.Get().(*sorter)
+		sorter.SortStrings(rws.trailers)
+		sorterPool.Put(sorter)
+	}
+}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk) with zero bytes, so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		go func() {
+			rws.stream.cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+	rws := w.rws
+	if rws == nil {
+		panic("WriteHeader called after Handler finished")
+	}
+	rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+	if !rws.wroteHeader {
+		rws.wroteHeader = true
+		rws.status = code
+		if len(rws.handlerHeader) > 0 {
+			rws.snapHeader = cloneHeader(rws.handlerHeader)
+		}
+	}
+}
+
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+	return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+	return w.write(len(s), nil, s)
+}
+
+// either dataB or dataS is non-zero.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+	rws := w.rws
+	if rws == nil {
+		panic("Write called after Handler finished")
+	}
+	if !rws.wroteHeader {
+		w.WriteHeader(200)
+	}
+	if !bodyAllowedForStatus(rws.status) {
+		return 0, http.ErrBodyNotAllowed
+	}
+	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+		// TODO: send a RST_STREAM
+		return 0, errors.New("http2: handler wrote more than declared Content-Length")
+	}
+
+	if dataB != nil {
+		return rws.bw.Write(dataB)
+	} else {
+		return rws.bw.WriteString(dataS)
+	}
+}
+
+func (w *responseWriter) handlerDone() {
+	rws := w.rws
+	rws.handlerDone = true
+	w.Flush()
+	w.rws = nil
+	responseWriterStatePool.Put(rws)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+	v = textproto.TrimString(v)
+	if v == "" {
+		return
+	}
+	if !strings.Contains(v, ",") {
+		fn(v)
+		return
+	}
+	for _, f := range strings.Split(v, ",") {
+		if f = textproto.TrimString(f); f != "" {
+			fn(f)
+		}
+	}
+}
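+
+// For example, a Trailer header value of "Content-MD5, X-Request-Id" invokes
+// fn("Content-MD5") and fn("X-Request-Id"); empty elements, as in "a,,b",
+// are skipped.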
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+	"Connection",
+	"Keep-Alive",
+	"Proxy-Connection",
+	"Transfer-Encoding",
+	"Upgrade",
+}
+
+// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2Request(req *http.Request) error {
+	for _, h := range connHeaders {
+		if _, ok := req.Header[h]; ok {
+			return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+		}
+	}
+	te := req.Header["Te"]
+	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+	}
+	return nil
+}
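+
+// For example, a request carrying "Connection: keep-alive" or "TE: gzip" is
+// rejected above, while "TE: trailers" (or an empty TE value) is accepted.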
+
+func new400Handler(err error) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+	}
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
+func ValidTrailerHeader(name string) bool {
+	name = http.CanonicalHeaderKey(name)
+	if strings.HasPrefix(name, "If-") || badTrailer[name] {
+		return false
+	}
+	return true
+}
+
+var badTrailer = map[string]bool{
+	"Authorization":       true,
+	"Cache-Control":       true,
+	"Connection":          true,
+	"Content-Encoding":    true,
+	"Content-Length":      true,
+	"Content-Range":       true,
+	"Content-Type":        true,
+	"Expect":              true,
+	"Host":                true,
+	"Keep-Alive":          true,
+	"Max-Forwards":        true,
+	"Pragma":              true,
+	"Proxy-Authenticate":  true,
+	"Proxy-Authorization": true,
+	"Proxy-Connection":    true,
+	"Range":               true,
+	"Realm":               true,
+	"Te":                  true,
+	"Trailer":             true,
+	"Transfer-Encoding":   true,
+	"Www-Authenticate":    true,
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..b939fed
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,2084 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/rand"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/idna"
+	"golang.org/x/net/lex/httplex"
+)
+
+const (
+	// transportDefaultConnFlow is how many connection-level flow control
+	// tokens we give the server at start-up, past the default 64k.
+	transportDefaultConnFlow = 1 << 30
+
+	// transportDefaultStreamFlow is how many stream-level flow
+	// control tokens we announce to the peer, and how many bytes
+	// we buffer per stream.
+	transportDefaultStreamFlow = 4 << 20
+
+	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+	// a stream-level WINDOW_UPDATE for at a time.
+	transportDefaultStreamMinRefresh = 4 << 10
+
+	defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+	// DialTLS specifies an optional dial function for creating
+	// TLS connections for requests.
+	//
+	// If DialTLS is nil, tls.Dial is used.
+	//
+	// If the returned net.Conn has a ConnectionState method like tls.Conn,
+	// it will be used to set http.Response.TLS.
+	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with
+	// tls.Client. If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// ConnPool optionally specifies an alternate connection pool to use.
+	// If nil, the default is used.
+	ConnPool ClientConnPool
+
+	// DisableCompression, if true, prevents the Transport from
+	// requesting compression with an "Accept-Encoding: gzip"
+	// request header when the Request contains no existing
+	// Accept-Encoding value. If the Transport requests gzip on
+	// its own and gets a gzipped response, it's transparently
+	// decoded in the Response.Body. However, if the user
+	// explicitly requested gzip it is not automatically
+	// uncompressed.
+	DisableCompression bool
+
+	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+	// plain-text "http" scheme. Note that this does not enable h2c support.
+	AllowHTTP bool
+
+	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+	// send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+	// interprets the highest possible value here (0xffffffff or 1<<32-1)
+	// to mean no limit.
+	MaxHeaderListSize uint32
+
+	// t1, if non-nil, is the standard library Transport using
+	// this transport. Its settings are used (but not its
+	// RoundTrip method, etc).
+	t1 *http.Transport
+
+	connPoolOnce  sync.Once
+	connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+	if t.MaxHeaderListSize == 0 {
+		return 10 << 20
+	}
+	if t.MaxHeaderListSize == 0xffffffff {
+		return 0
+	}
+	return t.MaxHeaderListSize
+}
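+
+// So, per the rules above: a MaxHeaderListSize of 0 maps to the 10MB default,
+// 0xffffffff maps to 0 (which the Transport treats as "no limit"), and any
+// other value is used as-is.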
+
+func (t *Transport) disableCompression() bool {
+	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+	_, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+	return err
+}
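+
+// A minimal caller-side sketch (assumes Go 1.6+; the variable names and
+// error handling are illustrative only):
+//
+//	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
+//	if err := http2.ConfigureTransport(t1); err != nil {
+//		log.Fatal(err)
+//	}
+//	client := &http.Client{Transport: t1}
+//	// client now uses HTTP/2 for servers that negotiate it over TLS.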
+
+func (t *Transport) connPool() ClientConnPool {
+	t.connPoolOnce.Do(t.initConnPool)
+	return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+	if t.ConnPool != nil {
+		t.connPoolOrDef = t.ConnPool
+	} else {
+		t.connPoolOrDef = &clientConnPool{t: t}
+	}
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+	t         *Transport
+	tconn     net.Conn             // usually *tls.Conn, except specialized impls
+	tlsState  *tls.ConnectionState // nil only for specialized impls
+	singleUse bool                 // whether being used for a single http.Request
+
+	// readLoop goroutine fields:
+	readerDone chan struct{} // closed on error
+	readerErr  error         // set before readerDone is closed
+
+	idleTimeout time.Duration // or 0 for never
+	idleTimer   *time.Timer
+
+	mu              sync.Mutex // guards following
+	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
+	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
+	inflow          flow       // peer's conn-level flow control
+	closed          bool
+	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
+	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
+	goAwayDebug     string                   // goAway frame's debug data, retained as a string
+	streams         map[uint32]*clientStream // client-initiated
+	nextStreamID    uint32
+	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
+	bw              *bufio.Writer
+	br              *bufio.Reader
+	fr              *Framer
+	lastActive      time.Time
+	// Settings from peer: (also guarded by mu)
+	maxFrameSize         uint32
+	maxConcurrentStreams uint32
+	initialWindowSize    uint32
+
+	hbuf    bytes.Buffer // HPACK encoder writes into this
+	henc    *hpack.Encoder
+	freeBuf [][]byte
+
+	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
+	werr error      // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+	cc            *ClientConn
+	req           *http.Request
+	trace         *clientTrace // or nil
+	ID            uint32
+	resc          chan resAndError
+	bufPipe       pipe // buffered pipe with the flow-controlled response payload
+	requestedGzip bool
+	on100         func() // optional code to run if we get a 100 continue response
+
+	flow        flow  // guarded by cc.mu
+	inflow      flow  // guarded by cc.mu
+	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+	readErr     error // sticky read error; owned by transportResponseBody.Read
+	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+
+	peerReset chan struct{} // closed on peer reset
+	resetErr  error         // populated before peerReset is closed
+
+	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
+
+	// owned by clientConnReadLoop:
+	firstByte    bool // got the first response byte
+	pastHeaders  bool // got first MetaHeadersFrame (actual headers)
+	pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+	trailer    http.Header  // accumulated trailers
+	resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc)
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+	ctx := reqContext(req)
+	if req.Cancel == nil && ctx.Done() == nil {
+		return
+	}
+	select {
+	case <-req.Cancel:
+		cs.bufPipe.CloseWithError(errRequestCanceled)
+		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+	case <-ctx.Done():
+		cs.bufPipe.CloseWithError(ctx.Err())
+		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+	case <-cs.done:
+	}
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+	select {
+	case <-cs.peerReset:
+		return cs.resetErr
+	case <-cs.done:
+		return errStreamClosed
+	default:
+		return nil
+	}
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+	if err == nil {
+		panic("nil error")
+	}
+	cc := cs.cc
+	cc.mu.Lock()
+	cs.stopReqBody = err
+	cc.cond.Broadcast()
+	cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+	w   io.Writer
+	err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+	if *sew.err != nil {
+		return 0, *sew.err
+	}
+	n, err = sew.w.Write(p)
+	*sew.err = err
+	return
+}
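+
+// Illustrative use of stickyErrWriter (a hedged sketch mirroring newClientConn
+// below, not additional API; conn is any net.Conn): wrap the connection so the
+// first write error is latched into a shared *error and can be checked after
+// buffered writes are flushed.
+//
+//	var werr error
+//	bw := bufio.NewWriter(stickyErrWriter{conn, &werr})
+//	bw.Write(clientPreface)
+//	bw.Flush()
+//	if werr != nil {
+//		// the first underlying write error, even across multiple writes
+//	}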
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt contains options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+	// OnlyCachedConn controls whether RoundTripOpt may
+	// create a new TCP connection. If set true and
+	// no cached connection is available, RoundTripOpt
+	// will return ErrNoCachedConn.
+	OnlyCachedConn bool
+}
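+
+// A hedged usage sketch (req and t are the caller's; nothing is assumed beyond
+// the API above): a caller that only wants to reuse an existing HTTP/2
+// connection can set OnlyCachedConn and handle ErrNoCachedConn itself.
+//
+//	res, err := t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
+//	if err == ErrNoCachedConn {
+//		// no warm connection available; dial or fall back to another transport
+//	}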
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
+// to a host:port. The port 443 (or 80 for scheme "http") is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+	host, port, err := net.SplitHostPort(authority)
+	if err != nil { // authority didn't have a port
+		port = "443"
+		if scheme == "http" {
+			port = "80"
+		}
+		host = authority
+	}
+	if a, err := idna.ToASCII(host); err == nil {
+		host = a
+	}
+	return net.JoinHostPort(host, port)
+}
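+
+// Illustrative results (example hosts only):
+//
+//	authorityAddr("https", "example.com")      // "example.com:443"
+//	authorityAddr("http", "example.com")       // "example.com:80"
+//	authorityAddr("https", "example.com:8443") // "example.com:8443"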
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+		return nil, errors.New("http2: unsupported scheme")
+	}
+
+	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+	for {
+		cc, err := t.connPool().GetClientConn(req, addr)
+		if err != nil {
+			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+			return nil, err
+		}
+		traceGotConn(req, cc)
+		res, err := cc.RoundTrip(req)
+		if shouldRetryRequest(req, err) {
+			continue
+		}
+		if err != nil {
+			t.vlogf("RoundTrip failure: %v", err)
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+// CloseIdleConnections closes any connections that were used by previous
+// requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+		cp.closeIdleConnections()
+	}
+}
+
+var (
+	errClientConnClosed   = errors.New("http2: client conn is closed")
+	errClientConnUnusable = errors.New("http2: client conn not usable")
+)
+
+func shouldRetryRequest(req *http.Request, err error) bool {
+	// TODO: retry GET requests (no bodies) more aggressively, if shutdown
+	// before response.
+	return err == errClientConnUnusable
+}
+
+func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+	tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+	if err != nil {
+		return nil, err
+	}
+	return t.newClientConn(tconn, singleUse)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+	cfg := new(tls.Config)
+	if t.TLSClientConfig != nil {
+		*cfg = *cloneTLSConfig(t.TLSClientConfig)
+	}
+	if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+		cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+	}
+	if cfg.ServerName == "" {
+		cfg.ServerName = host
+	}
+	return cfg
+}
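+
+// For example (illustrative): with a nil TLSClientConfig and host
+// "example.com", the returned config advertises NextProtoTLS first in
+// NextProtos and has ServerName "example.com".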
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+	if t.DialTLS != nil {
+		return t.DialTLS
+	}
+	return t.dialTLSDefault
+}
+
+func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
+	cn, err := tls.Dial(network, addr, cfg)
+	if err != nil {
+		return nil, err
+	}
+	if err := cn.Handshake(); err != nil {
+		return nil, err
+	}
+	if !cfg.InsecureSkipVerify {
+		if err := cn.VerifyHostname(cfg.ServerName); err != nil {
+			return nil, err
+		}
+	}
+	state := cn.ConnectionState()
+	if p := state.NegotiatedProtocol; p != NextProtoTLS {
+		return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+	}
+	if !state.NegotiatedProtocolIsMutual {
+		return nil, errors.New("http2: could not negotiate protocol mutually")
+	}
+	return cn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+	return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+	if t.t1 == nil {
+		return 0
+	}
+	return transportExpectContinueTimeout(t.t1)
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+	return t.newClientConn(c, false)
+}
+
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+	cc := &ClientConn{
+		t:                    t,
+		tconn:                c,
+		readerDone:           make(chan struct{}),
+		nextStreamID:         1,
+		maxFrameSize:         16 << 10, // spec default
+		initialWindowSize:    65535,    // spec default
+		maxConcurrentStreams: 1000,     // "infinite", per spec. 1000 seems good enough.
+		streams:              make(map[uint32]*clientStream),
+		singleUse:            singleUse,
+		wantSettingsAck:      true,
+		pings:                make(map[[8]byte]chan struct{}),
+	}
+	if d := t.idleConnTimeout(); d != 0 {
+		cc.idleTimeout = d
+		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+	}
+	if VerboseLogs {
+		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+	}
+
+	cc.cond = sync.NewCond(&cc.mu)
+	cc.flow.add(int32(initialWindowSize))
+
+	// TODO: adjust this writer size to account for frame size +
+	// MTU + crypto/tls record padding.
+	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+	cc.br = bufio.NewReader(c)
+	cc.fr = NewFramer(cc.bw, cc.br)
+	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+	// henc in response to SETTINGS frames?
+	cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+	if cs, ok := c.(connectionStater); ok {
+		state := cs.ConnectionState()
+		cc.tlsState = &state
+	}
+
+	initialSettings := []Setting{
+		{ID: SettingEnablePush, Val: 0},
+		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+	}
+	if max := t.maxHeaderListSize(); max != 0 {
+		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+	}
+
+	cc.bw.Write(clientPreface)
+	cc.fr.WriteSettings(initialSettings...)
+	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+	cc.bw.Flush()
+	if cc.werr != nil {
+		return nil, cc.werr
+	}
+
+	go cc.readLoop()
+	return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	old := cc.goAway
+	cc.goAway = f
+
+	// Merge the previous and current GoAway error frames.
+	if cc.goAwayDebug == "" {
+		cc.goAwayDebug = string(f.DebugData())
+	}
+	if old != nil && old.ErrCode != ErrCodeNo {
+		cc.goAway.ErrCode = old.ErrCode
+	}
+}
+
+func (cc *ClientConn) CanTakeNewRequest() bool {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	if cc.singleUse && cc.nextStreamID > 1 {
+		return false
+	}
+	return cc.goAway == nil && !cc.closed &&
+		int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+		cc.nextStreamID < math.MaxInt32
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine.  It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+	cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeIfIdle() {
+	cc.mu.Lock()
+	if len(cc.streams) > 0 {
+		cc.mu.Unlock()
+		return
+	}
+	cc.closed = true
+	nextID := cc.nextStreamID
+	// TODO: do clients send GOAWAY too? maybe? Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// Buffers are capped at the min of the peer's max frame size and 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+	cc.mu.Lock()
+	size := cc.maxFrameSize
+	if size > maxAllocFrameSize {
+		size = maxAllocFrameSize
+	}
+	for i, buf := range cc.freeBuf {
+		if len(buf) >= int(size) {
+			cc.freeBuf[i] = nil
+			cc.mu.Unlock()
+			return buf[:size]
+		}
+	}
+	cc.mu.Unlock()
+	return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+	if len(cc.freeBuf) < maxBufs {
+		cc.freeBuf = append(cc.freeBuf, buf)
+		return
+	}
+	for i, old := range cc.freeBuf {
+		if old == nil {
+			cc.freeBuf[i] = buf
+			return
+		}
+	}
+	// forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+	keys := make([]string, 0, len(req.Trailer))
+	for k := range req.Trailer {
+		k = http.CanonicalHeaderKey(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", &badStringError{"invalid Trailer key", k}
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		// TODO: could do better allocation-wise here, but trailers are rare,
+		// so being lazy for now.
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
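+
+// For example (illustrative; the trailer keys are arbitrary):
+//
+//	req.Trailer = http.Header{"X-B": nil, "X-A": nil}
+//	s, _ := commaSeparatedTrailers(req) // s == "X-A,X-B"
+//
+// whereas declaring "Content-Length" as a trailer returns an error.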
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+	if cc.t.t1 != nil {
+		return cc.t.t1.ResponseHeaderTimeout
+	}
+	// No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+	// this for compatibility with the old http.Transport fields when
+	// we're doing transparent http2.
+	return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+	if v := req.Header.Get("Upgrade"); v != "" {
+		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+	}
+	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+	}
+	return nil
+}
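+
+// Illustrative outcomes (hedged, not exhaustive): "Upgrade: websocket" or
+// "Transfer-Encoding: gzip" are rejected here, while "Transfer-Encoding:
+// chunked" and "Connection: close" are tolerated and simply not transmitted
+// later (see encodeHeaders).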
+
+func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
+	body = req.Body
+	if body == nil {
+		return nil, 0
+	}
+	if req.ContentLength != 0 {
+		return req.Body, req.ContentLength
+	}
+	// Don't try to sniff the size if they're doing an expect
+	// request (Issue 16002):
+	if req.Header.Get("Expect") == "100-continue" {
+		return req.Body, -1
+	}
+
+	// We have a body but a zero content length. Test to see if
+	// it's actually zero or just unset.
+	var buf [1]byte
+	n, rerr := body.Read(buf[:])
+	if rerr != nil && rerr != io.EOF {
+		return errorReader{rerr}, -1
+	}
+	if n == 1 {
+		// Oh, guess there is data in this Body Reader after all.
+		// The ContentLength field just wasn't set.
+		// Stitch the Body back together again, re-attaching our
+		// consumed byte.
+		if rerr == io.EOF {
+			return bytes.NewReader(buf[:]), 1
+		}
+		return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+	}
+	// Body is actually zero bytes.
+	return nil, 0
+}
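+
+// Illustrative sniffing behavior (a hedged description, not additional API):
+// with ContentLength left at zero and a non-empty Body, the single-byte read
+// above detects data, so the body is re-stitched with io.MultiReader and the
+// length is reported as -1 (unknown), unless that first Read also returned
+// io.EOF, in which case the exact length 1 is reported.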
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+	if err := checkConnHeaders(req); err != nil {
+		return nil, err
+	}
+	if cc.idleTimer != nil {
+		cc.idleTimer.Stop()
+	}
+
+	trailers, err := commaSeparatedTrailers(req)
+	if err != nil {
+		return nil, err
+	}
+	hasTrailers := trailers != ""
+
+	cc.mu.Lock()
+	cc.lastActive = time.Now()
+	if cc.closed || !cc.canTakeNewRequestLocked() {
+		cc.mu.Unlock()
+		return nil, errClientConnUnusable
+	}
+
+	body, contentLen := bodyAndLength(req)
+	hasBody := body != nil
+
+	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+	var requestedGzip bool
+	if !cc.t.disableCompression() &&
+		req.Header.Get("Accept-Encoding") == "" &&
+		req.Header.Get("Range") == "" &&
+		req.Method != "HEAD" {
+		// Request gzip only, not deflate. Deflate is ambiguous and
+		// not as universally supported anyway.
+		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+		//
+		// Note that we don't request this for HEAD requests,
+		// due to a bug in nginx:
+		//   http://trac.nginx.org/nginx/ticket/358
+		//   https://golang.org/issue/5522
+		//
+		// We don't request gzip if the request is for a range, since
+		// auto-decoding a portion of a gzipped document will just fail
+		// anyway. See https://golang.org/issue/8923
+		requestedGzip = true
+	}
+
+	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+	// sent by writeRequestBody below, along with any Trailers,
+	// again in form HEADERS{1}, CONTINUATION{0,})
+	hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
+	if err != nil {
+		cc.mu.Unlock()
+		return nil, err
+	}
+
+	cs := cc.newStream()
+	cs.req = req
+	cs.trace = requestTrace(req)
+	cs.requestedGzip = requestedGzip
+	bodyWriter := cc.t.getBodyWriterState(cs, body)
+	cs.on100 = bodyWriter.on100
+
+	cc.wmu.Lock()
+	endStream := !hasBody && !hasTrailers
+	werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+	cc.wmu.Unlock()
+	traceWroteHeaders(cs.trace)
+	cc.mu.Unlock()
+
+	if werr != nil {
+		if hasBody {
+			req.Body.Close() // per RoundTripper contract
+			bodyWriter.cancel()
+		}
+		cc.forgetStreamID(cs.ID)
+		// Don't bother sending a RST_STREAM (our write already failed;
+		// no need to keep writing)
+		traceWroteRequest(cs.trace, werr)
+		return nil, werr
+	}
+
+	var respHeaderTimer <-chan time.Time
+	if hasBody {
+		bodyWriter.scheduleBodyWrite()
+	} else {
+		traceWroteRequest(cs.trace, nil)
+		if d := cc.responseHeaderTimeout(); d != 0 {
+			timer := time.NewTimer(d)
+			defer timer.Stop()
+			respHeaderTimer = timer.C
+		}
+	}
+
+	readLoopResCh := cs.resc
+	bodyWritten := false
+	ctx := reqContext(req)
+
+	handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
+		res := re.res
+		if re.err != nil || res.StatusCode > 299 {
+			// On error or status code 3xx, 4xx, 5xx, etc abort any
+			// ongoing write, assuming that the server doesn't care
+			// about our request body. If the server replied with 1xx or
+			// 2xx, however, then assume the server DOES potentially
+			// want our body (e.g. full-duplex streaming:
+			// golang.org/issue/13444). If it turns out the server
+			// doesn't, they'll RST_STREAM us soon enough.  This is a
+			// heuristic to avoid adding knobs to Transport.  Hopefully
+			// we can keep it.
+			bodyWriter.cancel()
+			cs.abortRequestBodyWrite(errStopReqBodyWrite)
+		}
+		if re.err != nil {
+			cc.forgetStreamID(cs.ID)
+			return nil, re.err
+		}
+		res.Request = req
+		res.TLS = cc.tlsState
+		return res, nil
+	}
+
+	for {
+		select {
+		case re := <-readLoopResCh:
+			return handleReadLoopResponse(re)
+		case <-respHeaderTimer:
+			cc.forgetStreamID(cs.ID)
+			if !hasBody || bodyWritten {
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+			} else {
+				bodyWriter.cancel()
+				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+			}
+			return nil, errTimeout
+		case <-ctx.Done():
+			cc.forgetStreamID(cs.ID)
+			if !hasBody || bodyWritten {
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+			} else {
+				bodyWriter.cancel()
+				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+			}
+			return nil, ctx.Err()
+		case <-req.Cancel:
+			cc.forgetStreamID(cs.ID)
+			if !hasBody || bodyWritten {
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+			} else {
+				bodyWriter.cancel()
+				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+			}
+			return nil, errRequestCanceled
+		case <-cs.peerReset:
+			// processResetStream already removed the
+			// stream from the streams map; no need for
+			// forgetStreamID.
+			return nil, cs.resetErr
+		case err := <-bodyWriter.resc:
+			// Prefer the read loop's response, if available. Issue 16102.
+			select {
+			case re := <-readLoopResCh:
+				return handleReadLoopResponse(re)
+			default:
+			}
+			if err != nil {
+				return nil, err
+			}
+			bodyWritten = true
+			if d := cc.responseHeaderTimeout(); d != 0 {
+				timer := time.NewTimer(d)
+				defer timer.Stop()
+				respHeaderTimer = timer.C
+			}
+		}
+	}
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+	first := true // first frame written (HEADERS is first, then CONTINUATION)
+	frameSize := int(cc.maxFrameSize)
+	for len(hdrs) > 0 && cc.werr == nil {
+		chunk := hdrs
+		if len(chunk) > frameSize {
+			chunk = chunk[:frameSize]
+		}
+		hdrs = hdrs[len(chunk):]
+		endHeaders := len(hdrs) == 0
+		if first {
+			cc.fr.WriteHeaders(HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: chunk,
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+			first = false
+		} else {
+			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+		}
+	}
+	// TODO(bradfitz): this Flush could potentially block (as
+	// could the WriteHeaders call(s) above), which means they
+	// wouldn't respond to Request.Cancel being readable. That's
+	// rare, but this should probably be in a goroutine.
+	cc.bw.Flush()
+	return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+	// abort request body write; don't send cancel
+	errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, but also send a RST_STREAM with CANCEL.
+	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+	cc := cs.cc
+	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+	buf := cc.frameScratchBuffer()
+	defer cc.putFrameScratchBuffer(buf)
+
+	defer func() {
+		traceWroteRequest(cs.trace, err)
+		// TODO: write h12Compare test showing whether
+		// Request.Body is closed by the Transport,
+		// and in multiple cases: server replies <=299 and >299
+		// while still writing request body
+		cerr := bodyCloser.Close()
+		if err == nil {
+			err = cerr
+		}
+	}()
+
+	req := cs.req
+	hasTrailers := req.Trailer != nil
+
+	var sawEOF bool
+	for !sawEOF {
+		n, err := body.Read(buf)
+		if err == io.EOF {
+			sawEOF = true
+			err = nil
+		} else if err != nil {
+			return err
+		}
+
+		remain := buf[:n]
+		for len(remain) > 0 && err == nil {
+			var allowed int32
+			allowed, err = cs.awaitFlowControl(len(remain))
+			switch {
+			case err == errStopReqBodyWrite:
+				return err
+			case err == errStopReqBodyWriteAndCancel:
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+				return err
+			case err != nil:
+				return err
+			}
+			cc.wmu.Lock()
+			data := remain[:allowed]
+			remain = remain[allowed:]
+			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+			err = cc.fr.WriteData(cs.ID, sentEnd, data)
+			if err == nil {
+				// TODO(bradfitz): this flush is for latency, not bandwidth.
+				// Most requests won't need this. Make this opt-in or
+				// opt-out?  Use some heuristic on the body type? Nagle-like
+				// timers?  Based on 'n'? Only last chunk of this for loop,
+				// unless flow control tokens are low? For now, always.
+				// If we change this, see comment below.
+				err = cc.bw.Flush()
+			}
+			cc.wmu.Unlock()
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	if sentEnd {
+		// Already sent END_STREAM (which implies we have no
+		// trailers) and flushed, because currently all
+		// WriteData frames above get a flush. So we're done.
+		return nil
+	}
+
+	var trls []byte
+	if hasTrailers {
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		trls = cc.encodeTrailers(req)
+	}
+
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+
+	// Two ways to send END_STREAM: either with trailers, or
+	// with an empty DATA frame.
+	if len(trls) > 0 {
+		err = cc.writeHeaders(cs.ID, true, trls)
+	} else {
+		err = cc.fr.WriteData(cs.ID, true, nil)
+	}
+	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+		err = ferr
+	}
+	return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+	cc := cs.cc
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	for {
+		if cc.closed {
+			return 0, errClientConnClosed
+		}
+		if cs.stopReqBody != nil {
+			return 0, cs.stopReqBody
+		}
+		if err := cs.checkResetOrDone(); err != nil {
+			return 0, err
+		}
+		if a := cs.flow.available(); a > 0 {
+			take := a
+			if int(take) > maxBytes {
+				take = int32(maxBytes) // can't truncate int; take is int32
+			}
+			if take > int32(cc.maxFrameSize) {
+				take = int32(cc.maxFrameSize)
+			}
+			cs.flow.take(take)
+			return take, nil
+		}
+		cc.cond.Wait()
+	}
+}
+
+type badStringError struct {
+	what string
+	str  string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+	cc.hbuf.Reset()
+
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+	host, err := httplex.PunycodeHostPort(host)
+	if err != nil {
+		return nil, err
+	}
+
+	var path string
+	if req.Method != "CONNECT" {
+		path = req.URL.RequestURI()
+		if !validPseudoPath(path) {
+			orig := path
+			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+			if !validPseudoPath(path) {
+				if req.URL.Opaque != "" {
+					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+				} else {
+					return nil, fmt.Errorf("invalid request :path %q", orig)
+				}
+			}
+		}
+	}
+
+	// Check for any invalid headers and return an error before we
+	// potentially pollute our hpack state. (We want to be able to
+	// continue to reuse the hpack encoder for future requests)
+	for k, vv := range req.Header {
+		if !httplex.ValidHeaderFieldName(k) {
+			return nil, fmt.Errorf("invalid HTTP header name %q", k)
+		}
+		for _, v := range vv {
+			if !httplex.ValidHeaderFieldValue(v) {
+				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+			}
+		}
+	}
+
+	// 8.1.2.3 Request Pseudo-Header Fields
+	// The :path pseudo-header field includes the path and query parts of the
+	// target URI (the path-absolute production and optionally a '?' character
+	// followed by the query production (see Sections 3.3 and 3.4 of
+	// [RFC3986]).
+	cc.writeHeader(":authority", host)
+	cc.writeHeader(":method", req.Method)
+	if req.Method != "CONNECT" {
+		cc.writeHeader(":path", path)
+		cc.writeHeader(":scheme", req.URL.Scheme)
+	}
+	if trailers != "" {
+		cc.writeHeader("trailer", trailers)
+	}
+
+	var didUA bool
+	for k, vv := range req.Header {
+		lowKey := strings.ToLower(k)
+		switch lowKey {
+		case "host", "content-length":
+			// Host is :authority, already sent.
+			// Content-Length is automatic, set below.
+			continue
+		case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
+			// Per 8.1.2.2 Connection-Specific Header
+			// Fields, don't send connection-specific
+			// fields. We have already checked if any
+			// are error-worthy so just ignore the rest.
+			continue
+		case "user-agent":
+			// Match Go's http1 behavior: at most one
+			// User-Agent. If set to nil or empty string,
+			// then omit it. Otherwise if not mentioned,
+			// include the default (below).
+			didUA = true
+			if len(vv) < 1 {
+				continue
+			}
+			vv = vv[:1]
+			if vv[0] == "" {
+				continue
+			}
+		}
+		for _, v := range vv {
+			cc.writeHeader(lowKey, v)
+		}
+	}
+	if shouldSendReqContentLength(req.Method, contentLength) {
+		cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+	}
+	if addGzipHeader {
+		cc.writeHeader("accept-encoding", "gzip")
+	}
+	if !didUA {
+		cc.writeHeader("user-agent", defaultUserAgent)
+	}
+	return cc.hbuf.Bytes(), nil
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+	if contentLength > 0 {
+		return true
+	}
+	if contentLength < 0 {
+		return false
+	}
+	// For zero bodies, whether we send a content-length depends on the method.
+	// It also kinda doesn't matter for http2 either way, with END_STREAM.
+	switch method {
+	case "POST", "PUT", "PATCH":
+		return true
+	default:
+		return false
+	}
+}
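+
+// Illustrative cases:
+//
+//	shouldSendReqContentLength("POST", 0)  // true: explicit zero-length body
+//	shouldSendReqContentLength("GET", 0)   // false
+//	shouldSendReqContentLength("PUT", -1)  // false: length unknown
+//	shouldSendReqContentLength("GET", 512) // true: any positive length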
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+	cc.hbuf.Reset()
+	for k, vv := range req.Trailer {
+		// Transfer-Encoding, etc. have already been filtered out at the
+		// start of RoundTrip.
+		lowKey := strings.ToLower(k)
+		for _, v := range vv {
+			cc.writeHeader(lowKey, v)
+		}
+	}
+	return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+	if VerboseLogs {
+		log.Printf("http2: Transport encoding header %q = %q", name, value)
+	}
+	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+	res *http.Response
+	err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+	cs := &clientStream{
+		cc:        cc,
+		ID:        cc.nextStreamID,
+		resc:      make(chan resAndError, 1),
+		peerReset: make(chan struct{}),
+		done:      make(chan struct{}),
+	}
+	cs.flow.add(int32(cc.initialWindowSize))
+	cs.flow.setConnFlow(&cc.flow)
+	cs.inflow.add(transportDefaultStreamFlow)
+	cs.inflow.setConnFlow(&cc.inflow)
+	cc.nextStreamID += 2
+	cc.streams[cs.ID] = cs
+	return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+	cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cs := cc.streams[id]
+	if andRemove && cs != nil && !cc.closed {
+		cc.lastActive = time.Now()
+		delete(cc.streams, id)
+		if len(cc.streams) == 0 && cc.idleTimer != nil {
+			cc.idleTimer.Reset(cc.idleTimeout)
+		}
+		close(cs.done)
+		cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+	cc            *ClientConn
+	activeRes     map[uint32]*clientStream // keyed by streamID
+	closeWhenIdle bool
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+	rl := &clientConnReadLoop{
+		cc:        cc,
+		activeRes: make(map[uint32]*clientStream),
+	}
+
+	defer rl.cleanup()
+	cc.readerErr = rl.run()
+	if ce, ok := cc.readerErr.(ConnectionError); ok {
+		cc.wmu.Lock()
+		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+		cc.wmu.Unlock()
+	}
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type GoAwayError struct {
+	LastStreamID uint32
+	ErrCode      ErrCode
+	DebugData    string
+}
+
+func (e GoAwayError) Error() string {
+	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+		e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func isEOFOrNetReadError(err error) bool {
+	if err == io.EOF {
+		return true
+	}
+	ne, ok := err.(*net.OpError)
+	return ok && ne.Op == "read"
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+	cc := rl.cc
+	defer cc.tconn.Close()
+	defer cc.t.connPool().MarkDead(cc)
+	defer close(cc.readerDone)
+
+	if cc.idleTimer != nil {
+		cc.idleTimer.Stop()
+	}
+
+	// Close any response bodies if the server closes prematurely.
+	// TODO: also do this if we've written the headers but not
+	// gotten a response yet.
+	err := cc.readerErr
+	cc.mu.Lock()
+	if cc.goAway != nil && isEOFOrNetReadError(err) {
+		err = GoAwayError{
+			LastStreamID: cc.goAway.LastStreamID,
+			ErrCode:      cc.goAway.ErrCode,
+			DebugData:    cc.goAwayDebug,
+		}
+	} else if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	for _, cs := range rl.activeRes {
+		cs.bufPipe.CloseWithError(err)
+	}
+	for _, cs := range cc.streams {
+		select {
+		case cs.resc <- resAndError{err: err}:
+		default:
+		}
+		close(cs.done)
+	}
+	cc.closed = true
+	cc.cond.Broadcast()
+	cc.mu.Unlock()
+}
+
+func (rl *clientConnReadLoop) run() error {
+	cc := rl.cc
+	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
+	gotReply := false // ever saw a HEADERS reply
+	gotSettings := false
+	for {
+		f, err := cc.fr.ReadFrame()
+		if err != nil {
+			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+		}
+		if se, ok := err.(StreamError); ok {
+			if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+				cs.cc.writeStreamReset(cs.ID, se.Code, err)
+				if se.Cause == nil {
+					se.Cause = cc.fr.errDetail
+				}
+				rl.endStreamError(cs, se)
+			}
+			continue
+		} else if err != nil {
+			return err
+		}
+		if VerboseLogs {
+			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+		}
+		if !gotSettings {
+			if _, ok := f.(*SettingsFrame); !ok {
+				cc.logf("protocol error: received %T before a SETTINGS frame", f)
+				return ConnectionError(ErrCodeProtocol)
+			}
+			gotSettings = true
+		}
+		maybeIdle := false // whether frame might transition us to idle
+
+		switch f := f.(type) {
+		case *MetaHeadersFrame:
+			err = rl.processHeaders(f)
+			maybeIdle = true
+			gotReply = true
+		case *DataFrame:
+			err = rl.processData(f)
+			maybeIdle = true
+		case *GoAwayFrame:
+			err = rl.processGoAway(f)
+			maybeIdle = true
+		case *RSTStreamFrame:
+			err = rl.processResetStream(f)
+			maybeIdle = true
+		case *SettingsFrame:
+			err = rl.processSettings(f)
+		case *PushPromiseFrame:
+			err = rl.processPushPromise(f)
+		case *WindowUpdateFrame:
+			err = rl.processWindowUpdate(f)
+		case *PingFrame:
+			err = rl.processPing(f)
+		default:
+			cc.logf("Transport: unhandled response frame type %T", f)
+		}
+		if err != nil {
+			if VerboseLogs {
+				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
+			}
+			return err
+		}
+		if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+			cc.closeIfIdle()
+		}
+	}
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, f.StreamEnded())
+	if cs == nil {
+		// We'd get here if we canceled a request while the
+		// server had its response still in flight. So if this
+		// was just something we canceled, ignore it.
+		return nil
+	}
+	if !cs.firstByte {
+		if cs.trace != nil {
+			// TODO(bradfitz): move first response byte earlier,
+			// when we first read the 9 byte header, not waiting
+			// until all the HEADERS+CONTINUATION frames have been
+			// merged. This works for now.
+			traceFirstResponseByte(cs.trace)
+		}
+		cs.firstByte = true
+	}
+	if !cs.pastHeaders {
+		cs.pastHeaders = true
+	} else {
+		return rl.processTrailers(cs, f)
+	}
+
+	res, err := rl.handleResponse(cs, f)
+	if err != nil {
+		if _, ok := err.(ConnectionError); ok {
+			return err
+		}
+		// Any other error type is a stream error.
+		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+		cs.resc <- resAndError{err: err}
+		return nil // return nil from process* funcs to keep conn alive
+	}
+	if res == nil {
+		// (nil, nil) special case. See handleResponse docs.
+		return nil
+	}
+	if res.Body != noBody {
+		rl.activeRes[cs.ID] = cs
+	}
+	cs.resTrailer = &res.Trailer
+	cs.resc <- resAndError{res: res}
+	return nil
+}
+
+// handleResponse may return a nil error or a ConnectionError. Any other
+// error value is treated by the caller as a stream error of type
+// ErrCodeProtocol, with the returned error as the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+	if f.Truncated {
+		return nil, errResponseHeaderListSize
+	}
+
+	status := f.PseudoValue("status")
+	if status == "" {
+		return nil, errors.New("missing status pseudo header")
+	}
+	statusCode, err := strconv.Atoi(status)
+	if err != nil {
+		return nil, errors.New("malformed non-numeric status pseudo header")
+	}
+
+	if statusCode == 100 {
+		traceGot100Continue(cs.trace)
+		if cs.on100 != nil {
+			cs.on100() // forces any write delay timer to fire
+		}
+		cs.pastHeaders = false // do it all again
+		return nil, nil
+	}
+
+	header := make(http.Header)
+	res := &http.Response{
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		Header:     header,
+		StatusCode: statusCode,
+		Status:     status + " " + http.StatusText(statusCode),
+	}
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		if key == "Trailer" {
+			t := res.Trailer
+			if t == nil {
+				t = make(http.Header)
+				res.Trailer = t
+			}
+			foreachHeaderElement(hf.Value, func(v string) {
+				t[http.CanonicalHeaderKey(v)] = nil
+			})
+		} else {
+			header[key] = append(header[key], hf.Value)
+		}
+	}
+
+	streamEnded := f.StreamEnded()
+	isHead := cs.req.Method == "HEAD"
+	if !streamEnded || isHead {
+		res.ContentLength = -1
+		if clens := res.Header["Content-Length"]; len(clens) == 1 {
+			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+				res.ContentLength = clen64
+			} else {
+				// TODO: care? unlike http/1, it won't mess up our framing, so it's
+				// more safe smuggling-wise to ignore.
+			}
+		} else if len(clens) > 1 {
+			// TODO: care? unlike http/1, it won't mess up our framing, so it's
+			// more safe smuggling-wise to ignore.
+		}
+	}
+
+	if streamEnded || isHead {
+		res.Body = noBody
+		return res, nil
+	}
+
+	buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+	cs.bufPipe = pipe{b: buf}
+	cs.bytesRemain = res.ContentLength
+	res.Body = transportResponseBody{cs}
+	go cs.awaitRequestCancel(cs.req)
+
+	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+		res.Header.Del("Content-Encoding")
+		res.Header.Del("Content-Length")
+		res.ContentLength = -1
+		res.Body = &gzipReader{body: res.Body}
+		setResponseUncompressed(res)
+	}
+	return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+	if cs.pastTrailers {
+		// Too many HEADERS frames for this stream.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	cs.pastTrailers = true
+	if !f.StreamEnded() {
+		// We expect that any HEADERS frame for trailers also
+		// has END_STREAM.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if len(f.PseudoFields()) > 0 {
+		// No pseudo header fields are defined for trailers.
+		// TODO: ConnectionError might be overly harsh? Check.
+		return ConnectionError(ErrCodeProtocol)
+	}
+
+	trailer := make(http.Header)
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		trailer[key] = append(trailer[key], hf.Value)
+	}
+	cs.trailer = trailer
+
+	rl.endStream(cs)
+	return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+	cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+	cs := b.cs
+	cc := cs.cc
+
+	if cs.readErr != nil {
+		return 0, cs.readErr
+	}
+	n, err = b.cs.bufPipe.Read(p)
+	if cs.bytesRemain != -1 {
+		if int64(n) > cs.bytesRemain {
+			n = int(cs.bytesRemain)
+			if err == nil {
+				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+			}
+			cs.readErr = err
+			return int(cs.bytesRemain), err
+		}
+		cs.bytesRemain -= int64(n)
+		if err == io.EOF && cs.bytesRemain > 0 {
+			err = io.ErrUnexpectedEOF
+			cs.readErr = err
+			return n, err
+		}
+	}
+	if n == 0 {
+		// No flow control tokens to send back.
+		return
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	var connAdd, streamAdd int32
+	// Check the conn-level first, before the stream-level.
+	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+		connAdd = transportDefaultConnFlow - v
+		cc.inflow.add(connAdd)
+	}
+	if err == nil { // No need to refresh if the stream is over or failed.
+		// Consider any buffered body data (read from the conn but not
+		// consumed by the client) when computing flow control for this
+		// stream.
+		v := int(cs.inflow.available()) + cs.bufPipe.Len()
+		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+			streamAdd = int32(transportDefaultStreamFlow - v)
+			cs.inflow.add(streamAdd)
+		}
+	}
+	if connAdd != 0 || streamAdd != 0 {
+		cc.wmu.Lock()
+		defer cc.wmu.Unlock()
+		if connAdd != 0 {
+			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+		}
+		if streamAdd != 0 {
+			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+		}
+		cc.bw.Flush()
+	}
+	return
+}
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+	cs := b.cs
+	cc := cs.cc
+
+	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
+	unread := cs.bufPipe.Len()
+
+	if unread > 0 || !serverSentStreamEnd {
+		cc.mu.Lock()
+		cc.wmu.Lock()
+		if !serverSentStreamEnd {
+			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+		}
+		// Return connection-level flow control.
+		if unread > 0 {
+			cc.inflow.add(int32(unread))
+			cc.fr.WriteWindowUpdate(0, uint32(unread))
+		}
+		cc.bw.Flush()
+		cc.wmu.Unlock()
+		cc.mu.Unlock()
+	}
+
+	cs.bufPipe.BreakWithError(errClosedResponseBody)
+	return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, f.StreamEnded())
+	data := f.Data()
+	if cs == nil {
+		cc.mu.Lock()
+		neverSent := cc.nextStreamID
+		cc.mu.Unlock()
+		if f.StreamID >= neverSent {
+			// We never asked for this.
+			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+			return ConnectionError(ErrCodeProtocol)
+		}
+		// We probably did ask for this, but canceled. Just ignore it.
+		// TODO: be stricter here? only silently ignore things which
+		// we canceled, but not things which were closed normally
+		// by the peer? Tough without accumulating too much state.
+
+		// But at least return their flow control:
+		if f.Length > 0 {
+			cc.mu.Lock()
+			cc.inflow.add(int32(f.Length))
+			cc.mu.Unlock()
+
+			cc.wmu.Lock()
+			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
+			cc.bw.Flush()
+			cc.wmu.Unlock()
+		}
+		return nil
+	}
+	if f.Length > 0 {
+		if len(data) > 0 && cs.bufPipe.b == nil {
+			// Data frame after it's already closed?
+			cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+			return ConnectionError(ErrCodeProtocol)
+		}
+
+		// Check connection-level flow control.
+		cc.mu.Lock()
+		if cs.inflow.available() >= int32(f.Length) {
+			cs.inflow.take(int32(f.Length))
+		} else {
+			cc.mu.Unlock()
+			return ConnectionError(ErrCodeFlowControl)
+		}
+		// Return any padded flow control now, since we won't
+		// refund it later on body reads.
+		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+			cs.inflow.add(pad)
+			cc.inflow.add(pad)
+			cc.wmu.Lock()
+			cc.fr.WriteWindowUpdate(0, uint32(pad))
+			cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+			cc.bw.Flush()
+			cc.wmu.Unlock()
+		}
+		cc.mu.Unlock()
+
+		if len(data) > 0 {
+			if _, err := cs.bufPipe.Write(data); err != nil {
+				rl.endStreamError(cs, err)
+				return err
+			}
+		}
+	}
+
+	if f.StreamEnded() {
+		rl.endStream(cs)
+	}
+	return nil
+}
+
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+	// TODO: check that any declared content-length matches, like
+	// server.go's (*stream).endStream method.
+	rl.endStreamError(cs, nil)
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+	var code func()
+	if err == nil {
+		err = io.EOF
+		code = cs.copyTrailers
+	}
+	cs.bufPipe.closeWithErrorAndCode(err, code)
+	delete(rl.activeRes, cs.ID)
+	if isConnectionCloseRequest(cs.req) {
+		rl.closeWhenIdle = true
+	}
+
+	select {
+	case cs.resc <- resAndError{err: err}:
+	default:
+	}
+}
+
+func (cs *clientStream) copyTrailers() {
+	for k, vv := range cs.trailer {
+		t := cs.resTrailer
+		if *t == nil {
+			*t = make(http.Header)
+		}
+		(*t)[k] = vv
+	}
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+	cc := rl.cc
+	cc.t.connPool().MarkDead(cc)
+	if f.ErrCode != 0 {
+		// TODO: deal with GOAWAY more. particularly the error code
+		cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+	}
+	cc.setGoAway(f)
+	return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+	cc := rl.cc
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	if f.IsAck() {
+		if cc.wantSettingsAck {
+			cc.wantSettingsAck = false
+			return nil
+		}
+		return ConnectionError(ErrCodeProtocol)
+	}
+
+	err := f.ForeachSetting(func(s Setting) error {
+		switch s.ID {
+		case SettingMaxFrameSize:
+			cc.maxFrameSize = s.Val
+		case SettingMaxConcurrentStreams:
+			cc.maxConcurrentStreams = s.Val
+		case SettingInitialWindowSize:
+			// Values above the maximum flow-control
+			// window size of 2^31-1 MUST be treated as a
+			// connection error (Section 5.4.1) of type
+			// FLOW_CONTROL_ERROR.
+			if s.Val > math.MaxInt32 {
+				return ConnectionError(ErrCodeFlowControl)
+			}
+
+			// Adjust flow control of currently-open
+			// frames by the difference of the old initial
+			// window size and this one.
+			delta := int32(s.Val) - int32(cc.initialWindowSize)
+			for _, cs := range cc.streams {
+				cs.flow.add(delta)
+			}
+			cc.cond.Broadcast()
+
+			cc.initialWindowSize = s.Val
+		default:
+			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+			cc.vlogf("Unhandled Setting: %v", s)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+
+	cc.fr.WriteSettingsAck()
+	cc.bw.Flush()
+	return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, false)
+	if f.StreamID != 0 && cs == nil {
+		return nil
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	fl := &cc.flow
+	if cs != nil {
+		fl = &cs.flow
+	}
+	if !fl.add(int32(f.Increment)) {
+		return ConnectionError(ErrCodeFlowControl)
+	}
+	cc.cond.Broadcast()
+	return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+	cs := rl.cc.streamByID(f.StreamID, true)
+	if cs == nil {
+		// TODO: return error if server tries to RST_STREAM an idle stream
+		return nil
+	}
+	select {
+	case <-cs.peerReset:
+		// Already reset.
+		// This is the only goroutine
+		// which closes this, so there
+		// isn't a race.
+	default:
+		err := streamError(cs.ID, f.ErrCode)
+		cs.resetErr = err
+		close(cs.peerReset)
+		cs.bufPipe.CloseWithError(err)
+		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	delete(rl.activeRes, cs.ID)
+	return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+// The public implementation is in go17.go and not_go17.go.
+func (cc *ClientConn) ping(ctx contextContext) error {
+	c := make(chan struct{})
+	// Generate a random payload
+	var p [8]byte
+	for {
+		if _, err := rand.Read(p[:]); err != nil {
+			return err
+		}
+		cc.mu.Lock()
+		// check for dup before insert
+		if _, found := cc.pings[p]; !found {
+			cc.pings[p] = c
+			cc.mu.Unlock()
+			break
+		}
+		cc.mu.Unlock()
+	}
+	cc.wmu.Lock()
+	if err := cc.fr.WritePing(false, p); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	cc.wmu.Unlock()
+	select {
+	case <-c:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-cc.readerDone:
+		// connection closed
+		return cc.readerErr
+	}
+}
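+
+// Hedged usage sketch (the exported wrapper and the concrete context type live
+// in go17.go / not_go17.go; ctx here is assumed to satisfy contextContext):
+//
+//	err := cc.ping(ctx) // nil once the server acks our 8-byte PING payload
+//	if err != nil {
+//		// timed out, canceled, or the connection's read loop has ended
+//	}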
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+	if f.IsAck() {
+		cc := rl.cc
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		// If ack, notify listener if any
+		if c, ok := cc.pings[f.Data]; ok {
+			close(c)
+			delete(cc.pings, f.Data)
+		}
+		return nil
+	}
+	cc := rl.cc
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if err := cc.fr.WritePing(true, f.Data); err != nil {
+		return err
+	}
+	return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+	// We told the peer we don't want them.
+	// Spec says:
+	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+	// setting of the peer endpoint is set to 0. An endpoint that
+	// has set this setting and has received acknowledgement MUST
+	// treat the receipt of a PUSH_PROMISE frame as a connection
+	// error (Section 5.4.1) of type PROTOCOL_ERROR."
+	return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+	// TODO: map err to more interesting error codes, once the
+	// HTTP community comes up with some. But currently for
+	// RST_STREAM there's no equivalent to GOAWAY frame's debug
+	// data, and the error codes are all pretty vague ("cancel").
+	cc.wmu.Lock()
+	cc.fr.WriteRSTStream(streamID, code)
+	cc.bw.Flush()
+	cc.wmu.Unlock()
+}
+
+var (
+	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+	errPseudoTrailers         = errors.New("http2: invalid pseudo header in trailers")
+)
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+	cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+	cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+	if VerboseLogs {
+		t.logf(format, args...)
+	}
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+	log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read.
+type gzipReader struct {
+	body io.ReadCloser // underlying Response.Body
+	zr   *gzip.Reader  // lazily-initialized gzip reader
+	zerr error         // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+	if gz.zerr != nil {
+		return 0, gz.zerr
+	}
+	if gz.zr == nil {
+		gz.zr, err = gzip.NewReader(gz.body)
+		if err != nil {
+			gz.zerr = err
+			return 0, err
+		}
+	}
+	return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+	return gz.body.Close()
+}
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+	cs     *clientStream
+	timer  *time.Timer   // if non-nil, we're doing a delayed write
+	fnonce *sync.Once    // to call fn with
+	fn     func()        // the code to run in the goroutine, writing the body
+	resc   chan error    // result of fn's execution
+	delay  time.Duration // how long we should delay a delayed write for
+}
+
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+	s.cs = cs
+	if body == nil {
+		return
+	}
+	resc := make(chan error, 1)
+	s.resc = resc
+	s.fn = func() {
+		resc <- cs.writeRequestBody(body, cs.req.Body)
+	}
+	s.delay = t.expectContinueTimeout()
+	if s.delay == 0 ||
+		!httplex.HeaderValuesContainsToken(
+			cs.req.Header["Expect"],
+			"100-continue") {
+		return
+	}
+	s.fnonce = new(sync.Once)
+
+	// Arm the timer with a very large duration, which we'll
+	// intentionally lower later. It has to be large now because
+	// we need a handle to it before writing the headers, but the
+	// s.delay value is defined to not start until after the
+	// request headers were written.
+	const hugeDuration = 365 * 24 * time.Hour
+	s.timer = time.AfterFunc(hugeDuration, func() {
+		s.fnonce.Do(s.fn)
+	})
+	return
+}
+
+func (s bodyWriterState) cancel() {
+	if s.timer != nil {
+		s.timer.Stop()
+	}
+}
+
+func (s bodyWriterState) on100() {
+	if s.timer == nil {
+		// If we didn't do a delayed write, ignore the server's
+		// bogus 100 continue response.
+		return
+	}
+	s.timer.Stop()
+	go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+	if s.timer == nil {
+		// We're not doing a delayed write (see
+		// getBodyWriterState), so just start the writing
+		// goroutine immediately.
+		go s.fn()
+		return
+	}
+	traceWait100Continue(s.cs.trace)
+	if s.timer.Stop() {
+		s.timer.Reset(s.delay)
+	}
+}
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+	return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..27ef0dd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,264 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net/http"
+	"time"
+
+	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/lex/httplex"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+	writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+	Framer() *Framer
+	Flush() error
+	CloseConn() error
+	// HeaderEncoder returns an HPACK encoder that writes to the
+	// returned buffer.
+	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+	switch v := w.(type) {
+	case *writeData:
+		return v.endStream
+	case *writeResHeaders:
+		return v.endStream
+	case nil:
+		// This can only happen if the caller reuses w after it's
+		// been intentionally nil'ed out to prevent use. Keep this
+		// here to catch future refactoring breaking it.
+		panic("endsStream called on nil writeFramer")
+	}
+	return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+	return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+	maxStreamID uint32
+	code        ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+	err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+	if p.code != 0 {
+		ctx.Flush() // ignore error: we're hanging up on them anyway
+		time.Sleep(50 * time.Millisecond)
+		ctx.CloseConn()
+	}
+	return err
+}
+
+type writeData struct {
+	streamID  uint32
+	p         []byte
+	endStream bool
+}
+
+func (w *writeData) String() string {
+	return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+	StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+	streamID    uint32
+	httpResCode int         // 0 means no ":status" line
+	h           http.Header // may be nil
+	trailers    []string    // if non-nil, which keys of h to write. nil means all.
+	endStream   bool
+
+	date          string
+	contentType   string
+	contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+	if VerboseLogs {
+		log.Printf("http2: server encoding header %q = %q", k, v)
+	}
+	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+
+	if w.httpResCode != 0 {
+		encKV(enc, ":status", httpCodeString(w.httpResCode))
+	}
+
+	encodeHeaders(enc, w.h, w.trailers)
+
+	if w.contentType != "" {
+		encKV(enc, "content-type", w.contentType)
+	}
+	if w.contentLength != "" {
+		encKV(enc, "content-length", w.contentLength)
+	}
+	if w.date != "" {
+		encKV(enc, "date", w.date)
+	}
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 && w.trailers == nil {
+		panic("unexpected empty hpack")
+	}
+
+	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+	// that all peers must support (16KB). Later we could care
+	// more and send larger frames if the peer advertised it, but
+	// there's little point. Most headers are small anyway (so we
+	// generally won't have CONTINUATION frames), and extra frames
+	// only waste 9 bytes anyway.
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		endHeaders := len(headerBlock) == 0
+		var err error
+		if first {
+			first = false
+			err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+				StreamID:      w.streamID,
+				BlockFragment: frag,
+				EndStream:     w.endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type write100ContinueHeadersFrame struct {
+	streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+	encKV(enc, ":status", "100")
+	return ctx.Framer().WriteHeaders(HeadersFrameParam{
+		StreamID:      w.streamID,
+		BlockFragment: buf.Bytes(),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+}
+
+type writeWindowUpdate struct {
+	streamID uint32 // or 0 for conn-level
+	n        uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+	if keys == nil {
+		sorter := sorterPool.Get().(*sorter)
+		// Using defer here, since the returned keys from the
+		// sorter.Keys method is only valid until the sorter
+		// is returned:
+		defer sorterPool.Put(sorter)
+		keys = sorter.Keys(h)
+	}
+	for _, k := range keys {
+		vv := h[k]
+		k = lowerHeader(k)
+		if !validWireHeaderFieldName(k) {
+			// Skip it as backup paranoia. Per
+			// golang.org/issue/14048, these should
+			// already be rejected at a higher level.
+			continue
+		}
+		isTE := k == "transfer-encoding"
+		for _, v := range vv {
+			if !httplex.ValidHeaderFieldValue(v) {
+				// TODO: return an error? golang.org/issue/14048
+				// For now just omit it.
+				continue
+			}
+			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+			if isTE && v != "trailers" {
+				continue
+			}
+			encKV(enc, k, v)
+		}
+	}
+}
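For reference, encKV and encodeHeaders above are simply driving an HPACK encoder that appends to a per-connection buffer. A minimal standalone sketch of that pattern using golang.org/x/net/http2/hpack (the variable names are illustrative, not taken from this file) could look like:

	package main

	import (
		"bytes"
		"fmt"

		"golang.org/x/net/http2/hpack"
	)

	func main() {
		// One encoder and one buffer, reused for every response, much as
		// writeResHeaders obtains them via ctx.HeaderEncoder().
		var buf bytes.Buffer
		enc := hpack.NewEncoder(&buf)

		enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
		enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

		// buf.Bytes() is the header block that writeFrame then splits into a
		// HEADERS frame plus CONTINUATION frames of at most 16KB each.
		fmt.Printf("header block: %d bytes\n", buf.Len())
	}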
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..c24316c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,283 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+	// write is the interface value that does the writing, once the
+	// writeScheduler (below) has decided to select this frame
+	// to write. The write functions are all defined in write.go.
+	write writeFramer
+
+	stream *stream // used for prioritization. nil for non-stream frames.
+
+	// done, if non-nil, must be a buffered channel with space for
+	// 1 message and is sent the return value from write (or an
+	// earlier error) when the frame has been written.
+	done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+	var streamID uint32
+	if wm.stream != nil {
+		streamID = wm.stream.id
+	}
+	var des string
+	if s, ok := wm.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		des = fmt.Sprintf("%T", wm.write)
+	}
+	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+	// zero holds frames not associated with a specific stream.
+	// They're sent before any stream-specific frames.
+	zero writeQueue
+
+	// maxFrameSize is the maximum size of a DATA frame
+	// we'll write. Must be non-zero and between 16K-16M.
+	maxFrameSize uint32
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// when a stream is idle, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// canSend is a slice of memory that's reused between frame
+	// scheduling decisions to hold the list of writeQueues (from sq)
+	// which have enough flow control data to send. After canSend is
+	// built, the best is selected.
+	canSend []*writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+	if len(q.s) != 0 {
+		panic("queue must be empty")
+	}
+	ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+	ln := len(ws.queuePool)
+	if ln == 0 {
+		return new(writeQueue)
+	}
+	q := ws.queuePool[ln-1]
+	ws.queuePool = ws.queuePool[:ln-1]
+	return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+	st := wm.stream
+	if st == nil {
+		ws.zero.push(wm)
+	} else {
+		ws.streamQueue(st.id).push(wm)
+	}
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+	if q, ok := ws.sq[streamID]; ok {
+		return q
+	}
+	if ws.sq == nil {
+		ws.sq = make(map[uint32]*writeQueue)
+	}
+	q := ws.getEmptyQueue()
+	ws.sq[streamID] = q
+	return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+	if ws.maxFrameSize == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+
+	// If there are any frames not associated with streams, prefer those first.
+	// These are usually SETTINGS, etc.
+	if !ws.zero.empty() {
+		return ws.zero.shift(), true
+	}
+	if len(ws.sq) == 0 {
+		return
+	}
+
+	// Next, prioritize frames on streams that aren't DATA frames (no cost).
+	for id, q := range ws.sq {
+		if q.firstIsNoCost() {
+			return ws.takeFrom(id, q)
+		}
+	}
+
+	// Now, all that remains are DATA frames with non-zero bytes to
+	// send. So pick the best one.
+	if len(ws.canSend) != 0 {
+		panic("should be empty")
+	}
+	for _, q := range ws.sq {
+		if n := ws.streamWritableBytes(q); n > 0 {
+			ws.canSend = append(ws.canSend, q)
+		}
+	}
+	if len(ws.canSend) == 0 {
+		return
+	}
+	defer ws.zeroCanSend()
+
+	// TODO: find the best queue
+	q := ws.canSend[0]
+
+	return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+	for i := range ws.canSend {
+		ws.canSend[i] = nil
+	}
+	ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+	wm := q.head()
+	ret := wm.stream.flow.available() // max we can write
+	if ret == 0 {
+		return 0
+	}
+	if int32(ws.maxFrameSize) < ret {
+		ret = int32(ws.maxFrameSize)
+	}
+	if ret == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+	wd := wm.write.(*writeData)
+	if len(wd.p) < int(ret) {
+		ret = int32(len(wd.p))
+	}
+	return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+	wm = q.head()
+	// If the first item in this queue costs flow control tokens
+	// and we don't have enough, write as much as we can.
+	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+		allowed := wm.stream.flow.available() // max we can write
+		if allowed == 0 {
+			// No quota available. Caller can try the next stream.
+			return frameWriteMsg{}, false
+		}
+		if int32(ws.maxFrameSize) < allowed {
+			allowed = int32(ws.maxFrameSize)
+		}
+		// TODO: further restrict the allowed size, because even if
+		// the peer says it's okay to write 16MB data frames, we might
+		// want to write smaller ones to properly weight competing
+		// streams' priorities.
+
+		if len(wd.p) > int(allowed) {
+			wm.stream.flow.take(allowed)
+			chunk := wd.p[:allowed]
+			wd.p = wd.p[allowed:]
+			// Make up a new write message of a valid size, rather
+			// than shifting one off the queue.
+			return frameWriteMsg{
+				stream: wm.stream,
+				write: &writeData{
+					streamID: wd.streamID,
+					p:        chunk,
+					// even if the original had endStream set, there
+					// are bytes remaining because len(wd.p) > allowed,
+					// so we know endStream is false:
+					endStream: false,
+				},
+				// our caller is blocking on the final DATA frame, not
+				// these intermediates, so no need to wait:
+				done: nil,
+			}, true
+		}
+		wm.stream.flow.take(int32(len(wd.p)))
+	}
+
+	q.shift()
+	if q.empty() {
+		ws.putEmptyQueue(q)
+		delete(ws.sq, id)
+	}
+	return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+	q, ok := ws.sq[id]
+	if !ok {
+		return
+	}
+	delete(ws.sq, id)
+
+	// But keep the queue around for reuse by other streams.
+	for i := range q.s {
+		q.s[i] = frameWriteMsg{}
+	}
+	q.s = q.s[:0]
+	ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+	s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+	q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	wm := q.s[0]
+	// TODO: less copy-happy queue.
+	copy(q.s, q.s[1:])
+	q.s[len(q.s)-1] = frameWriteMsg{}
+	q.s = q.s[:len(q.s)-1]
+	return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+	if df, ok := q.s[0].write.(*writeData); ok {
+		return len(df.p) == 0
+	}
+	return true
+}
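The heart of takeFrom above is the flow-control split: a queued DATA payload is trimmed to the smaller of the stream's available window and the maximum frame size, and the remainder stays queued. A self-contained sketch of just that arithmetic (the function name and the sample numbers are illustrative only):

	package main

	import "fmt"

	// splitForFlow mirrors the chunking in takeFrom: cap the chunk at the
	// flow-control window and the max frame size; the rest stays queued.
	func splitForFlow(p []byte, available, maxFrameSize int32) (chunk, rest []byte) {
		allowed := available
		if maxFrameSize < allowed {
			allowed = maxFrameSize
		}
		if int32(len(p)) <= allowed {
			return p, nil
		}
		return p[:allowed], p[allowed:]
	}

	func main() {
		payload := make([]byte, 40000)
		chunk, rest := splitForFlow(payload, 65535, 16384)
		fmt.Println(len(chunk), len(rest)) // 16384 23616
	}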
diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go
new file mode 100644
index 0000000..35ff39d
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna.go
@@ -0,0 +1,68 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package idna implements IDNA2008 (Internationalized Domain Names for
+// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
+// RFC 5894.
+package idna
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
+// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang".
+func ToASCII(s string) (string, error) {
+	if ascii(s) {
+		return s, nil
+	}
+	labels := strings.Split(s, ".")
+	for i, label := range labels {
+		if !ascii(label) {
+			a, err := encode(acePrefix, label)
+			if err != nil {
+				return "", err
+			}
+			labels[i] = a
+		}
+	}
+	return strings.Join(labels, "."), nil
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang".
+func ToUnicode(s string) (string, error) {
+	if !strings.Contains(s, acePrefix) {
+		return s, nil
+	}
+	labels := strings.Split(s, ".")
+	for i, label := range labels {
+		if strings.HasPrefix(label, acePrefix) {
+			u, err := decode(label[len(acePrefix):])
+			if err != nil {
+				return "", err
+			}
+			labels[i] = u
+		}
+	}
+	return strings.Join(labels, "."), nil
+}
+
+func ascii(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
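A short usage sketch of the two exported functions, matching the examples already given in their doc comments:

	package main

	import (
		"fmt"

		"golang.org/x/net/idna"
	)

	func main() {
		a, err := idna.ToASCII("bücher.example.com")
		if err != nil {
			panic(err)
		}
		u, err := idna.ToUnicode(a)
		if err != nil {
			panic(err)
		}
		fmt.Println(a) // xn--bcher-kva.example.com
		fmt.Println(u) // bücher.example.com
	}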
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go
new file mode 100644
index 0000000..92e733f
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/punycode.go
@@ -0,0 +1,200 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// This file implements the Punycode algorithm from RFC 3492.
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"unicode/utf8"
+)
+
+// These parameter values are specified in section 5.
+//
+// All computation is done with int32s, so that overflow behavior is identical
+// regardless of whether int is 32-bit or 64-bit.
+const (
+	base        int32 = 36
+	damp        int32 = 700
+	initialBias int32 = 72
+	initialN    int32 = 128
+	skew        int32 = 38
+	tmax        int32 = 26
+	tmin        int32 = 1
+)
+
+// decode decodes a string as specified in section 6.2.
+func decode(encoded string) (string, error) {
+	if encoded == "" {
+		return "", nil
+	}
+	pos := 1 + strings.LastIndex(encoded, "-")
+	if pos == 1 {
+		return "", fmt.Errorf("idna: invalid label %q", encoded)
+	}
+	if pos == len(encoded) {
+		return encoded[:len(encoded)-1], nil
+	}
+	output := make([]rune, 0, len(encoded))
+	if pos != 0 {
+		for _, r := range encoded[:pos-1] {
+			output = append(output, r)
+		}
+	}
+	i, n, bias := int32(0), initialN, initialBias
+	for pos < len(encoded) {
+		oldI, w := i, int32(1)
+		for k := base; ; k += base {
+			if pos == len(encoded) {
+				return "", fmt.Errorf("idna: invalid label %q", encoded)
+			}
+			digit, ok := decodeDigit(encoded[pos])
+			if !ok {
+				return "", fmt.Errorf("idna: invalid label %q", encoded)
+			}
+			pos++
+			i += digit * w
+			if i < 0 {
+				return "", fmt.Errorf("idna: invalid label %q", encoded)
+			}
+			t := k - bias
+			if t < tmin {
+				t = tmin
+			} else if t > tmax {
+				t = tmax
+			}
+			if digit < t {
+				break
+			}
+			w *= base - t
+			if w >= math.MaxInt32/base {
+				return "", fmt.Errorf("idna: invalid label %q", encoded)
+			}
+		}
+		x := int32(len(output) + 1)
+		bias = adapt(i-oldI, x, oldI == 0)
+		n += i / x
+		i %= x
+		if n > utf8.MaxRune || len(output) >= 1024 {
+			return "", fmt.Errorf("idna: invalid label %q", encoded)
+		}
+		output = append(output, 0)
+		copy(output[i+1:], output[i:])
+		output[i] = n
+		i++
+	}
+	return string(output), nil
+}
+
+// encode encodes a string as specified in section 6.3 and prepends prefix to
+// the result.
+//
+// The "while h < length(input)" line in the specification becomes "for
+// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
+func encode(prefix, s string) (string, error) {
+	output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
+	copy(output, prefix)
+	delta, n, bias := int32(0), initialN, initialBias
+	b, remaining := int32(0), int32(0)
+	for _, r := range s {
+		if r < 0x80 {
+			b++
+			output = append(output, byte(r))
+		} else {
+			remaining++
+		}
+	}
+	h := b
+	if b > 0 {
+		output = append(output, '-')
+	}
+	for remaining != 0 {
+		m := int32(0x7fffffff)
+		for _, r := range s {
+			if m > r && r >= n {
+				m = r
+			}
+		}
+		delta += (m - n) * (h + 1)
+		if delta < 0 {
+			return "", fmt.Errorf("idna: invalid label %q", s)
+		}
+		n = m
+		for _, r := range s {
+			if r < n {
+				delta++
+				if delta < 0 {
+					return "", fmt.Errorf("idna: invalid label %q", s)
+				}
+				continue
+			}
+			if r > n {
+				continue
+			}
+			q := delta
+			for k := base; ; k += base {
+				t := k - bias
+				if t < tmin {
+					t = tmin
+				} else if t > tmax {
+					t = tmax
+				}
+				if q < t {
+					break
+				}
+				output = append(output, encodeDigit(t+(q-t)%(base-t)))
+				q = (q - t) / (base - t)
+			}
+			output = append(output, encodeDigit(q))
+			bias = adapt(delta, h+1, h == b)
+			delta = 0
+			h++
+			remaining--
+		}
+		delta++
+		n++
+	}
+	return string(output), nil
+}
+
+func decodeDigit(x byte) (digit int32, ok bool) {
+	switch {
+	case '0' <= x && x <= '9':
+		return int32(x - ('0' - 26)), true
+	case 'A' <= x && x <= 'Z':
+		return int32(x - 'A'), true
+	case 'a' <= x && x <= 'z':
+		return int32(x - 'a'), true
+	}
+	return 0, false
+}
+
+func encodeDigit(digit int32) byte {
+	switch {
+	case 0 <= digit && digit < 26:
+		return byte(digit + 'a')
+	case 26 <= digit && digit < 36:
+		return byte(digit + ('0' - 26))
+	}
+	panic("idna: internal error in punycode encoding")
+}
+
+// adapt is the bias adaptation function specified in section 6.1.
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+	if firstTime {
+		delta /= damp
+	} else {
+		delta /= 2
+	}
+	delta += delta / numPoints
+	k := int32(0)
+	for delta > ((base-tmin)*tmax)/2 {
+		delta /= base - tmin
+		k += base
+	}
+	return k + (base-tmin+1)*delta/(delta+skew)
+}
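Since encode and decode are unexported, they are reachable only through ToASCII and ToUnicode; a hypothetical in-package fragment (fmt import assumed) showing the round trip and the prefix convention:

	// Hypothetical helper inside package idna, illustrative only.
	func punycodeRoundTrip() {
		// encode prepends the supplied prefix, which is why ToASCII passes "xn--";
		// decode expects the label with the ACE prefix already stripped.
		ace, _ := encode("xn--", "bücher") // "xn--bcher-kva"
		back, _ := decode("bcher-kva")     // "bücher"
		fmt.Println(ace, back)
	}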
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 0000000..3f90b73
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries
+
+import (
+	"fmt"
+	"log"
+	"time"
+)
+
+const (
+	timeSeriesNumBuckets       = 64
+	minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	10 * time.Second,
+	1 * time.Minute,
+	10 * time.Minute,
+	1 * time.Hour,
+	6 * time.Hour,
+	24 * time.Hour,          // 1 day
+	7 * 24 * time.Hour,      // 1 week
+	4 * 7 * 24 * time.Hour,  // 4 weeks
+	16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+	Multiply(ratio float64)    // Multiplies the data in self by a given ratio
+	Add(other Observable)      // Adds the data from a different observation to self
+	Clear()                    // Clears the observation so it can be reused.
+	CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+	f := Float(0)
+	return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+	o := other.(*Float)
+	*f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+	o := other.(*Float)
+	*f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+	Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+	oldest   int               // index to oldest bucketed Observable
+	newest   int               // index to newest bucketed Observable
+	end      time.Time         // end timestamp for this level
+	size     time.Duration     // duration of the bucketed Observable
+	buckets  []Observable      // collections of observations
+	provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+	l.oldest = 0
+	l.newest = len(l.buckets) - 1
+	l.end = time.Time{}
+	for i := range l.buckets {
+		if l.buckets[i] != nil {
+			l.buckets[i].Clear()
+			l.buckets[i] = nil
+		}
+	}
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+	l.size = size
+	l.provider = f
+	l.buckets = make([]Observable, numBuckets)
+}
+
+// timeSeries keeps a sequence of levels. Each level is responsible for
+// storing data at a given resolution. For example, the first level stores
+// data at a one minute resolution while the second level stores data at a
+// one hour resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+	provider    func() Observable // make more Observable
+	numBuckets  int               // number of buckets in each level
+	levels      []*tsLevel        // levels of bucketed Observable
+	lastAdd     time.Time         // time of last Observable tracked
+	total       Observable        // convenient aggregation of all Observable
+	clock       Clock             // Clock for getting current time
+	pending     Observable        // observations not yet bucketed
+	pendingTime time.Time         // what time are we keeping in pending
+	dirty       bool              // if there are pending observations
+}
+
+// init initializes the time series, creating a level for each supplied resolution.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+	ts.provider = f
+	ts.numBuckets = numBuckets
+	ts.clock = clock
+	ts.levels = make([]*tsLevel, len(resolutions))
+
+	for i := range resolutions {
+		if i > 0 && resolutions[i-1] >= resolutions[i] {
+			log.Print("timeseries: resolutions must be monotonically increasing")
+			break
+		}
+		newLevel := new(tsLevel)
+		newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+		ts.levels[i] = newLevel
+	}
+
+	ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+	ts.lastAdd = time.Time{}
+	ts.total = ts.resetObservation(ts.total)
+	ts.pending = ts.resetObservation(ts.pending)
+	ts.pendingTime = time.Time{}
+	ts.dirty = false
+
+	for i := range ts.levels {
+		ts.levels[i].Clear()
+	}
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+	ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+
+	smallBucketDuration := ts.levels[0].size
+
+	if t.After(ts.lastAdd) {
+		ts.lastAdd = t
+	}
+
+	if t.After(ts.pendingTime) {
+		ts.advance(t)
+		ts.mergePendingUpdates()
+		ts.pendingTime = ts.levels[0].end
+		ts.pending.CopyFrom(observation)
+		ts.dirty = true
+	} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+		// The observation is close enough to go into the pending bucket.
+		// This compensates for clock skewing and small scheduling delays
+		// by letting the update stay in the fast path.
+		ts.pending.Add(observation)
+		ts.dirty = true
+	} else {
+		ts.mergeValue(observation, t)
+	}
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+	for _, level := range ts.levels {
+		index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+		if 0 <= index && index < ts.numBuckets {
+			bucketNumber := (level.oldest + index) % ts.numBuckets
+			if level.buckets[bucketNumber] == nil {
+				level.buckets[bucketNumber] = level.provider()
+			}
+			level.buckets[bucketNumber].Add(observation)
+		}
+	}
+	ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+	if ts.dirty {
+		ts.mergeValue(ts.pending, ts.pendingTime)
+		ts.pending = ts.resetObservation(ts.pending)
+		ts.dirty = false
+	}
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+	if !t.After(ts.levels[0].end) {
+		return
+	}
+	for i := 0; i < len(ts.levels); i++ {
+		level := ts.levels[i]
+		if !level.end.Before(t) {
+			break
+		}
+
+		// If the time is sufficiently far, just clear the level and advance
+		// directly.
+		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+			for _, b := range level.buckets {
+				ts.resetObservation(b)
+			}
+			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+		}
+
+		for t.After(level.end) {
+			level.end = level.end.Add(level.size)
+			level.newest = level.oldest
+			level.oldest = (level.oldest + 1) % ts.numBuckets
+			ts.resetObservation(level.buckets[level.newest])
+		}
+
+		t = level.end
+	}
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	result := ts.provider()
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		if l.buckets[index] != nil {
+			result.Add(l.buckets[index])
+		}
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+
+	return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+	if level < 0 || level >= len(ts.levels) {
+		log.Print("timeseries: bad level argument: ", level)
+		return nil
+	}
+	if num < 0 || num >= ts.numBuckets {
+		log.Print("timeseries: bad num argument: ", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		result := ts.provider()
+		results[i] = result
+		if l.buckets[index] != nil {
+			result.CopyFrom(l.buckets[index])
+		}
+
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+	return results
+}
+
+// ScaleBy multiplies all buckets, the running total, and any pending observations by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+	for _, l := range ts.levels {
+		for i := 0; i < ts.numBuckets; i++ {
+			l.buckets[i].Multiply(factor)
+		}
+	}
+
+	ts.total.Multiply(factor)
+	ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+	return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+	now := ts.clock.Time()
+	return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+	ts.mergePendingUpdates()
+	return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+	if start.After(finish) {
+		log.Printf("timeseries: start > finish, %v>%v", start, finish)
+		return nil
+	}
+
+	if num < 0 {
+		log.Printf("timeseries: num < 0, %v", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+
+	for _, l := range ts.levels {
+		if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+			ts.extract(l, start, finish, num, results)
+			return results
+		}
+	}
+
+	// Failed to find a level that covers the desired range.  So just
+	// extract from the last level, even if it doesn't cover the entire
+	// desired range.
+	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+	return results
+}
+
+// RecentList returns the specified number of values in a slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+	if delta < 0 {
+		return nil
+	}
+	now := ts.clock.Time()
+	return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract fills results with the specified number of observations taken from
+// the given level over the given time range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+	ts.mergePendingUpdates()
+
+	srcInterval := l.size
+	dstInterval := finish.Sub(start) / time.Duration(num)
+	dstStart := start
+	srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+	srcIndex := 0
+
+	// Where should scanning start?
+	if dstStart.After(srcStart) {
+		advance := dstStart.Sub(srcStart) / srcInterval
+		srcIndex += int(advance)
+		srcStart = srcStart.Add(advance * srcInterval)
+	}
+
+	// The i'th value is computed as shown below.
+	// interval = (finish-start)/num
+	// i'th value = sum of observation in range
+	//   [ start + i       * interval,
+	//     start + (i + 1) * interval )
+	for i := 0; i < num; i++ {
+		results[i] = ts.resetObservation(results[i])
+		dstEnd := dstStart.Add(dstInterval)
+		for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+			srcEnd := srcStart.Add(srcInterval)
+			if srcEnd.After(ts.lastAdd) {
+				srcEnd = ts.lastAdd
+			}
+
+			if !srcEnd.Before(dstStart) {
+				srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+				if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+					// dst completely contains src.
+					if srcValue != nil {
+						results[i].Add(srcValue)
+					}
+				} else {
+					// dst partially overlaps src.
+					overlapStart := maxTime(srcStart, dstStart)
+					overlapEnd := minTime(srcEnd, dstEnd)
+					base := srcEnd.Sub(srcStart)
+					fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+					used := ts.provider()
+					if srcValue != nil {
+						used.CopyFrom(srcValue)
+					}
+					used.Multiply(fraction)
+					results[i].Add(used)
+				}
+
+				if srcEnd.After(dstEnd) {
+					break
+				}
+			}
+			srcIndex++
+			srcStart = srcStart.Add(srcInterval)
+		}
+		dstStart = dstStart.Add(dstInterval)
+	}
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+	if observation == nil {
+		observation = ts.provider()
+	} else {
+		observation.Clear()
+	}
+	return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+	timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+	return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+	ts := new(TimeSeries)
+	ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+	return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+	timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+	return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+	ts := new(MinuteHourSeries)
+	ts.timeSeries.init(minuteHourSeriesResolutions, f,
+		minuteHourSeriesNumBuckets, clock)
+	return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+	return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+	return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+	if a.Before(b) {
+		return a
+	}
+	return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+	if a.After(b) {
+		return a
+	}
+	return b
+}
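The package sits under internal/, so it is importable only from within golang.org/x/net; purely as an illustration of the API shape, calling code could look like:

	package main

	import (
		"fmt"
		"time"

		"golang.org/x/net/internal/timeseries"
	)

	func main() {
		ts := timeseries.NewTimeSeries(timeseries.NewFloat)

		obs := timeseries.Float(1)
		ts.Add(&obs) // recorded at the current time, into the pending bucket

		// Sum of observations over the last five minutes, across levels.
		recent := ts.Recent(5 * time.Minute)
		fmt.Println(recent.(*timeseries.Float).Value())
	}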
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 0000000..20f2b89
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,351 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+	"net"
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+	'!':  true,
+	'#':  true,
+	'$':  true,
+	'%':  true,
+	'&':  true,
+	'\'': true,
+	'*':  true,
+	'+':  true,
+	'-':  true,
+	'.':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'V':  true,
+	'W':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'^':  true,
+	'_':  true,
+	'`':  true,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'|':  true,
+	'~':  true,
+}
+
+func IsTokenRune(r rune) bool {
+	i := int(r)
+	return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+	return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+	for _, v := range values {
+		if headerValueContainsToken(v, token) {
+			return true
+		}
+	}
+	return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+	// TODO: consider using strings.Trim(x, " \t") instead,
+	// if and when it's fast enough. See issue 10292.
+	// But this ASCII-only code will probably always beat UTF-8
+	// aware code.
+	for len(x) > 0 && isOWS(x[0]) {
+		x = x[1:]
+	}
+	for len(x) > 0 && isOWS(x[len(x)-1]) {
+		x = x[:len(x)-1]
+	}
+	return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+	v = trimOWS(v)
+	if comma := strings.IndexByte(v, ','); comma != -1 {
+		return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+	}
+	return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+	if 'A' <= b && b <= 'Z' {
+		return b + ('a' - 'A')
+	}
+	return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+	if len(t1) != len(t2) {
+		return false
+	}
+	for i, b := range t1 {
+		if b >= utf8.RuneSelf {
+			// No UTF-8 or non-ASCII allowed in tokens.
+			return false
+		}
+		if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//      LWS            = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//      CTL            = <any US-ASCII control character
+//                       (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+	const del = 0x7f // a CTL
+	return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+//  RFC 7230 says:
+//   header-field   = field-name ":" OWS field-value OWS
+//   field-name     = token
+//   token          = 1*tchar
+//   tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+//           "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+	if len(v) == 0 {
+		return false
+	}
+	for _, r := range v {
+		if !IsTokenRune(r) {
+			return false
+		}
+	}
+	return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+	// The latest spec is actually this:
+	//
+	// http://tools.ietf.org/html/rfc7230#section-5.4
+	//     Host = uri-host [ ":" port ]
+	//
+	// Where uri-host is:
+	//     http://tools.ietf.org/html/rfc3986#section-3.2.2
+	//
+	// But we're going to be much more lenient for now and just
+	// search for any byte that's not a valid byte in any of those
+	// expressions.
+	for i := 0; i < len(h); i++ {
+		if !validHostByte[h[i]] {
+			return false
+		}
+	}
+	return true
+}
+
+// See the validHostHeader comment.
+var validHostByte = [256]bool{
+	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+	'8': true, '9': true,
+
+	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+	'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+	'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+	'y': true, 'z': true,
+
+	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+	'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+	'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+	'Y': true, 'Z': true,
+
+	'!':  true, // sub-delims
+	'$':  true, // sub-delims
+	'%':  true, // pct-encoded (and used in IPv6 zones)
+	'&':  true, // sub-delims
+	'(':  true, // sub-delims
+	')':  true, // sub-delims
+	'*':  true, // sub-delims
+	'+':  true, // sub-delims
+	',':  true, // sub-delims
+	'-':  true, // unreserved
+	'.':  true, // unreserved
+	':':  true, // IPv6address + Host expression's optional port
+	';':  true, // sub-delims
+	'=':  true, // sub-delims
+	'[':  true,
+	'\'': true, // sub-delims
+	']':  true,
+	'_':  true, // unreserved
+	'~':  true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+//        message-header = field-name ":" [ field-value ]
+//        field-value    = *( field-content | LWS )
+//        field-content  = <the OCTETs making up the field-value
+//                         and consisting of either *TEXT or combinations
+//                         of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+//        TEXT           = <any OCTET except CTLs,
+//                          but including LWS>
+//        LWS            = [CRLF] 1*( SP | HT )
+//        CTL            = <any US-ASCII control character
+//                         (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+//  field-value    = *( field-content / obs-fold )
+//  obs-fold       =  N/A to http2, and deprecated
+//  field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+//  field-vchar    = VCHAR / obs-text
+//  obs-text       = %x80-FF
+//  VCHAR          = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+	for i := 0; i < len(v); i++ {
+		b := v[i]
+		if isCTL(b) && !isLWS(b) {
+			return false
+		}
+	}
+	return true
+}
+
+func isASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+	if isASCII(v) {
+		return v, nil
+	}
+
+	host, port, err := net.SplitHostPort(v)
+	if err != nil {
+		// The input 'v' argument was just a "host" argument,
+		// without a port. This error should not be returned
+		// to the caller.
+		host = v
+		port = ""
+	}
+	host, err = idna.ToASCII(host)
+	if err != nil {
+		// Non-UTF-8? Not representable in Punycode, in any
+		// case.
+		return "", err
+	}
+	if port == "" {
+		return host, nil
+	}
+	return net.JoinHostPort(host, port), nil
+}
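A small sketch exercising the exported validators and the Punycode helper; the expected results are noted in the comments:

	package main

	import (
		"fmt"

		"golang.org/x/net/lex/httplex"
	)

	func main() {
		fmt.Println(httplex.ValidHeaderFieldName("content-type")) // true
		fmt.Println(httplex.ValidHeaderFieldName("Bad Header"))   // false: space is not a tchar
		fmt.Println(httplex.ValidHeaderFieldValue("ok value"))    // true
		fmt.Println(httplex.ValidHeaderFieldValue("bad\x00"))     // false: NUL is a CTL

		hp, err := httplex.PunycodeHostPort("bücher.example.com:443")
		fmt.Println(hp, err) // xn--bcher-kva.example.com:443 <nil>
	}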
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 0000000..e66c7e3
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net/http"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"text/tabwriter"
+	"time"
+)
+
+var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
+	"elapsed":   elapsed,
+	"trimSpace": strings.TrimSpace,
+}).Parse(eventsHTML))
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+	MaxErrAge time.Duration
+	String    string
+}
+
+var buckets = []bucket{
+	{0, "total"},
+	{10 * time.Second, "errs<10s"},
+	{1 * time.Minute, "errs<1m"},
+	{10 * time.Minute, "errs<10m"},
+	{1 * time.Hour, "errs<1h"},
+	{10 * time.Hour, "errs<10h"},
+	{24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+	now := time.Now()
+	data := &struct {
+		Families []string // family names
+		Buckets  []bucket
+		Counts   [][]int // eventLog count per family/bucket
+
+		// Set when a bucket has been selected.
+		Family    string
+		Bucket    int
+		EventLogs eventLogs
+		Expanded  bool
+	}{
+		Buckets: buckets,
+	}
+
+	data.Families = make([]string, 0, len(families))
+	famMu.RLock()
+	for name := range families {
+		data.Families = append(data.Families, name)
+	}
+	famMu.RUnlock()
+	sort.Strings(data.Families)
+
+	// Count the number of eventLogs in each family for each error age.
+	data.Counts = make([][]int, len(data.Families))
+	for i, name := range data.Families {
+		// TODO(sameer): move this loop under the family lock.
+		f := getEventFamily(name)
+		data.Counts[i] = make([]int, len(data.Buckets))
+		for j, b := range data.Buckets {
+			data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+		}
+	}
+
+	if req != nil {
+		var ok bool
+		data.Family, data.Bucket, ok = parseEventsArgs(req)
+		if ok {
+			data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+		}
+		if data.EventLogs != nil {
+			defer data.EventLogs.Free()
+			sort.Sort(data.EventLogs)
+		}
+		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+			data.Expanded = exp
+		}
+	}
+
+	famMu.RLock()
+	defer famMu.RUnlock()
+	if err := eventsTmpl.Execute(w, data); err != nil {
+		log.Printf("net/trace: Failed executing template: %v", err)
+	}
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+	fam, bStr := req.FormValue("fam"), req.FormValue("b")
+	if fam == "" || bStr == "" {
+		return "", 0, false
+	}
+	b, err := strconv.Atoi(bStr)
+	if err != nil || b < 0 || b >= len(buckets) {
+		return "", 0, false
+	}
+	return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+	// Printf formats its arguments with fmt.Sprintf and adds the
+	// result to the event log.
+	Printf(format string, a ...interface{})
+
+	// Errorf is like Printf, but it marks this event as an error.
+	Errorf(format string, a ...interface{})
+
+	// Finish declares that this event log is complete.
+	// The event log should not be used after calling this method.
+	Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+	el := newEventLog()
+	el.ref()
+	el.Family, el.Title = family, title
+	el.Start = time.Now()
+	el.events = make([]logEntry, 0, maxEventsPerLog)
+	el.stack = make([]uintptr, 32)
+	n := runtime.Callers(2, el.stack)
+	el.stack = el.stack[:n]
+
+	getEventFamily(family).add(el)
+	return el
+}
+
+func (el *eventLog) Finish() {
+	getEventFamily(el.Family).remove(el)
+	el.unref() // matches ref in New
+}
+
+var (
+	famMu    sync.RWMutex
+	families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+	famMu.Lock()
+	defer famMu.Unlock()
+	f := families[fam]
+	if f == nil {
+		f = &eventFamily{}
+		families[fam] = f
+	}
+	return f
+}
+
+type eventFamily struct {
+	mu        sync.RWMutex
+	eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+	f.mu.Lock()
+	f.eventLogs = append(f.eventLogs, el)
+	f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	for i, el0 := range f.eventLogs {
+		if el == el0 {
+			copy(f.eventLogs[i:], f.eventLogs[i+1:])
+			f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+			return
+		}
+	}
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	for _, el := range f.eventLogs {
+		if el.hasRecentError(now, maxErrAge) {
+			n++
+		}
+	}
+	return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	els = make(eventLogs, 0, len(f.eventLogs))
+	for _, el := range f.eventLogs {
+		if el.hasRecentError(now, maxErrAge) {
+			el.ref()
+			els = append(els, el)
+		}
+	}
+	return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+	for _, el := range els {
+		el.unref()
+	}
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int           { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int)      { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+	When    time.Time
+	Elapsed time.Duration // since previous event in log
+	NewDay  bool          // whether this event is on a different day to the previous event
+	What    string
+	IsErr   bool
+}
+
+// WhenString returns a string representation of the time of the event.
+// It includes the date if the event falls on a different day than the previous one.
+func (e logEntry) WhenString() string {
+	if e.NewDay {
+		return e.When.Format("2006/01/02 15:04:05.000000")
+	}
+	return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+	// Family is the top-level grouping of event logs to which this belongs.
+	Family string
+
+	// Title is the title of this event log.
+	Title string
+
+	// Timing information.
+	Start time.Time
+
+	// Call stack where this event log was created.
+	stack []uintptr
+
+	// Append-only sequence of events.
+	//
+	// TODO(sameer): change this to a ring buffer to avoid the array copy
+	// when we hit maxEventsPerLog.
+	mu            sync.RWMutex
+	events        []logEntry
+	LastErrorTime time.Time
+	discarded     int
+
+	refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+	el.Family = ""
+	el.Title = ""
+	el.Start = time.Time{}
+	el.stack = nil
+	el.events = nil
+	el.LastErrorTime = time.Time{}
+	el.discarded = 0
+	el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+	if maxErrAge == 0 {
+		return true
+	}
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+	if len(el.events) == 0 {
+		return t.Sub(el.Start), false
+	}
+	prev := el.events[len(el.events)-1].When
+	return t.Sub(prev), prev.Day() != t.Day()
+
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+	el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+	el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+	e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+	el.mu.Lock()
+	e.Elapsed, e.NewDay = el.delta(e.When)
+	if len(el.events) < maxEventsPerLog {
+		el.events = append(el.events, e)
+	} else {
+		// Discard the oldest event.
+		if el.discarded == 0 {
+			// el.discarded starts at two to account for the event it
+			// is replacing, plus the next one that we are about to
+			// drop.
+			el.discarded = 2
+		} else {
+			el.discarded++
+		}
+		// TODO(sameer): if this causes allocations on a critical path,
+		// change eventLog.What to be a fmt.Stringer, as in trace.go.
+		el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+		// The timestamp of the discarded meta-event should be
+		// the time of the last event it is representing.
+		el.events[0].When = el.events[1].When
+		copy(el.events[1:], el.events[2:])
+		el.events[maxEventsPerLog-1] = e
+	}
+	if e.IsErr {
+		el.LastErrorTime = e.When
+	}
+	el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+	atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+	if atomic.AddInt32(&el.refs, -1) == 0 {
+		freeEventLog(el)
+	}
+}
+
+func (el *eventLog) When() string {
+	return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+	elapsed := time.Since(el.Start)
+	return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+	buf := new(bytes.Buffer)
+	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+	printStackRecord(tw, el.stack)
+	tw.Flush()
+	return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+	for _, pc := range stk {
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, line := f.FileLine(pc)
+		name := f.Name()
+		// Hide runtime.goexit and any runtime functions at the beginning.
+		if strings.HasPrefix(name, "runtime.") {
+			continue
+		}
+		fmt.Fprintf(w, "#   %s\t%s:%d\n", name, file, line)
+	}
+}
+
+func (el *eventLog) Events() []logEntry {
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+	select {
+	case el := <-freeEventLogs:
+		return el
+	default:
+		return new(eventLog)
+	}
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#req-status td.family {
+			padding-right: 2em;
+		}
+		table#req-status td.active {
+			padding-right: 1em;
+		}
+		table#req-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+		address {
+			font-size: smaller;
+			margin-top: 5em;
+		}
+	</style>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+	        {{range $j, $bucket := $.Buckets}}
+	        {{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+	                {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		        [{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+                {{end}}
+
+	</tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}}
+	</body>
+</html>
+`
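RenderEvents above consumes event logs created through the exported EventLog API; a minimal sketch of producing them (the family and title strings are illustrative):

	package main

	import "golang.org/x/net/trace"

	func main() {
		// One event log per long-lived object (for example, a client connection).
		el := trace.NewEventLog("example.Service", "conn 127.0.0.1:1234")
		defer el.Finish()

		el.Printf("handling request %d", 1)
		el.Errorf("request %d failed: %v", 1, "timeout")

		// The accumulated logs are rendered by the /debug/events handler
		// (RenderEvents above), grouped by family and error age.
	}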
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000..bb42aa5
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"log"
+	"math"
+
+	"golang.org/x/net/internal/timeseries"
+)
+
+const (
+	bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+	sum          int64   // running total of measurements
+	sumOfSquares float64 // running total of squared measurements
+	buckets      []int64 // bucketed values for histogram
+	value        int     // holds a single value as an optimization
+	valueCount   int64   // number of values recorded for single value
+}
+
+// addMeasurement records a single measurement in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+	// TODO: assert invariant
+	h.sum += value
+	h.sumOfSquares += float64(value) * float64(value)
+
+	bucketIndex := getBucket(value)
+
+	if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+		h.value = bucketIndex
+		h.valueCount++
+	} else {
+		h.allocateBuckets()
+		h.buckets[bucketIndex]++
+	}
+}
+
+func (h *histogram) allocateBuckets() {
+	if h.buckets == nil {
+		h.buckets = make([]int64, bucketCount)
+		h.buckets[h.value] = h.valueCount
+		h.value = 0
+		h.valueCount = -1
+	}
+}
+
+func log2(i int64) int {
+	n := 0
+	for ; i >= 0x100; i >>= 8 {
+		n += 8
+	}
+	for ; i > 0; i >>= 1 {
+		n += 1
+	}
+	return n
+}
+
+func getBucket(i int64) (index int) {
+	index = log2(i) - 1
+	if index < 0 {
+		index = 0
+	}
+	if index >= bucketCount {
+		index = bucketCount - 1
+	}
+	return
+}
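+
+// For example, getBucket maps measurements to bucket indices as follows
+// (bucket 0 holds 0-1, bucket 2 holds 4-7, bucket 6 holds 64-127); the
+// values below are only illustrative:
+//
+//	getBucket(1) == 0
+//	getBucket(5) == 2
+//	getBucket(100) == 6
+//
+// bucketBoundary below returns the lower bound of a given bucket index.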
+
+// Total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+	if h.valueCount >= 0 {
+		total = h.valueCount
+	}
+	for _, val := range h.buckets {
+		total += int64(val)
+	}
+	return
+}
+
+// Average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+	t := h.total()
+	if t == 0 {
+		return 0
+	}
+	return float64(h.sum) / float64(t)
+}
+
+// Variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+	t := float64(h.total())
+	if t == 0 {
+		return 0
+	}
+	s := float64(h.sum) / t
+	return h.sumOfSquares/t - s*s
+}
+
+// StandardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+	return math.Sqrt(h.variance())
+}
+
+// PercentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+	total := h.total()
+
+	// Corner cases (make sure result is strictly less than Total())
+	if total == 0 {
+		return 0
+	} else if total == 1 {
+		return int64(h.average())
+	}
+
+	percentOfTotal := round(float64(total) * percentile)
+	var runningTotal int64
+
+	for i := range h.buckets {
+		value := h.buckets[i]
+		runningTotal += value
+		if runningTotal == percentOfTotal {
+			// We hit an exact bucket boundary. If the next bucket has data, it is a
+			// good estimate of the value. If the bucket is empty, we interpolate the
+			// midpoint between the next bucket's boundary and the next non-zero
+			// bucket. If the remaining buckets are all empty, then we use the
+			// boundary for the next bucket as the estimate.
+			j := uint8(i + 1)
+			min := bucketBoundary(j)
+			if runningTotal < total {
+				for h.buckets[j] == 0 {
+					j++
+				}
+			}
+			max := bucketBoundary(j)
+			return min + round(float64(max-min)/2)
+		} else if runningTotal > percentOfTotal {
+			// The value is in this bucket. Interpolate the value.
+			delta := runningTotal - percentOfTotal
+			percentBucket := float64(value-delta) / float64(value)
+			bucketMin := bucketBoundary(uint8(i))
+			nextBucketMin := bucketBoundary(uint8(i + 1))
+			bucketSize := nextBucketMin - bucketMin
+			return bucketMin + round(percentBucket*float64(bucketSize))
+		}
+	}
+	return bucketBoundary(bucketCount - 1)
+}
+
+// Median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+	return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == 0 {
+		// Other histogram is empty
+	} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+		// Both have a single bucketed value, aggregate them
+		h.valueCount += o.valueCount
+	} else {
+		// Two different values necessitate buckets in this histogram
+		h.allocateBuckets()
+		if o.valueCount >= 0 {
+			h.buckets[o.value] += o.valueCount
+		} else {
+			for i := range h.buckets {
+				h.buckets[i] += o.buckets[i]
+			}
+		}
+	}
+	h.sumOfSquares += o.sumOfSquares
+	h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+	h.buckets = nil
+	h.value = 0
+	h.valueCount = 0
+	h.sum = 0
+	h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == -1 {
+		h.allocateBuckets()
+		copy(h.buckets, o.buckets)
+	}
+	h.sum = o.sum
+	h.sumOfSquares = o.sumOfSquares
+	h.value = o.value
+	h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+	if h.valueCount == -1 {
+		for i := range h.buckets {
+			h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+		}
+	} else {
+		h.valueCount = int64(float64(h.valueCount) * ratio)
+	}
+	h.sum = int64(float64(h.sum) * ratio)
+	h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+	r := new(histogram)
+	r.Clear()
+	return r
+}
+
+func (h *histogram) String() string {
+	return fmt.Sprintf("%d, %f, %d, %d, %v",
+		h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+	return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+	if bucket == 0 {
+		return 0
+	}
+	return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+	Lower, Upper       int64
+	N                  int64
+	Pct, CumulativePct float64
+	GraphWidth         int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+	Buckets                 []*bucketData
+	Count, Median           int64
+	Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+	// Force the allocation of buckets to simplify the rendering implementation
+	h.allocateBuckets()
+	// We scale the bars on the right so that the largest bar is
+	// maxHTMLBarWidth pixels in width.
+	maxBucket := int64(0)
+	for _, n := range h.buckets {
+		if n > maxBucket {
+			maxBucket = n
+		}
+	}
+	total := h.total()
+	barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+	var pctMult float64
+	if total == 0 {
+		pctMult = 1.0
+	} else {
+		pctMult = 100.0 / float64(total)
+	}
+
+	buckets := make([]*bucketData, len(h.buckets))
+	runningTotal := int64(0)
+	for i, n := range h.buckets {
+		if n == 0 {
+			continue
+		}
+		runningTotal += n
+		var upperBound int64
+		if i < bucketCount-1 {
+			upperBound = bucketBoundary(uint8(i + 1))
+		} else {
+			upperBound = math.MaxInt64
+		}
+		buckets[i] = &bucketData{
+			Lower:         bucketBoundary(uint8(i)),
+			Upper:         upperBound,
+			N:             n,
+			Pct:           float64(n) * pctMult,
+			CumulativePct: float64(runningTotal) * pctMult,
+			GraphWidth:    int(float64(n) * barsizeMult),
+		}
+	}
+	return &data{
+		Buckets:           buckets,
+		Count:             total,
+		Median:            h.median(),
+		Mean:              h.average(),
+		StandardDeviation: h.standardDeviation(),
+	}
+}
+
+func (h *histogram) html() template.HTML {
+	buf := new(bytes.Buffer)
+	if err := distTmpl.Execute(buf, h.newData()); err != nil {
+		buf.Reset()
+		log.Printf("net/trace: couldn't execute template: %v", err)
+	}
+	return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`))
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000..61123bc
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1071 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+	func fooHandler(w http.ResponseWriter, req *http.Request) {
+		tr := trace.New("mypkg.Foo", req.URL.Path)
+		defer tr.Finish()
+		...
+		tr.LazyPrintf("some event %q happened", str)
+		...
+		if err := somethingImportant(); err != nil {
+			tr.LazyPrintf("somethingImportant failed: %v", err)
+			tr.SetError()
+		}
+	}
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration.  It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+	// A Fetcher fetches URL paths for a single domain.
+	type Fetcher struct {
+		domain string
+		events trace.EventLog
+	}
+
+	func NewFetcher(domain string) *Fetcher {
+		return &Fetcher{
+			domain,
+			trace.NewEventLog("mypkg.Fetcher", domain),
+		}
+	}
+
+	func (f *Fetcher) Fetch(path string) (string, error) {
+		resp, err := http.Get("http://" + f.domain + "/" + path)
+		if err != nil {
+			f.events.Errorf("Get(%q) = %v", path, err)
+			return "", err
+		}
+		f.events.Printf("Get(%q) = %s", path, resp.Status)
+		...
+	}
+
+	func (f *Fetcher) Close() error {
+		f.events.Finish()
+		return nil
+	}
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error.  The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"runtime"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customize its authorization requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	// RemoteAddr is commonly in the form "IP" or "IP:port".
+	// If it is in the form "IP:port", split off the port.
+	host, _, err := net.SplitHostPort(req.RemoteAddr)
+	if err != nil {
+		host = req.RemoteAddr
+	}
+	switch host {
+	case "localhost", "127.0.0.1", "::1":
+		return true, true
+	default:
+		return false, false
+	}
+}
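+
+// As a rough sketch of such a customization (illustrative only), a program
+// that already sits behind its own access controls might allow any viewer
+// while still redacting sensitive events:
+//
+//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//		return true, false // anyone may view the pages; sensitive events stay hidden
+//	}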
+
+func init() {
+	http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+		any, sensitive := AuthRequest(req)
+		if !any {
+			http.Error(w, "not allowed", http.StatusUnauthorized)
+			return
+		}
+		w.Header().Set("Content-Type", "text/html; charset=utf-8")
+		Render(w, req, sensitive)
+	})
+	http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+		any, sensitive := AuthRequest(req)
+		if !any {
+			http.Error(w, "not allowed", http.StatusUnauthorized)
+			return
+		}
+		w.Header().Set("Content-Type", "text/html; charset=utf-8")
+		RenderEvents(w, req, sensitive)
+	})
+}
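+
+// Because init registers these handlers on http.DefaultServeMux, a program
+// that serves the default mux exposes both pages automatically; a minimal
+// sketch (the listen address is an arbitrary example):
+//
+//	go func() {
+//		log.Println(http.ListenAndServe("localhost:6060", nil))
+//	}()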
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+	data := &struct {
+		Families         []string
+		ActiveTraceCount map[string]int
+		CompletedTraces  map[string]*family
+
+		// Set when a bucket has been selected.
+		Traces        traceList
+		Family        string
+		Bucket        int
+		Expanded      bool
+		Traced        bool
+		Active        bool
+		ShowSensitive bool // whether to show sensitive events
+
+		Histogram       template.HTML
+		HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+		// If non-zero, the set of traces is a partial set,
+		// and this is the total number.
+		Total int
+	}{
+		CompletedTraces: completedTraces,
+	}
+
+	data.ShowSensitive = sensitive
+	if req != nil {
+		// Allow show_sensitive=0 to force hiding of sensitive data for testing.
+		// This only goes one way; you can't use show_sensitive=1 to see things.
+		if req.FormValue("show_sensitive") == "0" {
+			data.ShowSensitive = false
+		}
+
+		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+			data.Expanded = exp
+		}
+		if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+			data.Traced = exp
+		}
+	}
+
+	completedMu.RLock()
+	data.Families = make([]string, 0, len(completedTraces))
+	for fam := range completedTraces {
+		data.Families = append(data.Families, fam)
+	}
+	completedMu.RUnlock()
+	sort.Strings(data.Families)
+
+	// We are careful here to minimize the time spent locking activeMu,
+	// since that lock is required every time an RPC starts and finishes.
+	data.ActiveTraceCount = make(map[string]int, len(data.Families))
+	activeMu.RLock()
+	for fam, s := range activeTraces {
+		data.ActiveTraceCount[fam] = s.Len()
+	}
+	activeMu.RUnlock()
+
+	var ok bool
+	data.Family, data.Bucket, ok = parseArgs(req)
+	switch {
+	case !ok:
+		// No-op
+	case data.Bucket == -1:
+		data.Active = true
+		n := data.ActiveTraceCount[data.Family]
+		data.Traces = getActiveTraces(data.Family)
+		if len(data.Traces) < n {
+			data.Total = n
+		}
+	case data.Bucket < bucketsPerFamily:
+		if b := lookupBucket(data.Family, data.Bucket); b != nil {
+			data.Traces = b.Copy(data.Traced)
+		}
+	default:
+		if f := getFamily(data.Family, false); f != nil {
+			var obs timeseries.Observable
+			f.LatencyMu.RLock()
+			switch o := data.Bucket - bucketsPerFamily; o {
+			case 0:
+				obs = f.Latency.Minute()
+				data.HistogramWindow = "last minute"
+			case 1:
+				obs = f.Latency.Hour()
+				data.HistogramWindow = "last hour"
+			case 2:
+				obs = f.Latency.Total()
+				data.HistogramWindow = "all time"
+			}
+			f.LatencyMu.RUnlock()
+			if obs != nil {
+				data.Histogram = obs.(*histogram).html()
+			}
+		}
+	}
+
+	if data.Traces != nil {
+		defer data.Traces.Free()
+		sort.Sort(data.Traces)
+	}
+
+	completedMu.RLock()
+	defer completedMu.RUnlock()
+	if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+		log.Printf("net/trace: Failed executing template: %v", err)
+	}
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+	if req == nil {
+		return "", 0, false
+	}
+	fam, bStr := req.FormValue("fam"), req.FormValue("b")
+	if fam == "" || bStr == "" {
+		return "", 0, false
+	}
+	b, err := strconv.Atoi(bStr)
+	if err != nil || b < -1 {
+		return "", 0, false
+	}
+
+	return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+	f := getFamily(fam, false)
+	if f == nil || b < 0 || b >= len(f.Buckets) {
+		return nil
+	}
+	return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+	return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+	tr, ok = ctx.Value(contextKey).(Trace)
+	return
+}
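+
+// For example (sketch only), a handler can attach its Trace to a context and
+// helpers further down the call chain can log to it if present:
+//
+//	ctx = trace.NewContext(ctx, tr)
+//	...
+//	if tr, ok := trace.FromContext(ctx); ok {
+//		tr.LazyPrintf("helper finished")
+//	}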
+
+// Trace represents an active request.
+type Trace interface {
+	// LazyLog adds x to the event log. It will be evaluated each time the
+	// /debug/requests page is rendered. Any memory referenced by x will be
+	// pinned until the trace is finished and later discarded.
+	LazyLog(x fmt.Stringer, sensitive bool)
+
+	// LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+	// /debug/requests page is rendered. Any memory referenced by a will be
+	// pinned until the trace is finished and later discarded.
+	LazyPrintf(format string, a ...interface{})
+
+	// SetError declares that this trace resulted in an error.
+	SetError()
+
+	// SetRecycler sets a recycler for the trace.
+	// f will be called for each event passed to LazyLog at a time when
+	// it is no longer required, whether while the trace is still active
+	// and the event is discarded, or when a completed trace is discarded.
+	SetRecycler(f func(interface{}))
+
+	// SetTraceInfo sets the trace info for the trace.
+	// This is currently unused.
+	SetTraceInfo(traceID, spanID uint64)
+
+	// SetMaxEvents sets the maximum number of events that will be stored
+	// in the trace. This has no effect if any events have already been
+	// added to the trace.
+	SetMaxEvents(m int)
+
+	// Finish declares that this trace is complete.
+	// The trace should not be used after calling this method.
+	Finish()
+}
+
+type lazySprintf struct {
+	format string
+	a      []interface{}
+}
+
+func (l *lazySprintf) String() string {
+	return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+	tr := newTrace()
+	tr.ref()
+	tr.Family, tr.Title = family, title
+	tr.Start = time.Now()
+	tr.maxEvents = maxEventsPerTrace
+	tr.events = tr.eventsBuf[:0]
+
+	activeMu.RLock()
+	s := activeTraces[tr.Family]
+	activeMu.RUnlock()
+	if s == nil {
+		activeMu.Lock()
+		s = activeTraces[tr.Family] // check again
+		if s == nil {
+			s = new(traceSet)
+			activeTraces[tr.Family] = s
+		}
+		activeMu.Unlock()
+	}
+	s.Add(tr)
+
+	// Trigger allocation of the completed trace structure for this family.
+	// This will cause the family to be present in the request page during
+	// the first trace of this family. We don't care about the return value,
+	// nor is there any need for this to run inline, so we execute it in its
+	// own goroutine, but only if the family isn't allocated yet.
+	completedMu.RLock()
+	if _, ok := completedTraces[tr.Family]; !ok {
+		go allocFamily(tr.Family)
+	}
+	completedMu.RUnlock()
+
+	return tr
+}
+
+func (tr *trace) Finish() {
+	tr.Elapsed = time.Now().Sub(tr.Start)
+	if DebugUseAfterFinish {
+		buf := make([]byte, 4<<10) // 4 KB should be enough
+		n := runtime.Stack(buf, false)
+		tr.finishStack = buf[:n]
+	}
+
+	activeMu.RLock()
+	m := activeTraces[tr.Family]
+	activeMu.RUnlock()
+	m.Remove(tr)
+
+	f := getFamily(tr.Family, true)
+	for _, b := range f.Buckets {
+		if b.Cond.match(tr) {
+			b.Add(tr)
+		}
+	}
+	// Add a sample of elapsed time as microseconds to the family's timeseries
+	h := new(histogram)
+	h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+	f.LatencyMu.Lock()
+	f.Latency.Add(h)
+	f.LatencyMu.Unlock()
+
+	tr.unref() // matches ref in New
+}
+
+const (
+	bucketsPerFamily    = 9
+	tracesPerBucket     = 10
+	maxActiveTraces     = 20 // Maximum number of active traces to show.
+	maxEventsPerTrace   = 10
+	numHistogramBuckets = 38
+)
+
+var (
+	// The active traces.
+	activeMu     sync.RWMutex
+	activeTraces = make(map[string]*traceSet) // family -> traces
+
+	// Families of completed traces.
+	completedMu     sync.RWMutex
+	completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+	mu sync.RWMutex
+	m  map[*trace]bool
+
+	// We could avoid the entire map scan in FirstN by having a slice of all the traces
+	// ordered by start time, and an index into that from the trace struct, with a periodic
+	// repack of the slice after enough traces finish; we could also use a skip list or similar.
+	// However, that would shift some of the expense from /debug/requests time to RPC time,
+	// which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+	ts.mu.RLock()
+	defer ts.mu.RUnlock()
+	return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+	ts.mu.Lock()
+	if ts.m == nil {
+		ts.m = make(map[*trace]bool)
+	}
+	ts.m[tr] = true
+	ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+	ts.mu.Lock()
+	delete(ts.m, tr)
+	ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+	ts.mu.RLock()
+	defer ts.mu.RUnlock()
+
+	if n > len(ts.m) {
+		n = len(ts.m)
+	}
+	trl := make(traceList, 0, n)
+
+	// Fast path for when no selectivity is needed.
+	if n == len(ts.m) {
+		for tr := range ts.m {
+			tr.ref()
+			trl = append(trl, tr)
+		}
+		sort.Sort(trl)
+		return trl
+	}
+
+	// Pick the oldest n traces.
+	// This is inefficient. See the comment in the traceSet struct.
+	for tr := range ts.m {
+		// Put the first n traces into trl in the order they occur.
+		// When we have n, sort trl, and thereafter maintain its order.
+		if len(trl) < n {
+			tr.ref()
+			trl = append(trl, tr)
+			if len(trl) == n {
+				// This is guaranteed to happen exactly once during this loop.
+				sort.Sort(trl)
+			}
+			continue
+		}
+		if tr.Start.After(trl[n-1].Start) {
+			continue
+		}
+
+		// Find where to insert this one.
+		tr.ref()
+		i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+		trl[n-1].unref()
+		copy(trl[i+1:], trl[i:])
+		trl[i] = tr
+	}
+
+	return trl
+}
+
+func getActiveTraces(fam string) traceList {
+	activeMu.RLock()
+	s := activeTraces[fam]
+	activeMu.RUnlock()
+	if s == nil {
+		return nil
+	}
+	return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+	completedMu.RLock()
+	f := completedTraces[fam]
+	completedMu.RUnlock()
+	if f == nil && allocNew {
+		f = allocFamily(fam)
+	}
+	return f
+}
+
+func allocFamily(fam string) *family {
+	completedMu.Lock()
+	defer completedMu.Unlock()
+	f := completedTraces[fam]
+	if f == nil {
+		f = newFamily()
+		completedTraces[fam] = f
+	}
+	return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+	// traces may occur in multiple buckets.
+	Buckets [bucketsPerFamily]*traceBucket
+
+	// latency time series
+	LatencyMu sync.RWMutex
+	Latency   *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+	return &family{
+		Buckets: [bucketsPerFamily]*traceBucket{
+			{Cond: minCond(0)},
+			{Cond: minCond(50 * time.Millisecond)},
+			{Cond: minCond(100 * time.Millisecond)},
+			{Cond: minCond(200 * time.Millisecond)},
+			{Cond: minCond(500 * time.Millisecond)},
+			{Cond: minCond(1 * time.Second)},
+			{Cond: minCond(10 * time.Second)},
+			{Cond: minCond(100 * time.Second)},
+			{Cond: errorCond{}},
+		},
+		Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+	}
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+	Cond cond
+
+	// Ring buffer implementation of a fixed-size FIFO queue.
+	mu     sync.RWMutex
+	buf    [tracesPerBucket]*trace
+	start  int // < tracesPerBucket
+	length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	i := b.start + b.length
+	if i >= tracesPerBucket {
+		i -= tracesPerBucket
+	}
+	if b.length == tracesPerBucket {
+		// "Remove" an element from the bucket.
+		b.buf[i].unref()
+		b.start++
+		if b.start == tracesPerBucket {
+			b.start = 0
+		}
+	}
+	b.buf[i] = tr
+	if b.length < tracesPerBucket {
+		b.length++
+	}
+	tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	trl := make(traceList, 0, b.length)
+	for i, x := 0, b.start; i < b.length; i++ {
+		tr := b.buf[x]
+		if !tracedOnly || tr.spanID != 0 {
+			tr.ref()
+			trl = append(trl, tr)
+		}
+		x++
+		if x == b.length {
+			x = 0
+		}
+	}
+	return trl
+}
+
+func (b *traceBucket) Empty() bool {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+	match(t *trace) bool
+	String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string      { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string      { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+	for _, t := range trl {
+		t.unref()
+	}
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int           { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+	When       time.Time
+	Elapsed    time.Duration // since previous event in trace
+	NewDay     bool          // whether this event is on a different day to the previous event
+	Recyclable bool          // whether this event was passed via LazyLog
+	Sensitive  bool          // whether this event contains sensitive information
+	What       interface{}   // string or fmt.Stringer
+}
+
+// WhenString returns a string representation of the time at which the event occurred.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+	if e.NewDay {
+		return e.When.Format("2006/01/02 15:04:05.000000")
+	}
+	return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+	return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+	// Family is the top-level grouping of traces to which this belongs.
+	Family string
+
+	// Title is the title of this trace.
+	Title string
+
+	// Timing information.
+	Start   time.Time
+	Elapsed time.Duration // zero while active
+
+	// Trace information if non-zero.
+	traceID uint64
+	spanID  uint64
+
+	// Whether this trace resulted in an error.
+	IsError bool
+
+	// Append-only sequence of events (modulo discards).
+	mu        sync.RWMutex
+	events    []event
+	maxEvents int
+
+	refs     int32 // how many buckets this is in
+	recycler func(interface{})
+	disc     discarded // scratch space to avoid allocation
+
+	finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+
+	eventsBuf [4]event // preallocated buffer in case we only log a few events
+}
+
+func (tr *trace) reset() {
+	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+	tr.Family = ""
+	tr.Title = ""
+	tr.Start = time.Time{}
+	tr.Elapsed = 0
+	tr.traceID = 0
+	tr.spanID = 0
+	tr.IsError = false
+	tr.maxEvents = 0
+	tr.events = nil
+	tr.refs = 0
+	tr.recycler = nil
+	tr.disc = 0
+	tr.finishStack = nil
+	for i := range tr.eventsBuf {
+		tr.eventsBuf[i] = event{}
+	}
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+	if len(tr.events) == 0 {
+		return t.Sub(tr.Start), false
+	}
+	prev := tr.events[len(tr.events)-1].When
+	return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+	if DebugUseAfterFinish && tr.finishStack != nil {
+		buf := make([]byte, 4<<10) // 4 KB should be enough
+		n := runtime.Stack(buf, false)
+		log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+	}
+
+	/*
+		NOTE TO DEBUGGERS
+
+		If you are here because your program panicked in this code,
+		it is almost definitely the fault of code using this package,
+		and very unlikely to be the fault of this code.
+
+		The most likely scenario is that some code elsewhere is using
+		a trace.Trace after its Finish method is called.
+		You can temporarily set the DebugUseAfterFinish var
+		to help discover where that is; do not leave that var set,
+		since it makes this package much less efficient.
+	*/
+
+	e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+	tr.mu.Lock()
+	e.Elapsed, e.NewDay = tr.delta(e.When)
+	if len(tr.events) < tr.maxEvents {
+		tr.events = append(tr.events, e)
+	} else {
+		// Discard the middle events.
+		di := int((tr.maxEvents - 1) / 2)
+		if d, ok := tr.events[di].What.(*discarded); ok {
+			(*d)++
+		} else {
+			// disc starts at two to count for the event it is replacing,
+			// plus the next one that we are about to drop.
+			tr.disc = 2
+			if tr.recycler != nil && tr.events[di].Recyclable {
+				go tr.recycler(tr.events[di].What)
+			}
+			tr.events[di].What = &tr.disc
+		}
+		// The timestamp of the discarded meta-event should be
+		// the time of the last event it is representing.
+		tr.events[di].When = tr.events[di+1].When
+
+		if tr.recycler != nil && tr.events[di+1].Recyclable {
+			go tr.recycler(tr.events[di+1].What)
+		}
+		copy(tr.events[di+1:], tr.events[di+2:])
+		tr.events[tr.maxEvents-1] = e
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+	tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+	tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+	tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+	tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+	// Always keep at least three events: first, discarded count, last.
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	t := tr.Elapsed
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+	"elapsed": elapsed,
+	"add":     func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#tr-status td.family {
+			padding-right: 2em;
+		}
+		table#tr-status td.active {
+			padding-right: 1em;
+		}
+		table#tr-status td.latency-first {
+			padding-left: 1em;
+		}
+		table#tr-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+		address {
+			font-size: smaller;
+			margin-top: 5em;
+		}
+	</style>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active {{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>
+		{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		[{{.Cond}}]
+		{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency-first">
+		<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+		</td>
+
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+		{{/* TODO: include traceID/spanID */}}
+	</tr>
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+	</body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000..fa139db
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - tip
+
+install:
+  - export GOPATH="$HOME/gopath"
+  - mkdir -p "$GOPATH/src/golang.org/x"
+  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+  - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+  - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000..46aa2b1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..d02f24f
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..1643c08
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,65 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"net/http"
+
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000..8962c49
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 0000000..dc993ef
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+)
+
+// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
+var appengineVM bool
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &appEngineTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
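+
+// A typical use, sketched only (the scope URL is just an example):
+//
+//	ts := google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
+//	client := oauth2.NewClient(ctx, ts)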
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+	aeTokensMu sync.Mutex
+	aeTokens   = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+	mu sync.Mutex // guards t; held while fetching or updating t
+	t  *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+	ctx    context.Context
+	scopes []string
+	key    string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+
+	aeTokensMu.Lock()
+	tok, ok := aeTokens[ts.key]
+	if !ok {
+		tok = &tokenLock{}
+		aeTokens[ts.key] = tok
+	}
+	aeTokensMu.Unlock()
+
+	tok.mu.Lock()
+	defer tok.mu.Unlock()
+	if tok.t.Valid() {
+		return tok.t, nil
+	}
+	access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+	if err != nil {
+		return nil, err
+	}
+	tok.t = &oauth2.Token{
+		AccessToken: access,
+		Expiry:      exp,
+	}
+	return tok.t, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 0000000..4f42c8b
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+	appengineTokenFunc = appengine.AccessToken
+}
diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
new file mode 100644
index 0000000..633611c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+	appengineVM = true
+	appengineTokenFunc = appengine.AccessToken
+}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
new file mode 100644
index 0000000..565d731
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,155 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+)
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+//
+// This client should be used when developing services
+// that run on Google App Engine or Google Compute Engine
+// and use "Application Default Credentials."
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+	ts, err := DefaultTokenSource(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return oauth2.NewClient(ctx, ts), nil
+}
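+
+// A rough usage sketch (the scope and request URL shown are only examples):
+//
+//	ctx := context.Background()
+//	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := client.Get("https://www.googleapis.com/...")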
+
+// DefaultTokenSource is a token source that uses
+// "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+//   1. A JSON file whose path is specified by the
+//      GOOGLE_APPLICATION_CREDENTIALS environment variable.
+//   2. A JSON file in a location known to the gcloud command-line tool.
+//      On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+//      On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+//   3. On Google App Engine it uses the appengine.AccessToken function.
+//   4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+//      credentials from the metadata server.
+//      (In this final case any provided scopes are ignored.)
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+	// First, try the environment variable.
+	const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	if filename := os.Getenv(envVar); filename != "" {
+		ts, err := tokenSourceFromFile(ctx, filename, scope)
+		if err != nil {
+			return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+		}
+		return ts, nil
+	}
+
+	// Second, try a well-known file.
+	filename := wellKnownFile()
+	_, err := os.Stat(filename)
+	if err == nil {
+		ts, err2 := tokenSourceFromFile(ctx, filename, scope)
+		if err2 == nil {
+			return ts, nil
+		}
+		err = err2
+	} else if os.IsNotExist(err) {
+		err = nil // ignore this error
+	}
+	if err != nil {
+		return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+	}
+
+	// Third, if we're on Google App Engine use those credentials.
+	if appengineTokenFunc != nil && !appengineVM {
+		return AppEngineTokenSource(ctx, scope...), nil
+	}
+
+	// Fourth, if we're on Google Compute Engine use the metadata server.
+	if metadata.OnGCE() {
+		return ComputeTokenSource(""), nil
+	}
+
+	// None are found; return helpful error.
+	const url = "https://developers.google.com/accounts/docs/application-default-credentials"
+	return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+	const f = "application_default_credentials.json"
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	var d struct {
+		// Common fields
+		Type     string
+		ClientID string `json:"client_id"`
+
+		// User Credential fields
+		ClientSecret string `json:"client_secret"`
+		RefreshToken string `json:"refresh_token"`
+
+		// Service Account fields
+		ClientEmail  string `json:"client_email"`
+		PrivateKeyID string `json:"private_key_id"`
+		PrivateKey   string `json:"private_key"`
+	}
+	if err := json.Unmarshal(b, &d); err != nil {
+		return nil, err
+	}
+	switch d.Type {
+	case "authorized_user":
+		cfg := &oauth2.Config{
+			ClientID:     d.ClientID,
+			ClientSecret: d.ClientSecret,
+			Scopes:       append([]string{}, scopes...), // copy
+			Endpoint:     Endpoint,
+		}
+		tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+		return cfg.TokenSource(ctx, tok), nil
+	case "service_account":
+		cfg := &jwt.Config{
+			Email:      d.ClientEmail,
+			PrivateKey: []byte(d.PrivateKey),
+			Scopes:     append([]string{}, scopes...), // copy
+			TokenURL:   JWTTokenURL,
+		}
+		return cfg.TokenSource(ctx), nil
+	case "":
+		return nil, errors.New("missing 'type' field in credentials")
+	default:
+		return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 0000000..4f73527
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,153 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+	TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from
+// https://console.developers.google.com, under "Credentials". Download the Web
+// application credentials in the JSON format and provide the contents of the
+// file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+	type cred struct {
+		ClientID     string   `json:"client_id"`
+		ClientSecret string   `json:"client_secret"`
+		RedirectURIs []string `json:"redirect_uris"`
+		AuthURI      string   `json:"auth_uri"`
+		TokenURI     string   `json:"token_uri"`
+	}
+	var j struct {
+		Web       *cred `json:"web"`
+		Installed *cred `json:"installed"`
+	}
+	if err := json.Unmarshal(jsonKey, &j); err != nil {
+		return nil, err
+	}
+	var c *cred
+	switch {
+	case j.Web != nil:
+		c = j.Web
+	case j.Installed != nil:
+		c = j.Installed
+	default:
+		return nil, fmt.Errorf("oauth2/google: no credentials found")
+	}
+	if len(c.RedirectURIs) < 1 {
+		return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+	}
+	return &oauth2.Config{
+		ClientID:     c.ClientID,
+		ClientSecret: c.ClientSecret,
+		RedirectURL:  c.RedirectURIs[0],
+		Scopes:       scope,
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  c.AuthURI,
+			TokenURL: c.TokenURI,
+		},
+	}, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" for your project at
+// https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+	var key struct {
+		Email        string `json:"client_email"`
+		PrivateKey   string `json:"private_key"`
+		PrivateKeyID string `json:"private_key_id"`
+		TokenURL     string `json:"token_uri"`
+	}
+	if err := json.Unmarshal(jsonKey, &key); err != nil {
+		return nil, err
+	}
+	config := &jwt.Config{
+		Email:        key.Email,
+		PrivateKey:   []byte(key.PrivateKey),
+		PrivateKeyID: key.PrivateKeyID,
+		Scopes:       scope,
+		TokenURL:     key.TokenURL,
+	}
+	if config.TokenURL == "" {
+		config.TokenURL = JWTTokenURL
+	}
+	return config, nil
+}
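For orientation (annotation, not part of the vendored file): a minimal sketch of JWTConfigFromJSON with a downloaded service-account key. The key path and scope below are placeholders.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	// Read a service-account key downloaded from the Developers Console
	// (placeholder path).
	data, err := ioutil.ReadFile("key.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		log.Fatal(err)
	}
	// The *jwt.Config yields an *http.Client that signs JWT assertions and
	// attaches the resulting access tokens to outgoing requests.
	client := conf.Client(context.Background())
	_ = client
}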
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+	account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+	if !metadata.OnGCE() {
+		return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+	}
+	acct := cs.account
+	if acct == "" {
+		acct = "default"
+	}
+	tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+	if err != nil {
+		return nil, err
+	}
+	var res struct {
+		AccessToken  string `json:"access_token"`
+		ExpiresInSec int    `json:"expires_in"`
+		TokenType    string `json:"token_type"`
+	}
+	err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+	}
+	if res.ExpiresInSec == 0 || res.AccessToken == "" {
+		return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+	}
+	return &oauth2.Token{
+		AccessToken: res.AccessToken,
+		TokenType:   res.TokenType,
+		Expiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+	}, nil
+}
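An illustrative sketch of ComputeTokenSource (annotation, not part of the vendored file); it only succeeds when the program runs on a GCE instance.

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects the instance's "default" service account.
	ts := google.ComputeTokenSource("")
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err) // off GCE this fails: "not running on GCE"
	}
	log.Println("access token expires at", tok.Expiry)

	// The same source can back an HTTP client for API calls.
	client := oauth2.NewClient(context.Background(), ts)
	_ = client
}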
diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go
new file mode 100644
index 0000000..b0fdb3a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/jwt.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"crypto/rsa"
+	"fmt"
+	"time"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+	"golang.org/x/oauth2/jws"
+)
+
+// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
+// key file to read the credentials that authorize and authenticate the
+// requests, and returns a TokenSource that does not use any OAuth2 flow but
+// instead creates a JWT and sends that as the access token.
+// The audience is typically a URL that specifies the scope of the credentials.
+//
+// Note that this is not a standard OAuth flow, but rather an
+// optimization supported by a few Google services.
+// Unless you know otherwise, you should use JWTConfigFromJSON instead.
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
+	cfg, err := JWTConfigFromJSON(jsonKey)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
+	}
+	pk, err := internal.ParseKey(cfg.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not parse key: %v", err)
+	}
+	ts := &jwtAccessTokenSource{
+		email:    cfg.Email,
+		audience: audience,
+		pk:       pk,
+		pkID:     cfg.PrivateKeyID,
+	}
+	tok, err := ts.Token()
+	if err != nil {
+		return nil, err
+	}
+	return oauth2.ReuseTokenSource(tok, ts), nil
+}
+
+type jwtAccessTokenSource struct {
+	email, audience string
+	pk              *rsa.PrivateKey
+	pkID            string
+}
+
+func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
+	iat := time.Now()
+	exp := iat.Add(time.Hour)
+	cs := &jws.ClaimSet{
+		Iss: ts.email,
+		Sub: ts.email,
+		Aud: ts.audience,
+		Iat: iat.Unix(),
+		Exp: exp.Unix(),
+	}
+	hdr := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     string(ts.pkID),
+	}
+	msg, err := jws.Encode(hdr, cs, ts.pk)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not encode JWT: %v", err)
+	}
+	return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
+}
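A sketch of the JWT-access-token optimization (annotation only); the key path and audience URL are placeholders.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("key.json") // placeholder service-account key
	if err != nil {
		log.Fatal(err)
	}
	// The audience identifies the API being called; no OAuth scopes are used.
	ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://example.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("self-signed JWT obtained; expires at", tok.Expiry)
}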
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 0000000..d29a3bb
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+	Data []struct {
+		Credential struct {
+			ClientID     string     `json:"client_id"`
+			ClientSecret string     `json:"client_secret"`
+			AccessToken  string     `json:"access_token"`
+			RefreshToken string     `json:"refresh_token"`
+			TokenExpiry  *time.Time `json:"token_expiry"`
+		} `json:"credential"`
+		Key struct {
+			Account string `json:"account"`
+			Scope   string `json:"scope"`
+		} `json:"key"`
+	}
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+	conf         oauth2.Config
+	initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := internal.ParseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It returns the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	usr, err := user.Current()
+	if err == nil {
+		return usr.HomeDir
+	}
+	return os.Getenv("HOME")
+}
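A usage sketch (annotation only), assuming `gcloud auth login` has already been run on the machine.

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects the account currently active in the gcloud properties file.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authorized scopes:", conf.Scopes())

	// The client refreshes the stored access token as needed, but never
	// writes the refreshed token back to the SDK credentials file.
	client := conf.Client(context.Background())
	_ = client
}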
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..fbe1028
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
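+// ParseINI parses INI-style configuration data, such as the Google Cloud
+// SDK's properties file, into a map of section name to key/value pairs.
+// Keys that appear before any section header land in the "" (root) section.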
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": map[string]string{}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
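+// CondVal returns v wrapped in a one-element slice, or nil if v is empty.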
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
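Since this package is internal, a test-style sketch (annotation only) of ParseINI on gcloud-style properties data:

package internal

import (
	"strings"
	"testing"
)

func TestParseINISketch(t *testing.T) {
	src := "[core]\naccount = alice@example.com\n; comments start with a semicolon\n"
	ini, err := ParseINI(strings.NewReader(src))
	if err != nil {
		t.Fatal(err)
	}
	if got := ini["core"]["account"]; got != "alice@example.com" {
		t.Errorf(`ini["core"]["account"] = %q, want "alice@example.com"`, got)
	}
}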
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..18328a0
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,225 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://api.dropbox.com/",
+	"https://api.dropboxapi.com/",
+	"https://api.instagram.com/",
+	"https://api.netatmo.net/",
+	"https://api.odnoklassniki.ru/",
+	"https://api.pushbullet.com/",
+	"https://api.soundcloud.com/",
+	"https://api.twitch.tv/",
+	"https://app.box.com/",
+	"https://connect.stripe.com/",
+	"https://login.microsoftonline.com/",
+	"https://login.salesforce.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://oauth.vk.com/",
+	"https://openapi.baidu.com/",
+	"https://slack.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://test.salesforce.com/",
+	"https://user.gini.net/",
+	"https://www.douban.com/",
+	"https://www.googleapis.com/",
+	"https://www.linkedin.com/",
+	"https://www.strava.com/oauth/",
+	"https://www.wunderlist.com/oauth/",
+	"https://api.patreon.com/",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it either in the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
+	return true
+}
+
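+// RetrieveToken POSTs the given form values to tokenURL and decodes the
+// response (JSON or form-encoded) into a *Token. Client credentials are sent
+// with HTTP Basic auth unless the provider is a known broken-auth-header provider.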
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+	hc, err := ContextClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	v.Set("client_id", clientID)
+	bustedAuth := !providerAuthHeaderWorks(tokenURL)
+	if bustedAuth && clientSecret != "" {
+		v.Set("client_secret", clientSecret)
+	}
+	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	if !bustedAuth {
+		req.SetBasicAuth(clientID, clientSecret)
+	}
+	r, err := hc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if code := r.StatusCode; code < 200 || code > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+	}
+
+	var token *Token
+	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	switch content {
+	case "application/x-www-form-urlencoded", "text/plain":
+		vals, err := url.ParseQuery(string(body))
+		if err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  vals.Get("access_token"),
+			TokenType:    vals.Get("token_type"),
+			RefreshToken: vals.Get("refresh_token"),
+			Raw:          vals,
+		}
+		e := vals.Get("expires_in")
+		if e == "" {
+			// TODO(jbd): Facebook's OAuth2 implementation is broken and
+			// returns expires_in field in expires. Remove the fallback to expires,
+			// when Facebook fixes their implementation.
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			Raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	return token, nil
+}
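A test-style sketch (annotation only) of how the broken-auth-header list steers RetrieveToken's choice between Basic auth and form parameters:

package internal

import "testing"

func TestProviderAuthHeaderSketch(t *testing.T) {
	// Google's token endpoint is on the list, so the client secret is sent
	// as a form value rather than via HTTP Basic auth.
	if providerAuthHeaderWorks("https://accounts.google.com/o/oauth2/token") {
		t.Error("expected Google to be treated as a broken-auth-header provider")
	}
	// Unknown providers are assumed to be spec compliant and get Basic auth.
	if !providerAuthHeaderWorks("https://auth.example.com/token") {
		t.Error("expected unknown providers to use the Authorization header")
	}
}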
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 0000000..f1f173e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error.  If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+	contextClientFuncs = append(contextClientFuncs, fn)
+}
+
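+// ContextClient returns the *http.Client stored in ctx under the HTTPClient
+// key, or one produced by a registered ContextClientFunc, falling back to
+// http.DefaultClient.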
+func ContextClient(ctx context.Context) (*http.Client, error) {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+			return hc, nil
+		}
+	}
+	for _, fn := range contextClientFuncs {
+		c, err := fn(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			return c, nil
+		}
+	}
+	return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+	hc, err := ContextClient(ctx)
+	// This is a rare error case (somebody using nil on App Engine).
+	if err != nil {
+		return ErrorTransport{err}
+	}
+	return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, t.Err
+}
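A test-style sketch (annotation only) showing how ContextClient prefers a client stored under the HTTPClient key and otherwise falls back to http.DefaultClient:

package internal

import (
	"net/http"
	"testing"
	"time"

	"golang.org/x/net/context"
)

func TestContextClientSketch(t *testing.T) {
	custom := &http.Client{Timeout: 5 * time.Second}
	ctx := context.WithValue(context.Background(), HTTPClient, custom)
	if hc, _ := ContextClient(ctx); hc != custom {
		t.Error("expected the client stored in the context")
	}
	if hc, _ := ContextClient(context.Background()); hc != http.DefaultClient {
		t.Error("expected http.DefaultClient as the fallback")
	}
}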
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 0000000..8bcecb4
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Reverting time back for machines whose time is not perfectly in sync.
+	// If client machine's time is in the future according
+	// to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+
+	// The optional hint of which key is being used.
+	KeyID string `json:"kid,omitempty"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+		return nil, errors.New("jws: invalid token received")
+	}
+	decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &ClaimSet{}
+	err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+	return c, err
+}
+
+// Signer returns a signature for the given data.
+type Signer func(data []byte) (sig []byte, err error)
+
+// EncodeWithSigner encodes a header and claim set with the provided signer.
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	cs, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, cs)
+	sig, err := sg([]byte(ss))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
+	sg := func(data []byte) (sig []byte, err error) {
+		h := sha256.New()
+		h.Write(data)
+		return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+	}
+	return EncodeWithSigner(header, c, sg)
+}
+
+// Verify tests whether the provided JWT token's signature was produced by the private key
+// associated with the supplied public key.
+func Verify(token string, key *rsa.PublicKey) error {
+	parts := strings.Split(token, ".")
+	if len(parts) != 3 {
+		return errors.New("jws: invalid token received, token must have 3 parts")
+	}
+
+	signedContent := parts[0] + "." + parts[1]
+	signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+	if err != nil {
+		return err
+	}
+
+	h := sha256.New()
+	h.Write([]byte(signedContent))
+	return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString))
+}
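A self-contained sketch (annotation only) of signing and verifying a JWS with this package; the issuer and audience values are placeholders.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss:   "svc@example.iam.gserviceaccount.com",
		Scope: "email",
		Aud:   "https://accounts.google.com/o/oauth2/token",
	}
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}
	// Verify checks the RS256 signature against the corresponding public key.
	if err := jws.Verify(token, &key.PublicKey); err != nil {
		log.Fatal("verification failed: ", err)
	}
	log.Println("signed and verified a JWS assertion")
}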
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000..f4b9523
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,157 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+	"golang.org/x/oauth2/jws"
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// Email is the OAuth client identifier used when communicating with
+	// the configured OAuth provider.
+	Email string
+
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. The provided
+	// private key is used to sign JWT payloads.
+	// PEM containers with a passphrase are not supported.
+	// Use the following command to convert a PKCS 12 file into a PEM.
+	//
+	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+	//
+	PrivateKey []byte
+
+	// PrivateKeyID contains an optional hint indicating which key is being
+	// used.
+	PrivateKeyID string
+
+	// Subject is the optional user to impersonate.
+	Subject string
+
+	// Scopes optionally specifies a list of requested permission scopes.
+	Scopes []string
+
+	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
+	TokenURL string
+
+	// Expires optionally specifies how long the token is valid for.
+	Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+	pk, err := internal.ParseKey(js.conf.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	hc := oauth2.NewClient(js.ctx, nil)
+	claimSet := &jws.ClaimSet{
+		Iss:   js.conf.Email,
+		Scope: strings.Join(js.conf.Scopes, " "),
+		Aud:   js.conf.TokenURL,
+	}
+	if subject := js.conf.Subject; subject != "" {
+		claimSet.Sub = subject
+		// prn is the old name of sub. Keep setting it
+		// to be compatible with legacy OAuth 2.0 providers.
+		claimSet.Prn = subject
+	}
+	if t := js.conf.Expires; t > 0 {
+		claimSet.Exp = time.Now().Add(t).Unix()
+	}
+	payload, err := jws.Encode(defaultHeader, claimSet, pk)
+	if err != nil {
+		return nil, err
+	}
+	v := url.Values{}
+	v.Set("grant_type", defaultGrantType)
+	v.Set("assertion", payload)
+	resp, err := hc.PostForm(js.conf.TokenURL, v)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+	}
+	// tokenRes is the JSON response body.
+	var tokenRes struct {
+		AccessToken string `json:"access_token"`
+		TokenType   string `json:"token_type"`
+		IDToken     string `json:"id_token"`
+		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
+	}
+	if err := json.Unmarshal(body, &tokenRes); err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	token := &oauth2.Token{
+		AccessToken: tokenRes.AccessToken,
+		TokenType:   tokenRes.TokenType,
+	}
+	raw := make(map[string]interface{})
+	json.Unmarshal(body, &raw) // no error checks for optional fields
+	token = token.WithExtra(raw)
+
+	if secs := tokenRes.ExpiresIn; secs > 0 {
+		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+	}
+	if v := tokenRes.IDToken; v != "" {
+		// decode returned id token to get expiry
+		claimSet, err := jws.Decode(v)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+		}
+		token.Expiry = time.Unix(claimSet.Exp, 0)
+	}
+	return token, nil
+}
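A construction sketch (annotation only); the email, key, and scope are placeholders and must come from a real service-account key for requests to succeed.

package main

import (
	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
)

func main() {
	conf := &jwt.Config{
		Email:      "svc@example.iam.gserviceaccount.com",                                // placeholder
		PrivateKey: []byte("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----"), // placeholder
		Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		TokenURL:   google.JWTTokenURL,
	}
	// The token is fetched lazily on the first request made by this client.
	client := conf.Client(context.Background())
	_ = client
}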
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000..798edc8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,341 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2
+
+import (
+	"bytes"
+	"errors"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as query parameters rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to which the user is redirected
+	// after completing the authorization flow on the provider's site.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Config.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type":   {"authorization_code"},
+		"code":         {code},
+		"redirect_uri": internal.CondVal(c.RedirectURL),
+		"scope":        internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+	return NewClient(ctx, c.TokenSource(ctx, t))
+}
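A sketch of the full three-legged flow built from AuthCodeURL, Exchange, and Client (annotation only); the client credentials and endpoint URLs are placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",     // placeholder
		ClientSecret: "YOUR_CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// 1. Send the user to the consent page; "state" protects against CSRF.
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
	fmt.Println("Visit the URL for the auth dialog:", url)

	// 2. The provider redirects back with ?code=...; exchange it for a token.
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		log.Fatal(err)
	}

	// 3. The returned client attaches and refreshes the token automatically.
	client := conf.Client(ctx, tok)
	_ = client
}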
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+	tkr := &tokenRefresher{
+		ctx:  ctx,
+		conf: c,
+	}
+	if t != nil {
+		tkr.refreshToken = t.RefreshToken
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: tkr,
+	}
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+	ctx          context.Context // used to get HTTP requests
+	conf         *Config
+	refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token using the wrapped TokenSource and
+// return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := internal.ContextClient(ctx)
+		if err != nil {
+			return &http.Client{Transport: internal.ErrorTransport{Err: err}}
+		}
+		return c
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
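A sketch (annotation only) of how ReuseTokenSource caches a wrapped source's token until it expires; countingSource is a made-up TokenSource for illustration.

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// countingSource counts how often its Token method is actually invoked.
type countingSource struct{ calls int }

func (s *countingSource) Token() (*oauth2.Token, error) {
	s.calls++
	return &oauth2.Token{
		AccessToken: fmt.Sprintf("token-%d", s.calls),
		Expiry:      time.Now().Add(time.Hour),
	}, nil
}

func main() {
	src := &countingSource{}
	ts := oauth2.ReuseTokenSource(nil, src)
	for i := 0; i < 3; i++ {
		tok, _ := ts.Token()
		fmt.Println(tok.AccessToken) // "token-1" each time: the valid token is reused
	}
	fmt.Println("underlying source called", src.calls, "time(s)") // 1
}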
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..7a3167f
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+
+	vals, ok := t.raw.(url.Values)
+	if !ok {
+		return nil
+	}
+
+	v := vals.Get(key)
+	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+	case 0: // Contains no "."; try to parse as int
+		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+			return i
+		}
+	case 1: // Contains a single "."; try to parse as float
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+	}
+
+	return v
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
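A small sketch (annotation only) of Type, Valid, and SetAuthHeader; the token value and request URL are placeholders.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{
		AccessToken: "example-access-token", // placeholder
		Expiry:      time.Now().Add(30 * time.Minute),
	}
	fmt.Println(tok.Type())  // "Bearer": the default when TokenType is empty
	fmt.Println(tok.Valid()) // true: non-empty access token, not yet expired

	// SetAuthHeader is only needed when not using Transport or Config.Client.
	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil)
	if err != nil {
		log.Fatal(err)
	}
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization")) // "Bearer example-access-token"
}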
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..92ac7e2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired,
+// it tries to refresh or fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+	return r2
+}
+
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
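A direct-Transport sketch (annotation only); the token and URL are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// A static token never refreshes, so StaticTokenSource suits long-lived
	// credentials such as personal access tokens.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-access-token"})
	client := &http.Client{
		// Base is nil, so http.DefaultTransport performs the actual request.
		Transport: &oauth2.Transport{Source: ts},
	}
	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}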
diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE
new file mode 100644
index 0000000..263aa7a
--- /dev/null
+++ b/vendor/google.golang.org/api/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
new file mode 100644
index 0000000..4150feb
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/pool.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"errors"
+	"google.golang.org/grpc/naming"
+)
+
+// PoolResolver provides a fixed list of addresses to load balance between
+// and does not provide further updates.
+type PoolResolver struct {
+	poolSize int
+	dialOpt  *DialSettings
+	ch       chan []*naming.Update
+}
+
+// NewPoolResolver returns a PoolResolver.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
+	return &PoolResolver{poolSize: size, dialOpt: o}
+}
+
+// Resolve returns a Watcher for the endpoint defined by the DialSettings
+// provided to NewPoolResolver.
+func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
+	if r.dialOpt.Endpoint == "" {
+		return nil, errors.New("No endpoint configured")
+	}
+	addrs := make([]*naming.Update, 0, r.poolSize)
+	for i := 0; i < r.poolSize; i++ {
+		addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
+	}
+	r.ch = make(chan []*naming.Update, 1)
+	r.ch <- addrs
+	return r, nil
+}
+
+// Next returns a static list of updates on the first call,
+// and blocks indefinitely until Close is called on subsequent calls.
+func (r *PoolResolver) Next() ([]*naming.Update, error) {
+	return <-r.ch, nil
+}
+
+func (r *PoolResolver) Close() {
+	close(r.ch)
+}
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
new file mode 100644
index 0000000..976280b
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -0,0 +1,22 @@
+// Package internal supports the options and transport packages.
+package internal
+
+import (
+	"net/http"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/grpc"
+)
+
+// DialSettings holds information needed to establish a connection with a
+// Google API service.
+type DialSettings struct {
+	Endpoint                   string
+	Scopes                     []string
+	ServiceAccountJSONFilename string // if set, TokenSource is ignored.
+	TokenSource                oauth2.TokenSource
+	UserAgent                  string
+	HTTPClient                 *http.Client
+	GRPCDialOpts               []grpc.DialOption
+	GRPCConn                   *grpc.ClientConn
+}
diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go
new file mode 100644
index 0000000..0640c82
--- /dev/null
+++ b/vendor/google.golang.org/api/iterator/iterator.go
@@ -0,0 +1,231 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package iterator provides support for standard Google API iterators.
+// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
+package iterator
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Done is returned by an iterator's Next method when the iteration is
+// complete, i.e. when there are no more items to return.
+var Done = errors.New("no more items in iterator")
+
+// We don't support mixed calls to Next and NextPage because they play
+// with the paging state in incompatible ways.
+var errMixed = errors.New("iterator: Next and NextPage called on same iterator")
+
+// PageInfo contains information about an iterator's paging state.
+type PageInfo struct {
+	// Token is the token used to retrieve the next page of items from the
+	// API. You may set Token immediately after creating an iterator to
+	// begin iteration at a particular point. If Token is the empty string,
+	// the iterator will begin with the first eligible item.
+	//
+	// The result of setting Token after the first call to Next is undefined.
+	//
+	// After the underlying API method is called to retrieve a page of items,
+	// Token is set to the next-page token in the response.
+	Token string
+
+	// MaxSize is the maximum number of items returned by a call to the API.
+	// Set MaxSize as a hint to optimize the buffering behavior of the iterator.
+	// If zero, the page size is determined by the underlying service.
+	//
+	// Use Pager to retrieve a page of a specific, exact size.
+	MaxSize int
+
+	// The error state of the iterator. Manipulated by PageInfo.next and Pager.
+	// This is a latch: it starts as nil, and once set should never change.
+	err error
+
+	// If true, no more calls to fetch should be made. Set to true when fetch
+	// returns an empty page token. The iterator is Done when this is true AND
+	// the buffer is empty.
+	atEnd bool
+
+	// Function that fetches a page from the underlying service. It should pass
+	// the pageSize and pageToken arguments to the service, fill the buffer
+	// with the results from the call, and return the next-page token returned
+	// by the service. The function must not remove any existing items from the
+	// buffer. If the underlying RPC takes an int32 page size, pageSize should
+	// be silently truncated.
+	fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
+
+	// Function that returns the number of currently buffered items.
+	bufLen func() int
+
+	// Function that returns the buffer, after setting the buffer variable to nil.
+	takeBuf func() interface{}
+
+	// Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
+	// for calls to both Next and NextPage with the same iterator.
+	nextCalled, nextPageCalled bool
+}
+
+// NewPageInfo exposes internals for iterator implementations.
+// It is not a stable interface.
+var NewPageInfo = newPageInfo
+
+// If an iterator can support paging, its iterator-creating method should call
+// this (via the NewPageInfo variable above).
+//
+// The fetch, bufLen and takeBuf arguments provide access to the
+// iterator's internal slice of buffered items. They behave as described in
+// PageInfo, above.
+//
+// The return value is the PageInfo.next method bound to the returned PageInfo value.
+// (Returning it avoids exporting PageInfo.next.)
+func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
+	pi := &PageInfo{
+		fetch:   fetch,
+		bufLen:  bufLen,
+		takeBuf: takeBuf,
+	}
+	return pi, pi.next
+}
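+
+// As a sketch, an iterator implementation would typically wire its buffer in
+// like this (it.items, it.nextFunc and listItems are illustrative names, not
+// part of this package; the contracts follow the PageInfo field comments above):
+//
+//	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+//		func(pageSize int, pageToken string) (string, error) {
+//			items, next, err := listItems(pageSize, pageToken) // call the underlying service
+//			if err != nil {
+//				return "", err
+//			}
+//			it.items = append(it.items, items...) // fill the buffer, never remove from it
+//			return next, nil                      // return the next-page token
+//		},
+//		func() int { return len(it.items) }, // bufLen: number of buffered items
+//		func() interface{} { // takeBuf: return the buffer and reset it to nil
+//			b := it.items
+//			it.items = nil
+//			return b
+//		},
+//	)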
+
+// Remaining returns the number of items available before the iterator makes another API call.
+func (pi *PageInfo) Remaining() int { return pi.bufLen() }
+
+// next provides support for an iterator's Next function. An iterator's Next
+// should return the error returned by next if non-nil; else it can assume
+// there is at least one item in its buffer, and it should return that item and
+// remove it from the buffer.
+func (pi *PageInfo) next() error {
+	pi.nextCalled = true
+	if pi.err != nil { // Once we get an error, always return it.
+		// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
+		return pi.err
+	}
+	if pi.nextPageCalled {
+		pi.err = errMixed
+		return pi.err
+	}
+	// Loop until we get some items or reach the end.
+	for pi.bufLen() == 0 && !pi.atEnd {
+		if err := pi.fill(pi.MaxSize); err != nil {
+			pi.err = err
+			return pi.err
+		}
+		if pi.Token == "" {
+			pi.atEnd = true
+		}
+	}
+	// Either the buffer is non-empty or pi.atEnd is true (or both).
+	if pi.bufLen() == 0 {
+		// The buffer is empty and pi.atEnd is true, i.e. the service has no
+		// more items.
+		pi.err = Done
+	}
+	return pi.err
+}
+
+// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
+// next-page token returned by the call.
+// If fill returns a non-nil error, the buffer will be empty.
+func (pi *PageInfo) fill(size int) error {
+	tok, err := pi.fetch(size, pi.Token)
+	if err != nil {
+		pi.takeBuf() // clear the buffer
+		return err
+	}
+	pi.Token = tok
+	return nil
+}
+
+// Pageable is implemented by iterators that support paging.
+type Pageable interface {
+	// PageInfo returns paging information associated with the iterator.
+	PageInfo() *PageInfo
+}
+
+// Pager supports retrieving iterator items a page at a time.
+type Pager struct {
+	pageInfo *PageInfo
+	pageSize int
+}
+
+// NewPager returns a pager that uses iter. Calls to its NextPage method will
+// obtain exactly pageSize items, unless fewer remain. The pageToken argument
+// indicates where to start the iteration. Pass the empty string to start at
+// the beginning, or pass a token retrieved from a call to Pager.NextPage.
+//
+// If you use an iterator with a Pager, you must not call Next on the iterator.
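+//
+// A minimal usage sketch (client.Items, Item and process are illustrative
+// names, not part of this package; any Pageable iterator is used the same way):
+//
+//	it := client.Items(ctx)            // hypothetical iterator constructor
+//	p := iterator.NewPager(it, 50, "") // pages of up to 50 items, from the start
+//	for {
+//		var page []*Item
+//		token, err := p.NextPage(&page)
+//		if err != nil {
+//			return err // never iterator.Done; see NextPage
+//		}
+//		process(page)
+//		if token == "" {
+//			break // no more pages
+//		}
+//	}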
+func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
+	p := &Pager{
+		pageInfo: iter.PageInfo(),
+		pageSize: pageSize,
+	}
+	p.pageInfo.Token = pageToken
+	if pageSize <= 0 {
+		p.pageInfo.err = errors.New("iterator: page size must be positive")
+	}
+	return p
+}
+
+// NextPage retrieves a sequence of items from the iterator and appends them
+// to slicep, which must be a pointer to a slice of the iterator's item type.
+// Exactly p.pageSize items will be appended, unless fewer remain.
+//
+// The first return value is the page token to use for the next page of items.
+// If empty, there are no more pages. Aside from checking for the end of the
+// iteration, the returned page token is only needed if the iteration is to be
+// resumed at a later time, in another context (possibly another process).
+//
+// The second return value is non-nil if an error occurred. It will never be
+// the special iterator sentinel value Done. To recognize the end of the
+// iteration, compare nextPageToken to the empty string.
+//
+// It is possible for NextPage to return a single zero-length page along with
+// an empty page token when there are no more items in the iteration.
+func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
+	p.pageInfo.nextPageCalled = true
+	if p.pageInfo.err != nil {
+		return "", p.pageInfo.err
+	}
+	if p.pageInfo.nextCalled {
+		p.pageInfo.err = errMixed
+		return "", p.pageInfo.err
+	}
+	if p.pageInfo.bufLen() > 0 {
+		return "", errors.New("must call NextPage with an empty buffer")
+	}
+	// The buffer must be empty here, so takeBuf is a no-op. We call it just to get
+	// the buffer's type.
+	wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
+	if slicep == nil {
+		return "", errors.New("nil passed to Pager.NextPage")
+	}
+	vslicep := reflect.ValueOf(slicep)
+	if vslicep.Type() != wantSliceType {
+		return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
+	}
+	for p.pageInfo.bufLen() < p.pageSize {
+		if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
+			p.pageInfo.err = err
+			return "", p.pageInfo.err
+		}
+		if p.pageInfo.Token == "" {
+			break
+		}
+	}
+	e := vslicep.Elem()
+	e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
+	return p.pageInfo.Token, nil
+}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
new file mode 100644
index 0000000..b935e6d
--- /dev/null
+++ b/vendor/google.golang.org/api/option/option.go
@@ -0,0 +1,132 @@
+// Package option contains options for Google API clients.
+package option
+
+import (
+	"net/http"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/api/internal"
+	"google.golang.org/grpc"
+)
+
+// A ClientOption is an option for a Google API client.
+type ClientOption interface {
+	Apply(*internal.DialSettings)
+}
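+
+// A minimal usage sketch (NewClient stands in for any Google API client
+// constructor that accepts ClientOptions; it is illustrative, not part of
+// this package):
+//
+//	client, err := NewClient(ctx,
+//		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
+//		option.WithUserAgent("my-app/1.0"),
+//	)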
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+	return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Apply(o *internal.DialSettings) {
+	o.TokenSource = w.ts
+}
+
+// WithServiceAccountFile returns a ClientOption that uses a Google service
+// account credentials file to authenticate.
+// Use WithTokenSource with a token source created from
+// golang.org/x/oauth2/google.JWTConfigFromJSON
+// if reading the file from disk is not an option.
+func WithServiceAccountFile(filename string) ClientOption {
+	return withServiceAccountFile(filename)
+}
+
+type withServiceAccountFile string
+
+func (w withServiceAccountFile) Apply(o *internal.DialSettings) {
+	o.ServiceAccountJSONFilename = string(w)
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+	return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Apply(o *internal.DialSettings) {
+	o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+	return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Apply(o *internal.DialSettings) {
+	s := make([]string, len(w))
+	copy(s, w)
+	o.Scopes = s
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+	return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
+
+// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
+// as the basis of communications. This option may only be used with services
+// that support HTTP as their communication transport. When used, the
+// WithHTTPClient option takes precedence over all other supplied options.
+func WithHTTPClient(client *http.Client) ClientOption {
+	return withHTTPClient{client}
+}
+
+type withHTTPClient struct{ client *http.Client }
+
+func (w withHTTPClient) Apply(o *internal.DialSettings) {
+	o.HTTPClient = w.client
+}
+
+// WithGRPCConn returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport. When
+// used, the WithGRPCConn option takes precedence over all other supplied
+// options.
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
+	return withGRPCConn{conn}
+}
+
+type withGRPCConn struct{ conn *grpc.ClientConn }
+
+func (w withGRPCConn) Apply(o *internal.DialSettings) {
+	o.GRPCConn = w.conn
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithGRPCConn.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+	return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
+	o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
+
+// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
+// connections that requests will be balanced between.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithGRPCConnectionPool(size int) ClientOption {
+	return withGRPCConnectionPool(size)
+}
+
+type withGRPCConnectionPool int
+
+func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
+	balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
+	o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
+}
diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go
new file mode 100644
index 0000000..c054460
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/dial.go
@@ -0,0 +1,124 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport supports network connections to HTTP and GRPC servers.
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/oauth"
+
+	"google.golang.org/api/internal"
+	"google.golang.org/api/option"
+)
+
+// NewHTTPClient returns an HTTP client for use in communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
+	var o internal.DialSettings
+	for _, opt := range opts {
+		opt.Apply(&o)
+	}
+	if o.GRPCConn != nil {
+		return nil, "", errors.New("unsupported gRPC connection specified")
+	}
+	// TODO(djd): Set UserAgent on all outgoing requests.
+	if o.HTTPClient != nil {
+		return o.HTTPClient, o.Endpoint, nil
+	}
+	if o.ServiceAccountJSONFilename != "" {
+		ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
+		if err != nil {
+			return nil, "", err
+		}
+		o.TokenSource = ts
+	}
+	if o.TokenSource == nil {
+		var err error
+		o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+		if err != nil {
+			return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
+		}
+	}
+	return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
+}
+
+// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
+var appengineDialerHook func(context.Context) grpc.DialOption
+
+// DialGRPC returns a GRPC connection for use in communicating with a Google cloud
+// service, configured with the given ClientOptions.
+func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
+	var o internal.DialSettings
+	for _, opt := range opts {
+		opt.Apply(&o)
+	}
+	if o.HTTPClient != nil {
+		return nil, errors.New("unsupported HTTP client specified")
+	}
+	if o.GRPCConn != nil {
+		return o.GRPCConn, nil
+	}
+	if o.ServiceAccountJSONFilename != "" {
+		ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
+		if err != nil {
+			return nil, err
+		}
+		o.TokenSource = ts
+	}
+	if o.TokenSource == nil {
+		var err error
+		o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+		if err != nil {
+			return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
+		}
+	}
+	grpcOpts := []grpc.DialOption{
+		grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
+		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+	}
+	if appengineDialerHook != nil {
+		// Use the Socket API on App Engine.
+		grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
+	}
+	grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
+	if o.UserAgent != "" {
+		grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+	}
+	return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
+}
+
+func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read service account file: %v", err)
+	}
+	cfg, err := google.JWTConfigFromJSON(data, scope...)
+	if err != nil {
+		return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
+	}
+	return cfg.TokenSource(ctx), nil
+}
diff --git a/vendor/google.golang.org/api/transport/dial_appengine.go b/vendor/google.golang.org/api/transport/dial_appengine.go
new file mode 100644
index 0000000..201244d
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/dial_appengine.go
@@ -0,0 +1,34 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build appengine
+
+package transport
+
+import (
+	"net"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/appengine/socket"
+	"google.golang.org/grpc"
+)
+
+func init() {
+	appengineDialerHook = func(ctx context.Context) grpc.DialOption {
+		return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return socket.DialTimeout(ctx, "tcp", addr, timeout)
+		})
+	}
+}
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 0000000..0762cb9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+  - 1.6.3
+  - 1.7.1
+
+install:
+  - go get -v -t -d google.golang.org/appengine/...
+  - mkdir sdk
+  - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip"
+  - unzip -q sdk.zip -d sdk
+  - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py
+
+script:
+  - go version
+  - go test -v google.golang.org/appengine/...
+  - go test -v -race google.golang.org/appengine/...
+  - sdk/go_appengine/goapp test -v google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 0000000..b6b11d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on App Engine,
+including both the standard App Engine and the
+"App Engine flexible environment" (formerly known as "Managed VMs").
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [Google App Engine issue
+tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
+
+## Directory structure
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update an older Go App Engine app to use
+these packages. A provided tool, `aefix`, can help automate steps 2 and 3
+(run `go get google.golang.org/appengine/cmd/aefix` to install it), but
+read the details below since `aefix` can't perform all the changes.
+
+### 1. Update YAML files (App Engine flexible environment / Managed VMs only)
+
+The `app.yaml` file (and YAML files for modules) should have these new lines added:
+```
+vm: true
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
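+
+For example, code that previously did
+```
+import "appengine/datastore"
+```
+should now do
+```
+import "google.golang.org/appengine/datastore"
+```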
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+  `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+  deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+  Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+  feature you require is not present in the new
+  [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+  Use the standard `net` package instead.
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 0000000..8865c49
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,112 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine
+
+import (
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// Main is not yet supported on App Engine Standard.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+//      package main
+//
+//      import (
+//              "google.golang.org/appengine"
+//
+//              _ "myapp/package0"
+//              _ "myapp/package1"
+//      )
+//
+//      func main() {
+//              appengine.Main()
+//      }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+	internal.Main()
+}
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+	return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+	return WithContext(context.Background(), req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+	return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+	Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+	return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+	return internal.Call(ctx, service, method, in, out)
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 0000000..f4b645a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+	return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 0000000..16d0772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+	"fmt"
+
+	"google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+	callErr, ok := err.(*internal.CallError)
+	return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
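+//
+// A sketch of inspecting one (GetMulti here is an illustrative batch
+// operation, not a specific API):
+//
+//	err := GetMulti(ctx, keys, dst)
+//	if me, ok := err.(appengine.MultiError); ok {
+//		for i, e := range me {
+//			if e != nil {
+//				log.Printf("element %d failed: %v", i, e)
+//			}
+//		}
+//	}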
+type MultiError []error
+
+func (m MultiError) Error() string {
+	s, n := "", 0
+	for _, e := range m {
+		if e != nil {
+			if n == 0 {
+				s = e.Error()
+			}
+			n++
+		}
+	}
+	switch n {
+	case 0:
+		return "(0 errors)"
+	case 1:
+		return s
+	case 2:
+		return s + " (and 1 other error)"
+	}
+	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 0000000..b8dcf8f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/app_identity"
+	modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+	return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+	return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+	req := &modpb.GetHostnameRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	if instance != "" {
+		req.Instance = &instance
+	}
+	res := &modpb.GetHostnameResponse{}
+	if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+		return "", err
+	}
+	return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+	req := &pb.GetAccessTokenRequest{Scope: scopes}
+	res := &pb.GetAccessTokenResponse{}
+
+	err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+	KeyName string
+	Data    []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+	req := &pb.GetPublicCertificateForAppRequest{}
+	res := &pb.GetPublicCertificateForAppResponse{}
+	if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+		return nil, err
+	}
+	var cs []Certificate
+	for _, pc := range res.PublicCertificateList {
+		cs = append(cs, Certificate{
+			KeyName: pc.GetKeyName(),
+			Data:    []byte(pc.GetX509CertificatePem()),
+		})
+	}
+	return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+	req := &pb.GetServiceAccountNameRequest{}
+	res := &pb.GetServiceAccountNameResponse{}
+
+	err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+	if err != nil {
+		return "", err
+	}
+	return res.GetServiceAccountName(), err
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+	req := &pb.SignForAppRequest{BytesToSign: bytes}
+	res := &pb.SignForAppResponse{}
+
+	if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+		return "", nil, err
+	}
+	return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+	internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 0000000..e9c56d4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,660 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	logpb "google.golang.org/appengine/internal/log"
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+	apiPath = "/rpc_http"
+)
+
+var (
+	// Incoming headers.
+	ticketHeader       = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+	dapperHeader       = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+	traceHeader        = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+	curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+	userIPHeader       = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+	remoteAddrHeader   = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+	// Outgoing headers.
+	apiEndpointHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+	apiEndpointHeaderValue = []string{"app-engine-apis"}
+	apiMethodHeader        = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+	apiMethodHeaderValue   = []string{"/VMRemoteAPI.CallRemoteAPI"}
+	apiDeadlineHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+	apiContentType         = http.CanonicalHeaderKey("Content-Type")
+	apiContentTypeValue    = []string{"application/octet-stream"}
+	logFlushHeader         = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+	apiHTTPClient = &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial:  limitDial,
+		},
+	}
+
+	defaultTicketOnce sync.Once
+	defaultTicket     string
+)
+
+func apiURL() *url.URL {
+	host, port := "appengine.googleapis.internal", "10001"
+	if h := os.Getenv("API_HOST"); h != "" {
+		host = h
+	}
+	if p := os.Getenv("API_PORT"); p != "" {
+		port = p
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host + ":" + port,
+		Path:   apiPath,
+	}
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	c := &context{
+		req:       r,
+		outHeader: w.Header(),
+		apiURL:    apiURL(),
+	}
+	stopFlushing := make(chan int)
+
+	ctxs.Lock()
+	ctxs.m[r] = c
+	ctxs.Unlock()
+	defer func() {
+		ctxs.Lock()
+		delete(ctxs.m, r)
+		ctxs.Unlock()
+	}()
+
+	// Patch up RemoteAddr so it looks reasonable.
+	if addr := r.Header.Get(userIPHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else {
+		// Should not normally reach here, but pick a sensible default anyway.
+		r.RemoteAddr = "127.0.0.1"
+	}
+	// The address in the headers will most likely be of these forms:
+	//	123.123.123.123
+	//	2001:db8::1
+	// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+	if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+		// Assume the remote address is only a host; add a default port.
+		r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+	}
+
+	// Start goroutine responsible for flushing app logs.
+	// This is done after adding c to ctxs.m (and stopped before removing it)
+	// because flushing logs requires making an API call.
+	go c.logFlusher(stopFlushing)
+
+	executeRequestSafely(c, r)
+	c.outHeader = nil // make sure header changes aren't respected any more
+
+	stopFlushing <- 1 // any logging beyond this point will be dropped
+
+	// Flush any pending logs asynchronously.
+	c.pendingLogs.Lock()
+	flushes := c.pendingLogs.flushes
+	if len(c.pendingLogs.lines) > 0 {
+		flushes++
+	}
+	c.pendingLogs.Unlock()
+	go c.flushLog(false)
+	w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+	// Avoid nil Write call if c.Write is never called.
+	if c.outCode != 0 {
+		w.WriteHeader(c.outCode)
+	}
+	if c.outBody != nil {
+		w.Write(c.outBody)
+	}
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+	defer func() {
+		if x := recover(); x != nil {
+			logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+			c.outCode = 500
+		}
+	}()
+
+	http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+	buf := make([]byte, 16<<10) // 16 KB should be plenty
+	buf = buf[:runtime.Stack(buf, false)]
+
+	// Remove the first few stack frames:
+	//   this func
+	//   the recover closure in the caller
+	// That will root the stack trace at the site of the panic.
+	const (
+		skipStart  = "internal.renderPanic"
+		skipFrames = 2
+	)
+	start := bytes.Index(buf, []byte(skipStart))
+	p := start
+	for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+		p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+		if p < 0 {
+			break
+		}
+	}
+	if p >= 0 {
+		// buf[start:p+1] is the block to remove.
+		// Copy buf[p+1:] over buf[start:] and shrink buf.
+		copy(buf[start:], buf[p+1:])
+		buf = buf[:len(buf)-(p+1-start)]
+	}
+
+	// Add panic heading.
+	head := fmt.Sprintf("panic: %v\n\n", x)
+	if len(head) > len(buf) {
+		// Extremely unlikely to happen.
+		return head
+	}
+	copy(buf[len(head):], buf)
+	copy(buf, head)
+
+	return string(buf)
+}
+
+var ctxs = struct {
+	sync.Mutex
+	m  map[*http.Request]*context
+	bg *context // background context, lazily initialized
+	// dec is used by tests to decorate the netcontext.Context returned
+	// for a given request. This allows tests to add overrides (such as
+	// WithAppIDOverride) to the context. The map is nil outside tests.
+	dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+	m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+	req *http.Request
+
+	outCode   int
+	outHeader http.Header
+	outBody   []byte
+
+	pendingLogs struct {
+		sync.Mutex
+		lines   []*logpb.UserAppLogLine
+		flushes int
+	}
+
+	apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+func fromContext(ctx netcontext.Context) *context {
+	c, _ := ctx.Value(&contextKey).(*context)
+	return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+	if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+		ctx = withNamespace(ctx, ns)
+	}
+	return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+	return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		return c.req.Header
+	}
+	return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	ctxs.Lock()
+	c := ctxs.m[req]
+	d := ctxs.dec[req]
+	ctxs.Unlock()
+
+	if d != nil {
+		parent = d(parent)
+	}
+
+	if c == nil {
+		// Someone passed in an http.Request that is not in-flight.
+		// We panic here rather than panicking at a later point
+		// so that stack traces will be more sensible.
+		log.Panic("appengine: NewContext passed an unknown http.Request")
+	}
+	return withContext(parent, c)
+}
+
+func getDefaultTicket() string {
+	defaultTicketOnce.Do(func() {
+		appID := partitionlessAppID()
+		escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+		majVersion := VersionID(nil)
+		if i := strings.Index(majVersion, "."); i > 0 {
+			majVersion = majVersion[:i]
+		}
+		defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+	})
+	return defaultTicket
+}
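+
+// As a rough sketch with hypothetical values: an app whose partitionless ID is
+// "example.com:myapp", running module "default", version "20161002.5" on
+// instance "i-abc123", gets the default ticket
+// "example_com_myapp/default.20161002.i-abc123". The ":" and "." in the app ID
+// are replaced with "_", and the version is truncated at its first ".".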
+
+func BackgroundContext() netcontext.Context {
+	ctxs.Lock()
+	defer ctxs.Unlock()
+
+	if ctxs.bg != nil {
+		return toContext(ctxs.bg)
+	}
+
+	// Compute background security ticket.
+	ticket := getDefaultTicket()
+
+	ctxs.bg = &context{
+		req: &http.Request{
+			Header: http.Header{
+				ticketHeader: []string{ticket},
+			},
+		},
+		apiURL: apiURL(),
+	}
+
+	// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+	go ctxs.bg.logFlusher(make(chan int))
+
+	return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
+	c := &context{
+		req:    req,
+		apiURL: apiURL,
+	}
+	ctxs.Lock()
+	defer ctxs.Unlock()
+	if _, ok := ctxs.m[req]; ok {
+		log.Panic("req already associated with context")
+	}
+	if _, ok := ctxs.dec[req]; ok {
+		log.Panic("req already associated with context")
+	}
+	if ctxs.dec == nil {
+		ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+	}
+	ctxs.m[req] = c
+	ctxs.dec[req] = decorate
+
+	return func() {
+		ctxs.Lock()
+		delete(ctxs.m, req)
+		delete(ctxs.dec, req)
+		ctxs.Unlock()
+	}
+}
+
+var errTimeout = &CallError{
+	Detail:  "Deadline exceeded",
+	Code:    int32(remotepb.RpcError_CANCELLED),
+	Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
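+
+// For example, bodyAllowedForStatus(204) and bodyAllowedForStatus(304) report
+// false, so Write below rejects a non-empty body for such responses with
+// http.ErrBodyNotAllowed, while bodyAllowedForStatus(200) reports true.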
+
+func (c *context) Write(b []byte) (int, error) {
+	if c.outCode == 0 {
+		c.WriteHeader(http.StatusOK)
+	}
+	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+		return 0, http.ErrBodyNotAllowed
+	}
+	c.outBody = append(c.outBody, b...)
+	return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+	if c.outCode != 0 {
+		logf(c, 3, "WriteHeader called multiple times on request.") // error level
+		return
+	}
+	c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+	hreq := &http.Request{
+		Method: "POST",
+		URL:    c.apiURL,
+		Header: http.Header{
+			apiEndpointHeader: apiEndpointHeaderValue,
+			apiMethodHeader:   apiMethodHeaderValue,
+			apiContentType:    apiContentTypeValue,
+			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+		},
+		Body:          ioutil.NopCloser(bytes.NewReader(body)),
+		ContentLength: int64(len(body)),
+		Host:          c.apiURL.Host,
+	}
+	if info := c.req.Header.Get(dapperHeader); info != "" {
+		hreq.Header.Set(dapperHeader, info)
+	}
+	if info := c.req.Header.Get(traceHeader); info != "" {
+		hreq.Header.Set(traceHeader, info)
+	}
+
+	tr := apiHTTPClient.Transport.(*http.Transport)
+
+	var timedOut int32 // atomic; set to 1 if timed out
+	t := time.AfterFunc(timeout, func() {
+		atomic.StoreInt32(&timedOut, 1)
+		tr.CancelRequest(hreq)
+	})
+	defer t.Stop()
+	defer func() {
+		// Check if timeout was exceeded.
+		if atomic.LoadInt32(&timedOut) != 0 {
+			err = errTimeout
+		}
+	}()
+
+	hresp, err := apiHTTPClient.Do(hreq)
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	defer hresp.Body.Close()
+	hrespBody, err := ioutil.ReadAll(hresp.Body)
+	if hresp.StatusCode != 200 {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge response bad: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errors.New("not an App Engine context")
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	// Default RPC timeout is 60s.
+	timeout := 60 * time.Second
+	if deadline, ok := ctx.Deadline(); ok {
+		timeout = deadline.Sub(time.Now())
+	}
+
+	data, err := proto.Marshal(in)
+	if err != nil {
+		return err
+	}
+
+	ticket := c.req.Header.Get(ticketHeader)
+	// Fall back to the background ticket when the request ticket is not available in Flex.
+	if ticket == "" {
+		ticket = getDefaultTicket()
+	}
+	req := &remotepb.Request{
+		ServiceName: &service,
+		Method:      &method,
+		Request:     data,
+		RequestId:   &ticket,
+	}
+	hreqBody, err := proto.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	hrespBody, err := c.post(hreqBody, timeout)
+	if err != nil {
+		return err
+	}
+
+	res := &remotepb.Response{}
+	if err := proto.Unmarshal(hrespBody, res); err != nil {
+		return err
+	}
+	if res.RpcError != nil {
+		ce := &CallError{
+			Detail: res.RpcError.GetDetail(),
+			Code:   *res.RpcError.Code,
+		}
+		switch remotepb.RpcError_ErrorCode(ce.Code) {
+		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+			ce.Timeout = true
+		}
+		return ce
+	}
+	if res.ApplicationError != nil {
+		return &APIError{
+			Service: *req.ServiceName,
+			Detail:  res.ApplicationError.GetDetail(),
+			Code:    *res.ApplicationError.Code,
+		}
+	}
+	if res.Exception != nil || res.JavaException != nil {
+		// This shouldn't happen, but let's be defensive.
+		return &CallError{
+			Detail: "service bridge returned exception",
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+	return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+	// Truncate long log lines.
+	// TODO(dsymonds): Check if this is still necessary.
+	const lim = 8 << 10
+	if len(*ll.Message) > lim {
+		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+	}
+
+	c.pendingLogs.Lock()
+	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+	c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+	0: "DEBUG",
+	1: "INFO",
+	2: "WARNING",
+	3: "ERROR",
+	4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+	s := fmt.Sprintf(format, args...)
+	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+	c.addLogLine(&logpb.UserAppLogLine{
+		TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+		Level:         &level,
+		Message:       &s,
+	})
+	log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+	c.pendingLogs.Lock()
+	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+	n, rem := 0, 30<<20
+	for ; n < len(c.pendingLogs.lines); n++ {
+		ll := c.pendingLogs.lines[n]
+		// Each log line will require about 3 bytes of overhead.
+		nb := proto.Size(ll) + 3
+		if nb > rem {
+			break
+		}
+		rem -= nb
+	}
+	lines := c.pendingLogs.lines[:n]
+	c.pendingLogs.lines = c.pendingLogs.lines[n:]
+	c.pendingLogs.Unlock()
+
+	if len(lines) == 0 && !force {
+		// Nothing to flush.
+		return false
+	}
+
+	rescueLogs := false
+	defer func() {
+		if rescueLogs {
+			c.pendingLogs.Lock()
+			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+			c.pendingLogs.Unlock()
+		}
+	}()
+
+	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+		LogLine: lines,
+	})
+	if err != nil {
+		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+		rescueLogs = true
+		return false
+	}
+
+	req := &logpb.FlushRequest{
+		Logs: buf,
+	}
+	res := &basepb.VoidProto{}
+	c.pendingLogs.Lock()
+	c.pendingLogs.flushes++
+	c.pendingLogs.Unlock()
+	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+		log.Printf("internal.flushLog: Flush RPC: %v", err)
+		rescueLogs = true
+		return false
+	}
+	return true
+}
+
+const (
+	// Log flushing parameters.
+	flushInterval      = 1 * time.Second
+	forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+	lastFlush := time.Now()
+	tick := time.NewTicker(flushInterval)
+	for {
+		select {
+		case <-stop:
+			// Request finished.
+			tick.Stop()
+			return
+		case <-tick.C:
+			force := time.Now().Sub(lastFlush) > forceFlushInterval
+			if c.flushLog(force) {
+				lastFlush = time.Now()
+			}
+		}
+	}
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 0000000..597f66e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,159 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"appengine"
+	"appengine_internal"
+	basepb "appengine_internal/base"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+func fromContext(ctx netcontext.Context) appengine.Context {
+	c, _ := ctx.Value(&contextKey).(appengine.Context)
+	return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
+	return fromContext(ctx)
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+
+	s := &basepb.StringProto{}
+	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+	if ns := s.GetValue(); ns != "" {
+		ctx = NamespacedContext(ctx, ns)
+	}
+
+	return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		if req, ok := c.Request().(*http.Request); ok {
+			return req.Header
+		}
+	}
+	return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	c := appengine.NewContext(req)
+	return withContext(parent, c)
+}
+
+type testingContext struct {
+	appengine.Context
+
+	req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+	if service == "__go__" && method == "GetNamespace" {
+		return nil
+	}
+	return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errors.New("not an App Engine context")
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	var opts *appengine_internal.CallOptions
+	if d, ok := ctx.Deadline(); ok {
+		opts = &appengine_internal.CallOptions{
+			Timeout: d.Sub(time.Now()),
+		}
+	}
+
+	err := c.Call(service, method, in, out, opts)
+	switch v := err.(type) {
+	case *appengine_internal.APIError:
+		return &APIError{
+			Service: v.Service,
+			Detail:  v.Detail,
+			Code:    v.Code,
+		}
+	case *appengine_internal.CallError:
+		return &CallError{
+			Detail:  v.Detail,
+			Code:    v.Code,
+			Timeout: v.Timeout,
+		}
+	}
+	return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+	var fn func(format string, args ...interface{})
+	switch level {
+	case 0:
+		fn = c.Debugf
+	case 1:
+		fn = c.Infof
+	case 2:
+		fn = c.Warningf
+	case 3:
+		fn = c.Errorf
+	case 4:
+		fn = c.Criticalf
+	default:
+		// This shouldn't happen.
+		fn = c.Criticalf
+	}
+	fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 0000000..2db33a7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+	// We avoid appending to any existing call override
+	// so we don't risk overwriting a popped stack below.
+	var cofs []CallOverrideFunc
+	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+		cofs = append(cofs, uf...)
+	}
+	cofs = append(cofs, f)
+	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+	if len(cofs) == 0 {
+		return nil, nil, false
+	}
+	// We found a list of overrides; grab the last, and reconstitute a
+	// context that will hide it.
+	f := cofs[len(cofs)-1]
+	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+	return f, ctx, true
+}
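+
+// In effect, WithCallOverride and callOverrideFromContext form a stack: if
+// overrides f1 and then f2 are attached to a context, callOverrideFromContext
+// returns f2 together with a context from which a later lookup returns f1.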
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+	return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+	return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+	// If there's no namespace, return the empty string.
+	ns, _ := ctx.Value(&namespaceKey).(string)
+	return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+		return id
+	}
+	return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+		f(level, format, args...)
+		return
+	}
+	logf(fromContext(ctx), level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+	return withNamespace(ctx, namespace)
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+	if i := strings.Index(appid, "~"); i != -1 {
+		partition, appid = appid[:i], appid[i+1:]
+	}
+	if i := strings.Index(appid, ":"); i != -1 {
+		domain, appid = appid[:i], appid[i+1:]
+	}
+	return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+	_, dom, dis := parseFullAppID(fullAppID)
+	if dom != "" {
+		return dom + ":" + dis
+	}
+	return dis
+}
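+
+// For example, parseFullAppID("s~example.com:myapp") yields partition "s",
+// domain "example.com" and displayID "myapp", so appID("s~example.com:myapp")
+// returns "example.com:myapp", while appID("s~myapp") returns just "myapp".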
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 0000000..87d9701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+	AppIdentityServiceError
+	SignForAppRequest
+	SignForAppResponse
+	GetPublicCertificateForAppRequest
+	PublicCertificate
+	GetPublicCertificateForAppResponse
+	GetServiceAccountNameRequest
+	GetServiceAccountNameResponse
+	GetAccessTokenRequest
+	GetAccessTokenResponse
+	GetDefaultGcsBucketNameRequest
+	GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+	AppIdentityServiceError_SUCCESS           AppIdentityServiceError_ErrorCode = 0
+	AppIdentityServiceError_UNKNOWN_SCOPE     AppIdentityServiceError_ErrorCode = 9
+	AppIdentityServiceError_BLOB_TOO_LARGE    AppIdentityServiceError_ErrorCode = 1000
+	AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+	AppIdentityServiceError_NOT_A_VALID_APP   AppIdentityServiceError_ErrorCode = 1002
+	AppIdentityServiceError_UNKNOWN_ERROR     AppIdentityServiceError_ErrorCode = 1003
+	AppIdentityServiceError_NOT_ALLOWED       AppIdentityServiceError_ErrorCode = 1005
+	AppIdentityServiceError_NOT_IMPLEMENTED   AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+	0:    "SUCCESS",
+	9:    "UNKNOWN_SCOPE",
+	1000: "BLOB_TOO_LARGE",
+	1001: "DEADLINE_EXCEEDED",
+	1002: "NOT_A_VALID_APP",
+	1003: "UNKNOWN_ERROR",
+	1005: "NOT_ALLOWED",
+	1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+	"SUCCESS":           0,
+	"UNKNOWN_SCOPE":     9,
+	"BLOB_TOO_LARGE":    1000,
+	"DEADLINE_EXCEEDED": 1001,
+	"NOT_A_VALID_APP":   1002,
+	"UNKNOWN_ERROR":     1003,
+	"NOT_ALLOWED":       1005,
+	"NOT_IMPLEMENTED":   1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+	p := new(AppIdentityServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+	return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = AppIdentityServiceError_ErrorCode(value)
+	return nil
+}
+
+type AppIdentityServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset()         { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage()    {}
+
+type SignForAppRequest struct {
+	BytesToSign      []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset()         { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage()    {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+	if m != nil {
+		return m.BytesToSign
+	}
+	return nil
+}
+
+type SignForAppResponse struct {
+	KeyName          *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+	SignatureBytes   []byte  `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset()         { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage()    {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+	if m != nil && m.KeyName != nil {
+		return *m.KeyName
+	}
+	return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+	if m != nil {
+		return m.SignatureBytes
+	}
+	return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset()         { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage()    {}
+
+type PublicCertificate struct {
+	KeyName            *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+	X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+	XXX_unrecognized   []byte  `json:"-"`
+}
+
+func (m *PublicCertificate) Reset()         { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage()    {}
+
+func (m *PublicCertificate) GetKeyName() string {
+	if m != nil && m.KeyName != nil {
+		return *m.KeyName
+	}
+	return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+	if m != nil && m.X509CertificatePem != nil {
+		return *m.X509CertificatePem
+	}
+	return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+	PublicCertificateList      []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+	MaxClientCacheTimeInSecond *int64               `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+	XXX_unrecognized           []byte               `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset()         { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage()    {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+	if m != nil {
+		return m.PublicCertificateList
+	}
+	return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+	if m != nil && m.MaxClientCacheTimeInSecond != nil {
+		return *m.MaxClientCacheTimeInSecond
+	}
+	return 0
+}
+
+type GetServiceAccountNameRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset()         { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage()    {}
+
+type GetServiceAccountNameResponse struct {
+	ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+	XXX_unrecognized   []byte  `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset()         { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage()    {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+	if m != nil && m.ServiceAccountName != nil {
+		return *m.ServiceAccountName
+	}
+	return ""
+}
+
+type GetAccessTokenRequest struct {
+	Scope              []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+	ServiceAccountId   *int64   `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+	ServiceAccountName *string  `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+	XXX_unrecognized   []byte   `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset()         { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage()    {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+	if m != nil {
+		return m.Scope
+	}
+	return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+	if m != nil && m.ServiceAccountId != nil {
+		return *m.ServiceAccountId
+	}
+	return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+	if m != nil && m.ServiceAccountName != nil {
+		return *m.ServiceAccountName
+	}
+	return ""
+}
+
+type GetAccessTokenResponse struct {
+	AccessToken      *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+	ExpirationTime   *int64  `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset()         { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage()    {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+	if m != nil && m.AccessToken != nil {
+		return *m.AccessToken
+	}
+	return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+	if m != nil && m.ExpirationTime != nil {
+		return *m.ExpirationTime
+	}
+	return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset()         { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage()    {}
+
+type GetDefaultGcsBucketNameResponse struct {
+	DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+	XXX_unrecognized     []byte  `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset()         { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage()    {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+	if m != nil && m.DefaultGcsBucketName != nil {
+		return *m.DefaultGcsBucketName
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 0000000..19610ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+  enum ErrorCode {
+    SUCCESS = 0;
+    UNKNOWN_SCOPE = 9;
+    BLOB_TOO_LARGE = 1000;
+    DEADLINE_EXCEEDED = 1001;
+    NOT_A_VALID_APP = 1002;
+    UNKNOWN_ERROR = 1003;
+    NOT_ALLOWED = 1005;
+    NOT_IMPLEMENTED = 1006;
+  }
+}
+
+message SignForAppRequest {
+  optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+  optional string key_name = 1;
+  optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+  optional string key_name = 1;
+  optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+  repeated PublicCertificate public_certificate_list = 1;
+  optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+  optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+  repeated string scope = 1;
+  optional int64 service_account_id = 2;
+  optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+  optional string access_token = 1;
+  optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+  optional string default_gcs_bucket_name = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 0000000..36a1956
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+	StringProto
+	Integer32Proto
+	Integer64Proto
+	BoolProto
+	DoubleProto
+	BytesProto
+	VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+	Value            *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StringProto) Reset()         { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage()    {}
+
+func (m *StringProto) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type Integer32Proto struct {
+	Value            *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage()    {}
+
+func (m *Integer32Proto) GetValue() int32 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type Integer64Proto struct {
+	Value            *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage()    {}
+
+func (m *Integer64Proto) GetValue() int64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type BoolProto struct {
+	Value            *bool  `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset()         { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage()    {}
+
+func (m *BoolProto) GetValue() bool {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return false
+}
+
+type DoubleProto struct {
+	Value            *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *DoubleProto) Reset()         { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage()    {}
+
+func (m *DoubleProto) GetValue() float64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type BytesProto struct {
+	Value            []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset()         { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage()    {}
+
+func (m *BytesProto) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type VoidProto struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset()         { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage()    {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+  required string value = 1;
+}
+
+message Integer32Proto {
+  required int32 value = 1;
+}
+
+message Integer64Proto {
+  required int64 value = 1;
+}
+
+message BoolProto {
+  required bool value = 1;
+}
+
+message DoubleProto {
+  required double value = 1;
+}
+
+message BytesProto {
+  required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 0000000..8613cb7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+	Action
+	PropertyValue
+	Property
+	Path
+	Reference
+	User
+	EntityProto
+	CompositeProperty
+	Index
+	CompositeIndex
+	IndexPostfix
+	IndexPosition
+	Snapshot
+	InternalHeader
+	Transaction
+	Query
+	CompiledQuery
+	CompiledCursor
+	Cursor
+	Error
+	Cost
+	GetRequest
+	GetResponse
+	PutRequest
+	PutResponse
+	TouchRequest
+	TouchResponse
+	DeleteRequest
+	DeleteResponse
+	NextRequest
+	QueryResult
+	AllocateIdsRequest
+	AllocateIdsResponse
+	CompositeIndices
+	AddActionsRequest
+	AddActionsResponse
+	BeginTransactionRequest
+	CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+	Property_NO_MEANING       Property_Meaning = 0
+	Property_BLOB             Property_Meaning = 14
+	Property_TEXT             Property_Meaning = 15
+	Property_BYTESTRING       Property_Meaning = 16
+	Property_ATOM_CATEGORY    Property_Meaning = 1
+	Property_ATOM_LINK        Property_Meaning = 2
+	Property_ATOM_TITLE       Property_Meaning = 3
+	Property_ATOM_CONTENT     Property_Meaning = 4
+	Property_ATOM_SUMMARY     Property_Meaning = 5
+	Property_ATOM_AUTHOR      Property_Meaning = 6
+	Property_GD_WHEN          Property_Meaning = 7
+	Property_GD_EMAIL         Property_Meaning = 8
+	Property_GEORSS_POINT     Property_Meaning = 9
+	Property_GD_IM            Property_Meaning = 10
+	Property_GD_PHONENUMBER   Property_Meaning = 11
+	Property_GD_POSTALADDRESS Property_Meaning = 12
+	Property_GD_RATING        Property_Meaning = 13
+	Property_BLOBKEY          Property_Meaning = 17
+	Property_ENTITY_PROTO     Property_Meaning = 19
+	Property_INDEX_VALUE      Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+	0:  "NO_MEANING",
+	14: "BLOB",
+	15: "TEXT",
+	16: "BYTESTRING",
+	1:  "ATOM_CATEGORY",
+	2:  "ATOM_LINK",
+	3:  "ATOM_TITLE",
+	4:  "ATOM_CONTENT",
+	5:  "ATOM_SUMMARY",
+	6:  "ATOM_AUTHOR",
+	7:  "GD_WHEN",
+	8:  "GD_EMAIL",
+	9:  "GEORSS_POINT",
+	10: "GD_IM",
+	11: "GD_PHONENUMBER",
+	12: "GD_POSTALADDRESS",
+	13: "GD_RATING",
+	17: "BLOBKEY",
+	19: "ENTITY_PROTO",
+	18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+	"NO_MEANING":       0,
+	"BLOB":             14,
+	"TEXT":             15,
+	"BYTESTRING":       16,
+	"ATOM_CATEGORY":    1,
+	"ATOM_LINK":        2,
+	"ATOM_TITLE":       3,
+	"ATOM_CONTENT":     4,
+	"ATOM_SUMMARY":     5,
+	"ATOM_AUTHOR":      6,
+	"GD_WHEN":          7,
+	"GD_EMAIL":         8,
+	"GEORSS_POINT":     9,
+	"GD_IM":            10,
+	"GD_PHONENUMBER":   11,
+	"GD_POSTALADDRESS": 12,
+	"GD_RATING":        13,
+	"BLOBKEY":          17,
+	"ENTITY_PROTO":     19,
+	"INDEX_VALUE":      18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+	p := new(Property_Meaning)
+	*p = x
+	return p
+}
+func (x Property_Meaning) String() string {
+	return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+	if err != nil {
+		return err
+	}
+	*x = Property_Meaning(value)
+	return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+	Property_HTML Property_FtsTokenizationOption = 1
+	Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+	1: "HTML",
+	2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+	"HTML": 1,
+	"ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+	p := new(Property_FtsTokenizationOption)
+	*p = x
+	return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+	return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+	if err != nil {
+		return err
+	}
+	*x = Property_FtsTokenizationOption(value)
+	return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+	EntityProto_GD_CONTACT EntityProto_Kind = 1
+	EntityProto_GD_EVENT   EntityProto_Kind = 2
+	EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+	1: "GD_CONTACT",
+	2: "GD_EVENT",
+	3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+	"GD_CONTACT": 1,
+	"GD_EVENT":   2,
+	"GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+	p := new(EntityProto_Kind)
+	*p = x
+	return p
+}
+func (x EntityProto_Kind) String() string {
+	return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+	if err != nil {
+		return err
+	}
+	*x = EntityProto_Kind(value)
+	return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+	Index_Property_ASCENDING  Index_Property_Direction = 1
+	Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+	1: "ASCENDING",
+	2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+	"ASCENDING":  1,
+	"DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+	p := new(Index_Property_Direction)
+	*p = x
+	return p
+}
+func (x Index_Property_Direction) String() string {
+	return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+	if err != nil {
+		return err
+	}
+	*x = Index_Property_Direction(value)
+	return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+	CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+	CompositeIndex_READ_WRITE CompositeIndex_State = 2
+	CompositeIndex_DELETED    CompositeIndex_State = 3
+	CompositeIndex_ERROR      CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+	1: "WRITE_ONLY",
+	2: "READ_WRITE",
+	3: "DELETED",
+	4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+	"WRITE_ONLY": 1,
+	"READ_WRITE": 2,
+	"DELETED":    3,
+	"ERROR":      4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+	p := new(CompositeIndex_State)
+	*p = x
+	return p
+}
+func (x CompositeIndex_State) String() string {
+	return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+	if err != nil {
+		return err
+	}
+	*x = CompositeIndex_State(value)
+	return nil
+}
+
+type Snapshot_Status int32
+
+const (
+	Snapshot_INACTIVE Snapshot_Status = 0
+	Snapshot_ACTIVE   Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+	0: "INACTIVE",
+	1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+	"INACTIVE": 0,
+	"ACTIVE":   1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+	p := new(Snapshot_Status)
+	*p = x
+	return p
+}
+func (x Snapshot_Status) String() string {
+	return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+	if err != nil {
+		return err
+	}
+	*x = Snapshot_Status(value)
+	return nil
+}
+
+type Query_Hint int32
+
+const (
+	Query_ORDER_FIRST    Query_Hint = 1
+	Query_ANCESTOR_FIRST Query_Hint = 2
+	Query_FILTER_FIRST   Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+	1: "ORDER_FIRST",
+	2: "ANCESTOR_FIRST",
+	3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+	"ORDER_FIRST":    1,
+	"ANCESTOR_FIRST": 2,
+	"FILTER_FIRST":   3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+	p := new(Query_Hint)
+	*p = x
+	return p
+}
+func (x Query_Hint) String() string {
+	return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+	if err != nil {
+		return err
+	}
+	*x = Query_Hint(value)
+	return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+	Query_Filter_LESS_THAN             Query_Filter_Operator = 1
+	Query_Filter_LESS_THAN_OR_EQUAL    Query_Filter_Operator = 2
+	Query_Filter_GREATER_THAN          Query_Filter_Operator = 3
+	Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+	Query_Filter_EQUAL                 Query_Filter_Operator = 5
+	Query_Filter_IN                    Query_Filter_Operator = 6
+	Query_Filter_EXISTS                Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+	1: "LESS_THAN",
+	2: "LESS_THAN_OR_EQUAL",
+	3: "GREATER_THAN",
+	4: "GREATER_THAN_OR_EQUAL",
+	5: "EQUAL",
+	6: "IN",
+	7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+	"LESS_THAN":             1,
+	"LESS_THAN_OR_EQUAL":    2,
+	"GREATER_THAN":          3,
+	"GREATER_THAN_OR_EQUAL": 4,
+	"EQUAL":                 5,
+	"IN":                    6,
+	"EXISTS":                7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+	p := new(Query_Filter_Operator)
+	*p = x
+	return p
+}
+func (x Query_Filter_Operator) String() string {
+	return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+	if err != nil {
+		return err
+	}
+	*x = Query_Filter_Operator(value)
+	return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+	Query_Order_ASCENDING  Query_Order_Direction = 1
+	Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+	1: "ASCENDING",
+	2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+	"ASCENDING":  1,
+	"DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+	p := new(Query_Order_Direction)
+	*p = x
+	return p
+}
+func (x Query_Order_Direction) String() string {
+	return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+	if err != nil {
+		return err
+	}
+	*x = Query_Order_Direction(value)
+	return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+	Error_BAD_REQUEST                  Error_ErrorCode = 1
+	Error_CONCURRENT_TRANSACTION       Error_ErrorCode = 2
+	Error_INTERNAL_ERROR               Error_ErrorCode = 3
+	Error_NEED_INDEX                   Error_ErrorCode = 4
+	Error_TIMEOUT                      Error_ErrorCode = 5
+	Error_PERMISSION_DENIED            Error_ErrorCode = 6
+	Error_BIGTABLE_ERROR               Error_ErrorCode = 7
+	Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+	Error_CAPABILITY_DISABLED          Error_ErrorCode = 9
+	Error_TRY_ALTERNATE_BACKEND        Error_ErrorCode = 10
+	Error_SAFE_TIME_TOO_OLD            Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+	1:  "BAD_REQUEST",
+	2:  "CONCURRENT_TRANSACTION",
+	3:  "INTERNAL_ERROR",
+	4:  "NEED_INDEX",
+	5:  "TIMEOUT",
+	6:  "PERMISSION_DENIED",
+	7:  "BIGTABLE_ERROR",
+	8:  "COMMITTED_BUT_STILL_APPLYING",
+	9:  "CAPABILITY_DISABLED",
+	10: "TRY_ALTERNATE_BACKEND",
+	11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+	"BAD_REQUEST":                  1,
+	"CONCURRENT_TRANSACTION":       2,
+	"INTERNAL_ERROR":               3,
+	"NEED_INDEX":                   4,
+	"TIMEOUT":                      5,
+	"PERMISSION_DENIED":            6,
+	"BIGTABLE_ERROR":               7,
+	"COMMITTED_BUT_STILL_APPLYING": 8,
+	"CAPABILITY_DISABLED":          9,
+	"TRY_ALTERNATE_BACKEND":        10,
+	"SAFE_TIME_TOO_OLD":            11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+	p := new(Error_ErrorCode)
+	*p = x
+	return p
+}
+func (x Error_ErrorCode) String() string {
+	return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = Error_ErrorCode(value)
+	return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+	PutRequest_CURRENT    PutRequest_AutoIdPolicy = 0
+	PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+	0: "CURRENT",
+	1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+	"CURRENT":    0,
+	"SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+	p := new(PutRequest_AutoIdPolicy)
+	*p = x
+	return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+	return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+	if err != nil {
+		return err
+	}
+	*x = PutRequest_AutoIdPolicy(value)
+	return nil
+}
+
+type Action struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset()         { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage()    {}
+
+type PropertyValue struct {
+	Int64Value       *int64                        `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+	BooleanValue     *bool                         `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+	StringValue      *string                       `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+	DoubleValue      *float64                      `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+	Pointvalue       *PropertyValue_PointValue     `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+	Uservalue        *PropertyValue_UserValue      `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+	Referencevalue   *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (m *PropertyValue) Reset()         { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage()    {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+	if m != nil && m.Int64Value != nil {
+		return *m.Int64Value
+	}
+	return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+	if m != nil && m.BooleanValue != nil {
+		return *m.BooleanValue
+	}
+	return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+	if m != nil && m.StringValue != nil {
+		return *m.StringValue
+	}
+	return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+	if m != nil {
+		return m.Pointvalue
+	}
+	return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+	if m != nil {
+		return m.Uservalue
+	}
+	return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+	if m != nil {
+		return m.Referencevalue
+	}
+	return nil
+}
+
+type PropertyValue_PointValue struct {
+	X                *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+	Y                *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset()         { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage()    {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+	if m != nil && m.X != nil {
+		return *m.X
+	}
+	return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+	if m != nil && m.Y != nil {
+		return *m.Y
+	}
+	return 0
+}
+
+type PropertyValue_UserValue struct {
+	Email             *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+	AuthDomain        *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+	Nickname          *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+	FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+	FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+	XXX_unrecognized  []byte  `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset()         { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage()    {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+	if m != nil && m.Email != nil {
+		return *m.Email
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+	if m != nil && m.FederatedIdentity != nil {
+		return *m.FederatedIdentity
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+	if m != nil && m.FederatedProvider != nil {
+		return *m.FederatedProvider
+	}
+	return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+	App              *string                                     `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+	NameSpace        *string                                     `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+	Pathelement      []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+	XXX_unrecognized []byte                                      `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset()         { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage()    {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+	if m != nil {
+		return m.Pathelement
+	}
+	return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+	Type             *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+	Id               *int64  `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+	Name             *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+	*m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage()    {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+type Property struct {
+	Meaning               *Property_Meaning               `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+	MeaningUri            *string                         `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+	Name                  *string                         `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+	Value                 *PropertyValue                  `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+	Multiple              *bool                           `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+	Searchable            *bool                           `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+	FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+	Locale                *string                         `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+	XXX_unrecognized      []byte                          `json:"-"`
+}
+
+func (m *Property) Reset()         { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage()    {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+	if m != nil && m.Meaning != nil {
+		return *m.Meaning
+	}
+	return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+	if m != nil && m.MeaningUri != nil {
+		return *m.MeaningUri
+	}
+	return ""
+}
+
+func (m *Property) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *Property) GetMultiple() bool {
+	if m != nil && m.Multiple != nil {
+		return *m.Multiple
+	}
+	return false
+}
+
+func (m *Property) GetSearchable() bool {
+	if m != nil && m.Searchable != nil {
+		return *m.Searchable
+	}
+	return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+	if m != nil && m.FtsTokenizationOption != nil {
+		return *m.FtsTokenizationOption
+	}
+	return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+	if m != nil && m.Locale != nil {
+		return *m.Locale
+	}
+	return Default_Property_Locale
+}
+
+type Path struct {
+	Element          []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *Path) Reset()         { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage()    {}
+
+func (m *Path) GetElement() []*Path_Element {
+	if m != nil {
+		return m.Element
+	}
+	return nil
+}
+
+type Path_Element struct {
+	Type             *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+	Id               *int64  `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+	Name             *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Path_Element) Reset()         { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage()    {}
+
+func (m *Path_Element) GetType() string {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *Path_Element) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+type Reference struct {
+	App              *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+	NameSpace        *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+	Path             *Path   `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Reference) Reset()         { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage()    {}
+
+func (m *Reference) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *Reference) GetPath() *Path {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+type User struct {
+	Email             *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+	AuthDomain        *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+	Nickname          *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+	FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+	FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+	XXX_unrecognized  []byte  `json:"-"`
+}
+
+func (m *User) Reset()         { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage()    {}
+
+func (m *User) GetEmail() string {
+	if m != nil && m.Email != nil {
+		return *m.Email
+	}
+	return ""
+}
+
+func (m *User) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *User) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+	if m != nil && m.FederatedIdentity != nil {
+		return *m.FederatedIdentity
+	}
+	return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+	if m != nil && m.FederatedProvider != nil {
+		return *m.FederatedProvider
+	}
+	return ""
+}
+
+type EntityProto struct {
+	Key              *Reference        `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+	EntityGroup      *Path             `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+	Owner            *User             `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+	Kind             *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+	KindUri          *string           `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+	Property         []*Property       `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+	RawProperty      []*Property       `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+	Rank             *int32            `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *EntityProto) Reset()         { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage()    {}
+
+func (m *EntityProto) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+	if m != nil {
+		return m.EntityGroup
+	}
+	return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+	if m != nil {
+		return m.Owner
+	}
+	return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+	if m != nil && m.KindUri != nil {
+		return *m.KindUri
+	}
+	return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+	if m != nil {
+		return m.RawProperty
+	}
+	return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+	if m != nil && m.Rank != nil {
+		return *m.Rank
+	}
+	return 0
+}
+
+type CompositeProperty struct {
+	IndexId          *int64   `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+	Value            []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *CompositeProperty) Reset()         { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage()    {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+	if m != nil && m.IndexId != nil {
+		return *m.IndexId
+	}
+	return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Index struct {
+	EntityType       *string           `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+	Ancestor         *bool             `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+	Property         []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Index) Reset()         { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage()    {}
+
+func (m *Index) GetEntityType() string {
+	if m != nil && m.EntityType != nil {
+		return *m.EntityType
+	}
+	return ""
+}
+
+func (m *Index) GetAncestor() bool {
+	if m != nil && m.Ancestor != nil {
+		return *m.Ancestor
+	}
+	return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+type Index_Property struct {
+	Name             *string                   `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+	Direction        *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *Index_Property) Reset()         { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage()    {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+	AppId             *string               `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	Id                *int64                `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+	Definition        *Index                `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+	State             *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+	OnlyUseIfRequired *bool                 `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+	XXX_unrecognized  []byte                `json:"-"`
+}
+
+func (m *CompositeIndex) Reset()         { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage()    {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+	if m != nil {
+		return m.Definition
+	}
+	return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+	if m != nil && m.State != nil {
+		return *m.State
+	}
+	return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+	if m != nil && m.OnlyUseIfRequired != nil {
+		return *m.OnlyUseIfRequired
+	}
+	return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+	IndexValue       []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+	Key              *Reference                 `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+	Before           *bool                      `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+	XXX_unrecognized []byte                     `json:"-"`
+}
+
+func (m *IndexPostfix) Reset()         { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage()    {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+	if m != nil {
+		return m.IndexValue
+	}
+	return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+	if m != nil && m.Before != nil {
+		return *m.Before
+	}
+	return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+	PropertyName     *string        `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+	Value            *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset()         { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage()    {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+	if m != nil && m.PropertyName != nil {
+		return *m.PropertyName
+	}
+	return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type IndexPosition struct {
+	Key              *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+	Before           *bool   `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *IndexPosition) Reset()         { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage()    {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+	if m != nil && m.Before != nil {
+		return *m.Before
+	}
+	return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+	Ts               *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+func (m *Snapshot) GetTs() int64 {
+	if m != nil && m.Ts != nil {
+		return *m.Ts
+	}
+	return 0
+}
+
+type InternalHeader struct {
+	Qos              *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *InternalHeader) Reset()         { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage()    {}
+
+func (m *InternalHeader) GetQos() string {
+	if m != nil && m.Qos != nil {
+		return *m.Qos
+	}
+	return ""
+}
+
+type Transaction struct {
+	Header           *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+	Handle           *uint64         `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+	App              *string         `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+	MarkChanges      *bool           `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *Transaction) Reset()         { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage()    {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+	if m != nil && m.Handle != nil {
+		return *m.Handle
+	}
+	return 0
+}
+
+func (m *Transaction) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+	Header              *InternalHeader   `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+	App                 *string           `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+	NameSpace           *string           `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+	Kind                *string           `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+	Ancestor            *Reference        `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+	Filter              []*Query_Filter   `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+	SearchQuery         *string           `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+	Order               []*Query_Order    `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+	Hint                *Query_Hint       `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+	Count               *int32            `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+	Offset              *int32            `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+	Limit               *int32            `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+	CompiledCursor      *CompiledCursor   `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+	EndCompiledCursor   *CompiledCursor   `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+	CompositeIndex      []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+	RequirePerfectPlan  *bool             `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+	KeysOnly            *bool             `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+	Transaction         *Transaction      `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+	Compile             *bool             `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+	FailoverMs          *int64            `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+	Strong              *bool             `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+	PropertyName        []string          `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+	GroupByPropertyName []string          `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+	Distinct            *bool             `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+	MinSafeTimeSeconds  *int64            `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+	SafeReplicaName     []string          `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+	PersistOffset       *bool             `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+	XXX_unrecognized    []byte            `json:"-"`
+}
+
+func (m *Query) Reset()         { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage()    {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *Query) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Query) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *Query) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+	if m != nil {
+		return m.Ancestor
+	}
+	return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+	if m != nil {
+		return m.Filter
+	}
+	return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+	if m != nil && m.SearchQuery != nil {
+		return *m.SearchQuery
+	}
+	return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+	if m != nil {
+		return m.Order
+	}
+	return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+	if m != nil && m.Hint != nil {
+		return *m.Hint
+	}
+	return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *Query) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.CompiledCursor
+	}
+	return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.EndCompiledCursor
+	}
+	return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+	if m != nil && m.RequirePerfectPlan != nil {
+		return *m.RequirePerfectPlan
+	}
+	return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *Query) GetCompile() bool {
+	if m != nil && m.Compile != nil {
+		return *m.Compile
+	}
+	return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+	if m != nil && m.FailoverMs != nil {
+		return *m.FailoverMs
+	}
+	return 0
+}
+
+func (m *Query) GetStrong() bool {
+	if m != nil && m.Strong != nil {
+		return *m.Strong
+	}
+	return false
+}
+
+func (m *Query) GetPropertyName() []string {
+	if m != nil {
+		return m.PropertyName
+	}
+	return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+	if m != nil {
+		return m.GroupByPropertyName
+	}
+	return nil
+}
+
+func (m *Query) GetDistinct() bool {
+	if m != nil && m.Distinct != nil {
+		return *m.Distinct
+	}
+	return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+	if m != nil && m.MinSafeTimeSeconds != nil {
+		return *m.MinSafeTimeSeconds
+	}
+	return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+	if m != nil {
+		return m.SafeReplicaName
+	}
+	return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+	if m != nil && m.PersistOffset != nil {
+		return *m.PersistOffset
+	}
+	return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+	Op               *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+	Property         []*Property            `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *Query_Filter) Reset()         { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage()    {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+	if m != nil && m.Op != nil {
+		return *m.Op
+	}
+	return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+type Query_Order struct {
+	Property         *string                `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+	Direction        *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *Query_Order) Reset()         { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage()    {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+	if m != nil && m.Property != nil {
+		return *m.Property
+	}
+	return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+	Primaryscan       *CompiledQuery_PrimaryScan     `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+	Mergejoinscan     []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+	IndexDef          *Index                         `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+	Offset            *int32                         `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+	Limit             *int32                         `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+	KeysOnly          *bool                          `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+	PropertyName      []string                       `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+	DistinctInfixSize *int32                         `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+	Entityfilter      *CompiledQuery_EntityFilter    `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+	XXX_unrecognized  []byte                         `json:"-"`
+}
+
+func (m *CompiledQuery) Reset()         { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage()    {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+	if m != nil {
+		return m.Primaryscan
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+	if m != nil {
+		return m.Mergejoinscan
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+	if m != nil {
+		return m.IndexDef
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+	if m != nil {
+		return m.PropertyName
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+	if m != nil && m.DistinctInfixSize != nil {
+		return *m.DistinctInfixSize
+	}
+	return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+	if m != nil {
+		return m.Entityfilter
+	}
+	return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+	IndexName                  *string  `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+	StartKey                   *string  `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+	StartInclusive             *bool    `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+	EndKey                     *string  `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+	EndInclusive               *bool    `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+	StartPostfixValue          []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+	EndPostfixValue            []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+	EndUnappliedLogTimestampUs *int64   `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+	XXX_unrecognized           []byte   `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset()         { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage()    {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+	if m != nil && m.IndexName != nil {
+		return *m.IndexName
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+	if m != nil && m.StartKey != nil {
+		return *m.StartKey
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+	if m != nil && m.StartInclusive != nil {
+		return *m.StartInclusive
+	}
+	return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+	if m != nil && m.EndKey != nil {
+		return *m.EndKey
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+	if m != nil && m.EndInclusive != nil {
+		return *m.EndInclusive
+	}
+	return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+	if m != nil {
+		return m.StartPostfixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+	if m != nil {
+		return m.EndPostfixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+	if m != nil && m.EndUnappliedLogTimestampUs != nil {
+		return *m.EndUnappliedLogTimestampUs
+	}
+	return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+	IndexName        *string  `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+	PrefixValue      []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+	ValuePrefix      *bool    `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset()         { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage()    {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+	if m != nil && m.IndexName != nil {
+		return *m.IndexName
+	}
+	return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+	if m != nil {
+		return m.PrefixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+	if m != nil && m.ValuePrefix != nil {
+		return *m.ValuePrefix
+	}
+	return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+	Distinct         *bool      `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+	Kind             *string    `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+	Ancestor         *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset()         { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage()    {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+	if m != nil && m.Distinct != nil {
+		return *m.Distinct
+	}
+	return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+	if m != nil {
+		return m.Ancestor
+	}
+	return nil
+}
+
+type CompiledCursor struct {
+	Position         *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *CompiledCursor) Reset()         { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage()    {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+	if m != nil {
+		return m.Position
+	}
+	return nil
+}
+
+type CompiledCursor_Position struct {
+	StartKey         *string                               `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+	Indexvalue       []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+	Key              *Reference                            `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+	StartInclusive   *bool                                 `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+	XXX_unrecognized []byte                                `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset()         { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage()    {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+	if m != nil && m.StartKey != nil {
+		return *m.StartKey
+	}
+	return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+	if m != nil {
+		return m.Indexvalue
+	}
+	return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+	if m != nil && m.StartInclusive != nil {
+		return *m.StartInclusive
+	}
+	return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+	Property         *string        `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+	Value            *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset()         { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage()    {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+	if m != nil && m.Property != nil {
+		return *m.Property
+	}
+	return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Cursor struct {
+	Cursor           *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+	App              *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Cursor) Reset()         { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage()    {}
+
+func (m *Cursor) GetCursor() uint64 {
+	if m != nil && m.Cursor != nil {
+		return *m.Cursor
+	}
+	return 0
+}
+
+func (m *Cursor) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+type Error struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset()         { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage()    {}
+
+type Cost struct {
+	IndexWrites             *int32           `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+	IndexWriteBytes         *int32           `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+	EntityWrites            *int32           `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+	EntityWriteBytes        *int32           `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+	Commitcost              *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+	ApproximateStorageDelta *int32           `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+	IdSequenceUpdates       *int32           `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+	XXX_unrecognized        []byte           `json:"-"`
+}
+
+func (m *Cost) Reset()         { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage()    {}
+
+func (m *Cost) GetIndexWrites() int32 {
+	if m != nil && m.IndexWrites != nil {
+		return *m.IndexWrites
+	}
+	return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+	if m != nil && m.IndexWriteBytes != nil {
+		return *m.IndexWriteBytes
+	}
+	return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+	if m != nil && m.EntityWrites != nil {
+		return *m.EntityWrites
+	}
+	return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+	if m != nil && m.EntityWriteBytes != nil {
+		return *m.EntityWriteBytes
+	}
+	return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+	if m != nil {
+		return m.Commitcost
+	}
+	return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+	if m != nil && m.ApproximateStorageDelta != nil {
+		return *m.ApproximateStorageDelta
+	}
+	return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+	if m != nil && m.IdSequenceUpdates != nil {
+		return *m.IdSequenceUpdates
+	}
+	return 0
+}
+
+type Cost_CommitCost struct {
+	RequestedEntityPuts    *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+	RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+	XXX_unrecognized       []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset()         { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage()    {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+	if m != nil && m.RequestedEntityPuts != nil {
+		return *m.RequestedEntityPuts
+	}
+	return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+	if m != nil && m.RequestedEntityDeletes != nil {
+		return *m.RequestedEntityDeletes
+	}
+	return 0
+}
+
+type GetRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference    `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+	FailoverMs       *int64          `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+	Strong           *bool           `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+	AllowDeferred    *bool           `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *GetRequest) Reset()         { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage()    {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+	if m != nil && m.FailoverMs != nil {
+		return *m.FailoverMs
+	}
+	return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+	if m != nil && m.Strong != nil {
+		return *m.Strong
+	}
+	return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+	if m != nil && m.AllowDeferred != nil {
+		return *m.AllowDeferred
+	}
+	return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+	Entity           []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+	Deferred         []*Reference          `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+	InOrder          *bool                 `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+	XXX_unrecognized []byte                `json:"-"`
+}
+
+func (m *GetResponse) Reset()         { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage()    {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+	if m != nil {
+		return m.Deferred
+	}
+	return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+	if m != nil && m.InOrder != nil {
+		return *m.InOrder
+	}
+	return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+	Entity           *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+	Key              *Reference   `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+	Version          *int64       `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset()         { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage()    {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+type PutRequest struct {
+	Header           *InternalHeader          `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+	Entity           []*EntityProto           `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+	Transaction      *Transaction             `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+	CompositeIndex   []*CompositeIndex        `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+	Trusted          *bool                    `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+	Force            *bool                    `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+	MarkChanges      *bool                    `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	Snapshot         []*Snapshot              `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	AutoIdPolicy     *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *PutRequest) Reset()         { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage()    {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+	if m != nil && m.Trusted != nil {
+		return *m.Trusted
+	}
+	return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+	if m != nil && m.AutoIdPolicy != nil {
+		return *m.AutoIdPolicy
+	}
+	return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+	Key              []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	Cost             *Cost        `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+	Version          []int64      `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *PutResponse) Reset()         { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage()    {}
+
+func (m *PutResponse) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type TouchRequest struct {
+	Header           *InternalHeader   `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference      `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	CompositeIndex   []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+	Force            *bool             `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+	Snapshot         []*Snapshot       `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *TouchRequest) Reset()         { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage()    {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type TouchResponse struct {
+	Cost             *Cost  `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset()         { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage()    {}
+
+func (m *TouchResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+type DeleteRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference    `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+	Trusted          *bool           `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+	Force            *bool           `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+	MarkChanges      *bool           `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	Snapshot         []*Snapshot     `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *DeleteRequest) Reset()         { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage()    {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+	if m != nil && m.Trusted != nil {
+		return *m.Trusted
+	}
+	return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type DeleteResponse struct {
+	Cost             *Cost   `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	Version          []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *DeleteResponse) Reset()         { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage()    {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type NextRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+	Cursor           *Cursor         `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+	Count            *int32          `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+	Offset           *int32          `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+	Compile          *bool           `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *NextRequest) Reset()         { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage()    {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+	if m != nil {
+		return m.Cursor
+	}
+	return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+	if m != nil && m.Compile != nil {
+		return *m.Compile
+	}
+	return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+	Cursor           *Cursor           `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+	Result           []*EntityProto    `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+	SkippedResults   *int32            `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+	MoreResults      *bool             `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+	KeysOnly         *bool             `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+	IndexOnly        *bool             `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+	SmallOps         *bool             `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+	CompiledQuery    *CompiledQuery    `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+	CompiledCursor   *CompiledCursor   `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+	Index            []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+	Version          []int64           `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *QueryResult) Reset()         { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage()    {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+	if m != nil {
+		return m.Cursor
+	}
+	return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+	if m != nil {
+		return m.Result
+	}
+	return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+	if m != nil && m.SkippedResults != nil {
+		return *m.SkippedResults
+	}
+	return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+	if m != nil && m.MoreResults != nil {
+		return *m.MoreResults
+	}
+	return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+	if m != nil && m.IndexOnly != nil {
+		return *m.IndexOnly
+	}
+	return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+	if m != nil && m.SmallOps != nil {
+		return *m.SmallOps
+	}
+	return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+	if m != nil {
+		return m.CompiledQuery
+	}
+	return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.CompiledCursor
+	}
+	return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+	if m != nil {
+		return m.Index
+	}
+	return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type AllocateIdsRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+	ModelKey         *Reference      `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+	Size             *int64          `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+	Max              *int64          `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+	Reserve          []*Reference    `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage()    {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+	if m != nil {
+		return m.ModelKey
+	}
+	return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+	if m != nil && m.Size != nil {
+		return *m.Size
+	}
+	return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+	if m != nil && m.Max != nil {
+		return *m.Max
+	}
+	return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+	if m != nil {
+		return m.Reserve
+	}
+	return nil
+}
+
+type AllocateIdsResponse struct {
+	Start            *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+	End              *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+	Cost             *Cost  `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage()    {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+type CompositeIndices struct {
+	Index            []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage()    {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+	if m != nil {
+		return m.Index
+	}
+	return nil
+}
+
+type AddActionsRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+	Action           []*Action       `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage()    {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+type AddActionsResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage()    {}
+
+type BeginTransactionRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+	App              *string         `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+	AllowMultipleEg  *bool           `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage()    {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+	if m != nil && m.AllowMultipleEg != nil {
+		return *m.AllowMultipleEg
+	}
+	return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+	Cost             *Cost                     `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	Version          []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage()    {}
+
+func (m *CommitResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type CommitResponse_Version struct {
+	RootEntityKey    *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+	Version          *int64     `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage()    {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+	if m != nil {
+		return m.RootEntityKey
+	}
+	return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 0000000..e76f126
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+  optional int64 int64Value = 1;
+  optional bool booleanValue = 2;
+  optional string stringValue = 3;
+  optional double doubleValue = 4;
+
+  optional group PointValue = 5 {
+    required double x = 6;
+    required double y = 7;
+  }
+
+  optional group UserValue = 8 {
+    required string email = 9;
+    required string auth_domain = 10;
+    optional string nickname = 11;
+    optional string federated_identity = 21;
+    optional string federated_provider = 22;
+  }
+
+  optional group ReferenceValue = 12 {
+    required string app = 13;
+    optional string name_space = 20;
+    repeated group PathElement = 14 {
+      required string type = 15;
+      optional int64 id = 16;
+      optional string name = 17;
+    }
+  }
+}
+
+message Property {
+  enum Meaning {
+    NO_MEANING = 0;
+    BLOB = 14;
+    TEXT = 15;
+    BYTESTRING = 16;
+
+    ATOM_CATEGORY = 1;
+    ATOM_LINK = 2;
+    ATOM_TITLE = 3;
+    ATOM_CONTENT = 4;
+    ATOM_SUMMARY = 5;
+    ATOM_AUTHOR = 6;
+
+    GD_WHEN = 7;
+    GD_EMAIL = 8;
+    GEORSS_POINT = 9;
+    GD_IM = 10;
+
+    GD_PHONENUMBER = 11;
+    GD_POSTALADDRESS = 12;
+
+    GD_RATING = 13;
+
+    BLOBKEY = 17;
+    ENTITY_PROTO = 19;
+
+    INDEX_VALUE = 18;
+  };
+
+  optional Meaning meaning = 1 [default = NO_MEANING];
+  optional string meaning_uri = 2;
+
+  required string name = 3;
+
+  required PropertyValue value = 5;
+
+  required bool multiple = 4;
+
+  optional bool searchable = 6 [default=false];
+
+  enum FtsTokenizationOption {
+    HTML = 1;
+    ATOM = 2;
+  }
+
+  optional FtsTokenizationOption fts_tokenization_option = 8;
+
+  optional string locale = 9 [default = "en"];
+}
+
+message Path {
+  repeated group Element = 1 {
+    required string type = 2;
+    optional int64 id = 3;
+    optional string name = 4;
+  }
+}
+
+message Reference {
+  required string app = 13;
+  optional string name_space = 20;
+  required Path path = 14;
+}
+
+message User {
+  required string email = 1;
+  required string auth_domain = 2;
+  optional string nickname = 3;
+  optional string federated_identity = 6;
+  optional string federated_provider = 7;
+}
+
+message EntityProto {
+  required Reference key = 13;
+  required Path entity_group = 16;
+  optional User owner = 17;
+
+  enum Kind {
+    GD_CONTACT = 1;
+    GD_EVENT = 2;
+    GD_MESSAGE = 3;
+  }
+  optional Kind kind = 4;
+  optional string kind_uri = 5;
+
+  repeated Property property = 14;
+  repeated Property raw_property = 15;
+
+  optional int32 rank = 18;
+}
+
+message CompositeProperty {
+  required int64 index_id = 1;
+  repeated string value = 2;
+}
+
+message Index {
+  required string entity_type = 1;
+  required bool ancestor = 5;
+  repeated group Property = 2 {
+    required string name = 3;
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+    optional Direction direction = 4 [default = ASCENDING];
+  }
+}
+
+message CompositeIndex {
+  required string app_id = 1;
+  required int64 id = 2;
+  required Index definition = 3;
+
+  enum State {
+    WRITE_ONLY = 1;
+    READ_WRITE = 2;
+    DELETED = 3;
+    ERROR = 4;
+  }
+  required State state = 4;
+
+  optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+  message IndexValue {
+    required string property_name = 1;
+    required PropertyValue value = 2;
+  }
+
+  repeated IndexValue index_value = 1;
+
+  optional Reference key = 2;
+
+  optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+  optional string key = 1;
+
+  optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+  enum Status {
+    INACTIVE = 0;
+    ACTIVE = 1;
+  }
+
+  required int64 ts = 1;
+}
+
+message InternalHeader {
+  optional string qos = 1;
+}
+
+message Transaction {
+  optional InternalHeader header = 4;
+  required fixed64 handle = 1;
+  required string app = 2;
+  optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+  optional InternalHeader header = 39;
+
+  required string app = 1;
+  optional string name_space = 29;
+
+  optional string kind = 3;
+  optional Reference ancestor = 17;
+
+  repeated group Filter = 4 {
+    enum Operator {
+      LESS_THAN = 1;
+      LESS_THAN_OR_EQUAL = 2;
+      GREATER_THAN = 3;
+      GREATER_THAN_OR_EQUAL = 4;
+      EQUAL = 5;
+      IN = 6;
+      EXISTS = 7;
+    }
+
+    required Operator op = 6;
+    repeated Property property = 14;
+  }
+
+  optional string search_query = 8;
+
+  repeated group Order = 9 {
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+
+    required string property = 10;
+    optional Direction direction = 11 [default = ASCENDING];
+  }
+
+  enum Hint {
+    ORDER_FIRST = 1;
+    ANCESTOR_FIRST = 2;
+    FILTER_FIRST = 3;
+  }
+  optional Hint hint = 18;
+
+  optional int32 count = 23;
+
+  optional int32 offset = 12 [default = 0];
+
+  optional int32 limit = 16;
+
+  optional CompiledCursor compiled_cursor = 30;
+  optional CompiledCursor end_compiled_cursor = 31;
+
+  repeated CompositeIndex composite_index = 19;
+
+  optional bool require_perfect_plan = 20 [default = false];
+
+  optional bool keys_only = 21 [default = false];
+
+  optional Transaction transaction = 22;
+
+  optional bool compile = 25 [default = false];
+
+  optional int64 failover_ms = 26;
+
+  optional bool strong = 32;
+
+  repeated string property_name = 33;
+
+  repeated string group_by_property_name = 34;
+
+  optional bool distinct = 24;
+
+  optional int64 min_safe_time_seconds = 35;
+
+  repeated string safe_replica_name = 36;
+
+  optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+  required group PrimaryScan = 1 {
+    optional string index_name = 2;
+
+    optional string start_key = 3;
+    optional bool start_inclusive = 4;
+    optional string end_key = 5;
+    optional bool end_inclusive = 6;
+
+    repeated string start_postfix_value = 22;
+    repeated string end_postfix_value = 23;
+
+    optional int64 end_unapplied_log_timestamp_us = 19;
+  }
+
+  repeated group MergeJoinScan = 7 {
+    required string index_name = 8;
+
+    repeated string prefix_value = 9;
+
+    optional bool value_prefix = 20 [default=false];
+  }
+
+  optional Index index_def = 21;
+
+  optional int32 offset = 10 [default = 0];
+
+  optional int32 limit = 11;
+
+  required bool keys_only = 12;
+
+  repeated string property_name = 24;
+
+  optional int32 distinct_infix_size = 25;
+
+  optional group EntityFilter = 13 {
+    optional bool distinct = 14 [default=false];
+
+    optional string kind = 17;
+    optional Reference ancestor = 18;
+  }
+}
+
+message CompiledCursor {
+  optional group Position = 2 {
+    optional string start_key = 27;
+
+    repeated group IndexValue = 29 {
+      optional string property = 30;
+      required PropertyValue value = 31;
+    }
+
+    optional Reference key = 32;
+
+    optional bool start_inclusive = 28 [default=true];
+  }
+}
+
+message Cursor {
+  required fixed64 cursor = 1;
+
+  optional string app = 2;
+}
+
+message Error {
+  enum ErrorCode {
+    BAD_REQUEST = 1;
+    CONCURRENT_TRANSACTION = 2;
+    INTERNAL_ERROR = 3;
+    NEED_INDEX = 4;
+    TIMEOUT = 5;
+    PERMISSION_DENIED = 6;
+    BIGTABLE_ERROR = 7;
+    COMMITTED_BUT_STILL_APPLYING = 8;
+    CAPABILITY_DISABLED = 9;
+    TRY_ALTERNATE_BACKEND = 10;
+    SAFE_TIME_TOO_OLD = 11;
+  }
+}
+
+message Cost {
+  optional int32 index_writes = 1;
+  optional int32 index_write_bytes = 2;
+  optional int32 entity_writes = 3;
+  optional int32 entity_write_bytes = 4;
+  optional group CommitCost = 5 {
+    optional int32 requested_entity_puts = 6;
+    optional int32 requested_entity_deletes = 7;
+  };
+  optional int32 approximate_storage_delta = 8;
+  optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+  optional InternalHeader header = 6;
+
+  repeated Reference key = 1;
+  optional Transaction transaction = 2;
+
+  optional int64 failover_ms = 3;
+
+  optional bool strong = 4;
+
+  optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+  repeated group Entity = 1 {
+    optional EntityProto entity = 2;
+    optional Reference key = 4;
+
+    optional int64 version = 3;
+  }
+
+  repeated Reference deferred = 5;
+
+  optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+  optional InternalHeader header = 11;
+
+  repeated EntityProto entity = 1;
+  optional Transaction transaction = 2;
+  repeated CompositeIndex composite_index = 3;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+
+  enum AutoIdPolicy {
+    CURRENT = 0;
+    SEQUENTIAL = 1;
+  }
+  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+  repeated Reference key = 1;
+  optional Cost cost = 2;
+  repeated int64 version = 3;
+}
+
+message TouchRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 1;
+  repeated CompositeIndex composite_index = 2;
+  optional bool force = 3 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+  optional Cost cost = 1;
+}
+
+message DeleteRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 6;
+  optional Transaction transaction = 5;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+  optional Cost cost = 1;
+  repeated int64 version = 3;
+}
+
+message NextRequest {
+  optional InternalHeader header = 5;
+
+  required Cursor cursor = 1;
+  optional int32 count = 2;
+
+  optional int32 offset = 4 [default = 0];
+
+  optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+  optional Cursor cursor = 1;
+
+  repeated EntityProto result = 2;
+
+  optional int32 skipped_results = 7;
+
+  required bool more_results = 3;
+
+  optional bool keys_only = 4;
+
+  optional bool index_only = 9;
+
+  optional bool small_ops = 10;
+
+  optional CompiledQuery compiled_query = 5;
+
+  optional CompiledCursor compiled_cursor = 6;
+
+  repeated CompositeIndex index = 8;
+
+  repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+  optional InternalHeader header = 4;
+
+  optional Reference model_key = 1;
+
+  optional int64 size = 2;
+
+  optional int64 max = 3;
+
+  repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+  required int64 start = 1;
+  required int64 end = 2;
+  optional Cost cost = 3;
+}
+
+message CompositeIndices {
+  repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+  optional InternalHeader header = 3;
+
+  required Transaction transaction = 1;
+  repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+  optional InternalHeader header = 3;
+
+  required string app = 1;
+  optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+  optional Cost cost = 1;
+
+  repeated group Version = 3 {
+    required Reference root_entity_key = 4;
+    required int64 version = 5;
+  }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..d538701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+	return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..e6b9227
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine"
+
+	netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return appengine.DefaultVersionHostname(fromContext(ctx))
+}
+
+func RequestID(ctx netcontext.Context) string  { return appengine.RequestID(fromContext(ctx)) }
+func Datacenter(_ netcontext.Context) string   { return appengine.Datacenter() }
+func ServerSoftware() string                   { return appengine.ServerSoftware() }
+func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
+func VersionID(ctx netcontext.Context) string  { return appengine.VersionID(fromContext(ctx)) }
+func InstanceID() string                       { return appengine.InstanceID() }
+func IsDevAppServer() bool                     { return appengine.IsDevAppServer() }
+
+func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..ebe68b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"net/http"
+	"os"
+
+	netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+	hRequestLogId           = "X-AppEngine-Request-Log-Id"
+	hDatacenter             = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+	return fromContext(ctx).Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+	// TODO(dsymonds): Remove fallback when we've verified this.
+	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+		return s
+	}
+	return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+		return s1 + "." + s2
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+	// gae_project has everything except the partition prefix.
+	appID := os.Getenv("GAE_LONG_APP_ID")
+	if appID == "" {
+		appID = string(mustGetMetadata("instance/attributes/gae_project"))
+	}
+	return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+	appID := partitionlessAppID()
+
+	part := os.Getenv("GAE_PARTITION")
+	if part == "" {
+		part = string(mustGetMetadata("instance/attributes/gae_partition"))
+	}
+
+	if part != "" {
+		appID = part + "~" + appID
+	}
+	return appID
+}
+
+func IsDevAppServer() bool {
+	return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 0000000..051ea39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+	errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+	service string
+	code    int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+	timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+	Service string
+	Detail  string
+	Code    int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+	if e.Code == 0 {
+		if e.Detail == "" {
+			return "APIError <empty>"
+		}
+		return e.Detail
+	}
+	s := fmt.Sprintf("API error %d", e.Code)
+	if m, ok := errorCodeMaps[e.Service]; ok {
+		s += " (" + e.Service + ": " + m[e.Code] + ")"
+	} else {
+		// Shouldn't happen, but provide a bit more detail if it does.
+		s = e.Service + " " + s
+	}
+	if e.Detail != "" {
+		s += ": " + e.Detail
+	}
+	return s
+}
+
+func (e *APIError) IsTimeout() bool {
+	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+	Detail string
+	Code   int32
+	// TODO: Remove this if we get a distinguishable error code.
+	Timeout bool
+}
+
+func (e *CallError) Error() string {
+	var msg string
+	switch remotepb.RpcError_ErrorCode(e.Code) {
+	case remotepb.RpcError_UNKNOWN:
+		return e.Detail
+	case remotepb.RpcError_OVER_QUOTA:
+		msg = "Over quota"
+	case remotepb.RpcError_CAPABILITY_DISABLED:
+		msg = "Capability disabled"
+	case remotepb.RpcError_CANCELLED:
+		msg = "Canceled"
+	default:
+		msg = fmt.Sprintf("Call error %d", e.Code)
+	}
+	s := msg + ": " + e.Detail
+	if e.Timeout {
+		s += " (timeout)"
+	}
+	return s
+}
+
+func (e *CallError) IsTimeout() bool {
+	return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 0000000..20c595b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,899 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+	LogServiceError
+	UserAppLogLine
+	UserAppLogGroup
+	FlushRequest
+	SetStatusRequest
+	LogOffset
+	LogLine
+	RequestLog
+	LogModuleVersion
+	LogReadRequest
+	LogReadResponse
+	LogUsageRecord
+	LogUsageRequest
+	LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+	LogServiceError_OK              LogServiceError_ErrorCode = 0
+	LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+	LogServiceError_STORAGE_ERROR   LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INVALID_REQUEST",
+	2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+	"OK":              0,
+	"INVALID_REQUEST": 1,
+	"STORAGE_ERROR":   2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+	p := new(LogServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+	return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = LogServiceError_ErrorCode(value)
+	return nil
+}
+
+type LogServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset()         { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage()    {}
+
+type UserAppLogLine struct {
+	TimestampUsec    *int64  `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+	Level            *int64  `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+	Message          *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset()         { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage()    {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+	if m != nil && m.TimestampUsec != nil {
+		return *m.TimestampUsec
+	}
+	return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+	if m != nil && m.Level != nil {
+		return *m.Level
+	}
+	return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+	if m != nil && m.Message != nil {
+		return *m.Message
+	}
+	return ""
+}
+
+type UserAppLogGroup struct {
+	LogLine          []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset()         { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage()    {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+	if m != nil {
+		return m.LogLine
+	}
+	return nil
+}
+
+type FlushRequest struct {
+	Logs             []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset()         { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage()    {}
+
+func (m *FlushRequest) GetLogs() []byte {
+	if m != nil {
+		return m.Logs
+	}
+	return nil
+}
+
+type SetStatusRequest struct {
+	Status           *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset()         { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage()    {}
+
+func (m *SetStatusRequest) GetStatus() string {
+	if m != nil && m.Status != nil {
+		return *m.Status
+	}
+	return ""
+}
+
+type LogOffset struct {
+	RequestId        []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset()         { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage()    {}
+
+func (m *LogOffset) GetRequestId() []byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+type LogLine struct {
+	Time             *int64  `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+	Level            *int32  `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+	LogMessage       *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogLine) Reset()         { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage()    {}
+
+func (m *LogLine) GetTime() int64 {
+	if m != nil && m.Time != nil {
+		return *m.Time
+	}
+	return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+	if m != nil && m.Level != nil {
+		return *m.Level
+	}
+	return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+	if m != nil && m.LogMessage != nil {
+		return *m.LogMessage
+	}
+	return ""
+}
+
+type RequestLog struct {
+	AppId                   *string    `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	ModuleId                *string    `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+	VersionId               *string    `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+	RequestId               []byte     `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+	Offset                  *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+	Ip                      *string    `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+	Nickname                *string    `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+	StartTime               *int64     `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+	EndTime                 *int64     `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+	Latency                 *int64     `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+	Mcycles                 *int64     `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+	Method                  *string    `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+	Resource                *string    `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+	HttpVersion             *string    `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+	Status                  *int32     `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+	ResponseSize            *int64     `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+	Referrer                *string    `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+	UserAgent               *string    `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+	UrlMapEntry             *string    `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+	Combined                *string    `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+	ApiMcycles              *int64     `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+	Host                    *string    `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+	Cost                    *float64   `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+	TaskQueueName           *string    `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+	TaskName                *string    `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+	WasLoadingRequest       *bool      `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+	PendingTime             *int64     `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+	ReplicaIndex            *int32     `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+	Finished                *bool      `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+	CloneKey                []byte     `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+	Line                    []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+	LinesIncomplete         *bool      `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+	AppEngineRelease        []byte     `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+	ExitReason              *int32     `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+	WasThrottledForTime     *bool      `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+	WasThrottledForRequests *bool      `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+	ThrottledTime           *int64     `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+	ServerName              []byte     `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+	XXX_unrecognized        []byte     `json:"-"`
+}
+
+func (m *RequestLog) Reset()         { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage()    {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+	if m != nil && m.ModuleId != nil {
+		return *m.ModuleId
+	}
+	return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *RequestLog) GetIp() string {
+	if m != nil && m.Ip != nil {
+		return *m.Ip
+	}
+	return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+	if m != nil && m.Latency != nil {
+		return *m.Latency
+	}
+	return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+	if m != nil && m.Mcycles != nil {
+		return *m.Mcycles
+	}
+	return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return ""
+}
+
+func (m *RequestLog) GetResource() string {
+	if m != nil && m.Resource != nil {
+		return *m.Resource
+	}
+	return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+	if m != nil && m.HttpVersion != nil {
+		return *m.HttpVersion
+	}
+	return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+	if m != nil && m.Status != nil {
+		return *m.Status
+	}
+	return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+	if m != nil && m.ResponseSize != nil {
+		return *m.ResponseSize
+	}
+	return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+	if m != nil && m.Referrer != nil {
+		return *m.Referrer
+	}
+	return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+	if m != nil && m.UserAgent != nil {
+		return *m.UserAgent
+	}
+	return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+	if m != nil && m.UrlMapEntry != nil {
+		return *m.UrlMapEntry
+	}
+	return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+	if m != nil && m.Combined != nil {
+		return *m.Combined
+	}
+	return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+	if m != nil && m.ApiMcycles != nil {
+		return *m.ApiMcycles
+	}
+	return 0
+}
+
+func (m *RequestLog) GetHost() string {
+	if m != nil && m.Host != nil {
+		return *m.Host
+	}
+	return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+	if m != nil && m.Cost != nil {
+		return *m.Cost
+	}
+	return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+	if m != nil && m.TaskQueueName != nil {
+		return *m.TaskQueueName
+	}
+	return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+	if m != nil && m.TaskName != nil {
+		return *m.TaskName
+	}
+	return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+	if m != nil && m.WasLoadingRequest != nil {
+		return *m.WasLoadingRequest
+	}
+	return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+	if m != nil && m.PendingTime != nil {
+		return *m.PendingTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+	if m != nil && m.ReplicaIndex != nil {
+		return *m.ReplicaIndex
+	}
+	return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+	if m != nil && m.Finished != nil {
+		return *m.Finished
+	}
+	return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+	if m != nil {
+		return m.CloneKey
+	}
+	return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+	if m != nil {
+		return m.Line
+	}
+	return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+	if m != nil && m.LinesIncomplete != nil {
+		return *m.LinesIncomplete
+	}
+	return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+	if m != nil {
+		return m.AppEngineRelease
+	}
+	return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+	if m != nil && m.ExitReason != nil {
+		return *m.ExitReason
+	}
+	return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+	if m != nil && m.WasThrottledForTime != nil {
+		return *m.WasThrottledForTime
+	}
+	return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+	if m != nil && m.WasThrottledForRequests != nil {
+		return *m.WasThrottledForRequests
+	}
+	return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+	if m != nil && m.ThrottledTime != nil {
+		return *m.ThrottledTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+	if m != nil {
+		return m.ServerName
+	}
+	return nil
+}
+
+type LogModuleVersion struct {
+	ModuleId         *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+	VersionId        *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset()         { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage()    {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+	if m != nil && m.ModuleId != nil {
+		return *m.ModuleId
+	}
+	return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+type LogReadRequest struct {
+	AppId             *string             `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	VersionId         []string            `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+	ModuleVersion     []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+	StartTime         *int64              `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime           *int64              `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+	Offset            *LogOffset          `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+	RequestId         [][]byte            `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+	MinimumLogLevel   *int32              `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+	IncludeIncomplete *bool               `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+	Count             *int64              `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+	CombinedLogRegex  *string             `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+	HostRegex         *string             `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+	ReplicaIndex      *int32              `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+	IncludeAppLogs    *bool               `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+	AppLogsPerRequest *int32              `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+	IncludeHost       *bool               `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+	IncludeAll        *bool               `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+	CacheIterator     *bool               `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+	NumShards         *int32              `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+	XXX_unrecognized  []byte              `json:"-"`
+}
+
+func (m *LogReadRequest) Reset()         { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage()    {}
+
+func (m *LogReadRequest) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+	if m != nil {
+		return m.VersionId
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+	if m != nil {
+		return m.ModuleVersion
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+	if m != nil && m.MinimumLogLevel != nil {
+		return *m.MinimumLogLevel
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+	if m != nil && m.IncludeIncomplete != nil {
+		return *m.IncludeIncomplete
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+	if m != nil && m.CombinedLogRegex != nil {
+		return *m.CombinedLogRegex
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+	if m != nil && m.HostRegex != nil {
+		return *m.HostRegex
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+	if m != nil && m.ReplicaIndex != nil {
+		return *m.ReplicaIndex
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+	if m != nil && m.IncludeAppLogs != nil {
+		return *m.IncludeAppLogs
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+	if m != nil && m.AppLogsPerRequest != nil {
+		return *m.AppLogsPerRequest
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+	if m != nil && m.IncludeHost != nil {
+		return *m.IncludeHost
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+	if m != nil && m.IncludeAll != nil {
+		return *m.IncludeAll
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+	if m != nil && m.CacheIterator != nil {
+		return *m.CacheIterator
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+	if m != nil && m.NumShards != nil {
+		return *m.NumShards
+	}
+	return 0
+}
+
+type LogReadResponse struct {
+	Log              []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+	Offset           *LogOffset    `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+	LastEndTime      *int64        `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *LogReadResponse) Reset()         { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage()    {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+	if m != nil {
+		return m.Log
+	}
+	return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+	if m != nil && m.LastEndTime != nil {
+		return *m.LastEndTime
+	}
+	return 0
+}
+
+type LogUsageRecord struct {
+	VersionId        *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+	StartTime        *int32  `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime          *int32  `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+	Count            *int64  `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+	TotalSize        *int64  `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+	Records          *int32  `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset()         { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage()    {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+	if m != nil && m.TotalSize != nil {
+		return *m.TotalSize
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+	if m != nil && m.Records != nil {
+		return *m.Records
+	}
+	return 0
+}
+
+type LogUsageRequest struct {
+	AppId            *string  `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	VersionId        []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+	StartTime        *int32   `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime          *int32   `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+	ResolutionHours  *uint32  `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+	CombineVersions  *bool    `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+	UsageVersion     *int32   `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+	VersionsOnly     *bool    `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset()         { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage()    {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+	if m != nil {
+		return m.VersionId
+	}
+	return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+	if m != nil && m.ResolutionHours != nil {
+		return *m.ResolutionHours
+	}
+	return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+	if m != nil && m.CombineVersions != nil {
+		return *m.CombineVersions
+	}
+	return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+	if m != nil && m.UsageVersion != nil {
+		return *m.UsageVersion
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+	if m != nil && m.VersionsOnly != nil {
+		return *m.VersionsOnly
+	}
+	return false
+}
+
+type LogUsageResponse struct {
+	Usage            []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+	Summary          *LogUsageRecord   `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset()         { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage()    {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+	if m != nil {
+		return m.Usage
+	}
+	return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+	if m != nil {
+		return m.Summary
+	}
+	return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 0000000..8981dc4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+  enum ErrorCode {
+    OK  = 0;
+    INVALID_REQUEST = 1;
+    STORAGE_ERROR = 2;
+  }
+}
+
+message UserAppLogLine {
+  required int64 timestamp_usec = 1;
+  required int64 level = 2;
+  required string message = 3;
+}
+
+message UserAppLogGroup {
+  repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+  optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+  required string status = 1;
+}
+
+
+message LogOffset {
+  optional bytes request_id = 1;
+}
+
+message LogLine {
+  required int64 time = 1;
+  required int32 level = 2;
+  required string log_message = 3;
+}
+
+message RequestLog {
+  required string app_id = 1;
+  optional string module_id = 37 [default="default"];
+  required string version_id = 2;
+  required bytes request_id = 3;
+  optional LogOffset offset = 35;
+  required string ip = 4;
+  optional string nickname = 5;
+  required int64 start_time = 6;
+  required int64 end_time = 7;
+  required int64 latency = 8;
+  required int64 mcycles = 9;
+  required string method = 10;
+  required string resource = 11;
+  required string http_version = 12;
+  required int32 status = 13;
+  required int64 response_size = 14;
+  optional string referrer = 15;
+  optional string user_agent = 16;
+  required string url_map_entry = 17;
+  required string combined = 18;
+  optional int64 api_mcycles = 19;
+  optional string host = 20;
+  optional double cost = 21;
+
+  optional string task_queue_name = 22;
+  optional string task_name = 23;
+
+  optional bool was_loading_request = 24;
+  optional int64 pending_time = 25;
+  optional int32 replica_index = 26 [default = -1];
+  optional bool finished = 27 [default = true];
+  optional bytes clone_key = 28;
+
+  repeated LogLine line = 29;
+
+  optional bool lines_incomplete = 36;
+  optional bytes app_engine_release = 38;
+
+  optional int32 exit_reason = 30;
+  optional bool was_throttled_for_time = 31;
+  optional bool was_throttled_for_requests = 32;
+  optional int64 throttled_time = 33;
+
+  optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+  optional string module_id = 1 [default="default"];
+  optional string version_id = 2;
+}
+
+message LogReadRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  repeated LogModuleVersion module_version = 19;
+
+  optional int64 start_time = 3;
+  optional int64 end_time = 4;
+  optional LogOffset offset = 5;
+  repeated bytes request_id = 6;
+
+  optional int32 minimum_log_level = 7;
+  optional bool include_incomplete = 8;
+  optional int64 count = 9;
+
+  optional string combined_log_regex = 14;
+  optional string host_regex = 15;
+  optional int32 replica_index = 16;
+
+  optional bool include_app_logs = 10;
+  optional int32 app_logs_per_request = 17;
+  optional bool include_host = 11;
+  optional bool include_all = 12;
+  optional bool cache_iterator = 13;
+  optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+  repeated RequestLog log = 1;
+  optional LogOffset offset = 2;
+  optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+  optional string version_id = 1;
+  optional int32 start_time = 2;
+  optional int32 end_time = 3;
+  optional int64 count = 4;
+  optional int64 total_size = 5;
+  optional int32 records = 6;
+}
+
+message LogUsageRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  optional int32 start_time = 3;
+  optional int32 end_time = 4;
+  optional uint32 resolution_hours = 5 [default = 1];
+  optional bool combine_versions = 6;
+  optional int32 usage_version = 7;
+  optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+  repeated LogUsageRecord usage = 1;
+  optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 0000000..4903616
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine_internal"
+)
+
+func Main() {
+	appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 0000000..57331ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,44 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+)
+
+func Main() {
+	installHealthChecker(http.DefaultServeMux)
+
+	port := "8080"
+	if s := os.Getenv("PORT"); s != "" {
+		port = s
+	}
+
+	if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+		log.Fatalf("http.ListenAndServe: %v", err)
+	}
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+	// If no health check handler has been installed by this point, add a trivial one.
+	const healthPath = "/_ah/health"
+	hreq := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Path: healthPath,
+		},
+	}
+	if _, pat := mux.Handler(hreq); pat != healthPath {
+		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+			io.WriteString(w, "ok")
+		})
+	}
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 0000000..9cc1f71
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+//	https://cloud.google.com/compute/docs/metadata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+)
+
+const (
+	metadataHost = "metadata"
+	metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+	metadataRequestHeaders = http.Header{
+		"Metadata-Flavor": []string{"Google"},
+	}
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+	b, err := getMetadata(key)
+	if err != nil {
+		log.Fatalf("Metadata fetch failed: %v", err)
+	}
+	return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+	// TODO(dsymonds): May need to use url.Parse to support keys with query args.
+	req := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Scheme: "http",
+			Host:   metadataHost,
+			Path:   metadataPath + key,
+		},
+		Header: metadataRequestHeaders,
+		Host:   metadataHost,
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+	}
+	return ioutil.ReadAll(resp.Body)
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 0000000..a0145ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+	ModulesServiceError
+	GetModulesRequest
+	GetModulesResponse
+	GetVersionsRequest
+	GetVersionsResponse
+	GetDefaultVersionRequest
+	GetDefaultVersionResponse
+	GetNumInstancesRequest
+	GetNumInstancesResponse
+	SetNumInstancesRequest
+	SetNumInstancesResponse
+	StartModuleRequest
+	StartModuleResponse
+	StopModuleRequest
+	StopModuleResponse
+	GetHostnameRequest
+	GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+	ModulesServiceError_OK                ModulesServiceError_ErrorCode = 0
+	ModulesServiceError_INVALID_MODULE    ModulesServiceError_ErrorCode = 1
+	ModulesServiceError_INVALID_VERSION   ModulesServiceError_ErrorCode = 2
+	ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+	ModulesServiceError_TRANSIENT_ERROR   ModulesServiceError_ErrorCode = 4
+	ModulesServiceError_UNEXPECTED_STATE  ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INVALID_MODULE",
+	2: "INVALID_VERSION",
+	3: "INVALID_INSTANCES",
+	4: "TRANSIENT_ERROR",
+	5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+	"OK":                0,
+	"INVALID_MODULE":    1,
+	"INVALID_VERSION":   2,
+	"INVALID_INSTANCES": 3,
+	"TRANSIENT_ERROR":   4,
+	"UNEXPECTED_STATE":  5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+	p := new(ModulesServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+	return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = ModulesServiceError_ErrorCode(value)
+	return nil
+}
+
+type ModulesServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset()         { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage()    {}
+
+type GetModulesRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset()         { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage()    {}
+
+type GetModulesResponse struct {
+	Module           []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset()         { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage()    {}
+
+func (m *GetModulesResponse) GetModule() []string {
+	if m != nil {
+		return m.Module
+	}
+	return nil
+}
+
+type GetVersionsRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset()         { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage()    {}
+
+func (m *GetVersionsRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+type GetVersionsResponse struct {
+	Version          []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset()         { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage()    {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type GetDefaultVersionRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset()         { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage()    {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+type GetDefaultVersionResponse struct {
+	Version          *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset()         { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage()    {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type GetNumInstancesRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset()         { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage()    {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type GetNumInstancesResponse struct {
+	Instances        *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset()         { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage()    {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+	if m != nil && m.Instances != nil {
+		return *m.Instances
+	}
+	return 0
+}
+
+type SetNumInstancesRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	Instances        *int64  `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset()         { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage()    {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+	if m != nil && m.Instances != nil {
+		return *m.Instances
+	}
+	return 0
+}
+
+type SetNumInstancesResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset()         { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage()    {}
+
+type StartModuleRequest struct {
+	Module           *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset()         { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage()    {}
+
+func (m *StartModuleRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type StartModuleResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset()         { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage()    {}
+
+type StopModuleRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset()         { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage()    {}
+
+func (m *StopModuleRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type StopModuleResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset()         { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage()    {}
+
+type GetHostnameRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	Instance         *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset()         { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage()    {}
+
+func (m *GetHostnameRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+	if m != nil && m.Instance != nil {
+		return *m.Instance
+	}
+	return ""
+}
+
+type GetHostnameResponse struct {
+	Hostname         *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage()    {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+	if m != nil && m.Hostname != nil {
+		return *m.Hostname
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 0000000..d29f006
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+  enum ErrorCode {
+    OK  = 0;
+    INVALID_MODULE = 1;
+    INVALID_VERSION = 2;
+    INVALID_INSTANCES = 3;
+    TRANSIENT_ERROR = 4;
+    UNEXPECTED_STATE = 5;
+  }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+  repeated string module = 1;
+}
+
+message GetVersionsRequest {
+  optional string module = 1;
+}
+
+message GetVersionsResponse {
+  repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+  optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+  required string version = 1;
+}
+
+message GetNumInstancesRequest {
+  optional string module = 1;
+  optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+  required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+  optional string module = 1;
+  optional string version = 2;
+  required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+  required string module = 1;
+  required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+  optional string module = 1;
+  optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+  optional string module = 1;
+  optional string version = 2;
+  optional string instance = 3;
+}
+
+message GetHostnameResponse {
+  required string hostname = 1;
+}
+
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 0000000..3b94cf0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+	"log"
+	"net"
+	"runtime"
+	"sync"
+	"time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+	// non-blocking
+	select {
+	case <-limitSem:
+	default:
+		// This should not normally happen.
+		log.Print("appengine: unbalanced limitSem release!")
+	}
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+	limitSem <- 1
+
+	// Dial with a timeout in case the API host is MIA.
+	// The connection should normally be very fast.
+	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+	if err != nil {
+		limitRelease()
+		return nil, err
+	}
+	lc := &limitConn{Conn: conn}
+	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+	return lc, nil
+}
+
+type limitConn struct {
+	close sync.Once
+	net.Conn
+}
+
+func (lc *limitConn) Close() error {
+	defer lc.close.Do(func() {
+		limitRelease()
+		runtime.SetFinalizer(lc, nil)
+	})
+	return lc.Conn.Close()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 0000000..2fdb546
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+	echo 1>&2 $*
+	exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+	q=$(which $tool) || die "didn't find $tool"
+	echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+	echo 1>&2 "* $dir"
+	protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+  # Remove proto.RegisterEnum calls.
+  # These cause duplicate registration panics when these packages
+  # are used on classic App Engine. proto.RegisterEnum only affects
+  # parsing the text format; we don't care about that.
+  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+  sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 0000000..526bd39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+	Request
+	ApplicationError
+	RpcError
+	Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+	RpcError_UNKNOWN             RpcError_ErrorCode = 0
+	RpcError_CALL_NOT_FOUND      RpcError_ErrorCode = 1
+	RpcError_PARSE_ERROR         RpcError_ErrorCode = 2
+	RpcError_SECURITY_VIOLATION  RpcError_ErrorCode = 3
+	RpcError_OVER_QUOTA          RpcError_ErrorCode = 4
+	RpcError_REQUEST_TOO_LARGE   RpcError_ErrorCode = 5
+	RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+	RpcError_FEATURE_DISABLED    RpcError_ErrorCode = 7
+	RpcError_BAD_REQUEST         RpcError_ErrorCode = 8
+	RpcError_RESPONSE_TOO_LARGE  RpcError_ErrorCode = 9
+	RpcError_CANCELLED           RpcError_ErrorCode = 10
+	RpcError_REPLAY_ERROR        RpcError_ErrorCode = 11
+	RpcError_DEADLINE_EXCEEDED   RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+	0:  "UNKNOWN",
+	1:  "CALL_NOT_FOUND",
+	2:  "PARSE_ERROR",
+	3:  "SECURITY_VIOLATION",
+	4:  "OVER_QUOTA",
+	5:  "REQUEST_TOO_LARGE",
+	6:  "CAPABILITY_DISABLED",
+	7:  "FEATURE_DISABLED",
+	8:  "BAD_REQUEST",
+	9:  "RESPONSE_TOO_LARGE",
+	10: "CANCELLED",
+	11: "REPLAY_ERROR",
+	12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+	"UNKNOWN":             0,
+	"CALL_NOT_FOUND":      1,
+	"PARSE_ERROR":         2,
+	"SECURITY_VIOLATION":  3,
+	"OVER_QUOTA":          4,
+	"REQUEST_TOO_LARGE":   5,
+	"CAPABILITY_DISABLED": 6,
+	"FEATURE_DISABLED":    7,
+	"BAD_REQUEST":         8,
+	"RESPONSE_TOO_LARGE":  9,
+	"CANCELLED":           10,
+	"REPLAY_ERROR":        11,
+	"DEADLINE_EXCEEDED":   12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+	p := new(RpcError_ErrorCode)
+	*p = x
+	return p
+}
+func (x RpcError_ErrorCode) String() string {
+	return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = RpcError_ErrorCode(value)
+	return nil
+}
+
+type Request struct {
+	ServiceName      *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+	Method           *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+	Request          []byte  `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+	RequestId        *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+
+func (m *Request) GetServiceName() string {
+	if m != nil && m.ServiceName != nil {
+		return *m.ServiceName
+	}
+	return ""
+}
+
+func (m *Request) GetMethod() string {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return ""
+}
+
+func (m *Request) GetRequest() []byte {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *Request) GetRequestId() string {
+	if m != nil && m.RequestId != nil {
+		return *m.RequestId
+	}
+	return ""
+}
+
+type ApplicationError struct {
+	Code             *int32  `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+	Detail           *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *ApplicationError) Reset()         { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage()    {}
+
+func (m *ApplicationError) GetCode() int32 {
+	if m != nil && m.Code != nil {
+		return *m.Code
+	}
+	return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+	if m != nil && m.Detail != nil {
+		return *m.Detail
+	}
+	return ""
+}
+
+type RpcError struct {
+	Code             *int32  `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+	Detail           *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *RpcError) Reset()         { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage()    {}
+
+func (m *RpcError) GetCode() int32 {
+	if m != nil && m.Code != nil {
+		return *m.Code
+	}
+	return 0
+}
+
+func (m *RpcError) GetDetail() string {
+	if m != nil && m.Detail != nil {
+		return *m.Detail
+	}
+	return ""
+}
+
+type Response struct {
+	Response         []byte            `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+	Exception        []byte            `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+	ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+	JavaException    []byte            `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+	RpcError         *RpcError         `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Response) Reset()         { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()    {}
+
+func (m *Response) GetResponse() []byte {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *Response) GetException() []byte {
+	if m != nil {
+		return m.Exception
+	}
+	return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+	if m != nil {
+		return m.ApplicationError
+	}
+	return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+	if m != nil {
+		return m.JavaException
+	}
+	return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+	if m != nil {
+		return m.RpcError
+	}
+	return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 0000000..f21763a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+  required string service_name = 2;
+  required string method = 3;
+  required bytes request = 4;
+  optional string request_id = 5;
+}
+
+message ApplicationError {
+  required int32 code = 1;
+  required string detail = 2;
+}
+
+message RpcError {
+  enum ErrorCode {
+    UNKNOWN = 0;
+    CALL_NOT_FOUND = 1;
+    PARSE_ERROR = 2;
+    SECURITY_VIOLATION = 3;
+    OVER_QUOTA = 4;
+    REQUEST_TOO_LARGE = 5;
+    CAPABILITY_DISABLED = 6;
+    FEATURE_DISABLED = 7;
+    BAD_REQUEST = 8;
+    RESPONSE_TOO_LARGE = 9;
+    CANCELLED = 10;
+    REPLAY_ERROR = 11;
+    DEADLINE_EXCEEDED = 12;
+  }
+  required int32 code = 1;
+  optional string detail = 2;
+}
+
+message Response {
+  optional bytes response = 1;
+  optional bytes exception = 2;
+  optional ApplicationError application_error = 3;
+  optional bytes java_exception = 4;
+  optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 0000000..7d8d11d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2127 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+	Scope
+	Entry
+	AccessControlList
+	FieldValue
+	Field
+	FieldTypes
+	IndexShardSettings
+	FacetValue
+	Facet
+	DocumentMetadata
+	Document
+	SearchServiceError
+	RequestStatus
+	IndexSpec
+	IndexMetadata
+	IndexDocumentParams
+	IndexDocumentRequest
+	IndexDocumentResponse
+	DeleteDocumentParams
+	DeleteDocumentRequest
+	DeleteDocumentResponse
+	ListDocumentsParams
+	ListDocumentsRequest
+	ListDocumentsResponse
+	ListIndexesParams
+	ListIndexesRequest
+	ListIndexesResponse
+	DeleteSchemaParams
+	DeleteSchemaRequest
+	DeleteSchemaResponse
+	SortSpec
+	ScorerSpec
+	FieldSpec
+	FacetRange
+	FacetRequestParam
+	FacetAutoDetectParam
+	FacetRequest
+	FacetRefinement
+	SearchParams
+	SearchRequest
+	FacetResultValue
+	FacetResult
+	SearchResult
+	SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Scope_Type int32
+
+const (
+	Scope_USER_BY_CANONICAL_ID    Scope_Type = 1
+	Scope_USER_BY_EMAIL           Scope_Type = 2
+	Scope_GROUP_BY_CANONICAL_ID   Scope_Type = 3
+	Scope_GROUP_BY_EMAIL          Scope_Type = 4
+	Scope_GROUP_BY_DOMAIN         Scope_Type = 5
+	Scope_ALL_USERS               Scope_Type = 6
+	Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+	1: "USER_BY_CANONICAL_ID",
+	2: "USER_BY_EMAIL",
+	3: "GROUP_BY_CANONICAL_ID",
+	4: "GROUP_BY_EMAIL",
+	5: "GROUP_BY_DOMAIN",
+	6: "ALL_USERS",
+	7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+	"USER_BY_CANONICAL_ID":    1,
+	"USER_BY_EMAIL":           2,
+	"GROUP_BY_CANONICAL_ID":   3,
+	"GROUP_BY_EMAIL":          4,
+	"GROUP_BY_DOMAIN":         5,
+	"ALL_USERS":               6,
+	"ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+	p := new(Scope_Type)
+	*p = x
+	return p
+}
+func (x Scope_Type) String() string {
+	return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+	if err != nil {
+		return err
+	}
+	*x = Scope_Type(value)
+	return nil
+}
+
+type Entry_Permission int32
+
+const (
+	Entry_READ         Entry_Permission = 1
+	Entry_WRITE        Entry_Permission = 2
+	Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+	1: "READ",
+	2: "WRITE",
+	3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+	"READ":         1,
+	"WRITE":        2,
+	"FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+	p := new(Entry_Permission)
+	*p = x
+	return p
+}
+func (x Entry_Permission) String() string {
+	return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+	if err != nil {
+		return err
+	}
+	*x = Entry_Permission(value)
+	return nil
+}
+
+type FieldValue_ContentType int32
+
+const (
+	FieldValue_TEXT   FieldValue_ContentType = 0
+	FieldValue_HTML   FieldValue_ContentType = 1
+	FieldValue_ATOM   FieldValue_ContentType = 2
+	FieldValue_DATE   FieldValue_ContentType = 3
+	FieldValue_NUMBER FieldValue_ContentType = 4
+	FieldValue_GEO    FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+	0: "TEXT",
+	1: "HTML",
+	2: "ATOM",
+	3: "DATE",
+	4: "NUMBER",
+	5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+	"TEXT":   0,
+	"HTML":   1,
+	"ATOM":   2,
+	"DATE":   3,
+	"NUMBER": 4,
+	"GEO":    5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+	p := new(FieldValue_ContentType)
+	*p = x
+	return p
+}
+func (x FieldValue_ContentType) String() string {
+	return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+	if err != nil {
+		return err
+	}
+	*x = FieldValue_ContentType(value)
+	return nil
+}
+
+type FacetValue_ContentType int32
+
+const (
+	FacetValue_ATOM   FacetValue_ContentType = 2
+	FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+	2: "ATOM",
+	4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+	"ATOM":   2,
+	"NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+	p := new(FacetValue_ContentType)
+	*p = x
+	return p
+}
+func (x FacetValue_ContentType) String() string {
+	return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+	if err != nil {
+		return err
+	}
+	*x = FacetValue_ContentType(value)
+	return nil
+}
+
+type Document_Storage int32
+
+const (
+	Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+	0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+	"DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+	p := new(Document_Storage)
+	*p = x
+	return p
+}
+func (x Document_Storage) String() string {
+	return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+	if err != nil {
+		return err
+	}
+	*x = Document_Storage(value)
+	return nil
+}
+
+type SearchServiceError_ErrorCode int32
+
+const (
+	SearchServiceError_OK                     SearchServiceError_ErrorCode = 0
+	SearchServiceError_INVALID_REQUEST        SearchServiceError_ErrorCode = 1
+	SearchServiceError_TRANSIENT_ERROR        SearchServiceError_ErrorCode = 2
+	SearchServiceError_INTERNAL_ERROR         SearchServiceError_ErrorCode = 3
+	SearchServiceError_PERMISSION_DENIED      SearchServiceError_ErrorCode = 4
+	SearchServiceError_TIMEOUT                SearchServiceError_ErrorCode = 5
+	SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INVALID_REQUEST",
+	2: "TRANSIENT_ERROR",
+	3: "INTERNAL_ERROR",
+	4: "PERMISSION_DENIED",
+	5: "TIMEOUT",
+	6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+	"OK":                     0,
+	"INVALID_REQUEST":        1,
+	"TRANSIENT_ERROR":        2,
+	"INTERNAL_ERROR":         3,
+	"PERMISSION_DENIED":      4,
+	"TIMEOUT":                5,
+	"CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+	p := new(SearchServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+	return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = SearchServiceError_ErrorCode(value)
+	return nil
+}
+
+type IndexSpec_Consistency int32
+
+const (
+	IndexSpec_GLOBAL       IndexSpec_Consistency = 0
+	IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+	0: "GLOBAL",
+	1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+	"GLOBAL":       0,
+	"PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+	p := new(IndexSpec_Consistency)
+	*p = x
+	return p
+}
+func (x IndexSpec_Consistency) String() string {
+	return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+	if err != nil {
+		return err
+	}
+	*x = IndexSpec_Consistency(value)
+	return nil
+}
+
+type IndexSpec_Source int32
+
+const (
+	IndexSpec_SEARCH        IndexSpec_Source = 0
+	IndexSpec_DATASTORE     IndexSpec_Source = 1
+	IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+	0: "SEARCH",
+	1: "DATASTORE",
+	2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+	"SEARCH":        0,
+	"DATASTORE":     1,
+	"CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+	p := new(IndexSpec_Source)
+	*p = x
+	return p
+}
+func (x IndexSpec_Source) String() string {
+	return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+	if err != nil {
+		return err
+	}
+	*x = IndexSpec_Source(value)
+	return nil
+}
+
+type IndexSpec_Mode int32
+
+const (
+	IndexSpec_PRIORITY   IndexSpec_Mode = 0
+	IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+	0: "PRIORITY",
+	1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+	"PRIORITY":   0,
+	"BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+	p := new(IndexSpec_Mode)
+	*p = x
+	return p
+}
+func (x IndexSpec_Mode) String() string {
+	return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+	if err != nil {
+		return err
+	}
+	*x = IndexSpec_Mode(value)
+	return nil
+}
+
+type IndexDocumentParams_Freshness int32
+
+const (
+	IndexDocumentParams_SYNCHRONOUSLY   IndexDocumentParams_Freshness = 0
+	IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+	0: "SYNCHRONOUSLY",
+	1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+	"SYNCHRONOUSLY":   0,
+	"WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+	p := new(IndexDocumentParams_Freshness)
+	*p = x
+	return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+	return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+	if err != nil {
+		return err
+	}
+	*x = IndexDocumentParams_Freshness(value)
+	return nil
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+	ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+	ScorerSpec_MATCH_SCORER           ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+	0: "RESCORING_MATCH_SCORER",
+	2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+	"RESCORING_MATCH_SCORER": 0,
+	"MATCH_SCORER":           2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+	p := new(ScorerSpec_Scorer)
+	*p = x
+	return p
+}
+func (x ScorerSpec_Scorer) String() string {
+	return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+	if err != nil {
+		return err
+	}
+	*x = ScorerSpec_Scorer(value)
+	return nil
+}
+
+type SearchParams_CursorType int32
+
+const (
+	SearchParams_NONE       SearchParams_CursorType = 0
+	SearchParams_SINGLE     SearchParams_CursorType = 1
+	SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+	0: "NONE",
+	1: "SINGLE",
+	2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+	"NONE":       0,
+	"SINGLE":     1,
+	"PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+	p := new(SearchParams_CursorType)
+	*p = x
+	return p
+}
+func (x SearchParams_CursorType) String() string {
+	return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+	if err != nil {
+		return err
+	}
+	*x = SearchParams_CursorType(value)
+	return nil
+}
+
+type SearchParams_ParsingMode int32
+
+const (
+	SearchParams_STRICT  SearchParams_ParsingMode = 0
+	SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+	0: "STRICT",
+	1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+	"STRICT":  0,
+	"RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+	p := new(SearchParams_ParsingMode)
+	*p = x
+	return p
+}
+func (x SearchParams_ParsingMode) String() string {
+	return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+	if err != nil {
+		return err
+	}
+	*x = SearchParams_ParsingMode(value)
+	return nil
+}
+
+type Scope struct {
+	Type             *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+	Value            *string     `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte      `json:"-"`
+}
+
+func (m *Scope) Reset()         { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage()    {}
+
+func (m *Scope) GetType() Scope_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type Entry struct {
+	Scope            *Scope            `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+	Permission       *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+	DisplayName      *string           `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Entry) Reset()         { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()    {}
+
+func (m *Entry) GetScope() *Scope {
+	if m != nil {
+		return m.Scope
+	}
+	return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+	if m != nil && m.Permission != nil {
+		return *m.Permission
+	}
+	return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+	if m != nil && m.DisplayName != nil {
+		return *m.DisplayName
+	}
+	return ""
+}
+
+type AccessControlList struct {
+	Owner            *string  `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+	Entries          []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *AccessControlList) Reset()         { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage()    {}
+
+func (m *AccessControlList) GetOwner() string {
+	if m != nil && m.Owner != nil {
+		return *m.Owner
+	}
+	return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+type FieldValue struct {
+	Type             *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+	Language         *string                 `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+	Geo              *FieldValue_Geo         `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"`
+	XXX_unrecognized []byte                  `json:"-"`
+}
+
+func (m *FieldValue) Reset()         { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage()    {}
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+	if m != nil && m.Language != nil {
+		return *m.Language
+	}
+	return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+	if m != nil && m.StringValue != nil {
+		return *m.StringValue
+	}
+	return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+	if m != nil {
+		return m.Geo
+	}
+	return nil
+}
+
+type FieldValue_Geo struct {
+	Lat              *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+	Lng              *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset()         { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage()    {}
+
+func (m *FieldValue_Geo) GetLat() float64 {
+	if m != nil && m.Lat != nil {
+		return *m.Lat
+	}
+	return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+	if m != nil && m.Lng != nil {
+		return *m.Lng
+	}
+	return 0
+}
+
+type Field struct {
+	Name             *string     `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Value            *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte      `json:"-"`
+}
+
+func (m *Field) Reset()         { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage()    {}
+
+func (m *Field) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type FieldTypes struct {
+	Name             *string                  `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Type             []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *FieldTypes) Reset()         { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage()    {}
+
+func (m *FieldTypes) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+type IndexShardSettings struct {
+	PrevNumShards            []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"`
+	NumShards                *int32  `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"`
+	PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" json:"prev_num_shards_search_false,omitempty"`
+	LocalReplica             *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"`
+	XXX_unrecognized         []byte  `json:"-"`
+}
+
+func (m *IndexShardSettings) Reset()         { *m = IndexShardSettings{} }
+func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }
+func (*IndexShardSettings) ProtoMessage()    {}
+
+const Default_IndexShardSettings_NumShards int32 = 1
+
+func (m *IndexShardSettings) GetPrevNumShards() []int32 {
+	if m != nil {
+		return m.PrevNumShards
+	}
+	return nil
+}
+
+func (m *IndexShardSettings) GetNumShards() int32 {
+	if m != nil && m.NumShards != nil {
+		return *m.NumShards
+	}
+	return Default_IndexShardSettings_NumShards
+}
+
+func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 {
+	if m != nil {
+		return m.PrevNumShardsSearchFalse
+	}
+	return nil
+}
+
+func (m *IndexShardSettings) GetLocalReplica() string {
+	if m != nil && m.LocalReplica != nil {
+		return *m.LocalReplica
+	}
+	return ""
+}
+
+type FacetValue struct {
+	Type             *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+	StringValue      *string                 `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+	XXX_unrecognized []byte                  `json:"-"`
+}
+
+func (m *FacetValue) Reset()         { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage()    {}
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+	if m != nil && m.StringValue != nil {
+		return *m.StringValue
+	}
+	return ""
+}
+
+type Facet struct {
+	Name             *string     `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Value            *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte      `json:"-"`
+}
+
+func (m *Facet) Reset()         { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage()    {}
+
+func (m *Facet) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type DocumentMetadata struct {
+	Version            *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+	CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"`
+	XXX_unrecognized   []byte `json:"-"`
+}
+
+func (m *DocumentMetadata) Reset()         { *m = DocumentMetadata{} }
+func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }
+func (*DocumentMetadata) ProtoMessage()    {}
+
+func (m *DocumentMetadata) GetVersion() int64 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+func (m *DocumentMetadata) GetCommittedStVersion() int64 {
+	if m != nil && m.CommittedStVersion != nil {
+		return *m.CommittedStVersion
+	}
+	return 0
+}
+
+type Document struct {
+	Id               *string           `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	Language         *string           `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+	Field            []*Field          `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+	OrderId          *int32            `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"`
+	Storage          *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+	Facet            []*Facet          `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Document) Reset()         { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage()    {}
+
+const Default_Document_Language string = "en"
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return ""
+}
+
+func (m *Document) GetLanguage() string {
+	if m != nil && m.Language != nil {
+		return *m.Language
+	}
+	return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+	if m != nil && m.OrderId != nil {
+		return *m.OrderId
+	}
+	return 0
+}
+
+func (m *Document) GetStorage() Document_Storage {
+	if m != nil && m.Storage != nil {
+		return *m.Storage
+	}
+	return Default_Document_Storage
+}
+
+func (m *Document) GetFacet() []*Facet {
+	if m != nil {
+		return m.Facet
+	}
+	return nil
+}
+
+type SearchServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset()         { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage()    {}
+
+type RequestStatus struct {
+	Code             *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+	ErrorDetail      *string                       `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+	CanonicalCode    *int32                        `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (m *RequestStatus) Reset()         { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage()    {}
+
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+	if m != nil && m.Code != nil {
+		return *m.Code
+	}
+	return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+	if m != nil && m.ErrorDetail != nil {
+		return *m.ErrorDetail
+	}
+	return ""
+}
+
+func (m *RequestStatus) GetCanonicalCode() int32 {
+	if m != nil && m.CanonicalCode != nil {
+		return *m.CanonicalCode
+	}
+	return 0
+}
+
+type IndexSpec struct {
+	Name             *string                `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Consistency      *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+	Namespace        *string                `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+	Version          *int32                 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+	Source           *IndexSpec_Source      `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+	Mode             *IndexSpec_Mode        `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *IndexSpec) Reset()         { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage()    {}
+
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+func (m *IndexSpec) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+	if m != nil && m.Consistency != nil {
+		return *m.Consistency
+	}
+	return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+	if m != nil && m.Namespace != nil {
+		return *m.Namespace
+	}
+	return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+	if m != nil && m.Source != nil {
+		return *m.Source
+	}
+	return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+	if m != nil && m.Mode != nil {
+		return *m.Mode
+	}
+	return Default_IndexSpec_Mode
+}
+
+type IndexMetadata struct {
+	IndexSpec        *IndexSpec             `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+	Field            []*FieldTypes          `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Storage          *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *IndexMetadata) Reset()         { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage()    {}
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+	if m != nil {
+		return m.Storage
+	}
+	return nil
+}
+
+type IndexMetadata_Storage struct {
+	AmountUsed       *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"`
+	Limit            *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset()         { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage()    {}
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+	if m != nil && m.AmountUsed != nil {
+		return *m.AmountUsed
+	}
+	return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+type IndexDocumentParams struct {
+	Document         []*Document                    `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+	Freshness        *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+	IndexSpec        *IndexSpec                     `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"`
+	XXX_unrecognized []byte                         `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset()         { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage()    {}
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+	if m != nil {
+		return m.Document
+	}
+	return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+	if m != nil && m.Freshness != nil {
+		return *m.Freshness
+	}
+	return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+type IndexDocumentRequest struct {
+	Params           *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte               `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte               `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset()         { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage()    {}
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type IndexDocumentResponse struct {
+	Status           []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+	DocId            []string         `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset()         { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage()    {}
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+	if m != nil {
+		return m.DocId
+	}
+	return nil
+}
+
+type DeleteDocumentParams struct {
+	DocId            []string   `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"`
+	IndexSpec        *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset()         { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage()    {}
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+	if m != nil {
+		return m.DocId
+	}
+	return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+type DeleteDocumentRequest struct {
+	Params           *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte                `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte                `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset()         { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage()    {}
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type DeleteDocumentResponse struct {
+	Status           []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset()         { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage()    {}
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type ListDocumentsParams struct {
+	IndexSpec        *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+	StartDocId       *string    `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"`
+	IncludeStartDoc  *bool      `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"`
+	Limit            *int32     `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+	KeysOnly         *bool      `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset()         { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage()    {}
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+	if m != nil && m.StartDocId != nil {
+		return *m.StartDocId
+	}
+	return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+	if m != nil && m.IncludeStartDoc != nil {
+		return *m.IncludeStartDoc
+	}
+	return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+type ListDocumentsRequest struct {
+	Params           *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte               `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte               `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset()         { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage()    {}
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type ListDocumentsResponse struct {
+	Status           *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+	Document         []*Document    `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset()         { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage()    {}
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+	if m != nil {
+		return m.Document
+	}
+	return nil
+}
+
+type ListIndexesParams struct {
+	FetchSchema       *bool             `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"`
+	Limit             *int32            `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+	Namespace         *string           `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+	StartIndexName    *string           `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"`
+	IncludeStartIndex *bool             `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"`
+	IndexNamePrefix   *string           `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"`
+	Offset            *int32            `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+	Source            *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+	XXX_unrecognized  []byte            `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset()         { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage()    {}
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+	if m != nil && m.FetchSchema != nil {
+		return *m.FetchSchema
+	}
+	return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+	if m != nil && m.Namespace != nil {
+		return *m.Namespace
+	}
+	return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+	if m != nil && m.StartIndexName != nil {
+		return *m.StartIndexName
+	}
+	return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+	if m != nil && m.IncludeStartIndex != nil {
+		return *m.IncludeStartIndex
+	}
+	return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+	if m != nil && m.IndexNamePrefix != nil {
+		return *m.IndexNamePrefix
+	}
+	return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+	if m != nil && m.Source != nil {
+		return *m.Source
+	}
+	return Default_ListIndexesParams_Source
+}
+
+type ListIndexesRequest struct {
+	Params           *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte             `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte             `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset()         { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage()    {}
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type ListIndexesResponse struct {
+	Status           *RequestStatus   `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+	IndexMetadata    []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset()         { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage()    {}
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+	if m != nil {
+		return m.IndexMetadata
+	}
+	return nil
+}
+
+type DeleteSchemaParams struct {
+	Source           *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+	IndexSpec        []*IndexSpec      `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset()         { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage()    {}
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+	if m != nil && m.Source != nil {
+		return *m.Source
+	}
+	return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+type DeleteSchemaRequest struct {
+	Params           *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte              `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte              `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset()         { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage()    {}
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type DeleteSchemaResponse struct {
+	Status           []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset()         { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage()    {}
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type SortSpec struct {
+	SortExpression      *string  `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"`
+	SortDescending      *bool    `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"`
+	DefaultValueText    *string  `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"`
+	DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"`
+	XXX_unrecognized    []byte   `json:"-"`
+}
+
+func (m *SortSpec) Reset()         { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage()    {}
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+	if m != nil && m.SortExpression != nil {
+		return *m.SortExpression
+	}
+	return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+	if m != nil && m.SortDescending != nil {
+		return *m.SortDescending
+	}
+	return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+	if m != nil && m.DefaultValueText != nil {
+		return *m.DefaultValueText
+	}
+	return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+	if m != nil && m.DefaultValueNumeric != nil {
+		return *m.DefaultValueNumeric
+	}
+	return 0
+}
+
+type ScorerSpec struct {
+	Scorer                *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+	Limit                 *int32             `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+	MatchScorerParameters *string            `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"`
+	XXX_unrecognized      []byte             `json:"-"`
+}
+
+func (m *ScorerSpec) Reset()         { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage()    {}
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+	if m != nil && m.Scorer != nil {
+		return *m.Scorer
+	}
+	return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+	if m != nil && m.MatchScorerParameters != nil {
+		return *m.MatchScorerParameters
+	}
+	return ""
+}
+
+type FieldSpec struct {
+	Name             []string                `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+	Expression       []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"`
+	XXX_unrecognized []byte                  `json:"-"`
+}
+
+func (m *FieldSpec) Reset()         { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage()    {}
+
+func (m *FieldSpec) GetName() []string {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+	if m != nil {
+		return m.Expression
+	}
+	return nil
+}
+
+type FieldSpec_Expression struct {
+	Name             *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+	Expression       *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset()         { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage()    {}
+
+func (m *FieldSpec_Expression) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+	if m != nil && m.Expression != nil {
+		return *m.Expression
+	}
+	return ""
+}
+
+type FacetRange struct {
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Start            *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+	End              *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *FacetRange) Reset()         { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage()    {}
+
+func (m *FacetRange) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FacetRange) GetStart() string {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return ""
+}
+
+type FacetRequestParam struct {
+	ValueLimit       *int32        `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"`
+	Range            []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+	ValueConstraint  []string      `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset()         { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage()    {}
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+	if m != nil && m.ValueLimit != nil {
+		return *m.ValueLimit
+	}
+	return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+	if m != nil {
+		return m.Range
+	}
+	return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+	if m != nil {
+		return m.ValueConstraint
+	}
+	return nil
+}
+
+type FacetAutoDetectParam struct {
+	ValueLimit       *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset()         { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage()    {}
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+	if m != nil && m.ValueLimit != nil {
+		return *m.ValueLimit
+	}
+	return Default_FacetAutoDetectParam_ValueLimit
+}
+
+type FacetRequest struct {
+	Name             *string            `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Params           *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"`
+	XXX_unrecognized []byte             `json:"-"`
+}
+
+func (m *FacetRequest) Reset()         { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage()    {}
+
+func (m *FacetRequest) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+type FacetRefinement struct {
+	Name             *string                `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Value            *string                `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Range            *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *FacetRefinement) Reset()         { *m = FacetRefinement{} }
+func (m *FacetRefinement) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement) ProtoMessage()    {}
+
+func (m *FacetRefinement) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FacetRefinement) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+func (m *FacetRefinement) GetRange() *FacetRefinement_Range {
+	if m != nil {
+		return m.Range
+	}
+	return nil
+}
+
+type FacetRefinement_Range struct {
+	Start            *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
+	End              *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *FacetRefinement_Range) Reset()         { *m = FacetRefinement_Range{} }
+func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement_Range) ProtoMessage()    {}
+
+func (m *FacetRefinement_Range) GetStart() string {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return ""
+}
+
+func (m *FacetRefinement_Range) GetEnd() string {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return ""
+}
+
+type SearchParams struct {
+	IndexSpec              *IndexSpec                `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+	Query                  *string                   `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+	Cursor                 *string                   `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+	Offset                 *int32                    `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+	CursorType             *SearchParams_CursorType  `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+	Limit                  *int32                    `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+	MatchedCountAccuracy   *int32                    `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"`
+	SortSpec               []*SortSpec               `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"`
+	ScorerSpec             *ScorerSpec               `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"`
+	FieldSpec              *FieldSpec                `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"`
+	KeysOnly               *bool                     `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"`
+	ParsingMode            *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+	AutoDiscoverFacetCount *int32                    `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"`
+	IncludeFacet           []*FacetRequest           `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"`
+	FacetRefinement        []*FacetRefinement        `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"`
+	FacetAutoDetectParam   *FacetAutoDetectParam     `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"`
+	FacetDepth             *int32                    `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"`
+	XXX_unrecognized       []byte                    `json:"-"`
+}
+
+func (m *SearchParams) Reset()         { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage()    {}
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+const Default_SearchParams_FacetDepth int32 = 1000
+
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+	if m != nil {
+		return m.IndexSpec
+	}
+	return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+	if m != nil && m.Query != nil {
+		return *m.Query
+	}
+	return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+	if m != nil && m.Cursor != nil {
+		return *m.Cursor
+	}
+	return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+	if m != nil && m.CursorType != nil {
+		return *m.CursorType
+	}
+	return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+	if m != nil && m.MatchedCountAccuracy != nil {
+		return *m.MatchedCountAccuracy
+	}
+	return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+	if m != nil {
+		return m.SortSpec
+	}
+	return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+	if m != nil {
+		return m.ScorerSpec
+	}
+	return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+	if m != nil {
+		return m.FieldSpec
+	}
+	return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+	if m != nil && m.ParsingMode != nil {
+		return *m.ParsingMode
+	}
+	return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+	if m != nil && m.AutoDiscoverFacetCount != nil {
+		return *m.AutoDiscoverFacetCount
+	}
+	return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+	if m != nil {
+		return m.IncludeFacet
+	}
+	return nil
+}
+
+func (m *SearchParams) GetFacetRefinement() []*FacetRefinement {
+	if m != nil {
+		return m.FacetRefinement
+	}
+	return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+	if m != nil {
+		return m.FacetAutoDetectParam
+	}
+	return nil
+}
+
+func (m *SearchParams) GetFacetDepth() int32 {
+	if m != nil && m.FacetDepth != nil {
+		return *m.FacetDepth
+	}
+	return Default_SearchParams_FacetDepth
+}
+
+type SearchRequest struct {
+	Params           *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+	AppId            []byte        `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *SearchRequest) Reset()         { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage()    {}
+
+func (m *SearchRequest) GetParams() *SearchParams {
+	if m != nil {
+		return m.Params
+	}
+	return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+	if m != nil {
+		return m.AppId
+	}
+	return nil
+}
+
+type FacetResultValue struct {
+	Name             *string          `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Count            *int32           `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+	Refinement       *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *FacetResultValue) Reset()         { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage()    {}
+
+func (m *FacetResultValue) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *FacetResultValue) GetRefinement() *FacetRefinement {
+	if m != nil {
+		return m.Refinement
+	}
+	return nil
+}
+
+type FacetResult struct {
+	Name             *string             `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	Value            []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte              `json:"-"`
+}
+
+func (m *FacetResult) Reset()         { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage()    {}
+
+func (m *FacetResult) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type SearchResult struct {
+	Document         *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+	Expression       []*Field  `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+	Score            []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+	Cursor           *string   `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *SearchResult) Reset()         { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage()    {}
+
+func (m *SearchResult) GetDocument() *Document {
+	if m != nil {
+		return m.Document
+	}
+	return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+	if m != nil {
+		return m.Expression
+	}
+	return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+	if m != nil {
+		return m.Score
+	}
+	return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+	if m != nil && m.Cursor != nil {
+		return *m.Cursor
+	}
+	return ""
+}
+
+type SearchResponse struct {
+	Result           []*SearchResult           `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+	MatchedCount     *int64                    `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"`
+	Status           *RequestStatus            `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+	Cursor           *string                   `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+	FacetResult      []*FacetResult            `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"`
+	XXX_extensions   map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *SearchResponse) Reset()         { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage()    {}
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+	{1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_SearchResponse
+}
+func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+	if m != nil {
+		return m.Result
+	}
+	return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+	if m != nil && m.MatchedCount != nil {
+		return *m.MatchedCount
+	}
+	return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+	if m != nil && m.Cursor != nil {
+		return *m.Cursor
+	}
+	return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+	if m != nil {
+		return m.FacetResult
+	}
+	return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 0000000..219f4c3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,388 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+  enum Type {
+    USER_BY_CANONICAL_ID = 1;
+    USER_BY_EMAIL = 2;
+    GROUP_BY_CANONICAL_ID = 3;
+    GROUP_BY_EMAIL = 4;
+    GROUP_BY_DOMAIN = 5;
+    ALL_USERS = 6;
+    ALL_AUTHENTICATED_USERS = 7;
+  }
+
+  optional Type type = 1;
+  optional string value = 2;
+}
+
+message Entry {
+  enum Permission {
+    READ = 1;
+    WRITE = 2;
+    FULL_CONTROL = 3;
+  }
+
+  optional Scope scope = 1;
+  optional Permission permission = 2;
+  optional string display_name = 3;
+}
+
+message AccessControlList {
+  optional string owner = 1;
+  repeated Entry entries = 2;
+}
+
+message FieldValue {
+  enum ContentType {
+    TEXT = 0;
+    HTML = 1;
+    ATOM = 2;
+    DATE = 3;
+    NUMBER = 4;
+    GEO = 5;
+  }
+
+  optional ContentType type = 1 [default = TEXT];
+
+  optional string language = 2 [default = "en"];
+
+  optional string string_value = 3;
+
+  optional group Geo = 4 {
+    required double lat = 5;
+    required double lng = 6;
+  }
+}
+
+message Field {
+  required string name = 1;
+  required FieldValue value = 2;
+}
+
+message FieldTypes {
+  required string name = 1;
+  repeated FieldValue.ContentType type = 2;
+}
+
+message IndexShardSettings {
+  repeated int32 prev_num_shards = 1;
+  required int32 num_shards = 2 [default=1];
+  repeated int32 prev_num_shards_search_false = 3;
+  optional string local_replica = 4 [default = ""];
+}
+
+message FacetValue {
+  enum ContentType {
+    ATOM = 2;
+    NUMBER = 4;
+  }
+
+  optional ContentType type = 1 [default = ATOM];
+  optional string string_value = 3;
+}
+
+message Facet {
+  required string name = 1;
+  required FacetValue value = 2;
+}
+
+message DocumentMetadata  {
+  optional int64 version = 1;
+  optional int64 committed_st_version = 2;
+}
+
+message Document {
+  optional string id = 1;
+  optional string language = 2 [default = "en"];
+  repeated Field field = 3;
+  optional int32 order_id = 4;
+
+  enum Storage {
+    DISK = 0;
+  }
+
+  optional Storage storage = 5 [default = DISK];
+  repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+  enum ErrorCode {
+    OK = 0;
+    INVALID_REQUEST = 1;
+    TRANSIENT_ERROR = 2;
+    INTERNAL_ERROR = 3;
+    PERMISSION_DENIED = 4;
+    TIMEOUT = 5;
+    CONCURRENT_TRANSACTION = 6;
+  }
+}
+
+message RequestStatus {
+  required SearchServiceError.ErrorCode code = 1;
+  optional string error_detail = 2;
+  optional int32 canonical_code = 3;
+}
+
+message IndexSpec {
+  required string name = 1;
+
+  enum Consistency {
+    GLOBAL = 0;
+    PER_DOCUMENT = 1;
+  }
+  optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+  optional string namespace = 3;
+  optional int32 version = 4;
+
+  enum Source {
+    SEARCH = 0;
+    DATASTORE = 1;
+    CLOUD_STORAGE = 2;
+  }
+  optional Source source = 5 [default = SEARCH];
+
+  enum Mode {
+    PRIORITY = 0;
+    BACKGROUND = 1;
+  }
+  optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+  required IndexSpec index_spec = 1;
+
+  repeated FieldTypes field = 2;
+
+  message Storage {
+    optional int64 amount_used = 1;
+    optional int64 limit = 2;
+  }
+  optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+  repeated Document document = 1;
+
+  enum Freshness {
+    SYNCHRONOUSLY = 0;
+    WHEN_CONVENIENT = 1;
+  }
+  optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];
+
+  required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+  required IndexDocumentParams params = 1;
+
+  optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+  repeated RequestStatus status = 1;
+
+  repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+  repeated string doc_id = 1;
+
+  required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+  required DeleteDocumentParams params = 1;
+
+  optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+  repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+  required IndexSpec index_spec = 1;
+  optional string start_doc_id = 2;
+  optional bool include_start_doc = 3 [default = true];
+  optional int32 limit = 4 [default = 100];
+  optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+  required ListDocumentsParams params = 1;
+
+  optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+  required RequestStatus status = 1;
+
+  repeated Document document = 2;
+}
+
+message ListIndexesParams {
+  optional bool fetch_schema = 1;
+  optional int32 limit = 2 [default = 20];
+  optional string namespace = 3;
+  optional string start_index_name = 4;
+  optional bool include_start_index = 5 [default = true];
+  optional string index_name_prefix = 6;
+  optional int32 offset = 7;
+  optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+  required ListIndexesParams params = 1;
+
+  optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+  required RequestStatus status = 1;
+  repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+  optional IndexSpec.Source source = 1 [default = SEARCH];
+  repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+  required DeleteSchemaParams params = 1;
+
+  optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+  repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+  required string sort_expression = 1;
+  optional bool sort_descending = 2 [default = true];
+  optional string default_value_text = 4;
+  optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+  enum Scorer {
+    RESCORING_MATCH_SCORER = 0;
+    MATCH_SCORER = 2;
+  }
+  optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+  optional int32 limit = 2 [default = 1000];
+  optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+  repeated string name = 1;
+
+  repeated group Expression = 2 {
+    required string name = 3;
+    required string expression = 4;
+  }
+}
+
+message FacetRange {
+  optional string name = 1;
+  optional string start = 2;
+  optional string end = 3;
+}
+
+message FacetRequestParam {
+  optional int32 value_limit = 1;
+  repeated FacetRange range = 2;
+  repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+  optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+  required string name = 1;
+  optional FacetRequestParam params = 2;
+}
+
+message FacetRefinement {
+  required string name = 1;
+  optional string value = 2;
+
+  message Range {
+    optional string start = 1;
+    optional string end = 2;
+  }
+  optional Range range = 3;
+}
+
+message SearchParams {
+  required IndexSpec index_spec = 1;
+  required string query = 2;
+  optional string cursor = 4;
+  optional int32 offset = 11;
+
+  enum CursorType {
+    NONE = 0;
+    SINGLE = 1;
+    PER_RESULT = 2;
+  }
+  optional CursorType cursor_type = 5 [default = NONE];
+
+  optional int32 limit = 6 [default = 20];
+  optional int32 matched_count_accuracy = 7;
+  repeated SortSpec sort_spec = 8;
+  optional ScorerSpec scorer_spec = 9;
+  optional FieldSpec field_spec = 10;
+  optional bool keys_only = 12;
+
+  enum ParsingMode {
+    STRICT = 0;
+    RELAXED = 1;
+  }
+  optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+  optional int32 auto_discover_facet_count = 15 [default = 0];
+  repeated FacetRequest include_facet = 16;
+  repeated FacetRefinement facet_refinement = 17;
+  optional FacetAutoDetectParam facet_auto_detect_param = 18;
+  optional int32 facet_depth = 19 [default=1000];
+}
+
+message SearchRequest {
+  required SearchParams params = 1;
+
+  optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+  required string name = 1;
+  required int32 count = 2;
+  required FacetRefinement refinement = 3;
+}
+
+message FacetResult {
+  required string name = 1;
+  repeated FacetResultValue value = 2;
+}
+
+message SearchResult {
+  required Document document = 1;
+  repeated Field expression = 4;
+  repeated double score = 2;
+  optional string cursor = 3;
+}
+
+message SearchResponse {
+  repeated SearchResult result = 1;
+  required int64 matched_count = 2;
+  required RequestStatus status = 3;
+  optional string cursor = 4;
+  repeated FacetResult facet_result = 5;
+
+  extensions 1000 to 9999;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 0000000..28a6d18
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+	v := reflect.ValueOf(f)
+	transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+	v := reflect.ValueOf(pb)
+	if f, ok := transactionSetters[v.Type()]; ok {
+		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+	}
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+	t, _ := ctx.Value(&transactionKey).(*transaction)
+	return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+	return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+	transaction pb.Transaction
+	finished    bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+	if transactionFromContext(c) != nil {
+		return errors.New("nested transactions are not supported")
+	}
+
+	// Begin the transaction.
+	t := &transaction{}
+	req := &pb.BeginTransactionRequest{
+		App: proto.String(FullyQualifiedAppID(c)),
+	}
+	if xg {
+		req.AllowMultipleEg = proto.Bool(true)
+	}
+	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+		return err
+	}
+
+	// Call f, rolling back the transaction if f returns a non-nil error, or panics.
+	// The panic is not recovered.
+	defer func() {
+		if t.finished {
+			return
+		}
+		t.finished = true
+		// Ignore the error return value, since we are already returning a non-nil
+		// error (or we're panicking).
+		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+	}()
+	if err := f(withTransaction(c, t)); err != nil {
+		return err
+	}
+	t.finished = true
+
+	// Commit the transaction.
+	res := &pb.CommitResponse{}
+	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+	if ae, ok := err.(*APIError); ok {
+		/* TODO: restore this conditional
+		if appengine.IsDevAppServer() {
+		*/
+		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+			return ErrConcurrentTransaction
+		}
+		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+			return ErrConcurrentTransaction
+		}
+	}
+	return err
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 0000000..21860ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"fmt"
+	"regexp"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+	if !validNamespace.MatchString(namespace) {
+		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+	}
+	return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go
new file mode 100644
index 0000000..da331ce
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/doc.go
@@ -0,0 +1,209 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+
+Basic Operations
+
+Indexes contain documents. Each index is identified by its name: a
+human-readable ASCII string.
+
+Within an index, documents are associated with an ID, which is also
+a human-readable ASCII string. A document's contents are a mapping from
+case-sensitive field names to values. Valid types for field values are:
+  - string,
+  - search.Atom,
+  - search.HTML,
+  - time.Time (stored with millisecond precision),
+  - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+  - appengine.GeoPoint.
+
+The Get and Put methods on an Index load and save a document.
+A document's contents are typically represented by a struct pointer.
+
+Example code:
+
+	type Doc struct {
+		Author   string
+		Comment  string
+		Creation time.Time
+	}
+
+	index, err := search.Open("comments")
+	if err != nil {
+		return err
+	}
+	newID, err := index.Put(ctx, "", &Doc{
+		Author:   "gopher",
+		Comment:  "the truth of the matter",
+		Creation: time.Now(),
+	})
+	if err != nil {
+		return err
+	}
+
+A single document can be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+	var doc Doc
+	err := index.Get(ctx, id, &doc)
+	if err != nil {
+		return err
+	}
+
+
+Search and Listing Documents
+
+Indexes have two methods for retrieving multiple documents at once: Search and
+List.
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+	for t := index.Search(ctx, "Comment:truth", nil); ; {
+		var doc Doc
+		id, err := t.Next(&doc)
+		if err == search.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+	}
+
+Search takes a string query to determine which documents to return. The query
+can be simple, such as a single word to match, or complex. The query
+language is described at
+https://cloud.google.com/appengine/docs/go/search/query_strings
+
+Search also takes an optional SearchOptions struct which gives much more
+control over how results are calculated and returned.
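+
+For example, a sketch that caps the number of results using the Limit field
+of SearchOptions:
+
+	opts := &search.SearchOptions{
+		Limit: 10, // return at most 10 matching documents
+	}
+	for t := index.Search(ctx, "Comment:truth", opts); ; {
+		var doc Doc
+		id, err := t.Next(&doc)
+		if err == search.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+	}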
+
+Call List to iterate over all documents in an index.
+
+	for t := index.List(ctx, nil); ; {
+		var doc Doc
+		id, err := t.Next(&doc)
+		if err == search.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+	}
+
+
+Fields and Facets
+
+A document's contents can be represented by a variety of types. These are
+typically struct pointers, but they can also be represented by any type
+implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata
+to be set for the document with the DocumentMetadata type. Struct pointers are
+more strongly typed and are easier to use; FieldLoadSavers are more flexible.
+
+A document's contents can be expressed in two ways: fields and facets.
+
+Fields are the most common way of providing content for documents. Fields can
+store data in multiple types and can be matched in searches using query
+strings.
+
+Facets provide a way to attach categorical information to a document. The only
+valid types for facets are search.Atom and float64. Facets allow search
+results to contain summaries of the categories matched in a search, and to
+restrict searches to only match against specific categories.
+
+By default, for struct pointers, all of the struct fields are used as document
+fields, and the field name used is the same as on the struct (and hence must
+start with an upper case letter). Struct fields may have a
+`search:"name,options"` tag. The name must start with a letter and be
+composed only of word characters. A "-" tag name means that the field will be
+ignored.  If options is "facet" then the struct field will be used as a
+document facet. If options is "" then the comma may be omitted. There are no
+other recognized options.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and I are facets.
+	// D's tag is equivalent to having no tag at all (E).
+	// F and G are ignored entirely by the search package.
+	// I has tag information for both the search and json packages.
+	type TaggedStruct struct {
+		A float64 `search:"a,facet"`
+		B float64 `search:"b"`
+		C float64 `search:",facet"`
+		D float64 `search:""`
+		E float64
+		F float64 `search:"-"`
+		G float64 `search:"-,facet"`
+		I float64 `search:",facet" json:"i"`
+	}
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+	type CustomFieldsExample struct {
+		// Item's title and which language it is in.
+		Title string
+		Lang  string
+		// Mass, in grams.
+		Mass int
+	}
+
+	func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+		// Load the title field, failing if any other field is found.
+		for _, f := range fields {
+			if f.Name != "title" {
+				return fmt.Errorf("unknown field %q", f.Name)
+			}
+			s, ok := f.Value.(string)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+			}
+			x.Title = s
+			x.Lang = f.Language
+		}
+		// Load the mass facet, failing if any other facet is found.
+		for _, f := range meta.Facets {
+			if f.Name != "mass" {
+				return fmt.Errorf("unknown facet %q", f.Name)
+			}
+			m, ok := f.Value.(float64)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+			}
+			x.Mass = int(m)
+		}
+		return nil
+	}
+
+	func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+		fields := []search.Field{
+			{Name: "title", Value: x.Title, Language: x.Lang},
+		}
+		meta := &search.DocumentMetadata{
+			Facets: []search.Facet{
+				{Name: "mass", Value: float64(x.Mass)},
+			},
+		}
+		return fields, meta, nil
+	}
+*/
+package search
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 0000000..707c2d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+	// Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+	Name string
+	// Value is the field value. The valid types are:
+	//  - string,
+	//  - search.Atom,
+	//  - search.HTML,
+	//  - time.Time (stored with millisecond precision),
+	//  - float64,
+	//  - GeoPoint.
+	Value interface{}
+	// Language is a two-letter ISO 639-1 code for the field's language,
+	// defaulting to "en" if nothing is specified. It may only be specified for
+	// fields of type string and search.HTML.
+	Language string
+	// Derived marks fields that were calculated as a result of a
+	// FieldExpression provided to Search. This field is ignored when saving a
+	// document.
+	Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+	// Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.
+	// A facet name cannot be longer than 500 characters.
+	Name string
+	// Value is the facet value.
+	//
+	// When being used in documents (for example, in
+	// DocumentMetadata.Facets), the valid types are:
+	//  - search.Atom,
+	//  - float64.
+	//
+	// When being used in SearchOptions.Refinements or being returned
+	// in FacetResult, the valid types are:
+	//  - search.Atom,
+	//  - search.Range.
+	Value interface{}
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+	// Rank is an integer specifying the order the document will be returned in
+	// search results. If zero, the rank will be set to the number of seconds since
+	// 2011-01-01 00:00:00 UTC when being Put into an index.
+	Rank int
+	// Facets is the set of facets for this document.
+	Facets []Facet
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+	Load([]Field, *DocumentMetadata) error
+	Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+	*l = append(*l, f...)
+	return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+	return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
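For illustration, a minimal sketch of using FieldList when the document schema is open-ended, assuming an App Engine request context ctx; the index name "notes" and the field names are invented:

	package example

	import (
		"golang.org/x/net/context"

		"google.golang.org/appengine/search"
	)

	func putFreeForm(ctx context.Context) (string, error) {
		index, err := search.Open("notes")
		if err != nil {
			return "", err
		}
		// *FieldList implements FieldLoadSaver, so an arbitrary set of fields
		// can be indexed without declaring a struct type first.
		doc := &search.FieldList{
			{Name: "Author", Value: "gopher"},
			{Name: "Body", Value: search.HTML("<p>hello</p>")},
			{Name: "Words", Value: float64(2)},
		}
		return index.Put(ctx, "", doc)
	}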
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 0000000..d7bdad3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,1121 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/search"
+)
+
+var (
+	// ErrInvalidDocumentType is returned when methods like Put, Get or Next
+	// are passed a dst or src argument of invalid type.
+	ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+	// ErrNoSuchDocument is returned when no document was found for a given ID.
+	ErrNoSuchDocument = errors.New("search: no such document")
+)
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
+func validIndexNameOrDocID(s string) bool {
+	if strings.HasPrefix(s, "!") {
+		return false
+	}
+	for _, c := range s {
+		if c < 0x21 || 0x7f <= c {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
+	languageRE  = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks
+// the validity of both field and facet names.
+func validFieldName(s string) bool {
+	return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+	return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+	return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+	return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+	spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+	if !validIndexNameOrDocID(name) {
+		return nil, fmt.Errorf("search: invalid index name %q", name)
+	}
+	return &Index{
+		spec: pb.IndexSpec{
+			Name: &name,
+		},
+	}, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+	d, err := saveDoc(src)
+	if err != nil {
+		return "", err
+	}
+	if id != "" {
+		if !validIndexNameOrDocID(id) {
+			return "", fmt.Errorf("search: invalid ID %q", id)
+		}
+		d.Id = proto.String(id)
+	}
+	// spec is modified by Call when applying the current Namespace, so copy it to
+	// avoid retaining the namespace beyond the scope of the Call.
+	spec := x.spec
+	req := &pb.IndexDocumentRequest{
+		Params: &pb.IndexDocumentParams{
+			Document:  []*pb.Document{d},
+			IndexSpec: &spec,
+		},
+	}
+	res := &pb.IndexDocumentResponse{}
+	if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+		return "", err
+	}
+	if len(res.Status) > 0 {
+		if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+			return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+		}
+	}
+	if len(res.Status) != 1 || len(res.DocId) != 1 {
+		return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)",
+			len(res.Status), len(res.DocId))
+	}
+	return res.DocId[0], nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the caller to decide whether this error
+// is fatal, recoverable, or ignorable.
+func (x *Index) Get(c context.Context, id string, dst interface{}) error {
+	if id == "" || !validIndexNameOrDocID(id) {
+		return fmt.Errorf("search: invalid ID %q", id)
+	}
+	req := &pb.ListDocumentsRequest{
+		Params: &pb.ListDocumentsParams{
+			IndexSpec:  &x.spec,
+			StartDocId: proto.String(id),
+			Limit:      proto.Int32(1),
+		},
+	}
+	res := &pb.ListDocumentsResponse{}
+	if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	if len(res.Document) != 1 || res.Document[0].GetId() != id {
+		return ErrNoSuchDocument
+	}
+	return loadDoc(dst, res.Document[0], nil)
+}
+
+// Delete deletes a document from the index.
+func (x *Index) Delete(c context.Context, id string) error {
+	req := &pb.DeleteDocumentRequest{
+		Params: &pb.DeleteDocumentParams{
+			DocId:     []string{id},
+			IndexSpec: &x.spec,
+		},
+	}
+	res := &pb.DeleteDocumentResponse{}
+	if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil {
+		return err
+	}
+	if len(res.Status) != 1 {
+		return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status))
+	}
+	if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+	}
+	return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order.
+func (x *Index) List(c context.Context, opts *ListOptions) *Iterator {
+	t := &Iterator{
+		c:             c,
+		index:         x,
+		count:         -1,
+		listInclusive: true,
+		more:          moreList,
+	}
+	if opts != nil {
+		t.listStartID = opts.StartID
+		t.limit = opts.Limit
+		t.idsOnly = opts.IDsOnly
+	}
+	return t
+}
+
+func moreList(t *Iterator) error {
+	req := &pb.ListDocumentsRequest{
+		Params: &pb.ListDocumentsParams{
+			IndexSpec: &t.index.spec,
+		},
+	}
+	if t.listStartID != "" {
+		req.Params.StartDocId = &t.listStartID
+		req.Params.IncludeStartDoc = &t.listInclusive
+	}
+	if t.limit > 0 {
+		req.Params.Limit = proto.Int32(int32(t.limit))
+	}
+	if t.idsOnly {
+		req.Params.KeysOnly = &t.idsOnly
+	}
+
+	res := &pb.ListDocumentsResponse{}
+	if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	t.listRes = res.Document
+	t.listStartID, t.listInclusive, t.more = "", false, nil
+	if len(res.Document) != 0 && t.limit <= 0 {
+		if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+			t.listStartID, t.more = id, moreList
+		}
+	}
+	return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+	// StartID is the inclusive lower bound for the ID of the returned
+	// documents. The zero value means all documents will be returned.
+	StartID string
+
+	// Limit is the maximum number of documents to return. The zero value
+	// indicates no limit.
+	Limit int
+
+	// IDsOnly indicates that only document IDs should be returned for the list
+	// operation; no document fields are populated.
+	IDsOnly bool
+}
+
+// Search searches the index for the given query.
+func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {
+	t := &Iterator{
+		c:           c,
+		index:       x,
+		searchQuery: query,
+		more:        moreSearch,
+	}
+	if opts != nil {
+		if opts.Cursor != "" {
+			if opts.Offset != 0 {
+				return errIter("at most one of Cursor and Offset may be specified")
+			}
+			t.searchCursor = proto.String(string(opts.Cursor))
+		}
+		t.limit = opts.Limit
+		t.fields = opts.Fields
+		t.idsOnly = opts.IDsOnly
+		t.sort = opts.Sort
+		t.exprs = opts.Expressions
+		t.refinements = opts.Refinements
+		t.facetOpts = opts.Facets
+		t.searchOffset = opts.Offset
+		t.countAccuracy = opts.CountAccuracy
+	}
+	return t
+}
+
+func moreSearch(t *Iterator) error {
+	// We use per-result (rather than single/per-page) cursors since this
+	// lets us return a Cursor for every iterator document. The two cursor
+	// types are largely interchangeable: a page cursor is the same as the
+	// last per-result cursor in a given search response.
+	req := &pb.SearchRequest{
+		Params: &pb.SearchParams{
+			IndexSpec:  &t.index.spec,
+			Query:      &t.searchQuery,
+			Cursor:     t.searchCursor,
+			CursorType: pb.SearchParams_PER_RESULT.Enum(),
+			FieldSpec: &pb.FieldSpec{
+				Name: t.fields,
+			},
+		},
+	}
+	if t.limit > 0 {
+		req.Params.Limit = proto.Int32(int32(t.limit))
+	}
+	if t.searchOffset > 0 {
+		req.Params.Offset = proto.Int32(int32(t.searchOffset))
+		t.searchOffset = 0
+	}
+	if t.countAccuracy > 0 {
+		req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy))
+	}
+	if t.idsOnly {
+		req.Params.KeysOnly = &t.idsOnly
+	}
+	if t.sort != nil {
+		if err := sortToProto(t.sort, req.Params); err != nil {
+			return err
+		}
+	}
+	if t.refinements != nil {
+		if err := refinementsToProto(t.refinements, req.Params); err != nil {
+			return err
+		}
+	}
+	for _, e := range t.exprs {
+		req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+			Name:       proto.String(e.Name),
+			Expression: proto.String(e.Expr),
+		})
+	}
+	for _, f := range t.facetOpts {
+		if err := f.setParams(req.Params); err != nil {
+			return fmt.Errorf("bad FacetSearchOption: %v", err)
+		}
+	}
+	// Don't repeat facet search.
+	t.facetOpts = nil
+
+	res := &pb.SearchResponse{}
+	if err := internal.Call(t.c, "search", "Search", req, res); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	t.searchRes = res.Result
+	if len(res.FacetResult) > 0 {
+		t.facetRes = res.FacetResult
+	}
+	t.count = int(*res.MatchedCount)
+	if t.limit > 0 {
+		t.more = nil
+	} else {
+		t.more = moreSearch
+	}
+	return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+	// Limit is the maximum number of documents to return. The zero value
+	// indicates no limit.
+	Limit int
+
+	// IDsOnly indicates that only document IDs should be returned for the search
+	// operation; no document fields are populated.
+	IDsOnly bool
+
+	// Sort controls the ordering of search results.
+	Sort *SortOptions
+
+	// Fields specifies which document fields to include in the results. If omitted,
+	// all document fields are returned. No more than 100 fields may be specified.
+	Fields []string
+
+	// Expressions specifies additional computed fields to add to each returned
+	// document.
+	Expressions []FieldExpression
+
+	// Facets controls what facet information is returned for these search results.
+	// If no options are specified, no facet results will be returned.
+	Facets []FacetSearchOption
+
+	// Refinements filters the returned documents by requiring them to contain facets
+	// with specific values. Refinements are applied in conjunction for facets with
+	// different names, and in disjunction otherwise.
+	Refinements []Facet
+
+	// Cursor causes the results to commence with the first document after
+	// the document associated with the cursor.
+	Cursor Cursor
+
+	// Offset specifies the number of documents to skip over before returning results.
+	// When specified, Cursor must be empty.
+	Offset int
+
+	// CountAccuracy specifies the maximum result count that can be expected to
+	// be accurate. If zero, the count accuracy defaults to 20.
+	CountAccuracy int
+}
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+	// Name is the name to use for the computed field.
+	Name string
+
+	// Expr is evaluated to provide a custom content snippet for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+}
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+	setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// which appear the most often in the aggregate in the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+	return &autoFacetOpt{facetLimit, valueLimit}
+}
+
+type autoFacetOpt struct {
+	facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+	lim := int32(o.facetLimit)
+	if lim == 0 {
+		lim = defaultAutoFacetLimit
+	}
+	params.AutoDiscoverFacetCount = &lim
+	if o.valueLimit > 0 {
+		params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+			ValueLimit: proto.Int32(int32(o.valueLimit)),
+		}
+	}
+	return nil
+}
+
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+	return &facetOpt{name, value}
+}
+
+type facetOpt struct {
+	name   string
+	values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+	req := &pb.FacetRequest{Name: &o.name}
+	params.IncludeFacet = append(params.IncludeFacet, req)
+	if len(o.values) == 0 {
+		return nil
+	}
+	vtype := reflect.TypeOf(o.values[0])
+	reqParam := &pb.FacetRequestParam{}
+	for _, v := range o.values {
+		if reflect.TypeOf(v) != vtype {
+			return errors.New("values must all be Atom, or must all be Range")
+		}
+		switch v := v.(type) {
+		case Atom:
+			reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("invalid range: %v", err)
+			}
+			reqParam.Range = append(reqParam.Range, rng)
+		default:
+			return fmt.Errorf("unsupported value type %T", v)
+		}
+	}
+	req.Params = reqParam
+	return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+	return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+	params.FacetDepth = proto.Int32(int32(o))
+	return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+	Facet
+
+	// Count is the number of times this specific facet and value appeared in the
+	// matching documents.
+	Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+	Start, End float64
+}
+
+var (
+	negInf = math.Inf(-1)
+	posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+	return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+	return Range{Start: negInf, End: max}
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+	// Expressions is a slice of expressions representing a multi-dimensional
+	// sort.
+	Expressions []SortExpression
+
+	// Scorer, when specified, will cause the documents to be scored according to
+	// search term frequency.
+	Scorer Scorer
+
+	// Limit is the maximum number of objects to score and/or sort. Limit cannot
+	// be more than 10,000. The zero value indicates a default limit.
+	Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+	// Expr is evaluated to provide a sorting value for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+
+	// Reverse causes the documents to be sorted in ascending order.
+	Reverse bool
+
+	// The default value to use when no field is present or the expression
+	// cannot be calculated for a document. For text sorts, Default must
+	// be of type string; for numeric sorts, float64.
+	Default interface{}
+}
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+	toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+	enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+	spec.Scorer = e.enum.Enum()
+}
+
+var (
+	// MatchScorer assigns a score based on term frequency in a document.
+	MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+	// RescoringMatchScorer assigns a score based on the quality of the query
+	// match. It is similar to a MatchScorer but uses a more complex scoring
+	// algorithm based on match term frequency and other factors like field type.
+	// Please be aware that this algorithm is continually refined and can change
+	// over time without notice. This means that the ordering of search results
+	// that use this scorer can also change without notice.
+	RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+	for _, e := range sort.Expressions {
+		spec := &pb.SortSpec{
+			SortExpression: proto.String(e.Expr),
+		}
+		if e.Reverse {
+			spec.SortDescending = proto.Bool(false)
+		}
+		if e.Default != nil {
+			switch d := e.Default.(type) {
+			case float64:
+				spec.DefaultValueNumeric = &d
+			case string:
+				spec.DefaultValueText = &d
+			default:
+				return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+			}
+		}
+		params.SortSpec = append(params.SortSpec, spec)
+	}
+
+	spec := &pb.ScorerSpec{}
+	if sort.Limit > 0 {
+		spec.Limit = proto.Int32(int32(sort.Limit))
+		params.ScorerSpec = spec
+	}
+	if sort.Scorer != nil {
+		sort.Scorer.toProto(spec)
+		params.ScorerSpec = spec
+	}
+
+	return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+	for _, r := range refinements {
+		ref := &pb.FacetRefinement{
+			Name: proto.String(r.Name),
+		}
+		switch v := r.Value.(type) {
+		case Atom:
+			ref.Value = proto.String(string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+			}
+			// Unfortunately there are two identical messages for identifying facet ranges.
+			ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+		default:
+			return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+		}
+		params.FacetRefinement = append(params.FacetRefinement, ref)
+	}
+	return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+	rng := &pb.FacetRange{}
+	if r.Start != negInf {
+		if !validFloat(r.Start) {
+			return nil, errors.New("invalid value for Start")
+		}
+		rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+	} else if r.End == posInf {
+		return nil, errors.New("either Start or End must be finite")
+	}
+	if r.End != posInf {
+		if !validFloat(r.End) {
+			return nil, errors.New("invalid value for End")
+		}
+		rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+	}
+	return rng, nil
+}
+
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+	r := Range{Start: negInf, End: posInf}
+	if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+		r.Start = x
+	}
+	if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+		r.End = x
+	}
+	return r
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+	c     context.Context
+	index *Index
+	err   error
+
+	listRes       []*pb.Document
+	listStartID   string
+	listInclusive bool
+
+	searchRes    []*pb.SearchResult
+	facetRes     []*pb.FacetResult
+	searchQuery  string
+	searchCursor *string
+	searchOffset int
+	sort         *SortOptions
+
+	fields      []string
+	exprs       []FieldExpression
+	refinements []Facet
+	facetOpts   []FacetSearchOption
+
+	more func(*Iterator) error
+
+	count         int
+	countAccuracy int
+	limit         int // items left to return; 0 for unlimited.
+	idsOnly       bool
+}
+
+// errIter returns an iterator that only returns the given error.
+func errIter(err string) *Iterator {
+	return &Iterator{
+		err: errors.New(err),
+	}
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// fetchMore retrieves more results, if there are no errors or pending results.
+func (t *Iterator) fetchMore() {
+	if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+		t.err = t.more(t)
+	}
+}
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, implement the FieldLoadSaver
+// interface, or be a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+	t.fetchMore()
+	if t.err != nil {
+		return "", t.err
+	}
+
+	var doc *pb.Document
+	var exprs []*pb.Field
+	switch {
+	case len(t.listRes) != 0:
+		doc = t.listRes[0]
+		t.listRes = t.listRes[1:]
+	case len(t.searchRes) != 0:
+		doc = t.searchRes[0].Document
+		exprs = t.searchRes[0].Expression
+		t.searchCursor = t.searchRes[0].Cursor
+		t.searchRes = t.searchRes[1:]
+	default:
+		return "", Done
+	}
+	if doc == nil {
+		return "", errors.New("search: internal error: no document returned")
+	}
+	if !t.idsOnly && dst != nil {
+		if err := loadDoc(dst, doc, exprs); err != nil {
+			return "", err
+		}
+	}
+	return doc.GetId(), nil
+}
+
+// Cursor returns the cursor associated with the current document (that is,
+// the document most recently returned by a call to Next).
+//
+// Passing this cursor in a future call to Search will cause those results
+// to commence with the first document after the current document.
+func (t *Iterator) Cursor() Cursor {
+	if t.searchCursor == nil {
+		return ""
+	}
+	return Cursor(*t.searchCursor)
+}
+
+// Facets returns the facets found within the search results, if any facets
+// were requested in the SearchOptions.
+func (t *Iterator) Facets() ([][]FacetResult, error) {
+	t.fetchMore()
+	if t.err != nil && t.err != Done {
+		return nil, t.err
+	}
+
+	var facets [][]FacetResult
+	for _, f := range t.facetRes {
+		fres := make([]FacetResult, 0, len(f.Value))
+		for _, v := range f.Value {
+			ref := v.Refinement
+			facet := FacetResult{
+				Facet: Facet{Name: ref.GetName()},
+				Count: int(v.GetCount()),
+			}
+			if ref.Value != nil {
+				facet.Value = Atom(*ref.Value)
+			} else {
+				facet.Value = protoToRange(ref.Range)
+			}
+			fres = append(fres, facet)
+		}
+		facets = append(facets, fres)
+	}
+	return facets, nil
+}
+
+// saveDoc converts from a struct pointer or
+// FieldLoadSaver to the Document protobuf.
+func saveDoc(src interface{}) (*pb.Document, error) {
+	var err error
+	var fields []Field
+	var meta *DocumentMetadata
+	switch x := src.(type) {
+	case FieldLoadSaver:
+		fields, meta, err = x.Save()
+	default:
+		fields, meta, err = saveStructWithMeta(src)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	fieldsProto, err := fieldsToProto(fields)
+	if err != nil {
+		return nil, err
+	}
+	d := &pb.Document{
+		Field:   fieldsProto,
+		OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+	}
+	if meta != nil {
+		if meta.Rank != 0 {
+			if !validDocRank(meta.Rank) {
+				return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+			}
+			*d.OrderId = int32(meta.Rank)
+		}
+		if len(meta.Facets) > 0 {
+			facets, err := facetsToProto(meta.Facets)
+			if err != nil {
+				return nil, err
+			}
+			d.Facet = facets
+		}
+	}
+	return d, nil
+}
+
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+	// Maps to catch duplicate time or numeric fields.
+	timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+	dst := make([]*pb.Field, 0, len(src))
+	for _, f := range src {
+		if !validFieldName(f.Name) {
+			return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+		}
+		fieldValue := &pb.FieldValue{}
+		switch x := f.Value.(type) {
+		case string:
+			fieldValue.Type = pb.FieldValue_TEXT.Enum()
+			fieldValue.StringValue = proto.String(x)
+		case Atom:
+			fieldValue.Type = pb.FieldValue_ATOM.Enum()
+			fieldValue.StringValue = proto.String(string(x))
+		case HTML:
+			fieldValue.Type = pb.FieldValue_HTML.Enum()
+			fieldValue.StringValue = proto.String(string(x))
+		case time.Time:
+			if timeFields[f.Name] {
+				return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+			}
+			timeFields[f.Name] = true
+			fieldValue.Type = pb.FieldValue_DATE.Enum()
+			fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+		case float64:
+			if numericFields[f.Name] {
+				return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+			}
+			if !validFloat(x) {
+				return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+			}
+			numericFields[f.Name] = true
+			fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+			fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+		case appengine.GeoPoint:
+			if !x.Valid() {
+				return nil, fmt.Errorf(
+					"search: GeoPoint field %q with invalid value %v",
+					f.Name, x)
+			}
+			fieldValue.Type = pb.FieldValue_GEO.Enum()
+			fieldValue.Geo = &pb.FieldValue_Geo{
+				Lat: proto.Float64(x.Lat),
+				Lng: proto.Float64(x.Lng),
+			}
+		default:
+			return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+		}
+		if f.Language != "" {
+			switch f.Value.(type) {
+			case string, HTML:
+				if !validLanguage(f.Language) {
+					return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+				}
+				fieldValue.Language = proto.String(f.Language)
+			default:
+				return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+			}
+		}
+		if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+			return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+		}
+		dst = append(dst, &pb.Field{
+			Name:  proto.String(f.Name),
+			Value: fieldValue,
+		})
+	}
+	return dst, nil
+}
+
+func facetsToProto(src []Facet) ([]*pb.Facet, error) {
+	dst := make([]*pb.Facet, 0, len(src))
+	for _, f := range src {
+		if !validFieldName(f.Name) {
+			return nil, fmt.Errorf("search: invalid facet name %q", f.Name)
+		}
+		facetValue := &pb.FacetValue{}
+		switch x := f.Value.(type) {
+		case Atom:
+			if !utf8.ValidString(string(x)) {
+				return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x)
+			}
+			facetValue.Type = pb.FacetValue_ATOM.Enum()
+			facetValue.StringValue = proto.String(string(x))
+		case float64:
+			if !validFloat(x) {
+				return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x)
+			}
+			facetValue.Type = pb.FacetValue_NUMBER.Enum()
+			facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+		default:
+			return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value))
+		}
+		dst = append(dst, &pb.Facet{
+			Name:  proto.String(f.Name),
+			Value: facetValue,
+		})
+	}
+	return dst, nil
+}
+
+// loadDoc converts from protobufs to a struct pointer or
+// FieldLoadSaver. The src param provides the document's
+// stored fields and facets, and any document metadata.  An additional slice of
+// fields, exprs, may optionally be provided to contain any derived expressions
+// requested by the developer.
+func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) {
+	fields, err := protoToFields(src.Field)
+	if err != nil {
+		return err
+	}
+	facets, err := protoToFacets(src.Facet)
+	if err != nil {
+		return err
+	}
+	if len(exprs) > 0 {
+		exprFields, err := protoToFields(exprs)
+		if err != nil {
+			return err
+		}
+		// Mark each field as derived.
+		for i := range exprFields {
+			exprFields[i].Derived = true
+		}
+		fields = append(fields, exprFields...)
+	}
+	meta := &DocumentMetadata{
+		Rank:   int(src.GetOrderId()),
+		Facets: facets,
+	}
+	switch x := dst.(type) {
+	case FieldLoadSaver:
+		return x.Load(fields, meta)
+	default:
+		return loadStructWithMeta(dst, fields, meta)
+	}
+}
+
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+	dst := make([]Field, 0, len(fields))
+	for _, field := range fields {
+		fieldValue := field.GetValue()
+		f := Field{
+			Name: field.GetName(),
+		}
+		switch fieldValue.GetType() {
+		case pb.FieldValue_TEXT:
+			f.Value = fieldValue.GetStringValue()
+			f.Language = fieldValue.GetLanguage()
+		case pb.FieldValue_ATOM:
+			f.Value = Atom(fieldValue.GetStringValue())
+		case pb.FieldValue_HTML:
+			f.Value = HTML(fieldValue.GetStringValue())
+			f.Language = fieldValue.GetLanguage()
+		case pb.FieldValue_DATE:
+			sv := fieldValue.GetStringValue()
+			millis, err := strconv.ParseInt(sv, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+			}
+			f.Value = time.Unix(0, millis*1e6)
+		case pb.FieldValue_NUMBER:
+			sv := fieldValue.GetStringValue()
+			x, err := strconv.ParseFloat(sv, 64)
+			if err != nil {
+				return nil, err
+			}
+			f.Value = x
+		case pb.FieldValue_GEO:
+			geoValue := fieldValue.GetGeo()
+			geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()}
+			if !geoPoint.Valid() {
+				return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+			}
+			f.Value = geoPoint
+		default:
+			return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+		}
+		dst = append(dst, f)
+	}
+	return dst, nil
+}
+
+func protoToFacets(facets []*pb.Facet) ([]Facet, error) {
+	if len(facets) == 0 {
+		return nil, nil
+	}
+	dst := make([]Facet, 0, len(facets))
+	for _, facet := range facets {
+		facetValue := facet.GetValue()
+		f := Facet{
+			Name: facet.GetName(),
+		}
+		switch facetValue.GetType() {
+		case pb.FacetValue_ATOM:
+			f.Value = Atom(facetValue.GetStringValue())
+		case pb.FacetValue_NUMBER:
+			sv := facetValue.GetStringValue()
+			x, err := strconv.ParseFloat(sv, 64)
+			if err != nil {
+				return nil, err
+			}
+			f.Value = x
+		default:
+			return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType())
+		}
+		dst = append(dst, f)
+	}
+	return dst, nil
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+	set := func(s **string) {
+		if *s == nil {
+			*s = &namespace
+		}
+	}
+	switch m := m.(type) {
+	case *pb.IndexDocumentRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.ListDocumentsRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.DeleteDocumentRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.SearchRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	}
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+	internal.NamespaceMods["search"] = namespaceMod
+}
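For illustration, a minimal sketch of iterating Search results until Done, assuming an App Engine request context ctx; the index name "posts", the Post type, and the query string are invented:

	package example

	import (
		"fmt"

		"golang.org/x/net/context"

		"google.golang.org/appengine/search"
	)

	type Post struct {
		Title string
		Body  string
	}

	func searchPosts(ctx context.Context) error {
		index, err := search.Open("posts")
		if err != nil {
			return err
		}
		it := index.Search(ctx, "gopher", &search.SearchOptions{Limit: 20})
		for {
			var p Post
			id, err := it.Next(&p)
			if err == search.Done {
				break // no more results
			}
			if err != nil {
				return err
			}
			fmt.Printf("%s: %s\n", id, p.Title)
		}
		return nil
	}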
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 0000000..e73d2f2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or unexported in
+// the destination struct.
+type ErrFieldMismatch struct {
+	FieldName string
+	Reason    string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+	StructType reflect.Type
+	FacetName  string
+	Reason     string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+	// byIndex returns the struct tag for the i'th struct field.
+	byIndex []structTag
+
+	// fieldByName returns the index of the struct field for the given field name.
+	fieldByName map[string]int
+
+	// facetByName returns the index of the struct field for the given facet name,
+	facetByName map[string]int
+}
+
+// structTag holds a structured version of each struct field's parsed tag.
+type structTag struct {
+	name   string
+	facet  bool
+	ignore bool
+}
+
+var (
+	codecsMu sync.RWMutex
+	codecs   = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+	codecsMu.RLock()
+	codec, ok := codecs[t]
+	codecsMu.RUnlock()
+	if ok {
+		return codec, nil
+	}
+
+	codecsMu.Lock()
+	defer codecsMu.Unlock()
+	if codec, ok := codecs[t]; ok {
+		return codec, nil
+	}
+
+	codec = &structCodec{
+		fieldByName: make(map[string]int),
+		facetByName: make(map[string]int),
+	}
+
+	for i, I := 0, t.NumField(); i < I; i++ {
+		f := t.Field(i)
+		name, opts := f.Tag.Get("search"), ""
+		if i := strings.Index(name, ","); i != -1 {
+			name, opts = name[:i], name[i+1:]
+		}
+		ignore := false
+		if name == "-" {
+			ignore = true
+		} else if name == "" {
+			name = f.Name
+		} else if !validFieldName(name) {
+			return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+		}
+		facet := opts == "facet"
+		codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+		if facet {
+			codec.facetByName[name] = i
+		} else {
+			codec.fieldByName[name] = i
+		}
+	}
+
+	codecs[t] = codec
+	return codec, nil
+}
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+	v     reflect.Value
+	codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+	var err error
+	for _, field := range fields {
+		i, ok := s.codec.fieldByName[field.Name]
+		if !ok {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "no such struct field",
+			}
+			continue
+
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "cannot set struct field",
+			}
+			continue
+		}
+		v := reflect.ValueOf(field.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	if meta == nil {
+		return err
+	}
+	for _, facet := range meta.Facets {
+		i, ok := s.codec.facetByName[facet.Name]
+		if !ok {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "no matching field found",
+				}
+			}
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "unable to set unexported field of struct",
+				}
+			}
+			continue
+		}
+		v := reflect.ValueOf(facet.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+				}
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+	fields := make([]Field, 0, len(s.codec.fieldByName))
+	var facets []Facet
+	for i, tag := range s.codec.byIndex {
+		if tag.ignore {
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			continue
+		}
+		if tag.facet {
+			facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+		} else {
+			fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+		}
+	}
+	return fields, &DocumentMetadata{Facets: facets}, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
+func newStructFLS(p interface{}) (FieldLoadSaver, error) {
+	v := reflect.ValueOf(p)
+	if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
+		return nil, ErrInvalidDocumentType
+	}
+	codec, err := loadCodec(v.Elem().Type())
+	if err != nil {
+		return nil, err
+	}
+	return structFLS{v.Elem(), codec}, nil
+}
+
+func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error {
+	x, err := newStructFLS(dst)
+	if err != nil {
+		return err
+	}
+	return x.Load(f, meta)
+}
+
+func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) {
+	x, err := newStructFLS(src)
+	if err != nil {
+		return nil, nil, err
+	}
+	return x.Save()
+}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+	return loadStructWithMeta(dst, f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+	f, _, err := saveStructWithMeta(src)
+	return f, err
+}
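For illustration, a minimal sketch of the `search` struct tag syntax handled by loadCodec (a bare name renames the field, "-" ignores it, and ",facet" stores the value as a facet in DocumentMetadata rather than as a regular field); the Product type and its tags are invented:

	package example

	import "google.golang.org/appengine/search"

	type Product struct {
		Name     string  `search:"name"`
		Internal string  `search:"-"`           // never indexed
		Price    float64 `search:"price,facet"` // indexed as a facet, not a field
	}

	func roundTrip(p *Product) (*Product, error) {
		// SaveStruct returns only the regular fields; facets travel separately
		// in DocumentMetadata when a document is Put into an index.
		fields, err := search.SaveStruct(p)
		if err != nil {
			return nil, err
		}
		var q Product
		if err := search.LoadStruct(&q, fields); err != nil {
			return nil, err
		}
		return &q, nil
	}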
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 0000000..05642a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+	if err == context.DeadlineExceeded {
+		return true
+	}
+	if t, ok := err.(interface {
+		IsTimeout() bool
+	}); ok {
+		return t.IsTimeout()
+	}
+	return false
+}
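For illustration, a minimal sketch of treating a timeout as retryable; the op callback stands in for any App Engine API call made by the caller:

	package example

	import (
		"log"

		"google.golang.org/appengine"
	)

	func retryOnce(op func() error) error {
		err := op()
		if err != nil && appengine.IsTimeoutError(err) {
			// Deadline-style failures are often transient, so retry a single
			// time before giving up.
			log.Print("call timed out; retrying once")
			err = op()
		}
		return err
	}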
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/genproto/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
new file mode 100644
index 0000000..be1c42f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
@@ -0,0 +1,100 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/label/label.proto
+// DO NOT EDIT!
+
+/*
+Package google_api is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/api/label/label.proto
+
+It has these top-level messages:
+	LabelDescriptor
+*/
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Value types that can be used as label values.
+type LabelDescriptor_ValueType int32
+
+const (
+	// A variable-length string. This is the default.
+	LabelDescriptor_STRING LabelDescriptor_ValueType = 0
+	// Boolean; true or false.
+	LabelDescriptor_BOOL LabelDescriptor_ValueType = 1
+	// A 64-bit signed integer.
+	LabelDescriptor_INT64 LabelDescriptor_ValueType = 2
+)
+
+var LabelDescriptor_ValueType_name = map[int32]string{
+	0: "STRING",
+	1: "BOOL",
+	2: "INT64",
+}
+var LabelDescriptor_ValueType_value = map[string]int32{
+	"STRING": 0,
+	"BOOL":   1,
+	"INT64":  2,
+}
+
+func (x LabelDescriptor_ValueType) String() string {
+	return proto.EnumName(LabelDescriptor_ValueType_name, int32(x))
+}
+func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+// A description of a label.
+type LabelDescriptor struct {
+	// The label key.
+	Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+	// The type of data that can be assigned to the label.
+	ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"`
+	// A human-readable description for the label.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+}
+
+func (m *LabelDescriptor) Reset()                    { *m = LabelDescriptor{} }
+func (m *LabelDescriptor) String() string            { return proto.CompactTextString(m) }
+func (*LabelDescriptor) ProtoMessage()               {}
+func (*LabelDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func init() {
+	proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor")
+	proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/label/label.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 240 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0x9c, 0xc4, 0xa4, 0xd4, 0x1c, 0x08, 0xa9, 0x07, 0x56, 0x20, 0xc4, 0x05, 0xd5, 0x0c, 0x94,
+	0x55, 0xda, 0xc9, 0xc8, 0xc5, 0xef, 0x03, 0x92, 0x73, 0x49, 0x2d, 0x4e, 0x2e, 0xca, 0x2c, 0x28,
+	0xc9, 0x2f, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c,
+	0x02, 0x31, 0x85, 0x5c, 0xb8, 0xb8, 0xca, 0x12, 0x73, 0x4a, 0x53, 0xe3, 0x4b, 0x2a, 0x0b, 0x52,
+	0x25, 0x98, 0x80, 0x12, 0x7c, 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03,
+	0xa9, 0x0e, 0x01, 0x2a, 0x0e, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a,
+	0x32, 0xf3, 0xf3, 0x24, 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a,
+	0x71, 0x71, 0xb1, 0x05, 0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38,
+	0xf9, 0xfb, 0xfb, 0x08, 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30,
+	0x39, 0x69, 0x70, 0xf1, 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90,
+	0x2f, 0x03, 0x18, 0x7f, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x71, 0x77, 0x0c, 0xf0, 0x4c, 0x62, 0x03,
+	0x7b, 0xdc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xca, 0x32, 0x56, 0x5f, 0x37, 0x01, 0x00, 0x00,
+}
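
For orientation only (illustrative, not part of the vendored files): a minimal Go sketch of constructing the generated LabelDescriptor type above, assuming the vendored import path google.golang.org/genproto/googleapis/api/label introduced by this change.

package main

import (
	"fmt"

	label "google.golang.org/genproto/googleapis/api/label"
)

func main() {
	// A string-valued label, as used by metric and monitored-resource descriptors.
	d := &label.LabelDescriptor{
		Key:         "zone",
		ValueType:   label.LabelDescriptor_STRING,
		Description: "The zone in which the resource is located.",
	}
	fmt.Println(d)
}
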
diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.proto b/vendor/google.golang.org/genproto/googleapis/api/label/label.proto
new file mode 100644
index 0000000..fec9812
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.proto
@@ -0,0 +1,48 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option java_multiple_files = true;
+option java_outer_classname = "LabelProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// A description of a label.
+message LabelDescriptor {
+  // Value types that can be used as label values.
+  enum ValueType {
+    // A variable-length string. This is the default.
+    STRING = 0;
+
+    // Boolean; true or false.
+    BOOL = 1;
+
+    // A 64-bit signed integer.
+    INT64 = 2;
+  }
+
+  // The label key.
+  string key = 1;
+
+  // The type of data that can be assigned to the label.
+  ValueType value_type = 2;
+
+  // A human-readable description for the label.
+  string description = 3;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
new file mode 100644
index 0000000..73a0fc1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
@@ -0,0 +1,303 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/metric/metric.proto
+// DO NOT EDIT!
+
+/*
+Package google_api is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/api/metric/metric.proto
+
+It has these top-level messages:
+	MetricDescriptor
+	Metric
+*/
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_api1 "google.golang.org/genproto/googleapis/api/label"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The kind of measurement. It describes how the data is reported.
+type MetricDescriptor_MetricKind int32
+
+const (
+	// Do not use this default value.
+	MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0
+	// An instantaneous measurement of a value.
+	MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1
+	// The change in a value during a time interval.
+	MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2
+	// A value accumulated over a time interval.  Cumulative
+	// measurements in a time series should have the same start time
+	// and increasing end times, until an event resets the cumulative
+	// value to zero and sets a new start time for the following
+	// points.
+	MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3
+)
+
+var MetricDescriptor_MetricKind_name = map[int32]string{
+	0: "METRIC_KIND_UNSPECIFIED",
+	1: "GAUGE",
+	2: "DELTA",
+	3: "CUMULATIVE",
+}
+var MetricDescriptor_MetricKind_value = map[string]int32{
+	"METRIC_KIND_UNSPECIFIED": 0,
+	"GAUGE":                   1,
+	"DELTA":                   2,
+	"CUMULATIVE":              3,
+}
+
+func (x MetricDescriptor_MetricKind) String() string {
+	return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x))
+}
+func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{0, 0}
+}
+
+// The value type of a metric.
+type MetricDescriptor_ValueType int32
+
+const (
+	// Do not use this default value.
+	MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0
+	// The value is a boolean.
+	// This value type can be used only if the metric kind is `GAUGE`.
+	MetricDescriptor_BOOL MetricDescriptor_ValueType = 1
+	// The value is a signed 64-bit integer.
+	MetricDescriptor_INT64 MetricDescriptor_ValueType = 2
+	// The value is a double precision floating point number.
+	MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3
+	// The value is a text string.
+	// This value type can be used only if the metric kind is `GAUGE`.
+	MetricDescriptor_STRING MetricDescriptor_ValueType = 4
+	// The value is a [`Distribution`][google.api.Distribution].
+	MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5
+	// The value is money.
+	MetricDescriptor_MONEY MetricDescriptor_ValueType = 6
+)
+
+var MetricDescriptor_ValueType_name = map[int32]string{
+	0: "VALUE_TYPE_UNSPECIFIED",
+	1: "BOOL",
+	2: "INT64",
+	3: "DOUBLE",
+	4: "STRING",
+	5: "DISTRIBUTION",
+	6: "MONEY",
+}
+var MetricDescriptor_ValueType_value = map[string]int32{
+	"VALUE_TYPE_UNSPECIFIED": 0,
+	"BOOL":         1,
+	"INT64":        2,
+	"DOUBLE":       3,
+	"STRING":       4,
+	"DISTRIBUTION": 5,
+	"MONEY":        6,
+}
+
+func (x MetricDescriptor_ValueType) String() string {
+	return proto.EnumName(MetricDescriptor_ValueType_name, int32(x))
+}
+func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{0, 1}
+}
+
+// Defines a metric type and its schema.
+type MetricDescriptor struct {
+	// Resource name. The format of the name may vary between different
+	// implementations. For examples:
+	//
+	//     projects/{project_id}/metricDescriptors/{type=**}
+	//     metricDescriptors/{type=**}
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The metric type including a DNS name prefix, for example
+	// `"compute.googleapis.com/instance/cpu/utilization"`. Metric types
+	// should use a natural hierarchical grouping such as the following:
+	//
+	//     compute.googleapis.com/instance/cpu/utilization
+	//     compute.googleapis.com/instance/disk/read_ops_count
+	//     compute.googleapis.com/instance/network/received_bytes_count
+	//
+	// Note that if the metric type changes, the monitoring data will be
+	// discontinued, and anything that depends on it will break, such as monitoring
+	// dashboards, alerting rules and quota limits. Therefore, once a metric has
+	// been published, its type should be immutable.
+	Type string `protobuf:"bytes,8,opt,name=type" json:"type,omitempty"`
+	// The set of labels that can be used to describe a specific instance of this
+	// metric type. For example, the
+	// `compute.googleapis.com/instance/network/received_bytes_count` metric type
+	// has a label, `loadbalanced`, that specifies whether the traffic was
+	// received through a load balanced IP address.
+	Labels []*google_api1.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"`
+	// Whether the metric records instantaneous values, changes to a value, etc.
+	MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+	// Whether the measurement is an integer, a floating-point number, etc.
+	ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+	// The unit in which the metric value is reported. It is only applicable
+	// if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The
+	// supported units are a subset of [The Unified Code for Units of
+	// Measure](http://unitsofmeasure.org/ucum.html) standard:
+	//
+	// **Basic units (UNIT)**
+	//
+	// * `bit`   bit
+	// * `By`    byte
+	// * `s`     second
+	// * `min`   minute
+	// * `h`     hour
+	// * `d`     day
+	//
+	// **Prefixes (PREFIX)**
+	//
+	// * `k`     kilo    (10**3)
+	// * `M`     mega    (10**6)
+	// * `G`     giga    (10**9)
+	// * `T`     tera    (10**12)
+	// * `P`     peta    (10**15)
+	// * `E`     exa     (10**18)
+	// * `Z`     zetta   (10**21)
+	// * `Y`     yotta   (10**24)
+	// * `m`     milli   (10**-3)
+	// * `u`     micro   (10**-6)
+	// * `n`     nano    (10**-9)
+	// * `p`     pico    (10**-12)
+	// * `f`     femto   (10**-15)
+	// * `a`     atto    (10**-18)
+	// * `z`     zepto   (10**-21)
+	// * `y`     yocto   (10**-24)
+	// * `Ki`    kibi    (2**10)
+	// * `Mi`    mebi    (2**20)
+	// * `Gi`    gibi    (2**30)
+	// * `Ti`    tebi    (2**40)
+	//
+	// **Grammar**
+	//
+	// The grammar includes the dimensionless unit `1`, such as `1/s`.
+	//
+	// The grammar also includes these connectors:
+	//
+	// * `/`    division (as an infix operator, e.g. `1/s`).
+	// * `.`    multiplication (as an infix operator, e.g. `GBy.d`)
+	//
+	// The grammar for a unit is as follows:
+	//
+	//     Expression = Component { "." Component } { "/" Component } ;
+	//
+	//     Component = [ PREFIX ] UNIT [ Annotation ]
+	//               | Annotation
+	//               | "1"
+	//               ;
+	//
+	//     Annotation = "{" NAME "}" ;
+	//
+	// Notes:
+	//
+	// * `Annotation` is just a comment if it follows a `UNIT` and is
+	//    equivalent to `1` if it is used alone. For examples,
+	//    `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.
+	// * `NAME` is a sequence of non-blank printable ASCII characters not
+	//    containing '{' or '}'.
+	Unit string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+	// A detailed description of the metric, which can be used in documentation.
+	Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
+	// A concise name for the metric, which can be displayed in user interfaces.
+	// Use sentence case without an ending period, for example "Request count".
+	DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+}
+
+func (m *MetricDescriptor) Reset()                    { *m = MetricDescriptor{} }
+func (m *MetricDescriptor) String() string            { return proto.CompactTextString(m) }
+func (*MetricDescriptor) ProtoMessage()               {}
+func (*MetricDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *MetricDescriptor) GetLabels() []*google_api1.LabelDescriptor {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// A specific metric identified by specifying values for all of the
+// labels of a [`MetricDescriptor`][google.api.MetricDescriptor].
+type Metric struct {
+	// An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor].
+	// For example, `compute.googleapis.com/instance/cpu/usage_time`.
+	Type string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"`
+	// The set of labels that uniquely identify a metric. To specify a
+	// metric, all labels enumerated in the `MetricDescriptor` must be
+	// assigned values.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Metric) Reset()                    { *m = Metric{} }
+func (m *Metric) String() string            { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage()               {}
+func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Metric) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor")
+	proto.RegisterType((*Metric)(nil), "google.api.Metric")
+	proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value)
+	proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/metric/metric.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 498 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x52, 0xdf, 0x6b, 0x9b, 0x50,
+	0x14, 0x9e, 0x89, 0x71, 0xcd, 0x49, 0x09, 0x72, 0x19, 0x9b, 0xa4, 0x30, 0xb2, 0x3c, 0x74, 0x7d,
+	0x4a, 0xa0, 0x1d, 0x65, 0x3f, 0xd8, 0x83, 0xc6, 0xbb, 0x4c, 0x6a, 0x54, 0xac, 0x06, 0xfa, 0x24,
+	0x36, 0x11, 0x91, 0x1a, 0x75, 0x6a, 0x0b, 0xf9, 0x2b, 0xf6, 0x17, 0xec, 0x65, 0x7f, 0xe9, 0xee,
+	0x0f, 0x9b, 0x48, 0x06, 0x63, 0x2f, 0xe6, 0xbb, 0xdf, 0x39, 0xe7, 0xbb, 0xdf, 0xb9, 0xf9, 0xe0,
+	0x6b, 0x9c, 0xe7, 0x71, 0x1a, 0x4d, 0xe3, 0x3c, 0x0d, 0xb3, 0x78, 0x9a, 0x97, 0xf1, 0x2c, 0x8e,
+	0xb2, 0xa2, 0xcc, 0xeb, 0x7c, 0xc6, 0x4b, 0x61, 0x91, 0x54, 0x33, 0xf2, 0x99, 0x6d, 0xa3, 0xba,
+	0x4c, 0xd6, 0xcd, 0xcf, 0x94, 0xb5, 0x20, 0x68, 0xc6, 0x49, 0x7d, 0xf4, 0xe5, 0xff, 0xa5, 0xd2,
+	0xf0, 0x3e, 0x4a, 0xf9, 0x97, 0x0b, 0x4d, 0x7e, 0x89, 0x20, 0x2f, 0x99, 0xb2, 0x1e, 0x55, 0xeb,
+	0x32, 0x29, 0xea, 0xbc, 0x44, 0x08, 0xc4, 0x2c, 0xdc, 0x46, 0x8a, 0x30, 0x16, 0x2e, 0xfa, 0x2e,
+	0xc3, 0x94, 0xab, 0x77, 0x45, 0xa4, 0x9c, 0x70, 0x8e, 0x62, 0x74, 0x05, 0x12, 0xd3, 0xaa, 0x94,
+	0xce, 0xb8, 0x7b, 0x31, 0xb8, 0x3c, 0x9b, 0x1e, 0x6c, 0x4d, 0x4d, 0x5a, 0x39, 0x88, 0xba, 0x4d,
+	0x2b, 0xfa, 0x0e, 0x03, 0xbe, 0x4a, 0xf0, 0x90, 0x64, 0x1b, 0xa5, 0x4b, 0xf4, 0x86, 0x97, 0xef,
+	0xdb, 0x93, 0xc7, 0x7e, 0x1a, 0xe2, 0x86, 0xb4, 0xbb, 0xb0, 0xdd, 0x63, 0x84, 0x01, 0x9e, 0xc2,
+	0xf4, 0x31, 0x0a, 0x98, 0x31, 0x91, 0x09, 0x9d, 0xff, 0x53, 0x68, 0x45, 0xdb, 0x3d, 0xd2, 0xed,
+	0xf6, 0x9f, 0x9e, 0x21, 0xdd, 0xec, 0x31, 0x4b, 0x6a, 0xa5, 0xc7, 0x37, 0xa3, 0x18, 0x8d, 0x61,
+	0xb0, 0x69, 0xc6, 0x92, 0x3c, 0x53, 0x24, 0x56, 0x6a, 0x53, 0xe8, 0x1d, 0x9c, 0x6e, 0x92, 0xaa,
+	0x48, 0xc3, 0x5d, 0xc0, 0xde, 0xea, 0x65, 0xd3, 0xc2, 0x39, 0x8b, 0x50, 0x13, 0x1b, 0xe0, 0xe0,
+	0x1c, 0x9d, 0xc1, 0x9b, 0x25, 0xf6, 0x5c, 0x63, 0x1e, 0xdc, 0x18, 0x96, 0x1e, 0xf8, 0xd6, 0xad,
+	0x83, 0xe7, 0xc6, 0x37, 0x03, 0xeb, 0xf2, 0x0b, 0xd4, 0x87, 0xde, 0x42, 0xf5, 0x17, 0x58, 0x16,
+	0x28, 0xd4, 0xb1, 0xe9, 0xa9, 0x72, 0x07, 0x0d, 0x01, 0xe6, 0xfe, 0xd2, 0x37, 0x55, 0xcf, 0x58,
+	0x61, 0xb9, 0x3b, 0xf9, 0x01, 0xfd, 0xfd, 0x06, 0x68, 0x04, 0xaf, 0x57, 0xaa, 0xe9, 0xe3, 0xc0,
+	0xbb, 0x73, 0xf0, 0x91, 0xdc, 0x09, 0x88, 0x9a, 0x6d, 0x9b, 0x5c, 0xcd, 0xb0, 0xbc, 0xeb, 0x0f,
+	0x44, 0x0d, 0x40, 0xd2, 0x6d, 0x5f, 0x33, 0x89, 0x12, 0xc5, 0xb7, 0xc4, 0x8b, 0xb5, 0x90, 0x45,
+	0x24, 0xc3, 0xa9, 0x6e, 0xd0, 0x93, 0xe6, 0x7b, 0x86, 0x6d, 0xc9, 0x3d, 0x3a, 0xb4, 0xb4, 0x2d,
+	0x7c, 0x27, 0x4b, 0x93, 0x9f, 0x02, 0x48, 0x7c, 0x89, 0x7d, 0x02, 0xba, 0xad, 0x04, 0x5c, 0x1f,
+	0x25, 0xe0, 0xed, 0xdf, 0xcf, 0xcf, 0x83, 0x50, 0xe1, 0xac, 0x2e, 0x77, 0xcf, 0x21, 0x18, 0x7d,
+	0x82, 0x41, 0x8b, 0x26, 0x16, 0xba, 0x0f, 0xd1, 0xae, 0xc9, 0x1b, 0x85, 0xe8, 0x15, 0xf4, 0xd8,
+	0x3f, 0x44, 0x74, 0x29, 0xc7, 0x0f, 0x9f, 0x3b, 0x1f, 0x05, 0xed, 0x1c, 0x86, 0xeb, 0x7c, 0xdb,
+	0xba, 0x47, 0x1b, 0xf0, 0x8b, 0x1c, 0x1a, 0x68, 0x47, 0xf8, 0xdd, 0x11, 0x17, 0xaa, 0x63, 0xdc,
+	0x4b, 0x2c, 0xe0, 0x57, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x86, 0xb0, 0x69, 0x6a, 0x03,
+	0x00, 0x00,
+}
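
Again for orientation only (illustrative, not part of the vendored files): a short Go sketch of how the generated MetricDescriptor and Metric types above relate, assuming the vendored import paths; the metric type and label name are taken from the examples in the comments.

package main

import (
	"fmt"

	label "google.golang.org/genproto/googleapis/api/label"
	metric "google.golang.org/genproto/googleapis/api/metric"
)

func main() {
	// A descriptor for a cumulative 64-bit counter with a single string label.
	md := &metric.MetricDescriptor{
		Type:       "compute.googleapis.com/instance/disk/read_ops_count",
		MetricKind: metric.MetricDescriptor_CUMULATIVE,
		ValueType:  metric.MetricDescriptor_INT64,
		Unit:       "1",
		Labels: []*label.LabelDescriptor{
			{Key: "device_name", ValueType: label.LabelDescriptor_STRING},
		},
	}

	// A concrete metric refers to the descriptor by type and assigns a
	// value to every label declared by that descriptor.
	m := &metric.Metric{
		Type:   md.Type,
		Labels: map[string]string{"device_name": "sda1"},
	}
	fmt.Println(md, m)
}
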
diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.proto b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.proto
new file mode 100644
index 0000000..e896705
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.proto
@@ -0,0 +1,193 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/label/label.proto"; // from google/api/label.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "MetricProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines a metric type and its schema.
+message MetricDescriptor {
+  // The kind of measurement. It describes how the data is reported.
+  enum MetricKind {
+    // Do not use this default value.
+    METRIC_KIND_UNSPECIFIED = 0;
+
+    // An instantaneous measurement of a value.
+    GAUGE = 1;
+
+    // The change in a value during a time interval.
+    DELTA = 2;
+
+    // A value accumulated over a time interval.  Cumulative
+    // measurements in a time series should have the same start time
+    // and increasing end times, until an event resets the cumulative
+    // value to zero and sets a new start time for the following
+    // points.
+    CUMULATIVE = 3;
+  }
+
+  // The value type of a metric.
+  enum ValueType {
+    // Do not use this default value.
+    VALUE_TYPE_UNSPECIFIED = 0;
+
+    // The value is a boolean.
+    // This value type can be used only if the metric kind is `GAUGE`.
+    BOOL = 1;
+
+    // The value is a signed 64-bit integer.
+    INT64 = 2;
+
+    // The value is a double precision floating point number.
+    DOUBLE = 3;
+
+    // The value is a text string.
+    // This value type can be used only if the metric kind is `GAUGE`.
+    STRING = 4;
+
+    // The value is a [`Distribution`][google.api.Distribution].
+    DISTRIBUTION = 5;
+
+    // The value is money.
+    MONEY = 6;
+  }
+
+  // Resource name. The format of the name may vary between different
+  // implementations. For examples:
+  //
+  //     projects/{project_id}/metricDescriptors/{type=**}
+  //     metricDescriptors/{type=**}
+  string name = 1;
+
+  // The metric type including a DNS name prefix, for example
+  // `"compute.googleapis.com/instance/cpu/utilization"`. Metric types
+  // should use a natural hierarchical grouping such as the following:
+  //
+  //     compute.googleapis.com/instance/cpu/utilization
+  //     compute.googleapis.com/instance/disk/read_ops_count
+  //     compute.googleapis.com/instance/network/received_bytes_count
+  //
+  // Note that if the metric type changes, the monitoring data will be
+  // discontinued, and anything that depends on it will break, such as monitoring
+  // dashboards, alerting rules and quota limits. Therefore, once a metric has
+  // been published, its type should be immutable.
+  string type = 8;
+
+  // The set of labels that can be used to describe a specific instance of this
+  // metric type. For example, the
+  // `compute.googleapis.com/instance/network/received_bytes_count` metric type
+  // has a label, `loadbalanced`, that specifies whether the traffic was
+  // received through a load balanced IP address.
+  repeated LabelDescriptor labels = 2;
+
+  // Whether the metric records instantaneous values, changes to a value, etc.
+  MetricKind metric_kind = 3;
+
+  // Whether the measurement is an integer, a floating-point number, etc.
+  ValueType value_type = 4;
+
+  // The unit in which the metric value is reported. It is only applicable
+  // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The
+  // supported units are a subset of [The Unified Code for Units of
+  // Measure](http://unitsofmeasure.org/ucum.html) standard:
+  //
+  // **Basic units (UNIT)**
+  //
+  // * `bit`   bit
+  // * `By`    byte
+  // * `s`     second
+  // * `min`   minute
+  // * `h`     hour
+  // * `d`     day
+  //
+  // **Prefixes (PREFIX)**
+  //
+  // * `k`     kilo    (10**3)
+  // * `M`     mega    (10**6)
+  // * `G`     giga    (10**9)
+  // * `T`     tera    (10**12)
+  // * `P`     peta    (10**15)
+  // * `E`     exa     (10**18)
+  // * `Z`     zetta   (10**21)
+  // * `Y`     yotta   (10**24)
+  // * `m`     milli   (10**-3)
+  // * `u`     micro   (10**-6)
+  // * `n`     nano    (10**-9)
+  // * `p`     pico    (10**-12)
+  // * `f`     femto   (10**-15)
+  // * `a`     atto    (10**-18)
+  // * `z`     zepto   (10**-21)
+  // * `y`     yocto   (10**-24)
+  // * `Ki`    kibi    (2**10)
+  // * `Mi`    mebi    (2**20)
+  // * `Gi`    gibi    (2**30)
+  // * `Ti`    tebi    (2**40)
+  //
+  // **Grammar**
+  //
+  // The grammar includes the dimensionless unit `1`, such as `1/s`.
+  //
+  // The grammar also includes these connectors:
+  //
+  // * `/`    division (as an infix operator, e.g. `1/s`).
+  // * `.`    multiplication (as an infix operator, e.g. `GBy.d`)
+  //
+  // The grammar for a unit is as follows:
+  //
+  //     Expression = Component { "." Component } { "/" Component } ;
+  //
+  //     Component = [ PREFIX ] UNIT [ Annotation ]
+  //               | Annotation
+  //               | "1"
+  //               ;
+  //
+  //     Annotation = "{" NAME "}" ;
+  //
+  // Notes:
+  //
+  // * `Annotation` is just a comment if it follows a `UNIT` and is
+  //    equivalent to `1` if it is used alone. For examples,
+  //    `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.
+  // * `NAME` is a sequence of non-blank printable ASCII characters not
+  //    containing '{' or '}'.
+  string unit = 5;
+
+  // A detailed description of the metric, which can be used in documentation.
+  string description = 6;
+
+  // A concise name for the metric, which can be displayed in user interfaces.
+  // Use sentence case without an ending period, for example "Request count".
+  string display_name = 7;
+}
+
+// A specific metric identified by specifying values for all of the
+// labels of a [`MetricDescriptor`][google.api.MetricDescriptor].
+message Metric {
+  // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor].
+  // For example, `compute.googleapis.com/instance/cpu/usage_time`.
+  string type = 3;
+
+  // The set of labels that uniquely identify a metric. To specify a
+  // metric, all labels enumerated in the `MetricDescriptor` must be
+  // assigned values.
+  map<string, string> labels = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
new file mode 100644
index 0000000..13a7873
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
@@ -0,0 +1,148 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto
+// DO NOT EDIT!
+
+/*
+Package monitoredres is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto
+
+It has these top-level messages:
+	MonitoredResourceDescriptor
+	MonitoredResource
+*/
+package monitoredres
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_api "google.golang.org/genproto/googleapis/api/label"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
+// type name and a set of labels.  For example, the monitored resource
+// descriptor for Google Compute Engine VM instances has a type of
+// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
+// `"zone"` to identify particular VM instances.
+//
+// Different APIs can support different monitored resource types. APIs generally
+// provide a `list` method that returns the monitored resource descriptors used
+// by the API.
+type MonitoredResourceDescriptor struct {
+	// Optional. The resource name of the monitored resource descriptor:
+	// `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
+	// {type} is the value of the `type` field in this object and
+	// {project_id} is a project ID that provides API-specific context for
+	// accessing the type.  APIs that do not use project information can use the
+	// resource name format `"monitoredResourceDescriptors/{type}"`.
+	Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"`
+	// Required. The monitored resource type. For example, the type
+	// `"cloudsql_database"` represents databases in Google Cloud SQL.
+	// The maximum length of this value is 256 characters.
+	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	// Optional. A concise name for the monitored resource type that might be
+	// displayed in user interfaces. It should be a Title Cased Noun Phrase,
+	// without any article or other determiners. For example,
+	// `"Google Cloud SQL Database"`.
+	DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+	// Optional. A detailed description of the monitored resource type that might
+	// be used in documentation.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// Required. A set of labels used to describe instances of this monitored
+	// resource type. For example, an individual Google Cloud SQL database is
+	// identified by values for the labels `"database_id"` and `"zone"`.
+	Labels []*google_api.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"`
+}
+
+func (m *MonitoredResourceDescriptor) Reset()                    { *m = MonitoredResourceDescriptor{} }
+func (m *MonitoredResourceDescriptor) String() string            { return proto.CompactTextString(m) }
+func (*MonitoredResourceDescriptor) ProtoMessage()               {}
+func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// An object representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+//     { "type": "gce_instance",
+//       "labels": { "instance_id": "12345678901234",
+//                   "zone": "us-central1-a" }}
+type MonitoredResource struct {
+	// Required. The monitored resource type. This field must match
+	// the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+	// example, the type of a Cloud SQL database is `"cloudsql_database"`.
+	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	// Required. Values for all of the labels listed in the associated monitored
+	// resource descriptor. For example, Cloud SQL databases use the labels
+	// `"database_id"` and `"zone"`.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *MonitoredResource) Reset()                    { *m = MonitoredResource{} }
+func (m *MonitoredResource) String() string            { return proto.CompactTextString(m) }
+func (*MonitoredResource) ProtoMessage()               {}
+func (*MonitoredResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *MonitoredResource) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor")
+	proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 324 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xf2, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0xdc, 0xfc, 0xbc, 0xcc, 0x92, 0xfc, 0xa2, 0xd4, 0x94, 0xa2, 0xd4, 0x62, 0x04, 0x27, 0x1e,
+	0xc8, 0xcb, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x03, 0x6b, 0x12, 0xe2, 0x82, 0x1a, 0x08, 0xd4, 0x21,
+	0x65, 0x4d, 0xbc, 0xe1, 0x39, 0x89, 0x49, 0xa9, 0x39, 0x10, 0x12, 0x62, 0x90, 0xd2, 0x7e, 0x46,
+	0x2e, 0x69, 0x5f, 0x98, 0x2d, 0x41, 0x50, 0x4b, 0x5c, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x80,
+	0x62, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0xac, 0x0a, 0x8c, 0x1a, 0x9c, 0x41,
+	0x60, 0x36, 0x48, 0xac, 0xa4, 0xb2, 0x20, 0x55, 0x82, 0x11, 0x22, 0x06, 0x62, 0x0b, 0x29, 0x72,
+	0xf1, 0xa4, 0x64, 0x16, 0x17, 0xe4, 0x24, 0x56, 0xc6, 0x83, 0xd5, 0x33, 0x81, 0xe5, 0xb8, 0xa1,
+	0x62, 0x7e, 0x20, 0x6d, 0x0a, 0x5c, 0xdc, 0x29, 0x50, 0x83, 0x33, 0xf3, 0xf3, 0x24, 0x98, 0xa1,
+	0x2a, 0x10, 0x42, 0x42, 0xc6, 0x5c, 0x6c, 0x60, 0xb7, 0x15, 0x4b, 0xb0, 0x28, 0x30, 0x6b, 0x70,
+	0x1b, 0x49, 0xeb, 0x21, 0xbc, 0xa9, 0xe7, 0x03, 0x92, 0x41, 0xb8, 0x2c, 0x08, 0xaa, 0x54, 0x69,
+	0x29, 0x23, 0x97, 0x20, 0x86, 0x0f, 0xb0, 0xba, 0xd1, 0x11, 0x6e, 0x3c, 0x13, 0xd8, 0x78, 0x4d,
+	0x64, 0xe3, 0x31, 0x8c, 0x80, 0x58, 0x58, 0xec, 0x9a, 0x57, 0x52, 0x54, 0x09, 0xb3, 0x4c, 0xca,
+	0x92, 0x8b, 0x1b, 0x49, 0x58, 0x48, 0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x12, 0x6a, 0x09, 0x88, 0x29,
+	0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x0a, 0x0b, 0x00, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1,
+	0x29, 0x87, 0x8b, 0x2f, 0x39, 0x3f, 0x17, 0xc9, 0x4a, 0x27, 0x31, 0x0c, 0x3b, 0x03, 0x40, 0x71,
+	0x12, 0xc0, 0x18, 0x65, 0x46, 0x5e, 0x7a, 0xf9, 0xc1, 0xc8, 0xb8, 0x88, 0x89, 0xc5, 0xdd, 0x31,
+	0xc0, 0x33, 0x89, 0x0d, 0xac, 0xd8, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x87, 0xa2, 0x37,
+	0x7a, 0x02, 0x00, 0x00,
+}
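
For orientation only (illustrative, not part of the vendored files): the Compute Engine VM example from the comments above, expressed with the generated Go type and assuming the vendored import path.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/monitoredres"
)

func main() {
	// A monitored resource is a type plus values for that type's labels.
	res := &monitoredres.MonitoredResource{
		Type: "gce_instance",
		Labels: map[string]string{
			"instance_id": "12345678901234",
			"zone":        "us-central1-a",
		},
	}
	fmt.Println(res)
}
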
diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto
new file mode 100644
index 0000000..e01b621
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto
@@ -0,0 +1,91 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/label/label.proto"; // from google/api/label.proto
+
+option cc_enable_arenas = true;
+option java_multiple_files = true;
+option java_outer_classname = "MonitoredResourceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+option go_package = "google.golang.org/genproto/googleapis/api/monitoredres";
+
+// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
+// type name and a set of labels.  For example, the monitored resource
+// descriptor for Google Compute Engine VM instances has a type of
+// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
+// `"zone"` to identify particular VM instances.
+//
+// Different APIs can support different monitored resource types. APIs generally
+// provide a `list` method that returns the monitored resource descriptors used
+// by the API.
+message MonitoredResourceDescriptor {
+  // Optional. The resource name of the monitored resource descriptor:
+  // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
+  // {type} is the value of the `type` field in this object and
+  // {project_id} is a project ID that provides API-specific context for
+  // accessing the type.  APIs that do not use project information can use the
+  // resource name format `"monitoredResourceDescriptors/{type}"`.
+  string name = 5;
+
+  // Required. The monitored resource type. For example, the type
+  // `"cloudsql_database"` represents databases in Google Cloud SQL.
+  // The maximum length of this value is 256 characters.
+  string type = 1;
+
+  // Optional. A concise name for the monitored resource type that might be
+  // displayed in user interfaces. It should be a Title Cased Noun Phrase,
+  // without any article or other determiners. For example,
+  // `"Google Cloud SQL Database"`.
+  string display_name = 2;
+
+  // Optional. A detailed description of the monitored resource type that might
+  // be used in documentation.
+  string description = 3;
+
+  // Required. A set of labels used to describe instances of this monitored
+  // resource type. For example, an individual Google Cloud SQL database is
+  // identified by values for the labels `"database_id"` and `"zone"`.
+  repeated LabelDescriptor labels = 4;
+}
+
+// An object representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+//     { "type": "gce_instance",
+//       "labels": { "instance_id": "12345678901234",
+//                   "zone": "us-central1-a" }}
+message MonitoredResource {
+  // Required. The monitored resource type. This field must match
+  // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+  // example, the type of a Cloud SQL database is `"cloudsql_database"`.
+  string type = 1;
+
+  // Required. Values for all of the labels listed in the associated monitored
+  // resource descriptor. For example, Cloud SQL databases use the labels
+  // `"database_id"` and `"zone"`.
+  map<string, string> labels = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.pb.go
new file mode 100644
index 0000000..e5d4bf7
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.pb.go
@@ -0,0 +1,108 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto
+// DO NOT EDIT!
+
+/*
+Package google_api is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/context.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/control.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/http.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/log.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/service.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto
+	google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto
+
+It has these top-level messages:
+	Authentication
+	AuthenticationRule
+	AuthProvider
+	OAuthRequirements
+	AuthRequirement
+	Backend
+	BackendRule
+	Billing
+	BillingStatusRule
+	ProjectProperties
+	Property
+	Context
+	ContextRule
+	Control
+	Documentation
+	DocumentationRule
+	Page
+	Endpoint
+	Http
+	HttpRule
+	CustomHttpPattern
+	LogDescriptor
+	Logging
+	Monitoring
+	Service
+	SystemParameters
+	SystemParameterRule
+	SystemParameter
+	Usage
+	UsageRule
+*/
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "google.golang.org/genproto/protobuf"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+var E_Http = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.MethodOptions)(nil),
+	ExtensionType: (*HttpRule)(nil),
+	Field:         72295728,
+	Name:          "google.api.http",
+	Tag:           "bytes,72295728,opt,name=http",
+}
+
+func init() {
+	proto.RegisterExtension(E_Http)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 211 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xf2, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0xe2, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0xd4, 0xe4, 0xfc, 0xbc, 0xb4, 0xcc, 0x74, 0xfd, 0xc4,
+	0xbc, 0xbc, 0xfc, 0x92, 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x3d, 0xb0, 0x72, 0x21, 0x2e, 0xa8,
+	0x51, 0x40, 0xb5, 0x52, 0x4e, 0xe4, 0x1a, 0x9b, 0x51, 0x52, 0x52, 0x00, 0x31, 0x4f, 0xca, 0x04,
+	0x8f, 0x19, 0x60, 0x32, 0xa9, 0x34, 0x4d, 0x3f, 0x25, 0xb5, 0x38, 0xb9, 0x28, 0xb3, 0xa0, 0x24,
+	0xbf, 0x08, 0xa2, 0xcb, 0xca, 0x9b, 0x8b, 0x05, 0x64, 0x86, 0x90, 0x9c, 0x1e, 0x54, 0x3b, 0x4c,
+	0xa9, 0x9e, 0x6f, 0x6a, 0x49, 0x46, 0x7e, 0x8a, 0x7f, 0x01, 0xd8, 0xcd, 0x12, 0x1b, 0x4e, 0xed,
+	0x51, 0x52, 0x60, 0xd4, 0xe0, 0x36, 0x12, 0xd1, 0x43, 0xb8, 0x5b, 0xcf, 0x03, 0xa8, 0x35, 0xa8,
+	0x34, 0x27, 0x35, 0x08, 0x6c, 0x88, 0x93, 0x36, 0x17, 0x5f, 0x72, 0x7e, 0x2e, 0x92, 0x02, 0x27,
+	0x01, 0x47, 0x84, 0xbf, 0x03, 0x40, 0x26, 0x07, 0x30, 0x2e, 0x62, 0x62, 0x71, 0x77, 0x0c, 0xf0,
+	0x4c, 0x62, 0x03, 0xdb, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x01, 0xd8, 0x8e, 0xc1, 0x53,
+	0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto
new file mode 100644
index 0000000..306bc40
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto
@@ -0,0 +1,30 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/http.proto"; // from google/api/http.proto
+import "google.golang.org/genproto/protobuf/descriptor.proto"; // from google/protobuf/descriptor.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go
new file mode 100644
index 0000000..8f3670a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Authentication` defines the authentication configuration for an API.
+//
+// Example for an API targeted for external use:
+//
+//     name: calendar.googleapis.com
+//     authentication:
+//       rules:
+//       - selector: "*"
+//         oauth:
+//           canonical_scopes: https://www.googleapis.com/auth/calendar
+//
+//       - selector: google.calendar.Delegate
+//         oauth:
+//           canonical_scopes: https://www.googleapis.com/auth/calendar.read
+type Authentication struct {
+	// A list of authentication rules that apply to individual API methods.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*AuthenticationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"`
+	// Defines a set of authentication providers that a service supports.
+	Providers []*AuthProvider `protobuf:"bytes,4,rep,name=providers" json:"providers,omitempty"`
+}
+
+func (m *Authentication) Reset()                    { *m = Authentication{} }
+func (m *Authentication) String() string            { return proto.CompactTextString(m) }
+func (*Authentication) ProtoMessage()               {}
+func (*Authentication) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *Authentication) GetRules() []*AuthenticationRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+func (m *Authentication) GetProviders() []*AuthProvider {
+	if m != nil {
+		return m.Providers
+	}
+	return nil
+}
+
+// Authentication rules for the service.
+//
+// By default, if a method has any authentication requirements, every request
+// must include a valid credential matching one of the requirements.
+// It's an error to include more than one kind of credential in a single
+// request.
+//
+// If a method doesn't have any auth requirements, request credentials will be
+// ignored.
+type AuthenticationRule struct {
+	// Selects the methods to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// The requirements for OAuth credentials.
+	Oauth *OAuthRequirements `protobuf:"bytes,2,opt,name=oauth" json:"oauth,omitempty"`
+	// Whether to allow requests without a credential. The credential can be
+	// an OAuth token, Google cookies (first-party auth) or EndUserCreds.
+	//
+	// For requests without credentials, if the service control environment is
+	// specified, each incoming request **must** be associated with a service
+	// consumer. This can be done by passing an API key that belongs to a consumer
+	// project.
+	AllowWithoutCredential bool `protobuf:"varint,5,opt,name=allow_without_credential,json=allowWithoutCredential" json:"allow_without_credential,omitempty"`
+	// Requirements for additional authentication providers.
+	Requirements []*AuthRequirement `protobuf:"bytes,7,rep,name=requirements" json:"requirements,omitempty"`
+}
+
+func (m *AuthenticationRule) Reset()                    { *m = AuthenticationRule{} }
+func (m *AuthenticationRule) String() string            { return proto.CompactTextString(m) }
+func (*AuthenticationRule) ProtoMessage()               {}
+func (*AuthenticationRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *AuthenticationRule) GetOauth() *OAuthRequirements {
+	if m != nil {
+		return m.Oauth
+	}
+	return nil
+}
+
+func (m *AuthenticationRule) GetRequirements() []*AuthRequirement {
+	if m != nil {
+		return m.Requirements
+	}
+	return nil
+}
+
+// Configuration for an authentication provider, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+type AuthProvider struct {
+	// The unique identifier of the auth provider. It will be referred to by
+	// `AuthRequirement.provider_id`.
+	//
+	// Example: "bookstore_auth".
+	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	// Identifies the principal that issued the JWT. See
+	// https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
+	// Usually a URL or an email address.
+	//
+	// Example: https://securetoken.google.com
+	// Example: 1234567-compute@developer.gserviceaccount.com
+	Issuer string `protobuf:"bytes,2,opt,name=issuer" json:"issuer,omitempty"`
+	// URL of the provider's public key set to validate signature of the JWT. See
+	// [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+	// Optional if the key set document:
+	//  - can be retrieved from
+	//    [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html)
+	//    of the issuer.
+	//  - can be inferred from the email domain of the issuer (e.g. a Google service account).
+	//
+	// Example: https://www.googleapis.com/oauth2/v1/certs
+	JwksUri string `protobuf:"bytes,3,opt,name=jwks_uri,json=jwksUri" json:"jwks_uri,omitempty"`
+}
+
+func (m *AuthProvider) Reset()                    { *m = AuthProvider{} }
+func (m *AuthProvider) String() string            { return proto.CompactTextString(m) }
+func (*AuthProvider) ProtoMessage()               {}
+func (*AuthProvider) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+// OAuth scopes are a way to define data and permissions on data. For example,
+// there are scopes defined for "Read-only access to Google Calendar" and
+// "Access to Cloud Platform". Users can consent to a scope for an application,
+// giving it permission to access that data on their behalf.
+//
+// OAuth scope specifications should be fairly coarse grained; a user will need
+// to see and understand the text description of what your scope means.
+//
+// In most cases: use one or at most two OAuth scopes for an entire family of
+// products. If your product has multiple APIs, you should probably be sharing
+// the OAuth scope across all of those APIs.
+//
+// When you need finer grained OAuth consent screens: talk with your product
+// management about how developers will use them in practice.
+//
+// Please note that even though each of the canonical scopes is enough for a
+// request to be accepted and passed to the backend, a request can still fail
+// due to the backend requiring additional scopes or permissions.
+type OAuthRequirements struct {
+	// The list of publicly documented OAuth scopes that are allowed access. An
+	// OAuth token containing any of these scopes will be accepted.
+	//
+	// Example:
+	//
+	//      canonical_scopes: https://www.googleapis.com/auth/calendar,
+	//                        https://www.googleapis.com/auth/calendar.read
+	CanonicalScopes string `protobuf:"bytes,1,opt,name=canonical_scopes,json=canonicalScopes" json:"canonical_scopes,omitempty"`
+}
+
+func (m *OAuthRequirements) Reset()                    { *m = OAuthRequirements{} }
+func (m *OAuthRequirements) String() string            { return proto.CompactTextString(m) }
+func (*OAuthRequirements) ProtoMessage()               {}
+func (*OAuthRequirements) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+// User-defined authentication requirements, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+type AuthRequirement struct {
+	// [id][google.api.AuthProvider.id] from authentication provider.
+	//
+	// Example:
+	//
+	//     provider_id: bookstore_auth
+	ProviderId string `protobuf:"bytes,1,opt,name=provider_id,json=providerId" json:"provider_id,omitempty"`
+	// The list of JWT
+	// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
+	// that are allowed to access. A JWT containing any of these audiences will
+	// be accepted. When this setting is absent, only JWTs with audience
+	// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
+	// will be accepted. For example, if no audiences are in the setting,
+	// LibraryService API will only accept JWTs with the following audience
+	// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
+	//
+	// Example:
+	//
+	//     audiences: bookstore_android.apps.googleusercontent.com,
+	//                bookstore_web.apps.googleusercontent.com
+	Audiences string `protobuf:"bytes,2,opt,name=audiences" json:"audiences,omitempty"`
+}
+
+func (m *AuthRequirement) Reset()                    { *m = AuthRequirement{} }
+func (m *AuthRequirement) String() string            { return proto.CompactTextString(m) }
+func (*AuthRequirement) ProtoMessage()               {}
+func (*AuthRequirement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
+
+func init() {
+	proto.RegisterType((*Authentication)(nil), "google.api.Authentication")
+	proto.RegisterType((*AuthenticationRule)(nil), "google.api.AuthenticationRule")
+	proto.RegisterType((*AuthProvider)(nil), "google.api.AuthProvider")
+	proto.RegisterType((*OAuthRequirements)(nil), "google.api.OAuthRequirements")
+	proto.RegisterType((*AuthRequirement)(nil), "google.api.AuthRequirement")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto", fileDescriptor1)
+}
+
+var fileDescriptor1 = []byte{
+	// 425 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x52, 0x4d, 0x6f, 0x13, 0x31,
+	0x10, 0x55, 0xd2, 0xa6, 0xcd, 0x4e, 0xaa, 0x14, 0x7c, 0xa8, 0x4c, 0xf9, 0xaa, 0x56, 0x1c, 0xca,
+	0x65, 0x57, 0x6a, 0x11, 0xe2, 0x04, 0x6a, 0x38, 0xa0, 0x9c, 0x08, 0x46, 0x88, 0xe3, 0xca, 0x78,
+	0xcd, 0xd6, 0xe0, 0x7a, 0x82, 0xed, 0x6d, 0x6e, 0xfc, 0x18, 0x7e, 0x19, 0x3f, 0x05, 0xaf, 0x77,
+	0x9b, 0x6c, 0x93, 0x1b, 0x97, 0x28, 0x33, 0xef, 0xcd, 0x7b, 0x7e, 0x33, 0x0b, 0xb3, 0x0a, 0xb1,
+	0xd2, 0x32, 0xab, 0x50, 0x73, 0x53, 0x65, 0x68, 0xab, 0xbc, 0x92, 0x66, 0x69, 0xd1, 0x63, 0xde,
+	0x42, 0x7c, 0xa9, 0x5c, 0x1e, 0x7e, 0x72, 0x27, 0xed, 0xad, 0x12, 0x52, 0xa0, 0xf9, 0xae, 0xaa,
+	0x9c, 0xd7, 0xfe, 0x3a, 0x8b, 0x3c, 0x02, 0x9d, 0x46, 0x20, 0x9d, 0xce, 0xff, 0x5b, 0xcf, 0x18,
+	0xf4, 0xdc, 0x2b, 0x34, 0xae, 0x95, 0x4d, 0x7f, 0xc3, 0xf4, 0x2a, 0x98, 0x48, 0xe3, 0x95, 0x88,
+	0x00, 0x79, 0x05, 0x23, 0x5b, 0x6b, 0xe9, 0xe8, 0xde, 0xd9, 0xde, 0xf9, 0xe4, 0xe2, 0x59, 0xb6,
+	0x31, 0xce, 0xee, 0x53, 0x59, 0xa0, 0xb1, 0x96, 0x4c, 0x5e, 0x43, 0x12, 0x04, 0x6f, 0x55, 0x29,
+	0xad, 0xa3, 0xfb, 0x71, 0x92, 0x6e, 0x4f, 0x2e, 0x3a, 0x02, 0xdb, 0x50, 0xd3, 0xbf, 0x03, 0x20,
+	0xbb, 0xaa, 0xe4, 0x14, 0xc6, 0x4e, 0x6a, 0x29, 0x3c, 0x5a, 0x3a, 0x38, 0x1b, 0x9c, 0x27, 0x6c,
+	0x5d, 0x93, 0x4b, 0x18, 0x61, 0xb3, 0x18, 0x3a, 0x0c, 0xc0, 0xe4, 0xe2, 0x69, 0xdf, 0xe6, 0x63,
+	0xa3, 0xc5, 0xe4, 0xaf, 0x5a, 0x59, 0x79, 0x13, 0x34, 0x1d, 0x6b, 0xb9, 0xe4, 0x0d, 0x50, 0xae,
+	0x35, 0xae, 0x8a, 0x95, 0xf2, 0xd7, 0x58, 0xfb, 0x42, 0x58, 0x59, 0x36, 0xa6, 0x5c, 0xd3, 0x51,
+	0xd0, 0x19, 0xb3, 0x93, 0x88, 0x7f, 0x6d, 0xe1, 0xf7, 0x6b, 0x94, 0xbc, 0x83, 0x23, 0xdb, 0x13,
+	0xa4, 0x87, 0x31, 0xdc, 0xe3, 0xed, 0x70, 0x3d, 0x53, 0x76, 0x6f, 0x20, 0xfd, 0x04, 0x47, 0xfd,
+	0xf4, 0x64, 0x0a, 0x43, 0x55, 0x76, 0xa9, 0xc2, 0x3f, 0x72, 0x02, 0x07, 0xca, 0xb9, 0x5a, 0xda,
+	0x18, 0x28, 0x61, 0x5d, 0x45, 0x1e, 0xc1, 0xf8, 0xc7, 0xea, 0xa7, 0x2b, 0x6a, 0xab, 0xc2, 0x2d,
+	0x1a, 0xe4, 0xb0, 0xa9, 0xbf, 0x58, 0x95, 0xbe, 0x85, 0x87, 0x3b, 0x49, 0xc9, 0x4b, 0x78, 0x20,
+	0xb8, 0x41, 0x13, 0xf6, 0xa8, 0x0b, 0x27, 0x70, 0x19, 0x6e, 0xd8, 0xba, 0x1c, 0xaf, 0xfb, 0x9f,
+	0x63, 0x3b, 0x5d, 0xc0, 0xf1, 0xd6, 0x38, 0x79, 0x0e, 0x93, 0xbb, 0xab, 0x14, 0xeb, 0xe7, 0xc1,
+	0x5d, 0x6b, 0x5e, 0x92, 0x27, 0x90, 0xf0, 0xba, 0x54, 0xd2, 0x88, 0xa0, 0xdb, 0xbe, 0x74, 0xd3,
+	0x98, 0xbd, 0x80, 0xa9, 0xc0, 0x9b, 0xde, 0x52, 0x66, 0x49, 0x17, 0xda, 0xe3, 0x62, 0xf0, 0x67,
+	0xb8, 0xff, 0xe1, 0x6a, 0x31, 0xff, 0x76, 0x10, 0x3f, 0xba, 0xcb, 0x7f, 0x01, 0x00, 0x00, 0xff,
+	0xff, 0x0d, 0x41, 0xfd, 0x7a, 0x11, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto
new file mode 100644
index 0000000..8193b36
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto
@@ -0,0 +1,164 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "AuthProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Authentication` defines the authentication configuration for an API.
+//
+// Example for an API targeted for external use:
+//
+//     name: calendar.googleapis.com
+//     authentication:
+//       rules:
+//       - selector: "*"
+//         oauth:
+//           canonical_scopes: https://www.googleapis.com/auth/calendar
+//
+//       - selector: google.calendar.Delegate
+//         oauth:
+//           canonical_scopes: https://www.googleapis.com/auth/calendar.read
+message Authentication {
+  // A list of authentication rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated AuthenticationRule rules = 3;
+
+  // Defines a set of authentication providers that a service supports.
+  repeated AuthProvider providers = 4;
+}
+
+// Authentication rules for the service.
+//
+// By default, if a method has any authentication requirements, every request
+// must include a valid credential matching one of the requirements.
+// It's an error to include more than one kind of credential in a single
+// request.
+//
+// If a method doesn't have any auth requirements, request credentials will be
+// ignored.
+message AuthenticationRule {
+  // Selects the methods to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // The requirements for OAuth credentials.
+  OAuthRequirements oauth = 2;
+
+  // Whether to allow requests without a credential. The credential can be
+  // an OAuth token, Google cookies (first-party auth) or EndUserCreds.
+  //
+  // For requests without credentials, if the service control environment is
+  // specified, each incoming request **must** be associated with a service
+  // consumer. This can be done by passing an API key that belongs to a consumer
+  // project.
+  bool allow_without_credential = 5;
+
+  // Requirements for additional authentication providers.
+  repeated AuthRequirement requirements = 7;
+}
+
+// Configuration for an authentication provider, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+message AuthProvider {
+  // The unique identifier of the auth provider. It will be referred to by
+  // `AuthRequirement.provider_id`.
+  //
+  // Example: "bookstore_auth".
+  string id = 1;
+
+  // Identifies the principal that issued the JWT. See
+  // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
+  // Usually a URL or an email address.
+  //
+  // Example: https://securetoken.google.com
+  // Example: 1234567-compute@developer.gserviceaccount.com
+  string issuer = 2;
+
+  // URL of the provider's public key set to validate signature of the JWT. See
+  // [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+  // Optional if the key set document:
+  //  - can be retrieved from
+  //    [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html)
+  //    of the issuer.
+  //  - can be inferred from the email domain of the issuer (e.g. a Google service account).
+  //
+  // Example: https://www.googleapis.com/oauth2/v1/certs
+  string jwks_uri = 3;
+}
+
+// OAuth scopes are a way to define data and permissions on data. For example,
+// there are scopes defined for "Read-only access to Google Calendar" and
+// "Access to Cloud Platform". Users can consent to a scope for an application,
+// giving it permission to access that data on their behalf.
+//
+// OAuth scope specifications should be fairly coarse grained; a user will need
+// to see and understand the text description of what your scope means.
+//
+// In most cases: use one or at most two OAuth scopes for an entire family of
+// products. If your product has multiple APIs, you should probably be sharing
+// the OAuth scope across all of those APIs.
+//
+// When you need finer grained OAuth consent screens: talk with your product
+// management about how developers will use them in practice.
+//
+// Please note that even though each of the canonical scopes is enough for a
+// request to be accepted and passed to the backend, a request can still fail
+// due to the backend requiring additional scopes or permissions.
+message OAuthRequirements {
+  // The list of publicly documented OAuth scopes that are allowed access. An
+  // OAuth token containing any of these scopes will be accepted.
+  //
+  // Example:
+  //
+  //      canonical_scopes: https://www.googleapis.com/auth/calendar,
+  //                        https://www.googleapis.com/auth/calendar.read
+  string canonical_scopes = 1;
+}
+
+// User-defined authentication requirements, including support for
+// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
+message AuthRequirement {
+  // [id][google.api.AuthProvider.id] from authentication provider.
+  //
+  // Example:
+  //
+  //     provider_id: bookstore_auth
+  string provider_id = 1;
+
+  // The list of JWT
+  // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
+  // that are allowed access. A JWT containing any of these audiences will
+  // be accepted. When this setting is absent, only JWTs with audience
+  // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
+  // will be accepted. For example, if no audiences are in the setting,
+  // the LibraryService API will only accept JWTs with the following audience:
+  // "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
+  //
+  // Example:
+  //
+  //     audiences: bookstore_android.apps.googleusercontent.com,
+  //                bookstore_web.apps.googleusercontent.com
+  string audiences = 2;
+}
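
As an aside for readers of this vendored config: a minimal sketch of how the `Authentication` message family defined above might be constructed from Go. The import alias and the protoc-gen-go field names (e.g. `JwksUri`, `CanonicalScopes`, `ProviderId`, `Oauth`) are assumptions inferred from the generated code's conventions, and the provider and issuer values are placeholders.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Accept JWTs from one assumed provider for every method, and also
	// allow calendar-scoped OAuth tokens.
	auth := &api.Authentication{
		Providers: []*api.AuthProvider{{
			Id:      "google_id_token",
			Issuer:  "https://securetoken.google.com",
			JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
		}},
		Rules: []*api.AuthenticationRule{{
			Selector: "*",
			Oauth: &api.OAuthRequirements{
				CanonicalScopes: "https://www.googleapis.com/auth/calendar",
			},
			Requirements: []*api.AuthRequirement{{
				ProviderId: "google_id_token",
			}},
		}},
	}
	fmt.Println(proto.CompactTextString(auth))
}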
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go
new file mode 100644
index 0000000..8c34328
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go
@@ -0,0 +1,79 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Backend` defines the backend configuration for a service.
+type Backend struct {
+	// A list of API backend rules that apply to individual API methods.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*BackendRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *Backend) Reset()                    { *m = Backend{} }
+func (m *Backend) String() string            { return proto.CompactTextString(m) }
+func (*Backend) ProtoMessage()               {}
+func (*Backend) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+
+func (m *Backend) GetRules() []*BackendRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// A backend rule provides configuration for an individual API element.
+type BackendRule struct {
+	// Selects the methods to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// The address of the API backend.
+	Address string `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
+	// The number of seconds to wait for a response from a request.  The
+	// default depends on the deployment context.
+	Deadline float64 `protobuf:"fixed64,3,opt,name=deadline" json:"deadline,omitempty"`
+}
+
+func (m *BackendRule) Reset()                    { *m = BackendRule{} }
+func (m *BackendRule) String() string            { return proto.CompactTextString(m) }
+func (*BackendRule) ProtoMessage()               {}
+func (*BackendRule) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
+
+func init() {
+	proto.RegisterType((*Backend)(nil), "google.api.Backend")
+	proto.RegisterType((*BackendRule)(nil), "google.api.BackendRule")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto", fileDescriptor2)
+}
+
+var fileDescriptor2 = []byte{
+	// 217 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0xcf, 0x4e, 0x03, 0x21,
+	0x10, 0xc6, 0x43, 0xab, 0x56, 0xa7, 0xc6, 0x03, 0x17, 0x89, 0x27, 0xd3, 0x8b, 0xbd, 0x08, 0x89,
+	0x5e, 0xbc, 0xba, 0x89, 0x31, 0xde, 0x36, 0xbc, 0x80, 0xa1, 0x30, 0x12, 0x22, 0x32, 0x0d, 0x54,
+	0x1f, 0xc8, 0x27, 0x95, 0xfd, 0xe3, 0x76, 0x2f, 0x24, 0x1f, 0xbf, 0x1f, 0xc3, 0x7c, 0xf0, 0xe2,
+	0x89, 0x7c, 0x44, 0xe9, 0x29, 0x9a, 0xe4, 0x25, 0x65, 0xaf, 0x3c, 0xa6, 0x7d, 0xa6, 0x03, 0xa9,
+	0x01, 0x99, 0x7d, 0x28, 0xaa, 0x1e, 0xaa, 0x60, 0xfe, 0x09, 0x16, 0x2d, 0xa5, 0x8f, 0xe0, 0xd5,
+	0xce, 0xd8, 0x4f, 0x4c, 0x4e, 0xf6, 0x2a, 0x87, 0x71, 0x4c, 0xf5, 0x36, 0x4f, 0xb0, 0x6a, 0x06,
+	0xc8, 0xef, 0xe1, 0x34, 0x7f, 0x47, 0x2c, 0x82, 0xdd, 0x2e, 0xb7, 0xeb, 0x87, 0x6b, 0x79, 0xd4,
+	0xe4, 0xe8, 0xe8, 0xca, 0xf5, 0x60, 0x6d, 0xde, 0x61, 0x3d, 0xbb, 0xe5, 0x37, 0x70, 0x5e, 0x30,
+	0xa2, 0x3d, 0x50, 0xae, 0x03, 0xd8, 0xf6, 0x42, 0x4f, 0x99, 0x0b, 0x58, 0x19, 0xe7, 0x32, 0x96,
+	0x22, 0x16, 0x3d, 0xfa, 0x8f, 0xdd, 0x2b, 0x87, 0xc6, 0xc5, 0x90, 0x50, 0x2c, 0x2b, 0x62, 0x7a,
+	0xca, 0xcd, 0x1d, 0x5c, 0x59, 0xfa, 0x9a, 0x6d, 0xd1, 0x5c, 0x8e, 0x1f, 0xb6, 0x5d, 0x8d, 0x96,
+	0xfd, 0x2e, 0x4e, 0x5e, 0x9f, 0xdb, 0xb7, 0xdd, 0x59, 0x5f, 0xeb, 0xf1, 0x2f, 0x00, 0x00, 0xff,
+	0xff, 0x1b, 0xf2, 0x31, 0x3a, 0x1f, 0x01, 0x00, 0x00,
+}
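
For orientation, a small sketch of populating the generated `Backend`/`BackendRule` types above from Go; the import alias and the example selector, address, and deadline are assumptions.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Route every method of a hypothetical library API to one backend
	// address and wait up to 30 seconds for a response.
	b := &api.Backend{
		Rules: []*api.BackendRule{{
			Selector: "google.example.library.v1.LibraryService.*",
			Address:  "library-backend.example.com",
			Deadline: 30.0,
		}},
	}
	for _, r := range b.GetRules() {
		fmt.Printf("%s -> %s (deadline %.0fs)\n", r.Selector, r.Address, r.Deadline)
	}
}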
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto
new file mode 100644
index 0000000..3c12c8c
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto
@@ -0,0 +1,46 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "BackendProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Backend` defines the backend configuration for a service.
+message Backend {
+  // A list of API backend rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated BackendRule rules = 1;
+}
+
+// A backend rule provides configuration for an individual API element.
+message BackendRule {
+  // Selects the methods to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // The address of the API backend.
+  string address = 2;
+
+  // The number of seconds to wait for a response from a request.  The
+  // default depends on the deployment context.
+  double deadline = 3;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go
new file mode 100644
index 0000000..814d839
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go
@@ -0,0 +1,131 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/metric"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Billing related configuration of the service.
+//
+// The following example shows how to configure metrics for billing:
+//
+//     metrics:
+//     - name: library.googleapis.com/read_calls
+//       metric_kind: DELTA
+//       value_type: INT64
+//     - name: library.googleapis.com/write_calls
+//       metric_kind: DELTA
+//       value_type: INT64
+//     billing:
+//       metrics:
+//       - library.googleapis.com/read_calls
+//       - library.googleapis.com/write_calls
+//
+// The next example shows how to enable billing status check and customize the
+// check behavior. It makes sure billing status check is included in the `Check`
+// method of [Service Control API](https://cloud.google.com/service-control/).
+// In the example, "google.storage.Get" method can be served when the billing
+// status is either `current` or `delinquent`, while "google.storage.Write"
+// method can only be served when the billing status is `current`:
+//
+//     billing:
+//       rules:
+//       - selector: google.storage.Get
+//         allowed_statuses:
+//         - current
+//         - delinquent
+//       - selector: google.storage.Write
+//         allowed_statuses: current
+//
+// Mostly, services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If there's no
+// matching selector for an operation, no billing status check will be performed.
+//
+type Billing struct {
+	// Names of the metrics to report to billing. Each name must
+	// be defined in [Service.metrics][google.api.Service.metrics] section.
+	Metrics []string `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
+	// A list of billing status rules for configuring billing status check.
+	Rules []*BillingStatusRule `protobuf:"bytes,5,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *Billing) Reset()                    { *m = Billing{} }
+func (m *Billing) String() string            { return proto.CompactTextString(m) }
+func (*Billing) ProtoMessage()               {}
+func (*Billing) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
+
+func (m *Billing) GetRules() []*BillingStatusRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// Defines the billing status requirements for operations.
+//
+// When used with
+// [Service Control API](https://cloud.google.com/service-control/), the
+// following statuses are supported:
+//
+// - **current**: the associated billing account is up to date and capable of
+//                paying for resource usages.
+// - **delinquent**: the associated billing account has a correctable problem,
+//                   such as late payment.
+//
+// Mostly, services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If the list of
+// allowed_statuses is empty, it means no billing requirement.
+//
+type BillingStatusRule struct {
+	// Selects the operation names to which this rule applies.
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// Allowed billing statuses. The billing status check passes if the actual
+	// billing status matches any of the provided values here.
+	AllowedStatuses []string `protobuf:"bytes,2,rep,name=allowed_statuses,json=allowedStatuses" json:"allowed_statuses,omitempty"`
+}
+
+func (m *BillingStatusRule) Reset()                    { *m = BillingStatusRule{} }
+func (m *BillingStatusRule) String() string            { return proto.CompactTextString(m) }
+func (*BillingStatusRule) ProtoMessage()               {}
+func (*BillingStatusRule) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
+
+func init() {
+	proto.RegisterType((*Billing)(nil), "google.api.Billing")
+	proto.RegisterType((*BillingStatusRule)(nil), "google.api.BillingStatusRule")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto", fileDescriptor3)
+}
+
+var fileDescriptor3 = []byte{
+	// 245 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x4f, 0xbd, 0x4b, 0x43, 0x31,
+	0x10, 0xe7, 0x29, 0xb5, 0x7a, 0x8a, 0x1f, 0x99, 0x1e, 0x0f, 0x84, 0xd2, 0x49, 0x97, 0x17, 0xb0,
+	0xb3, 0xcb, 0x03, 0x07, 0xb7, 0xf2, 0xba, 0x88, 0x8b, 0xa4, 0xf1, 0x0c, 0x81, 0x34, 0x57, 0x92,
+	0x54, 0xff, 0x7d, 0xcf, 0x24, 0x7e, 0x80, 0x8b, 0xb8, 0x24, 0xdc, 0xdd, 0xef, 0x13, 0xee, 0x0c,
+	0x91, 0x71, 0xd8, 0x1b, 0x72, 0xca, 0x9b, 0x9e, 0x82, 0x91, 0x06, 0xfd, 0x36, 0x50, 0x22, 0x59,
+	0x4e, 0x6a, 0x6b, 0xa3, 0xe4, 0x47, 0x46, 0x0c, 0xaf, 0x56, 0xa3, 0x26, 0xff, 0x62, 0x8d, 0x5c,
+	0x5b, 0xe7, 0x2c, 0x33, 0x32, 0x54, 0x40, 0x95, 0x61, 0x5c, 0x77, 0xff, 0x5f, 0x49, 0xe5, 0x3d,
+	0x25, 0x95, 0x2c, 0xf9, 0x58, 0x64, 0xbb, 0xdb, 0xbf, 0x4b, 0x6d, 0x30, 0x05, 0xab, 0xeb, 0x57,
+	0xe8, 0xf3, 0x07, 0x98, 0x0e, 0x25, 0xa6, 0x68, 0x61, 0x5a, 0x4e, 0xb1, 0x6d, 0x66, 0xfb, 0x57,
+	0x47, 0xe3, 0xe7, 0x28, 0x16, 0x30, 0x09, 0x3b, 0x87, 0xb1, 0x9d, 0xf0, 0xfe, 0xf8, 0xe6, 0xb2,
+	0xff, 0xae, 0xd2, 0x57, 0xf6, 0x8a, 0x53, 0xed, 0xe2, 0xc8, 0xa8, 0xb1, 0x60, 0xe7, 0x8f, 0x70,
+	0xf1, 0xeb, 0x26, 0x3a, 0x38, 0x8c, 0xe8, 0x50, 0x27, 0x0a, 0x6c, 0xd2, 0xb0, 0xc9, 0xd7, 0x2c,
+	0xae, 0xe1, 0x5c, 0x39, 0x47, 0x6f, 0xf8, 0xfc, 0x14, 0x33, 0x83, 0x0d, 0xf7, 0x72, 0x90, 0xb3,
+	0xba, 0x5f, 0xd5, 0xf5, 0x30, 0x83, 0x53, 0x4d, 0x9b, 0x1f, 0x31, 0x86, 0x93, 0xea, 0xb5, 0xfc,
+	0x68, 0xb5, 0x6c, 0xd6, 0x07, 0xb9, 0xde, 0xe2, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xe1, 0x19,
+	0xb1, 0xbd, 0x01, 0x00, 0x00,
+}
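
A hedged sketch of building the `Billing` configuration above in Go, mirroring the YAML example in the comment; the import alias is an assumption.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Report two metrics to billing and require an up-to-date billing
	// account for writes, while tolerating delinquent accounts for reads.
	billing := &api.Billing{
		Metrics: []string{
			"library.googleapis.com/read_calls",
			"library.googleapis.com/write_calls",
		},
		Rules: []*api.BillingStatusRule{
			{Selector: "google.storage.Get", AllowedStatuses: []string{"current", "delinquent"}},
			{Selector: "google.storage.Write", AllowedStatuses: []string{"current"}},
		},
	}
	fmt.Println(len(billing.GetRules()), "billing status rules configured")
}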
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto
new file mode 100644
index 0000000..f686720
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.proto
@@ -0,0 +1,97 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "google.golang.org/genproto/googleapis/api/metric/metric.proto"; // from google/api/metric.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "BillingProto";
+option java_package = "com.google.api";
+
+
+// Billing related configuration of the service.
+//
+// The following example shows how to configure metrics for billing:
+//
+//     metrics:
+//     - name: library.googleapis.com/read_calls
+//       metric_kind: DELTA
+//       value_type: INT64
+//     - name: library.googleapis.com/write_calls
+//       metric_kind: DELTA
+//       value_type: INT64
+//     billing:
+//       metrics:
+//       - library.googleapis.com/read_calls
+//       - library.googleapis.com/write_calls
+//
+// The next example shows how to enable billing status check and customize the
+// check behavior. It makes sure billing status check is included in the `Check`
+// method of [Service Control API](https://cloud.google.com/service-control/).
+// In the example, "google.storage.Get" method can be served when the billing
+// status is either `current` or `delinquent`, while "google.storage.Write"
+// method can only be served when the billing status is `current`:
+//
+//     billing:
+//       rules:
+//       - selector: google.storage.Get
+//         allowed_statuses:
+//         - current
+//         - delinquent
+//       - selector: google.storage.Write
+//         allowed_statuses: current
+//
+// Mostly, services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If there's no
+// matching selector for an operation, no billing status check will be performed.
+//
+message Billing {
+  // Names of the metrics to report to billing. Each name must
+  // be defined in [Service.metrics][google.api.Service.metrics] section.
+  repeated string metrics = 1;
+
+  // A list of billing status rules for configuring billing status check.
+  repeated BillingStatusRule rules = 5;
+}
+
+// Defines the billing status requirements for operations.
+//
+// When used with
+// [Service Control API](https://cloud.google.com/service-control/), the
+// following statuses are supported:
+//
+// - **current**: the associated billing account is up to date and capable of
+//                paying for resource usages.
+// - **delinquent**: the associated billing account has a correctable problem,
+//                   such as late payment.
+//
+// Mostly, services should only allow `current` status when serving requests.
+// In addition, services can choose to allow both `current` and `delinquent`
+// statuses when serving read-only requests to resources. If the list of
+// allowed_statuses is empty, it means no billing requirement.
+//
+message BillingStatusRule {
+  // Selects the operation names to which this rule applies.
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Allowed billing statuses. The billing status check passes if the actual
+  // billing status matches any of the provided values here.
+  repeated string allowed_statuses = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go
new file mode 100644
index 0000000..499371a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go
@@ -0,0 +1,139 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Supported data type of the property values
+type Property_PropertyType int32
+
+const (
+	// The type is unspecified, and will result in an error.
+	Property_UNSPECIFIED Property_PropertyType = 0
+	// The type is `int64`.
+	Property_INT64 Property_PropertyType = 1
+	// The type is `bool`.
+	Property_BOOL Property_PropertyType = 2
+	// The type is `string`.
+	Property_STRING Property_PropertyType = 3
+	// The type is 'double'.
+	Property_DOUBLE Property_PropertyType = 4
+)
+
+var Property_PropertyType_name = map[int32]string{
+	0: "UNSPECIFIED",
+	1: "INT64",
+	2: "BOOL",
+	3: "STRING",
+	4: "DOUBLE",
+}
+var Property_PropertyType_value = map[string]int32{
+	"UNSPECIFIED": 0,
+	"INT64":       1,
+	"BOOL":        2,
+	"STRING":      3,
+	"DOUBLE":      4,
+}
+
+func (x Property_PropertyType) String() string {
+	return proto.EnumName(Property_PropertyType_name, int32(x))
+}
+func (Property_PropertyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{1, 0} }
+
+// A descriptor for defining project properties for a service. One service may
+// have many consumer projects, and the service may want to behave differently
+// depending on some properties on the project. For example, a project may be
+// associated with a school, a business, or a government agency; a business
+// type property on the project may affect how a service responds to the client.
+// This descriptor defines which properties are allowed to be set on a project.
+//
+// Example:
+//
+//    project_properties:
+//      properties:
+//      - name: NO_WATERMARK
+//        type: BOOL
+//        description: Allows usage of the API without watermarks.
+//      - name: EXTENDED_TILE_CACHE_PERIOD
+//        type: INT64
+type ProjectProperties struct {
+	// List of per consumer project-specific properties.
+	Properties []*Property `protobuf:"bytes,1,rep,name=properties" json:"properties,omitempty"`
+}
+
+func (m *ProjectProperties) Reset()                    { *m = ProjectProperties{} }
+func (m *ProjectProperties) String() string            { return proto.CompactTextString(m) }
+func (*ProjectProperties) ProtoMessage()               {}
+func (*ProjectProperties) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
+
+func (m *ProjectProperties) GetProperties() []*Property {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+// Defines project properties.
+//
+// API services can define properties that can be assigned to consumer projects
+// so that backends can perform response customization without having to make
+// additional calls or maintain additional storage. For example, the Maps API
+// defines properties that control the map tile cache period, or whether to embed a
+// watermark in a result.
+//
+// These values can be set via the API producer console. Only API providers can
+// define and set these properties.
+type Property struct {
+	// The name of the property (a.k.a. key).
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The type of this property.
+	Type Property_PropertyType `protobuf:"varint,2,opt,name=type,enum=google.api.Property_PropertyType" json:"type,omitempty"`
+	// The description of the property
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+}
+
+func (m *Property) Reset()                    { *m = Property{} }
+func (m *Property) String() string            { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage()               {}
+func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
+
+func init() {
+	proto.RegisterType((*ProjectProperties)(nil), "google.api.ProjectProperties")
+	proto.RegisterType((*Property)(nil), "google.api.Property")
+	proto.RegisterEnum("google.api.Property_PropertyType", Property_PropertyType_name, Property_PropertyType_value)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto", fileDescriptor4)
+}
+
+var fileDescriptor4 = []byte{
+	// 287 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4e, 0xb3, 0x40,
+	0x14, 0x85, 0x7f, 0x0a, 0x7f, 0xd3, 0x5e, 0xb4, 0xe2, 0xc4, 0x05, 0xcb, 0x96, 0x55, 0x57, 0x90,
+	0xd4, 0xea, 0x03, 0xd0, 0x52, 0x43, 0x42, 0x80, 0x50, 0xfa, 0x00, 0x88, 0xd7, 0xc9, 0x98, 0xc2,
+	0x4c, 0x06, 0x34, 0xe9, 0x03, 0xfa, 0x5e, 0x4e, 0x11, 0x2b, 0x0b, 0x37, 0xdc, 0xc3, 0xbd, 0xdf,
+	0x39, 0x99, 0x1c, 0xd8, 0x51, 0xce, 0xe9, 0x11, 0x5d, 0xca, 0x8f, 0x45, 0x4d, 0x5d, 0x2e, 0xa9,
+	0x47, 0xb1, 0x16, 0x92, 0xb7, 0xdc, 0xfb, 0x3e, 0x15, 0x82, 0x35, 0x9e, 0xfa, 0x78, 0x0d, 0xca,
+	0x0f, 0x56, 0x62, 0xc9, 0xeb, 0x57, 0x46, 0x3d, 0x35, 0x9a, 0xf7, 0x0a, 0xa5, 0xdb, 0xb1, 0x04,
+	0xfa, 0x1c, 0x05, 0x3a, 0x21, 0xdc, 0xa6, 0x92, 0xbf, 0x61, 0xd9, 0xaa, 0x21, 0x50, 0xb6, 0x0c,
+	0x1b, 0xb2, 0x06, 0x10, 0x97, 0x3f, 0x5b, 0x9b, 0xeb, 0x4b, 0x73, 0x75, 0xe7, 0xfe, 0xba, 0xdc,
+	0x9e, 0x3d, 0x65, 0x03, 0xce, 0xf9, 0xd4, 0x60, 0xf2, 0x73, 0x20, 0x04, 0x8c, 0xba, 0xa8, 0x50,
+	0x99, 0xb5, 0xe5, 0x34, 0xeb, 0x34, 0x79, 0x00, 0xa3, 0x3d, 0x09, 0xb4, 0x47, 0x6a, 0x37, 0x5b,
+	0x2d, 0xfe, 0x0a, 0xbc, 0x88, 0x5c, 0x81, 0x59, 0x87, 0x93, 0x39, 0x98, 0x2f, 0xd8, 0x94, 0x92,
+	0x89, 0x96, 0xf1, 0xda, 0xd6, 0xbb, 0xc4, 0xe1, 0xca, 0x89, 0xe0, 0x6a, 0xe8, 0x23, 0x37, 0x60,
+	0x1e, 0xe2, 0x7d, 0x1a, 0x6c, 0xc2, 0x5d, 0x18, 0x6c, 0xad, 0x7f, 0x64, 0x0a, 0xff, 0xc3, 0x38,
+	0x7f, 0x5c, 0x5b, 0x1a, 0x99, 0x80, 0xe1, 0x27, 0x49, 0x64, 0x8d, 0x08, 0xc0, 0x78, 0x9f, 0x67,
+	0x61, 0xfc, 0x64, 0xe9, 0x67, 0xbd, 0x4d, 0x0e, 0x7e, 0x14, 0x58, 0x86, 0xbf, 0x80, 0x59, 0xc9,
+	0xab, 0xc1, 0xeb, 0xfc, 0xeb, 0x4d, 0x5f, 0x60, 0x7a, 0xee, 0x2f, 0xd5, 0x9e, 0xc7, 0x5d, 0x91,
+	0xf7, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0xba, 0x74, 0x16, 0x92, 0x01, 0x00, 0x00,
+}
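
A brief sketch of declaring the consumer project properties above from Go, reusing the `NO_WATERMARK` and `EXTENDED_TILE_CACHE_PERIOD` examples from the comment; the import alias is an assumption.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Declare the two consumer-project properties used in the example above.
	pp := &api.ProjectProperties{
		Properties: []*api.Property{
			{
				Name:        "NO_WATERMARK",
				Type:        api.Property_BOOL,
				Description: "Allows usage of the API without watermarks.",
			},
			{
				Name: "EXTENDED_TILE_CACHE_PERIOD",
				Type: api.Property_INT64,
			},
		},
	}
	for _, p := range pp.GetProperties() {
		fmt.Printf("%s (%s)\n", p.Name, p.Type)
	}
}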
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto
new file mode 100644
index 0000000..8887934
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.proto
@@ -0,0 +1,82 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "ConsumerProto";
+option java_package = "com.google.api";
+
+
+// A descriptor for defining project properties for a service. One service may
+// have many consumer projects, and the service may want to behave differently
+// depending on some properties on the project. For example, a project may be
+// associated with a school, a business, or a government agency; a business
+// type property on the project may affect how a service responds to the client.
+// This descriptor defines which properties are allowed to be set on a project.
+//
+// Example:
+//
+//    project_properties:
+//      properties:
+//      - name: NO_WATERMARK
+//        type: BOOL
+//        description: Allows usage of the API without watermarks.
+//      - name: EXTENDED_TILE_CACHE_PERIOD
+//        type: INT64
+message ProjectProperties {
+  // List of per consumer project-specific properties.
+  repeated Property properties = 1;
+}
+
+// Defines project properties.
+//
+// API services can define properties that can be assigned to consumer projects
+// so that backends can perform response customization without having to make
+// additional calls or maintain additional storage. For example, the Maps API
+// defines properties that control the map tile cache period, or whether to embed a
+// watermark in a result.
+//
+// These values can be set via the API producer console. Only API providers can
+// define and set these properties.
+message Property {
+  // Supported data type of the property values
+  enum PropertyType {
+    // The type is unspecified, and will result in an error.
+    UNSPECIFIED = 0;
+
+    // The type is `int64`.
+    INT64 = 1;
+
+    // The type is `bool`.
+    BOOL = 2;
+
+    // The type is `string`.
+    STRING = 3;
+
+    // The type is 'double'.
+    DOUBLE = 4;
+  }
+
+  // The name of the property (a.k.a. key).
+  string name = 1;
+
+  // The type of this property.
+  PropertyType type = 2;
+
+  // The description of the property
+  string description = 3;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go
new file mode 100644
index 0000000..d992555
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go
@@ -0,0 +1,95 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/context.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Context` defines which contexts an API requests.
+//
+// Example:
+//
+//     context:
+//       rules:
+//       - selector: "*"
+//         requested:
+//         - google.rpc.context.ProjectContext
+//         - google.rpc.context.OriginContext
+//
+// The above specifies that all methods in the API request
+// `google.rpc.context.ProjectContext` and
+// `google.rpc.context.OriginContext`.
+//
+// Available context types are defined in package
+// `google.rpc.context`.
+type Context struct {
+	// A list of RPC context rules that apply to individual API methods.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*ContextRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *Context) Reset()                    { *m = Context{} }
+func (m *Context) String() string            { return proto.CompactTextString(m) }
+func (*Context) ProtoMessage()               {}
+func (*Context) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} }
+
+func (m *Context) GetRules() []*ContextRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// A context rule provides information about the context for an individual API
+// element.
+type ContextRule struct {
+	// Selects the methods to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// A list of full type names of requested contexts.
+	Requested []string `protobuf:"bytes,2,rep,name=requested" json:"requested,omitempty"`
+	// A list of full type names of provided contexts.
+	Provided []string `protobuf:"bytes,3,rep,name=provided" json:"provided,omitempty"`
+}
+
+func (m *ContextRule) Reset()                    { *m = ContextRule{} }
+func (m *ContextRule) String() string            { return proto.CompactTextString(m) }
+func (*ContextRule) ProtoMessage()               {}
+func (*ContextRule) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} }
+
+func init() {
+	proto.RegisterType((*Context)(nil), "google.api.Context")
+	proto.RegisterType((*ContextRule)(nil), "google.api.ContextRule")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/context.proto", fileDescriptor5)
+}
+
+var fileDescriptor5 = []byte{
+	// 219 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0xe2, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0xd4, 0xe4, 0xfc, 0xbc, 0xb4, 0xcc, 0x74, 0x7d, 0x20,
+	0x55, 0x92, 0x5a, 0x51, 0xa2, 0x07, 0x56, 0x2a, 0xc4, 0x05, 0x35, 0x06, 0xa8, 0x4e, 0xc9, 0x82,
+	0x8b, 0xdd, 0x19, 0x22, 0x29, 0xa4, 0xcb, 0xc5, 0x5a, 0x54, 0x9a, 0x93, 0x5a, 0x2c, 0xc1, 0xa8,
+	0xc0, 0xac, 0xc1, 0x6d, 0x24, 0xae, 0x87, 0x50, 0xa6, 0x07, 0x55, 0x13, 0x04, 0x94, 0x0f, 0x82,
+	0xa8, 0x52, 0x4a, 0xe6, 0xe2, 0x46, 0x12, 0x15, 0x92, 0xe2, 0xe2, 0x28, 0x4e, 0xcd, 0x49, 0x4d,
+	0x2e, 0xc9, 0x2f, 0x02, 0x1a, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe7, 0x0b, 0xc9, 0x70, 0x71, 0x16,
+	0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0xa4, 0xa6, 0x48, 0x30, 0x01, 0x4d, 0xe7, 0x0c, 0x42, 0x08,
+	0x80, 0x74, 0x02, 0xdd, 0x55, 0x96, 0x99, 0x02, 0x94, 0x64, 0x06, 0x4b, 0xc2, 0xf9, 0x4e, 0xea,
+	0x5c, 0x7c, 0xc9, 0xf9, 0xb9, 0x48, 0x2e, 0x71, 0xe2, 0x81, 0x5a, 0x1a, 0x00, 0xf2, 0x4a, 0x00,
+	0xe3, 0x22, 0x26, 0x16, 0x77, 0xc7, 0x00, 0xcf, 0x24, 0x36, 0xb0, 0xd7, 0x8c, 0x01, 0x01, 0x00,
+	0x00, 0xff, 0xff, 0xe7, 0x43, 0x17, 0x5f, 0x23, 0x01, 0x00, 0x00,
+}
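
A minimal sketch of the `Context` configuration above expressed in Go, following the YAML example in the message comment; the import alias is an assumption.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Request project and origin context for every method, mirroring the
	// YAML example in the Context message comment.
	ctx := &api.Context{
		Rules: []*api.ContextRule{{
			Selector: "*",
			Requested: []string{
				"google.rpc.context.ProjectContext",
				"google.rpc.context.OriginContext",
			},
		}},
	}
	fmt.Println(len(ctx.GetRules()[0].Requested), "contexts requested for all methods")
}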
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.proto
new file mode 100644
index 0000000..427524f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.proto
@@ -0,0 +1,62 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "ContextProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Context` defines which contexts an API requests.
+//
+// Example:
+//
+//     context:
+//       rules:
+//       - selector: "*"
+//         requested:
+//         - google.rpc.context.ProjectContext
+//         - google.rpc.context.OriginContext
+//
+// The above specifies that all methods in the API request
+// `google.rpc.context.ProjectContext` and
+// `google.rpc.context.OriginContext`.
+//
+// Available context types are defined in package
+// `google.rpc.context`.
+message Context {
+  // A list of RPC context rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated ContextRule rules = 1;
+}
+
+// A context rule provides information about the context for an individual API
+// element.
+message ContextRule {
+  // Selects the methods to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // A list of full type names of requested contexts.
+  repeated string requested = 2;
+
+  // A list of full type names of provided contexts.
+  repeated string provided = 3;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go
new file mode 100644
index 0000000..ef9dd11
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go
@@ -0,0 +1,50 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/control.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Selects and configures the service controller used by the service.  The
+// service controller handles features like abuse, quota, billing, logging,
+// monitoring, etc.
+type Control struct {
+	// The service control environment to use. If empty, no control plane
+	// feature (like quota and billing) will be enabled.
+	Environment string `protobuf:"bytes,1,opt,name=environment" json:"environment,omitempty"`
+}
+
+func (m *Control) Reset()                    { *m = Control{} }
+func (m *Control) String() string            { return proto.CompactTextString(m) }
+func (*Control) ProtoMessage()               {}
+func (*Control) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} }
+
+func init() {
+	proto.RegisterType((*Control)(nil), "google.api.Control")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/control.proto", fileDescriptor6)
+}
+
+var fileDescriptor6 = []byte{
+	// 154 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0xe2, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0xd4, 0xe4, 0xfc, 0xbc, 0xb4, 0xcc, 0x74, 0x7d, 0x20,
+	0x55, 0x52, 0x94, 0x9f, 0xa3, 0x07, 0x56, 0x2a, 0xc4, 0x05, 0x35, 0x06, 0xa8, 0x4e, 0x49, 0x9b,
+	0x8b, 0xdd, 0x19, 0x22, 0x29, 0xa4, 0xc0, 0xc5, 0x9d, 0x9a, 0x57, 0x96, 0x59, 0x94, 0x9f, 0x97,
+	0x9b, 0x9a, 0x57, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x84, 0x2c, 0xe4, 0xa4, 0xce, 0xc5,
+	0x97, 0x9c, 0x9f, 0xab, 0x87, 0xd0, 0xee, 0xc4, 0x03, 0xd5, 0x1c, 0x00, 0x32, 0x38, 0x80, 0x71,
+	0x11, 0x13, 0x8b, 0xbb, 0x63, 0x80, 0x67, 0x12, 0x1b, 0xd8, 0x22, 0x63, 0x40, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x77, 0x67, 0xbb, 0x6f, 0xb1, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.proto
new file mode 100644
index 0000000..d99b2b0
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.proto
@@ -0,0 +1,32 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "ControlProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Selects and configures the service controller used by the service.  The
+// service controller handles features like abuse, quota, billing, logging,
+// monitoring, etc.
+message Control {
+  // The service control environment to use. If empty, no control plane
+  // feature (like quota and billing) will be enabled.
+  string environment = 1;
+}
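
A tiny sketch of setting the control environment from Go; the import alias and the example environment value are assumptions, and an empty environment disables control-plane features as noted above.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Point the service at a control environment (assumed value); leaving
	// Environment empty would disable features such as quota and billing.
	ctl := &api.Control{Environment: "servicecontrol.googleapis.com"}
	fmt.Println("control environment:", ctl.Environment)
}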
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go
new file mode 100644
index 0000000..ed5bd22
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go
@@ -0,0 +1,213 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Documentation` provides the information for describing a service.
+//
+// Example:
+// <pre><code>documentation:
+//   summary: >
+//     The Google Calendar API gives access
+//     to most calendar features.
+//   pages:
+//   - name: Overview
+//     content: &#40;== include google/foo/overview.md ==&#41;
+//   - name: Tutorial
+//     content: &#40;== include google/foo/tutorial.md ==&#41;
+//     subpages:
+//     - name: Java
+//       content: &#40;== include google/foo/tutorial_java.md ==&#41;
+//   rules:
+//   - selector: google.calendar.Calendar.Get
+//     description: >
+//       ...
+//   - selector: google.calendar.Calendar.Put
+//     description: >
+//       ...
+// </code></pre>
+// Documentation is provided in markdown syntax. In addition to
+// standard markdown features, definition lists, tables and fenced
+// code blocks are supported. Section headers can be provided and are
+// interpreted relative to the section nesting of the context where
+// a documentation fragment is embedded.
+//
+// Documentation from the IDL is merged with documentation defined
+// via the config at normalization time, where documentation provided
+// by config rules overrides documentation provided by the IDL.
+//
+// A number of constructs specific to the API platform are supported
+// in documentation text.
+//
+// In order to reference a proto element, the following
+// notation can be used:
+// <pre><code>&#91;fully.qualified.proto.name]&#91;]</code></pre>
+// To override the display text used for the link, this can be used:
+// <pre><code>&#91;display text]&#91;fully.qualified.proto.name]</code></pre>
+// Text can be excluded from doc using the following notation:
+// <pre><code>&#40;-- internal comment --&#41;</code></pre>
+// Comments can be made conditional using a visibility label. The below
+// text will be only rendered if the `BETA` label is available:
+// <pre><code>&#40;--BETA: comment for BETA users --&#41;</code></pre>
+// A few directives are available in documentation. Note that
+// directives must appear on a single line to be properly
+// identified. The `include` directive includes a markdown file from
+// an external source:
+// <pre><code>&#40;== include path/to/file ==&#41;</code></pre>
+// The `resource_for` directive marks a message to be the resource of
+// a collection in REST view. If it is not specified, tools attempt
+// to infer the resource from the operations in a collection:
+// <pre><code>&#40;== resource_for v1.shelves.books ==&#41;</code></pre>
+// The directive `suppress_warning` does not directly affect documentation
+// and is documented together with service config validation.
+type Documentation struct {
+	// A short summary of what the service does. Can only be provided as
+	// plain text.
+	Summary string `protobuf:"bytes,1,opt,name=summary" json:"summary,omitempty"`
+	// The top level pages for the documentation set.
+	Pages []*Page `protobuf:"bytes,5,rep,name=pages" json:"pages,omitempty"`
+	// A list of documentation rules that apply to individual API elements.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*DocumentationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"`
+	// The URL to the root of documentation.
+	DocumentationRootUrl string `protobuf:"bytes,4,opt,name=documentation_root_url,json=documentationRootUrl" json:"documentation_root_url,omitempty"`
+	// Declares a single overview page. For example:
+	// <pre><code>documentation:
+	//   summary: ...
+	//   overview: &#40;== include overview.md ==&#41;
+	// </code></pre>
+	// This is a shortcut for the following declaration (using pages style):
+	// <pre><code>documentation:
+	//   summary: ...
+	//   pages:
+	//   - name: Overview
+	//     content: &#40;== include overview.md ==&#41;
+	// </code></pre>
+	// Note: you cannot specify both `overview` field and `pages` field.
+	Overview string `protobuf:"bytes,2,opt,name=overview" json:"overview,omitempty"`
+}
+
+func (m *Documentation) Reset()                    { *m = Documentation{} }
+func (m *Documentation) String() string            { return proto.CompactTextString(m) }
+func (*Documentation) ProtoMessage()               {}
+func (*Documentation) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} }
+
+func (m *Documentation) GetPages() []*Page {
+	if m != nil {
+		return m.Pages
+	}
+	return nil
+}
+
+func (m *Documentation) GetRules() []*DocumentationRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// A documentation rule provides information about individual API elements.
+type DocumentationRule struct {
+	// The selector is a comma-separated list of patterns. Each pattern is a
+	// qualified name of the element which may end in "*", indicating a wildcard.
+	// Wildcards are only allowed at the end and for a whole component of the
+	// qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". To
+	// specify a default for all applicable elements, the whole pattern "*"
+	// is used.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// Description of the selected API(s).
+	Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+	// Deprecation description of the selected element(s). It can be provided if an
+	// element is marked as `deprecated`.
+	DeprecationDescription string `protobuf:"bytes,3,opt,name=deprecation_description,json=deprecationDescription" json:"deprecation_description,omitempty"`
+}
+
+func (m *DocumentationRule) Reset()                    { *m = DocumentationRule{} }
+func (m *DocumentationRule) String() string            { return proto.CompactTextString(m) }
+func (*DocumentationRule) ProtoMessage()               {}
+func (*DocumentationRule) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{1} }
+
+// Represents a documentation page. A page can contain subpages to represent
+// nested documentation set structure.
+type Page struct {
+	// The name of the page. It will be used as an identity of the page to
+	// generate URI of the page, text of the link to this page in navigation,
+	// etc. The full page name (starting from the root page name to this page,
+	// concatenated with `.`) can be used as a reference to the page in your
+	// documentation. For example:
+	// <pre><code>pages:
+	// - name: Tutorial
+	//   content: &#40;== include tutorial.md ==&#41;
+	//   subpages:
+	//   - name: Java
+	//     content: &#40;== include tutorial_java.md ==&#41;
+	// </code></pre>
+	// You can reference `Java` page using Markdown reference link syntax:
+	// `[Java][Tutorial.Java]`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The Markdown content of the page. You can use <code>&#40;== include {path} ==&#41;</code>
+	// to include content from a Markdown file.
+	Content string `protobuf:"bytes,2,opt,name=content" json:"content,omitempty"`
+	// Subpages of this page. The order of subpages specified here will be
+	// honored in the generated docset.
+	Subpages []*Page `protobuf:"bytes,3,rep,name=subpages" json:"subpages,omitempty"`
+}
+
+func (m *Page) Reset()                    { *m = Page{} }
+func (m *Page) String() string            { return proto.CompactTextString(m) }
+func (*Page) ProtoMessage()               {}
+func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{2} }
+
+func (m *Page) GetSubpages() []*Page {
+	if m != nil {
+		return m.Subpages
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Documentation)(nil), "google.api.Documentation")
+	proto.RegisterType((*DocumentationRule)(nil), "google.api.DocumentationRule")
+	proto.RegisterType((*Page)(nil), "google.api.Page")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto", fileDescriptor7)
+}
+
+var fileDescriptor7 = []byte{
+	// 342 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x4a, 0xc3, 0x40,
+	0x10, 0xc6, 0x49, 0x93, 0xfa, 0x67, 0x8a, 0xa2, 0x8b, 0xd4, 0x20, 0x08, 0xa5, 0x07, 0xe9, 0x41,
+	0x13, 0xb0, 0x82, 0x67, 0x4b, 0x41, 0xc4, 0x4b, 0x08, 0x78, 0x2e, 0xe9, 0x76, 0x5c, 0x02, 0x49,
+	0x26, 0x6c, 0x92, 0x8a, 0xaf, 0xe0, 0x63, 0xf8, 0x54, 0x3e, 0x8e, 0x9b, 0x4d, 0x9a, 0x6e, 0x10,
+	0x2f, 0x21, 0x93, 0xef, 0xb7, 0x33, 0xdf, 0x7c, 0x1b, 0x78, 0x15, 0x44, 0x22, 0x41, 0x4f, 0x50,
+	0x12, 0x65, 0xc2, 0x23, 0x29, 0x7c, 0x81, 0x59, 0x2e, 0xa9, 0x24, 0xbf, 0x91, 0xa2, 0x3c, 0x2e,
+	0x7c, 0xf5, 0xf0, 0x0b, 0x94, 0xdb, 0x98, 0x23, 0xa7, 0xec, 0x3d, 0x16, 0xfe, 0x86, 0x78, 0x95,
+	0x62, 0x56, 0x46, 0x65, 0x4c, 0x99, 0xa7, 0x0f, 0x30, 0x68, 0x9b, 0x29, 0x7a, 0xfa, 0x63, 0xc1,
+	0xc9, 0xd2, 0x64, 0x98, 0x0b, 0x87, 0x45, 0x95, 0xa6, 0x91, 0xfc, 0x74, 0xad, 0x89, 0x35, 0x3b,
+	0x0e, 0x77, 0x25, 0xbb, 0x81, 0x61, 0x1e, 0x09, 0x2c, 0xdc, 0xe1, 0xc4, 0x9e, 0x8d, 0xee, 0xcf,
+	0xbc, 0x7d, 0x1f, 0x2f, 0x50, 0x42, 0xd8, 0xc8, 0x6c, 0x0e, 0x43, 0x59, 0x25, 0x8a, 0xb3, 0x35,
+	0x77, 0x6d, 0x72, 0xbd, 0x59, 0xa1, 0xa2, 0xc2, 0x86, 0x65, 0x0f, 0x30, 0xee, 0x79, 0x5d, 0x49,
+	0xa2, 0x72, 0x55, 0xc9, 0xc4, 0x75, 0xb4, 0x8b, 0x8b, 0x9e, 0x1a, 0x2a, 0xf1, 0x4d, 0x26, 0xec,
+	0x0a, 0x8e, 0x68, 0x5b, 0x2f, 0x8c, 0x1f, 0xee, 0x40, 0x73, 0x5d, 0x3d, 0xfd, 0xb2, 0xe0, 0xfc,
+	0xcf, 0xb8, 0xfa, 0x44, 0x81, 0x09, 0xf2, 0x92, 0x64, 0xbb, 0x5f, 0x57, 0xb3, 0x09, 0x8c, 0x36,
+	0x58, 0x70, 0x19, 0xe7, 0x35, 0xde, 0x36, 0x34, 0x3f, 0xb1, 0x47, 0xb8, 0xdc, 0x60, 0x2e, 0x91,
+	0x37, 0x1e, 0x4d, 0xda, 0xd6, 0xf4, 0xd8, 0x90, 0x97, 0x7b, 0x75, 0xba, 0x06, 0xa7, 0x8e, 0x88,
+	0x31, 0x70, 0xb2, 0x28, 0xc5, 0x76, 0xb4, 0x7e, 0xaf, 0x13, 0x57, 0xb7, 0x55, 0x2a, 0x9b, 0xed,
+	0xc8, 0x5d, 0xc9, 0x6e, 0x95, 0xd9, 0x6a, 0xdd, 0x84, 0x6e, 0xff, 0x13, 0x7a, 0x47, 0x2c, 0xee,
+	0xe0, 0x94, 0x53, 0x6a, 0x00, 0x0b, 0xd6, 0xdb, 0x3f, 0xa8, 0x6f, 0x3f, 0xb0, 0xbe, 0x07, 0xce,
+	0xf3, 0x53, 0xf0, 0xb2, 0x3e, 0xd0, 0x7f, 0xc3, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x62, 0xd9,
+	0x85, 0x51, 0x5c, 0x02, 0x00, 0x00,
+}
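
Finally, a hedged sketch of assembling the `Documentation` message above from Go; the import alias, page contents, and URLs are illustrative assumptions.

package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// A tiny documentation set: one overview page with a nested tutorial,
	// plus a per-method description rule.
	doc := &api.Documentation{
		Summary: "The Example Library API gives access to a demo book catalog.",
		Pages: []*api.Page{{
			Name:    "Overview",
			Content: "(== include google/foo/overview.md ==)",
			Subpages: []*api.Page{{
				Name:    "Tutorial",
				Content: "(== include google/foo/tutorial.md ==)",
			}},
		}},
		Rules: []*api.DocumentationRule{{
			Selector:    "google.example.library.v1.LibraryService.GetBook",
			Description: "Returns a single book by its resource name.",
		}},
		DocumentationRootUrl: "https://cloud.google.com/library/docs",
	}
	fmt.Println(len(doc.GetPages()), "top-level pages,", len(doc.GetRules()), "rules")
}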
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto
new file mode 100644
index 0000000..190f8af
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto
@@ -0,0 +1,158 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "DocumentationProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Documentation` provides the information for describing a service.
+//
+// Example:
+// <pre><code>documentation:
+//   summary: >
+//     The Google Calendar API gives access
+//     to most calendar features.
+//   pages:
+//   - name: Overview
+//     content: &#40;== include google/foo/overview.md ==&#41;
+//   - name: Tutorial
+//     content: &#40;== include google/foo/tutorial.md ==&#41;
+//     subpages:
+//     - name: Java
+//       content: &#40;== include google/foo/tutorial_java.md ==&#41;
+//   rules:
+//   - selector: google.calendar.Calendar.Get
+//     description: >
+//       ...
+//   - selector: google.calendar.Calendar.Put
+//     description: >
+//       ...
+// </code></pre>
+// Documentation is provided in markdown syntax. In addition to
+// standard markdown features, definition lists, tables and fenced
+// code blocks are supported. Section headers can be provided and are
+// interpreted relative to the section nesting of the context where
+// a documentation fragment is embedded.
+//
+// Documentation from the IDL is merged with documentation defined
+// via the config at normalization time, where documentation provided
+// by config rules overrides documentation provided by the IDL.
+//
+// A number of constructs specific to the API platform are supported
+// in documentation text.
+//
+// In order to reference a proto element, the following
+// notation can be used:
+// <pre><code>&#91;fully.qualified.proto.name]&#91;]</code></pre>
+// To override the display text used for the link, this can be used:
+// <pre><code>&#91;display text]&#91;fully.qualified.proto.name]</code></pre>
+// Text can be excluded from doc using the following notation:
+// <pre><code>&#40;-- internal comment --&#41;</code></pre>
+// Comments can be made conditional using a visibility label. The text
+// below will be rendered only if the `BETA` label is available:
+// <pre><code>&#40;--BETA: comment for BETA users --&#41;</code></pre>
+// A few directives are available in documentation. Note that
+// directives must appear on a single line to be properly
+// identified. The `include` directive includes a markdown file from
+// an external source:
+// <pre><code>&#40;== include path/to/file ==&#41;</code></pre>
+// The `resource_for` directive marks a message to be the resource of
+// a collection in REST view. If it is not specified, tools attempt
+// to infer the resource from the operations in a collection:
+// <pre><code>&#40;== resource_for v1.shelves.books ==&#41;</code></pre>
+// The directive `suppress_warning` does not directly affect documentation
+// and is documented together with service config validation.
+message Documentation {
+  // A short summary of what the service does. It can only be provided in
+  // plain text.
+  string summary = 1;
+
+  // The top level pages for the documentation set.
+  repeated Page pages = 5;
+
+  // A list of documentation rules that apply to individual API elements.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated DocumentationRule rules = 3;
+
+  // The URL to the root of documentation.
+  string documentation_root_url = 4;
+
+  // Declares a single overview page. For example:
+  // <pre><code>documentation:
+  //   summary: ...
+  //   overview: &#40;== include overview.md ==&#41;
+  // </code></pre>
+  // This is a shortcut for the following declaration (using pages style):
+  // <pre><code>documentation:
+  //   summary: ...
+  //   pages:
+  //   - name: Overview
+  //     content: &#40;== include overview.md ==&#41;
+  // </code></pre>
+  // Note: you cannot specify both `overview` field and `pages` field.
+  string overview = 2;
+}
+
+// A documentation rule provides information about individual API elements.
+message DocumentationRule {
+  // The selector is a comma-separated list of patterns. Each pattern is a
+  // qualified name of the element which may end in "*", indicating a wildcard.
+  // Wildcards are only allowed at the end and for a whole component of the
+  // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". To
+  // specify a default for all applicable elements, the whole pattern "*"
+  // is used.
+  string selector = 1;
+
+  // Description of the selected API(s).
+  string description = 2;
+
+  // Deprecation description of the selected element(s). It can be provided if an
+  // element is marked as `deprecated`.
+  string deprecation_description = 3;
+}
+
+// Represents a documentation page. A page can contain subpages to represent
+// nested documentation set structure.
+message Page {
+  // The name of the page. It will be used as an identity of the page to
+  // generate the URI of the page, the text of the link to this page in
+  // navigation, etc. The full page name (starting from the root page name
+  // to this page, concatenated with `.`) can be used as a reference to the
+  // page in your documentation. For example:
+  // <pre><code>pages:
+  // - name: Tutorial
+  //   content: &#40;== include tutorial.md ==&#41;
+  //   subpages:
+  //   - name: Java
+  //     content: &#40;== include tutorial_java.md ==&#41;
+  // </code></pre>
+  // You can reference `Java` page using Markdown reference link syntax:
+  // `[Java][Tutorial.Java]`.
+  string name = 1;
+
+  // The Markdown content of the page. You can use <code>&#40;== include {path} ==&#41;</code>
+  // to include content from a Markdown file.
+  string content = 2;
+
+  // Subpages of this page. The order of subpages specified here will be
+  // honored in the generated docset.
+  repeated Page subpages = 3;
+}
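
Not part of the vendored change: a minimal sketch of how the generated Go types for `Documentation`, `Page`, and `DocumentationRule` might be constructed. The Go field names (`Summary`, `Pages`, `Rules`, `Name`, `Content`, `Selector`, `Description`) are assumed from the usual protoc-gen-go naming of the fields declared in documentation.proto above.

```go
package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Mirrors the YAML example from the comments in documentation.proto.
	doc := &api.Documentation{
		Summary: "The Google Calendar API gives access to most calendar features.",
		Pages: []*api.Page{
			{Name: "Overview", Content: "(== include google/foo/overview.md ==)"},
		},
		Rules: []*api.DocumentationRule{
			{Selector: "google.calendar.Calendar.Get", Description: "..."},
		},
	}
	fmt.Println(doc.String()) // proto text form, via the generated String method
}
```
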
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go
new file mode 100644
index 0000000..61b6b7f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go
@@ -0,0 +1,106 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Endpoint` describes a network endpoint that serves a set of APIs.
+// A service may expose any number of endpoints, and all endpoints share the
+// same service configuration, such as quota configuration and monitoring
+// configuration.
+//
+// Example service configuration:
+//
+//     name: library-example.googleapis.com
+//     endpoints:
+//       # Below entry makes 'google.example.library.v1.Library'
+//       # API be served from endpoint address library-example.googleapis.com.
+//       # It also allows HTTP OPTIONS calls to be passed to the backend, for
+//       # it to decide whether the subsequent cross-origin request is
+//       # allowed to proceed.
+//     - name: library-example.googleapis.com
+//       apis: google.example.library.v1.Library
+//       allow_cors: true
+//       # Below entry makes 'google.example.library.v1.Library'
+//       # API be served from endpoint address
+//       # google.example.library-example.v1.LibraryManager.
+//     - name: library-manager.googleapis.com
+//       apis: google.example.library.v1.LibraryManager
+//       # BNS address for a borg job. Can specify a task by appending
+//       # "/taskId" (e.g. "/0") to the job spec.
+//
+// Example OpenAPI extension for endpoint with allow_cors set to true:
+//
+//     {
+//       "swagger": "2.0",
+//       "info": {
+//         "description": "A simple..."
+//       },
+//       "host": "MY_PROJECT_ID.appspot.com",
+//       "x-google-endpoints": [{
+//         "name": "MY_PROJECT_ID.appspot.com",
+//         "allow_cors": "true"
+//       }]
+//     }
+type Endpoint struct {
+	// The canonical name of this endpoint.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// DEPRECATED: This field is no longer supported. Instead of using aliases,
+	// please specify multiple [google.api.Endpoint][google.api.Endpoint] for each of the
+	// intended aliases.
+	//
+	// Additional names that this endpoint will be hosted on.
+	Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"`
+	// The list of APIs served by this endpoint.
+	Apis []string `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"`
+	// The list of features enabled on this endpoint.
+	Features []string `protobuf:"bytes,4,rep,name=features" json:"features,omitempty"`
+	// Allowing
+	// [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka
+	// cross-domain traffic, would allow the backends served from this endpoint to
+	// receive and respond to HTTP OPTIONS requests. The response will be used by
+	// the browser to determine whether the subsequent cross-origin request is
+	// allowed to proceed.
+	AllowCors bool `protobuf:"varint,5,opt,name=allow_cors,json=allowCors" json:"allow_cors,omitempty"`
+}
+
+func (m *Endpoint) Reset()                    { *m = Endpoint{} }
+func (m *Endpoint) String() string            { return proto.CompactTextString(m) }
+func (*Endpoint) ProtoMessage()               {}
+func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} }
+
+func init() {
+	proto.RegisterType((*Endpoint)(nil), "google.api.Endpoint")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto", fileDescriptor8)
+}
+
+var fileDescriptor8 = []byte{
+	// 234 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x8f, 0xc1, 0x4a, 0x03, 0x31,
+	0x10, 0x86, 0xd9, 0x76, 0xd5, 0xed, 0x80, 0x1e, 0x72, 0x0a, 0x05, 0xa1, 0x78, 0xea, 0x69, 0x73,
+	0xf0, 0x09, 0xac, 0xa8, 0xf4, 0xb6, 0xf4, 0x05, 0x64, 0x5c, 0xa7, 0x21, 0x90, 0xce, 0x2c, 0x49,
+	0xd4, 0x47, 0xf0, 0x3d, 0x7c, 0x52, 0xb3, 0x69, 0x57, 0xef, 0xbd, 0x84, 0x99, 0x2f, 0x3f, 0xff,
+	0xff, 0x0f, 0x3c, 0x5b, 0x11, 0xeb, 0xa9, 0xb5, 0xe2, 0x91, 0x6d, 0x2b, 0xc1, 0x1a, 0x4b, 0x3c,
+	0x04, 0x49, 0x62, 0x8e, 0x5f, 0x38, 0xb8, 0x68, 0xf2, 0x63, 0x22, 0x85, 0x4f, 0xd7, 0x53, 0x2f,
+	0xbc, 0x77, 0xd6, 0x10, 0xbf, 0x0f, 0xe2, 0x38, 0xb5, 0x45, 0xab, 0xe0, 0xe4, 0x93, 0x85, 0xcb,
+	0xed, 0xb9, 0x9e, 0xc8, 0x2c, 0x09, 0x93, 0x13, 0x8e, 0x47, 0xdb, 0xbb, 0xef, 0x0a, 0x9a, 0xa7,
+	0x53, 0x92, 0x52, 0x50, 0x33, 0x1e, 0x48, 0x57, 0xab, 0x6a, 0xbd, 0xd8, 0x95, 0x59, 0x69, 0xb8,
+	0x42, 0xef, 0x30, 0x52, 0xd4, 0xb3, 0xd5, 0x3c, 0xe3, 0x69, 0x1d, 0xd5, 0x63, 0x8c, 0x9e, 0x17,
+	0x5c, 0x66, 0xb5, 0x84, 0x66, 0x4f, 0x98, 0x3e, 0x42, 0x96, 0xd7, 0x85, 0xff, 0xed, 0xea, 0x16,
+	0x00, 0xbd, 0x97, 0xaf, 0xd7, 0x5e, 0x42, 0xd4, 0x17, 0x39, 0xa3, 0xd9, 0x2d, 0x0a, 0x79, 0xcc,
+	0x60, 0xb3, 0x86, 0x9b, 0x5e, 0x0e, 0xed, 0xff, 0x99, 0x9b, 0xeb, 0xa9, 0x58, 0x37, 0x56, 0xed,
+	0xaa, 0x9f, 0x59, 0xfd, 0xf2, 0xd0, 0x6d, 0xdf, 0x2e, 0x4b, 0xf5, 0xfb, 0xdf, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x2f, 0xf3, 0xbc, 0x78, 0x5b, 0x01, 0x00, 0x00,
+}
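
Not part of the vendored change: a short sketch constructing the generated `Endpoint` type above, using the field names visible in the struct definition (`Name`, `Apis`, `AllowCors`); the import path assumes the vendored package location.

```go
package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// One endpoint serving a single API, with cross-origin requests allowed,
	// mirroring the service-configuration example in the comment.
	ep := &api.Endpoint{
		Name:      "library-example.googleapis.com",
		Apis:      []string{"google.example.library.v1.Library"},
		AllowCors: true,
	}
	fmt.Println(ep.String())
}
```
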
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto
new file mode 100644
index 0000000..05da2f8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto
@@ -0,0 +1,89 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "EndpointProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Endpoint` describes a network endpoint that serves a set of APIs.
+// A service may expose any number of endpoints, and all endpoints share the
+// same service configuration, such as quota configuration and monitoring
+// configuration.
+//
+// Example service configuration:
+//
+//     name: library-example.googleapis.com
+//     endpoints:
+//       # Below entry makes 'google.example.library.v1.Library'
+//       # API be served from endpoint address library-example.googleapis.com.
+//       # It also allows HTTP OPTIONS calls to be passed to the backend, for
+//       # it to decide whether the subsequent cross-origin request is
+//       # allowed to proceed.
+//     - name: library-example.googleapis.com
+//       apis: google.example.library.v1.Library
+//       allow_cors: true
+//       # Below entry makes 'google.example.library.v1.Library'
+//       # API be served from endpoint address
+//       # google.example.library-example.v1.LibraryManager.
+//     - name: library-manager.googleapis.com
+//       apis: google.example.library.v1.LibraryManager
+//       # BNS address for a borg job. Can specify a task by appending
+//       # "/taskId" (e.g. "/0") to the job spec.
+//
+// Example OpenAPI extension for endpoint with allow_cors set to true:
+//
+//     {
+//       "swagger": "2.0",
+//       "info": {
+//         "description": "A simple..."
+//       },
+//       "host": "MY_PROJECT_ID.appspot.com",
+//       "x-google-endpoints": [{
+//         "name": "MY_PROJECT_ID.appspot.com",
+//         "allow_cors": "true"
+//       }]
+//     }
+message Endpoint {
+  // The canonical name of this endpoint.
+  string name = 1;
+
+  // DEPRECATED: This field is no longer supported. Instead of using aliases,
+  // please specify multiple [google.api.Endpoint][google.api.Endpoint] for each of the
+  // intended aliases.
+  //
+  // Additional names that this endpoint will be hosted on.
+  repeated string aliases = 2;
+
+  // The list of APIs served by this endpoint.
+  repeated string apis = 3;
+
+  // The list of features enabled on this endpoint.
+  repeated string features = 4;
+
+  // Allowing
+  // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka
+  // cross-domain traffic, would allow the backends served from this endpoint to
+  // receive and respond to HTTP OPTIONS requests. The response will be used by
+  // the browser to determine whether the subsequent cross-origin request is
+  // allowed to proceed.
+  bool allow_cors = 5;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.pb.go
new file mode 100644
index 0000000..6592d5f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.pb.go
@@ -0,0 +1,535 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/http.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Defines the HTTP configuration for a service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+type Http struct {
+	// A list of HTTP configuration rules that apply to individual API methods.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *Http) Reset()                    { *m = Http{} }
+func (m *Http) String() string            { return proto.CompactTextString(m) }
+func (*Http) ProtoMessage()               {}
+func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} }
+
+func (m *Http) GetRules() []*HttpRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST APIs.  The mapping determines what portions of the request
+// message are populated from the path, query parameters, or body of
+// the HTTP request.  The mapping is typically specified as an
+// `google.api.http` annotation, see "google/api/annotations.proto"
+// for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind.  The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+// ```proto
+// service Messaging {
+//   rpc GetMessage(GetMessageRequest) returns (Message) {
+//     option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+//   }
+// }
+// message GetMessageRequest {
+//   message SubMessage {
+//     string subfield = 1;
+//   }
+//   string message_id = 1; // mapped to the URL
+//   SubMessage sub = 2;    // `sub.subfield` is url-mapped
+// }
+// message Message {
+//   string text = 1; // content of the resource
+// }
+// ```
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo`  | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+// ```proto
+// message GetMessageRequest {
+//   message SubMessage {
+//     string subfield = 1;
+//   }
+//   string message_id = 1; // mapped to the URL
+//   int64 revision = 2;    // becomes a parameter
+//   SubMessage sub = 3;    // `sub.subfield` becomes a parameter
+// }
+// ```
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+// ```proto
+// service Messaging {
+//   rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//     option (google.api.http) = {
+//       put: "/v1/messages/{message_id}"
+//       body: "message"
+//     };
+//   }
+// }
+// message UpdateMessageRequest {
+//   string message_id = 1; // mapped to the URL
+//   Message message = 2;   // mapped to the body
+// }
+// ```
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body.  This enables the following alternative definition of
+// the update method:
+//
+// ```proto
+// service Messaging {
+//   rpc UpdateMessage(Message) returns (Message) {
+//     option (google.api.http) = {
+//       put: "/v1/messages/{message_id}"
+//       body: "*"
+//     };
+//   }
+// }
+// message Message {
+//   string message_id = 1;
+//   string text = 2;
+// }
+// ```
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// that don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// ```proto
+// service Messaging {
+//   rpc GetMessage(GetMessageRequest) returns (Message) {
+//     option (google.api.http) = {
+//       get: "/v1/messages/{message_id}"
+//       additional_bindings {
+//         get: "/v1/users/{user_id}/messages/{message_id}"
+//       }
+//     };
+//   }
+// }
+// message GetMessageRequest {
+//   string message_id = 1;
+//   string user_id = 2;
+// }
+// ```
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it assumes there is no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. It follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion.
+//
+// The syntax `**` matches zero or more path segments. It follows the semantics
+// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
+// Expansion.
+//
+// The syntax `LITERAL` matches literal text in the URL path.
+//
+// The syntax `Variable` matches the entire path as specified by its template;
+// this nested template must not contain further variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+type HttpRule struct {
+	// Selects methods to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// Determines the URL pattern that is matched by this rule. This pattern can be
+	// used with any of the {get|put|post|delete|patch} methods. A custom method
+	// can be defined using the 'custom' field.
+	//
+	// Types that are valid to be assigned to Pattern:
+	//	*HttpRule_Get
+	//	*HttpRule_Put
+	//	*HttpRule_Post
+	//	*HttpRule_Delete
+	//	*HttpRule_Patch
+	//	*HttpRule_Custom
+	Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
+	// The name of the request field whose value is mapped to the HTTP body, or
+	// `*` for mapping all fields not captured by the path pattern to the HTTP
+	// body. NOTE: the referred field must not be a repeated field and must be
+	// present at the top-level of the request message type.
+	Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
+	// Additional HTTP bindings for the selector. Nested bindings must
+	// not contain an `additional_bindings` field themselves (that is,
+	// the nesting may only be one level deep).
+	AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
+}
+
+func (m *HttpRule) Reset()                    { *m = HttpRule{} }
+func (m *HttpRule) String() string            { return proto.CompactTextString(m) }
+func (*HttpRule) ProtoMessage()               {}
+func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} }
+
+type isHttpRule_Pattern interface {
+	isHttpRule_Pattern()
+}
+
+type HttpRule_Get struct {
+	Get string `protobuf:"bytes,2,opt,name=get,oneof"`
+}
+type HttpRule_Put struct {
+	Put string `protobuf:"bytes,3,opt,name=put,oneof"`
+}
+type HttpRule_Post struct {
+	Post string `protobuf:"bytes,4,opt,name=post,oneof"`
+}
+type HttpRule_Delete struct {
+	Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
+}
+type HttpRule_Patch struct {
+	Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
+}
+type HttpRule_Custom struct {
+	Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
+}
+
+func (*HttpRule_Get) isHttpRule_Pattern()    {}
+func (*HttpRule_Put) isHttpRule_Pattern()    {}
+func (*HttpRule_Post) isHttpRule_Pattern()   {}
+func (*HttpRule_Delete) isHttpRule_Pattern() {}
+func (*HttpRule_Patch) isHttpRule_Pattern()  {}
+func (*HttpRule_Custom) isHttpRule_Pattern() {}
+
+func (m *HttpRule) GetPattern() isHttpRule_Pattern {
+	if m != nil {
+		return m.Pattern
+	}
+	return nil
+}
+
+func (m *HttpRule) GetGet() string {
+	if x, ok := m.GetPattern().(*HttpRule_Get); ok {
+		return x.Get
+	}
+	return ""
+}
+
+func (m *HttpRule) GetPut() string {
+	if x, ok := m.GetPattern().(*HttpRule_Put); ok {
+		return x.Put
+	}
+	return ""
+}
+
+func (m *HttpRule) GetPost() string {
+	if x, ok := m.GetPattern().(*HttpRule_Post); ok {
+		return x.Post
+	}
+	return ""
+}
+
+func (m *HttpRule) GetDelete() string {
+	if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
+		return x.Delete
+	}
+	return ""
+}
+
+func (m *HttpRule) GetPatch() string {
+	if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
+		return x.Patch
+	}
+	return ""
+}
+
+func (m *HttpRule) GetCustom() *CustomHttpPattern {
+	if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
+		return x.Custom
+	}
+	return nil
+}
+
+func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
+	if m != nil {
+		return m.AdditionalBindings
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
+		(*HttpRule_Get)(nil),
+		(*HttpRule_Put)(nil),
+		(*HttpRule_Post)(nil),
+		(*HttpRule_Delete)(nil),
+		(*HttpRule_Patch)(nil),
+		(*HttpRule_Custom)(nil),
+	}
+}
+
+func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*HttpRule)
+	// pattern
+	switch x := m.Pattern.(type) {
+	case *HttpRule_Get:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Get)
+	case *HttpRule_Put:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Put)
+	case *HttpRule_Post:
+		b.EncodeVarint(4<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Post)
+	case *HttpRule_Delete:
+		b.EncodeVarint(5<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Delete)
+	case *HttpRule_Patch:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Patch)
+	case *HttpRule_Custom:
+		b.EncodeVarint(8<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Custom); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*HttpRule)
+	switch tag {
+	case 2: // pattern.get
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Pattern = &HttpRule_Get{x}
+		return true, err
+	case 3: // pattern.put
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Pattern = &HttpRule_Put{x}
+		return true, err
+	case 4: // pattern.post
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Pattern = &HttpRule_Post{x}
+		return true, err
+	case 5: // pattern.delete
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Pattern = &HttpRule_Delete{x}
+		return true, err
+	case 6: // pattern.patch
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Pattern = &HttpRule_Patch{x}
+		return true, err
+	case 8: // pattern.custom
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(CustomHttpPattern)
+		err := b.DecodeMessage(msg)
+		m.Pattern = &HttpRule_Custom{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _HttpRule_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*HttpRule)
+	// pattern
+	switch x := m.Pattern.(type) {
+	case *HttpRule_Get:
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Get)))
+		n += len(x.Get)
+	case *HttpRule_Put:
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Put)))
+		n += len(x.Put)
+	case *HttpRule_Post:
+		n += proto.SizeVarint(4<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Post)))
+		n += len(x.Post)
+	case *HttpRule_Delete:
+		n += proto.SizeVarint(5<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Delete)))
+		n += len(x.Delete)
+	case *HttpRule_Patch:
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Patch)))
+		n += len(x.Patch)
+	case *HttpRule_Custom:
+		s := proto.Size(x.Custom)
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+type CustomHttpPattern struct {
+	// The name of this custom HTTP verb.
+	Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
+	// The path matched by this custom verb.
+	Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
+}
+
+func (m *CustomHttpPattern) Reset()                    { *m = CustomHttpPattern{} }
+func (m *CustomHttpPattern) String() string            { return proto.CompactTextString(m) }
+func (*CustomHttpPattern) ProtoMessage()               {}
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} }
+
+func init() {
+	proto.RegisterType((*Http)(nil), "google.api.Http")
+	proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
+	proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/http.proto", fileDescriptor9)
+}
+
+var fileDescriptor9 = []byte{
+	// 348 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x51, 0xcd, 0x4a, 0x2b, 0x31,
+	0x14, 0xbe, 0xd3, 0x4e, 0xa7, 0xed, 0x29, 0x5c, 0xb8, 0xb9, 0x45, 0x82, 0x20, 0x48, 0x37, 0x16,
+	0x17, 0x33, 0x50, 0x17, 0x2e, 0x5c, 0x39, 0x22, 0xd6, 0x5d, 0x99, 0x17, 0x90, 0xe9, 0x4c, 0x4c,
+	0x83, 0xd3, 0x24, 0x4c, 0x4e, 0x05, 0x5f, 0xc7, 0x77, 0xf0, 0xdd, 0x5c, 0x9a, 0x64, 0x52, 0x5b,
+	0x10, 0xdc, 0x84, 0xf3, 0xfd, 0x9c, 0x9f, 0x9c, 0x03, 0x39, 0x57, 0x8a, 0x37, 0x2c, 0xe5, 0xaa,
+	0x29, 0x25, 0x4f, 0x55, 0xcb, 0x33, 0xce, 0xa4, 0x6e, 0x15, 0xaa, 0xac, 0x93, 0x4a, 0x2d, 0x4c,
+	0x66, 0x9f, 0xcc, 0xb0, 0xf6, 0x55, 0x54, 0xac, 0x52, 0xf2, 0x59, 0xf0, 0x6c, 0x83, 0xa8, 0x53,
+	0xef, 0x23, 0x10, 0x6a, 0x58, 0xd3, 0x6c, 0x01, 0xf1, 0xd2, 0x2a, 0xe4, 0x12, 0x06, 0xed, 0xae,
+	0x61, 0x86, 0x46, 0xe7, 0xfd, 0xf9, 0x64, 0x31, 0x4d, 0x0f, 0x9e, 0xd4, 0x19, 0x0a, 0x2b, 0x16,
+	0x9d, 0x65, 0xf6, 0xd1, 0x83, 0xd1, 0x9e, 0x23, 0xa7, 0x30, 0x32, 0xac, 0x61, 0x15, 0xaa, 0xd6,
+	0xe6, 0x46, 0xf3, 0x71, 0xf1, 0x8d, 0x09, 0x81, 0x3e, 0x67, 0x48, 0x7b, 0x8e, 0x5e, 0xfe, 0x29,
+	0x1c, 0x70, 0x9c, 0xde, 0x21, 0xed, 0xef, 0x39, 0x0b, 0xc8, 0x14, 0x62, 0xad, 0x0c, 0xd2, 0x38,
+	0x90, 0x1e, 0x11, 0x0a, 0x49, 0x6d, 0x2b, 0x21, 0xa3, 0x83, 0xc0, 0x07, 0x4c, 0x4e, 0x60, 0xa0,
+	0x4b, 0xac, 0x36, 0x34, 0x09, 0x42, 0x07, 0xc9, 0x35, 0x24, 0xd5, 0xce, 0xa0, 0xda, 0xd2, 0x91,
+	0x15, 0x26, 0x8b, 0xb3, 0xe3, 0x5f, 0xdc, 0x79, 0xc5, 0xcd, 0xbd, 0x2a, 0x11, 0x59, 0x2b, 0x5d,
+	0xc1, 0xce, 0x6e, 0x87, 0x8a, 0xd7, 0xaa, 0x7e, 0xa3, 0x43, 0xff, 0x01, 0x1f, 0x93, 0x7b, 0xf8,
+	0x5f, 0xd6, 0xb5, 0x40, 0xa1, 0x64, 0xd9, 0x3c, 0xad, 0x85, 0xac, 0x85, 0xe4, 0x86, 0x4e, 0x7e,
+	0xd9, 0x0f, 0x39, 0x24, 0xe4, 0xc1, 0x9f, 0x8f, 0x61, 0xa8, 0xbb, 0x7e, 0xb3, 0x1b, 0xf8, 0xf7,
+	0x63, 0x08, 0xd7, 0xfa, 0xc5, 0x7a, 0xc3, 0xee, 0x7c, 0xec, 0x38, 0x9b, 0xb3, 0xe9, 0x16, 0x57,
+	0xf8, 0x38, 0xbf, 0x80, 0xbf, 0x95, 0xda, 0x1e, 0xb5, 0xcd, 0xc7, 0xbe, 0x8c, 0xbb, 0xe8, 0x2a,
+	0xfa, 0x8c, 0xa2, 0xf7, 0x5e, 0xfc, 0x70, 0xbb, 0x7a, 0x5c, 0x27, 0xfe, 0xc8, 0x57, 0x5f, 0x01,
+	0x00, 0x00, 0xff, 0xff, 0x32, 0x48, 0x5c, 0x87, 0x2a, 0x02, 0x00, 0x00,
+}
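
Not part of the vendored change: a sketch of how the `HttpRule` oneof generated above is populated and read. The `Pattern` field holds one of the wrapper structs (`HttpRule_Get`, `HttpRule_Put`, ...), and the `GetGet`-style accessors unwrap it, returning the zero value when a different variant is set.

```go
package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Map an RPC to HTTP GET on a templated path, as in the comment examples.
	rule := &api.HttpRule{
		Selector: "google.example.Messaging.GetMessage",
		Pattern:  &api.HttpRule_Get{Get: "/v1/messages/{message_id}"},
	}

	fmt.Println(rule.GetGet()) // "/v1/messages/{message_id}"
	fmt.Println(rule.GetPut()) // "" — a different oneof variant is set
}
```
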
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.proto
new file mode 100644
index 0000000..ad9406d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/http.proto
@@ -0,0 +1,285 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for a service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+  // A list of HTTP configuration rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated HttpRule rules = 1;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST APIs.  The mapping determines what portions of the request
+// message are populated from the path, query parameters, or body of
+// the HTTP request.  The mapping is typically specified as an
+// `google.api.http` annotation, see "google/api/annotations.proto"
+// for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind.  The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+// ```proto
+// service Messaging {
+//   rpc GetMessage(GetMessageRequest) returns (Message) {
+//     option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+//   }
+// }
+// message GetMessageRequest {
+//   message SubMessage {
+//     string subfield = 1;
+//   }
+//   string message_id = 1; // mapped to the URL
+//   SubMessage sub = 2;    // `sub.subfield` is url-mapped
+// }
+// message Message {
+//   string text = 1; // content of the resource
+// }
+// ```
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo`  | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+// ```proto
+// message GetMessageRequest {
+//   message SubMessage {
+//     string subfield = 1;
+//   }
+//   string message_id = 1; // mapped to the URL
+//   int64 revision = 2;    // becomes a parameter
+//   SubMessage sub = 3;    // `sub.subfield` becomes a parameter
+// }
+// ```
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+// ```proto
+// service Messaging {
+//   rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//     option (google.api.http) = {
+//       put: "/v1/messages/{message_id}"
+//       body: "message"
+//     };
+//   }
+// }
+// message UpdateMessageRequest {
+//   string message_id = 1; // mapped to the URL
+//   Message message = 2;   // mapped to the body
+// }
+// ```
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body.  This enables the following alternative definition of
+// the update method:
+//
+// ```proto
+// service Messaging {
+//   rpc UpdateMessage(Message) returns (Message) {
+//     option (google.api.http) = {
+//       put: "/v1/messages/{message_id}"
+//       body: "*"
+//     };
+//   }
+// }
+// message Message {
+//   string message_id = 1;
+//   string text = 2;
+// }
+// ```
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// that don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// ```proto
+// service Messaging {
+//   rpc GetMessage(GetMessageRequest) returns (Message) {
+//     option (google.api.http) = {
+//       get: "/v1/messages/{message_id}"
+//       additional_bindings {
+//         get: "/v1/users/{user_id}/messages/{message_id}"
+//       }
+//     };
+//   }
+// }
+// message GetMessageRequest {
+//   string message_id = 1;
+//   string user_id = 2;
+// }
+// ```
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it assumes there is no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. It follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion.
+//
+// The syntax `**` matches zero or more path segments. It follows the semantics
+// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
+// Expansion.
+//
+// The syntax `LITERAL` matches literal text in the URL path.
+//
+// The syntax `Variable` matches the entire path as specified by its template;
+// this nested template must not contain further variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+message HttpRule {
+  // Selects methods to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Determines the URL pattern that is matched by this rule. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Used for listing and getting information about resources.
+    string get = 2;
+
+    // Used for updating a resource.
+    string put = 3;
+
+    // Used for creating a resource.
+    string post = 4;
+
+    // Used for deleting a resource.
+    string delete = 5;
+
+    // Used for updating a resource.
+    string patch = 6;
+
+    // Custom pattern is used for defining custom verbs.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP body, or
+  // `*` for mapping all fields not captured by the path pattern to the HTTP
+  // body. NOTE: the referred field must not be a repeated field and must be
+  // present at the top-level of the request message type.
+  string body = 7;
+
+  // Additional HTTP bindings for the selector. Nested bindings must
+  // not contain an `additional_bindings` field themselves (that is,
+  // the nesting may only be one level deep).
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
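
Not part of the vendored change: a sketch of the `additional_bindings` and custom-verb cases described above, using the generated Go types from http.pb.go. Nested bindings stay one level deep, and `CustomHttpPattern` carries the verb name and path.

```go
package main

import (
	"fmt"

	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// One RPC exposed on two URLs via an additional binding.
	rule := &api.HttpRule{
		Selector: "google.example.Messaging.GetMessage",
		Pattern:  &api.HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*api.HttpRule{
			{Pattern: &api.HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"}},
		},
	}

	// A custom HTTP verb (HEAD here) expressed with CustomHttpPattern.
	head := &api.HttpRule{
		Selector: "google.example.Messaging.GetMessage",
		Pattern: &api.HttpRule_Custom{
			Custom: &api.CustomHttpPattern{Kind: "HEAD", Path: "/v1/messages/{message_id}"},
		},
	}

	fmt.Println(rule.GetGet(), head.GetCustom().Kind)
}
```
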
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go
new file mode 100644
index 0000000..f55a369
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go
@@ -0,0 +1,80 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/log.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_api1 "google.golang.org/genproto/googleapis/api/label"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// A description of a log type. Example in YAML format:
+//
+//     - name: library.googleapis.com/activity_history
+//       description: The history of borrowing and returning library items.
+//       display_name: Activity
+//       labels:
+//       - key: /customer_id
+//         description: Identifier of a library customer
+type LogDescriptor struct {
+	// The name of the log. It must be less than 512 characters long and can
+	// include the following characters: upper- and lower-case alphanumeric
+	// characters [A-Za-z0-9], and punctuation characters including
+	// slash, underscore, hyphen, period [/_-.].
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The set of labels that are available to describe a specific log entry.
+	// Runtime requests that contain labels not specified here are
+	// considered invalid.
+	Labels []*google_api1.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"`
+	// A human-readable description of this log. This information appears in
+	// the documentation and can contain details.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The human-readable name for this log. This information appears on
+	// the user interface and should be concise.
+	DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+}
+
+func (m *LogDescriptor) Reset()                    { *m = LogDescriptor{} }
+func (m *LogDescriptor) String() string            { return proto.CompactTextString(m) }
+func (*LogDescriptor) ProtoMessage()               {}
+func (*LogDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} }
+
+func (m *LogDescriptor) GetLabels() []*google_api1.LabelDescriptor {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*LogDescriptor)(nil), "google.api.LogDescriptor")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/log.proto", fileDescriptor10)
+}
+
+var fileDescriptor10 = []byte{
+	// 233 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x72, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0x03, 0x09,
+	0xfd, 0xe2, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0xd4, 0xe4, 0xfc, 0xbc, 0xb4, 0xcc, 0x74, 0xfd, 0x9c,
+	0xfc, 0x74, 0x3d, 0xb0, 0x32, 0x21, 0x2e, 0xa8, 0x11, 0x40, 0x35, 0x52, 0xd6, 0xc4, 0x1b, 0x97,
+	0x93, 0x98, 0x94, 0x9a, 0x03, 0x21, 0x21, 0x06, 0x29, 0xcd, 0x65, 0xe4, 0xe2, 0xf5, 0xc9, 0x4f,
+	0x77, 0x49, 0x2d, 0x4e, 0x2e, 0xca, 0x2c, 0x28, 0xc9, 0x2f, 0x12, 0x12, 0xe2, 0x62, 0xc9, 0x4b,
+	0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0x8c, 0xb9, 0xd8, 0xc0,
+	0x9a, 0x8a, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0xa4, 0xf5, 0x10, 0xf6, 0xeb, 0xf9, 0x80,
+	0x64, 0x10, 0x06, 0x04, 0x41, 0x95, 0x0a, 0x29, 0x70, 0x71, 0xa7, 0x40, 0x45, 0x33, 0xf3, 0xf3,
+	0x24, 0x98, 0xc1, 0xe6, 0x21, 0x0b, 0x09, 0x29, 0x72, 0xf1, 0xa4, 0x64, 0x16, 0x17, 0xe4, 0x24,
+	0x56, 0xc6, 0x83, 0xad, 0x64, 0x81, 0x2a, 0x81, 0x88, 0xf9, 0x01, 0x85, 0x9c, 0x94, 0xb9, 0xf8,
+	0x92, 0xf3, 0x73, 0x91, 0xac, 0x73, 0xe2, 0x00, 0x3a, 0x37, 0x00, 0xe4, 0xf6, 0x00, 0xc6, 0x45,
+	0x4c, 0x2c, 0xee, 0x8e, 0x01, 0x9e, 0x49, 0x6c, 0x60, 0xbf, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff,
+	0xff, 0x32, 0x96, 0x08, 0x72, 0x59, 0x01, 0x00, 0x00,
+}
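
Not part of the vendored change: a sketch of building a `LogDescriptor` as generated above. The `Labels` field refers to `LabelDescriptor` from the vendored label package; its `Key` and `Description` field names are assumed from the standard google.api label proto.

```go
package main

import (
	"fmt"

	label "google.golang.org/genproto/googleapis/api/label"
	api "google.golang.org/genproto/googleapis/api/serviceconfig"
)

func main() {
	// Mirrors the YAML example in the comment: one log with one label key.
	ld := &api.LogDescriptor{
		Name:        "library.googleapis.com/activity_history",
		Description: "The history of borrowing and returning library items.",
		DisplayName: "Activity",
		Labels: []*label.LabelDescriptor{
			{Key: "/customer_id", Description: "Identifier of a library customer"},
		},
	}
	fmt.Println(ld.String())
}
```
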
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.proto
new file mode 100644
index 0000000..e258b8a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.proto
@@ -0,0 +1,54 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/label/label.proto"; // from google/api/label.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "LogProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// A description of a log type. Example in YAML format:
+//
+//     - name: library.googleapis.com/activity_history
+//       description: The history of borrowing and returning library items.
+//       display_name: Activity
+//       labels:
+//       - key: /customer_id
+//         description: Identifier of a library customer
+message LogDescriptor {
+  // The name of the log. It must be less than 512 characters long and can
+  // include the following characters: upper- and lower-case alphanumeric
+  // characters [A-Za-z0-9], and punctuation characters including
+  // slash, underscore, hyphen, period [/_-.].
+  string name = 1;
+
+  // The set of labels that are available to describe a specific log entry.
+  // Runtime requests that contain labels not specified here are
+  // considered invalid.
+  repeated LabelDescriptor labels = 2;
+
+  // A human-readable description of this log. This information appears in
+  // the documentation and can contain details.
+  string description = 3;
+
+  // The human-readable name for this log. This information appears on
+  // the user interface and should be concise.
+  string display_name = 4;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go
new file mode 100644
index 0000000..935eea4
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go
@@ -0,0 +1,123 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Logging configuration of the service.
+//
+// The following example shows how to configure logs to be sent to the
+// producer and consumer projects. In the example,
+// the `library.googleapis.com/activity_history` log is
+// sent to both the producer and consumer projects, whereas
+// the `library.googleapis.com/purchase_history` log is only sent to the
+// producer project:
+//
+//     monitored_resources:
+//     - type: library.googleapis.com/branch
+//       labels:
+//       - key: /city
+//         description: The city where the library branch is located.
+//       - key: /name
+//         description: The name of the branch.
+//     logs:
+//     - name: library.googleapis.com/activity_history
+//       labels:
+//       - key: /customer_id
+//     - name: library.googleapis.com/purchase_history
+//     logging:
+//       producer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         logs:
+//         - library.googleapis.com/activity_history
+//         - library.googleapis.com/purchase_history
+//       consumer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         logs:
+//         - library.googleapis.com/activity_history
+type Logging struct {
+	// Logging configurations for sending logs to the producer project.
+	// There can be multiple producer destinations, each one must have a
+	// different monitored resource type. A log can be used in at most
+	// one producer destination.
+	ProducerDestinations []*Logging_LoggingDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"`
+	// Logging configurations for sending logs to the consumer project.
+	// There can be multiple consumer destinations, each one must have a
+	// different monitored resource type. A log can be used in at most
+	// one consumer destination.
+	ConsumerDestinations []*Logging_LoggingDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"`
+}
+
+func (m *Logging) Reset()                    { *m = Logging{} }
+func (m *Logging) String() string            { return proto.CompactTextString(m) }
+func (*Logging) ProtoMessage()               {}
+func (*Logging) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} }
+
+func (m *Logging) GetProducerDestinations() []*Logging_LoggingDestination {
+	if m != nil {
+		return m.ProducerDestinations
+	}
+	return nil
+}
+
+func (m *Logging) GetConsumerDestinations() []*Logging_LoggingDestination {
+	if m != nil {
+		return m.ConsumerDestinations
+	}
+	return nil
+}
+
+// Configuration of a specific logging destination (the producer project
+// or the consumer project).
+type Logging_LoggingDestination struct {
+	// The monitored resource type. The type must be defined in
+	// [Service.monitored_resources][google.api.Service.monitored_resources] section.
+	MonitoredResource string `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"`
+	// Names of the logs to be sent to this destination. Each name must
+	// be defined in the [Service.logs][google.api.Service.logs] section.
+	Logs []string `protobuf:"bytes,1,rep,name=logs" json:"logs,omitempty"`
+}
+
+func (m *Logging_LoggingDestination) Reset()                    { *m = Logging_LoggingDestination{} }
+func (m *Logging_LoggingDestination) String() string            { return proto.CompactTextString(m) }
+func (*Logging_LoggingDestination) ProtoMessage()               {}
+func (*Logging_LoggingDestination) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0, 0} }
+
+func init() {
+	proto.RegisterType((*Logging)(nil), "google.api.Logging")
+	proto.RegisterType((*Logging_LoggingDestination)(nil), "google.api.Logging.LoggingDestination")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto", fileDescriptor11)
+}
+
+var fileDescriptor11 = []byte{
+	// 264 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
+	0x10, 0x86, 0x69, 0x77, 0x51, 0x36, 0x8a, 0x60, 0x50, 0x58, 0xf6, 0xb4, 0x78, 0xd0, 0xbd, 0xd8,
+	0x80, 0x3e, 0x81, 0x8b, 0x22, 0x0b, 0x1e, 0x4a, 0x2f, 0x1e, 0x3c, 0x2c, 0x35, 0x1d, 0x87, 0x40,
+	0x9b, 0x59, 0x92, 0xd4, 0xa7, 0xf1, 0xe4, 0x93, 0x9a, 0x6d, 0x52, 0x5b, 0xf4, 0xa4, 0x97, 0x24,
+	0xcc, 0xfc, 0xf3, 0xcd, 0x9f, 0x9f, 0x3d, 0x20, 0x11, 0xd6, 0x90, 0x21, 0xd5, 0xa5, 0xc6, 0x8c,
+	0x0c, 0x0a, 0x04, 0xbd, 0x33, 0xe4, 0x48, 0x84, 0x56, 0xb9, 0x53, 0x56, 0xf8, 0x43, 0x58, 0x30,
+	0xef, 0x4a, 0x82, 0x24, 0xfd, 0xa6, 0x50, 0xd4, 0x84, 0xa8, 0xfc, 0x44, 0x27, 0xe5, 0x2c, 0x62,
+	0xbc, 0x6e, 0xb1, 0xf9, 0x2f, 0xb2, 0xd4, 0x9a, 0x5c, 0xe9, 0x14, 0x69, 0x1b, 0xb0, 0x17, 0x1f,
+	0x29, 0x3b, 0x7c, 0x0a, 0x8b, 0xf8, 0x0b, 0x3b, 0xf7, 0xc5, 0xaa, 0x95, 0x60, 0xb6, 0x15, 0x58,
+	0xa7, 0x74, 0x90, 0xce, 0x93, 0xe5, 0x64, 0x75, 0x74, 0x73, 0x99, 0x0d, 0x16, 0xb2, 0x38, 0xd3,
+	0xdf, 0xf7, 0x83, 0xbc, 0x38, 0xeb, 0x21, 0xa3, 0xa2, 0xdd, 0xc3, 0xbd, 0x09, 0xdb, 0x36, 0x3f,
+	0xe1, 0xe9, 0xdf, 0xe0, 0x3d, 0x64, 0x0c, 0x5f, 0x3c, 0x33, 0xfe, 0x5b, 0xcb, 0xaf, 0x19, 0x6f,
+	0x48, 0x2b, 0x47, 0x06, 0xaa, 0xad, 0x01, 0x4b, 0xad, 0x91, 0x30, 0x9f, 0x2c, 0x93, 0xd5, 0xac,
+	0x38, 0xfd, 0xee, 0x14, 0xb1, 0xc1, 0x39, 0x9b, 0xfa, 0xc8, 0xc3, 0x6f, 0x67, 0x45, 0xf7, 0x5e,
+	0x5f, 0xb1, 0x13, 0x49, 0xcd, 0xc8, 0xdb, 0xfa, 0x38, 0x2e, 0xca, 0xf7, 0xf1, 0xe5, 0xc9, 0x67,
+	0x3a, 0x7d, 0xbc, 0xcb, 0x37, 0xaf, 0x07, 0x5d, 0x9c, 0xb7, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0x48, 0x22, 0x03, 0x10, 0xee, 0x01, 0x00, 0x00,
+}
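The YAML snippet in the comment above maps directly onto the generated Go types. As a minimal sketch — not code used anywhere in gddo-server, and assuming the vendored package is imported under its usual path with an alias (the generated package name is google_api) — the same configuration could be built as values:

    package main

    import (
    	"fmt"

    	serviceconfig "google.golang.org/genproto/googleapis/api/serviceconfig"
    )

    func main() {
    	// Mirrors the example config: both logs flow to the producer project,
    	// while only activity_history flows to the consumer project.
    	logging := &serviceconfig.Logging{
    		ProducerDestinations: []*serviceconfig.Logging_LoggingDestination{{
    			MonitoredResource: "library.googleapis.com/branch",
    			Logs: []string{
    				"library.googleapis.com/activity_history",
    				"library.googleapis.com/purchase_history",
    			},
    		}},
    		ConsumerDestinations: []*serviceconfig.Logging_LoggingDestination{{
    			MonitoredResource: "library.googleapis.com/branch",
    			Logs:              []string{"library.googleapis.com/activity_history"},
    		}},
    	}
    	fmt.Println(logging) // compact text form via the generated String method
    }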
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto
new file mode 100644
index 0000000..d37ddbb
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto
@@ -0,0 +1,82 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "LoggingProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Logging configuration of the service.
+//
+// The following example shows how to configure logs to be sent to the
+// producer and consumer projects. In the example,
+// the `library.googleapis.com/activity_history` log is
+// sent to both the producer and consumer projects, whereas
+// the `library.googleapis.com/purchase_history` log is only sent to the
+// producer project:
+//
+//     monitored_resources:
+//     - type: library.googleapis.com/branch
+//       labels:
+//       - key: /city
+//         description: The city where the library branch is located.
+//       - key: /name
+//         description: The name of the branch.
+//     logs:
+//     - name: library.googleapis.com/activity_history
+//       labels:
+//       - key: /customer_id
+//     - name: library.googleapis.com/purchase_history
+//     logging:
+//       producer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         logs:
+//         - library.googleapis.com/activity_history
+//         - library.googleapis.com/purchase_history
+//       consumer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         logs:
+//         - library.googleapis.com/activity_history
+message Logging {
+  // Configuration of a specific logging destination (the producer project
+  // or the consumer project).
+  message LoggingDestination {
+    // The monitored resource type. The type must be defined in the
+    // [Service.monitored_resources][google.api.Service.monitored_resources] section.
+    string monitored_resource = 3;
+
+    // Names of the logs to be sent to this destination. Each name must
+    // be defined in the [Service.logs][google.api.Service.logs] section.
+    repeated string logs = 1;
+  }
+
+  // Logging configurations for sending logs to the producer project.
+  // There can be multiple producer destinations, each one must have a
+  // different monitored resource type. A log can be used in at most
+  // one producer destination.
+  repeated LoggingDestination producer_destinations = 1;
+
+  // Logging configurations for sending logs to the consumer project.
+  // There can be multiple consumer destinations, each one must have a
+  // different monitored resource type. A log can be used in at most
+  // one consumer destination.
+  repeated LoggingDestination consumer_destinations = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go
new file mode 100644
index 0000000..62eef21
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go
@@ -0,0 +1,131 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Monitoring configuration of the service.
+//
+// The example below shows how to configure monitored resources and metrics
+// for monitoring. In the example, a monitored resource and two metrics are
+// defined. The `library.googleapis.com/book/returned_count` metric is sent
+// to both producer and consumer projects, whereas the
+// `library.googleapis.com/book/overdue_count` metric is only sent to the
+// consumer project.
+//
+//     monitored_resources:
+//     - type: library.googleapis.com/branch
+//       labels:
+//       - key: /city
+//         description: The city where the library branch is located.
+//       - key: /name
+//         description: The name of the branch.
+//     metrics:
+//     - name: library.googleapis.com/book/returned_count
+//       metric_kind: DELTA
+//       value_type: INT64
+//       labels:
+//       - key: /customer_id
+//     - name: library.googleapis.com/book/overdue_count
+//       metric_kind: GAUGE
+//       value_type: INT64
+//       labels:
+//       - key: /customer_id
+//     monitoring:
+//       producer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         metrics:
+//         - library.googleapis.com/book/returned_count
+//       consumer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         metrics:
+//         - library.googleapis.com/book/returned_count
+//         - library.googleapis.com/book/overdue_count
+type Monitoring struct {
+	// Monitoring configurations for sending metrics to the producer project.
+	// There can be multiple producer destinations, each one must have a
+	// different monitored resource type. A metric can be used in at most
+	// one producer destination.
+	ProducerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"`
+	// Monitoring configurations for sending metrics to the consumer project.
+	// There can be multiple consumer destinations, each one must have a
+	// different monitored resource type. A metric can be used in at most
+	// one consumer destination.
+	ConsumerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"`
+}
+
+func (m *Monitoring) Reset()                    { *m = Monitoring{} }
+func (m *Monitoring) String() string            { return proto.CompactTextString(m) }
+func (*Monitoring) ProtoMessage()               {}
+func (*Monitoring) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{0} }
+
+func (m *Monitoring) GetProducerDestinations() []*Monitoring_MonitoringDestination {
+	if m != nil {
+		return m.ProducerDestinations
+	}
+	return nil
+}
+
+func (m *Monitoring) GetConsumerDestinations() []*Monitoring_MonitoringDestination {
+	if m != nil {
+		return m.ConsumerDestinations
+	}
+	return nil
+}
+
+// Configuration of a specific monitoring destination (the producer project
+// or the consumer project).
+type Monitoring_MonitoringDestination struct {
+	// The monitored resource type. The type must be defined in the
+	// [Service.monitored_resources][google.api.Service.monitored_resources] section.
+	MonitoredResource string `protobuf:"bytes,1,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"`
+	// Names of the metrics to report to this monitoring destination.
+	// Each name must be defined in the [Service.metrics][google.api.Service.metrics] section.
+	Metrics []string `protobuf:"bytes,2,rep,name=metrics" json:"metrics,omitempty"`
+}
+
+func (m *Monitoring_MonitoringDestination) Reset()         { *m = Monitoring_MonitoringDestination{} }
+func (m *Monitoring_MonitoringDestination) String() string { return proto.CompactTextString(m) }
+func (*Monitoring_MonitoringDestination) ProtoMessage()    {}
+func (*Monitoring_MonitoringDestination) Descriptor() ([]byte, []int) {
+	return fileDescriptor12, []int{0, 0}
+}
+
+func init() {
+	proto.RegisterType((*Monitoring)(nil), "google.api.Monitoring")
+	proto.RegisterType((*Monitoring_MonitoringDestination)(nil), "google.api.Monitoring.MonitoringDestination")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto", fileDescriptor12)
+}
+
+var fileDescriptor12 = []byte{
+	// 264 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x90, 0xd1, 0x4a, 0xc3, 0x30,
+	0x14, 0x86, 0x69, 0x15, 0x65, 0x47, 0x50, 0x0c, 0x0e, 0xc6, 0xae, 0xc4, 0xab, 0x21, 0xda, 0x80,
+	0x3e, 0x81, 0x43, 0xd0, 0x5d, 0x08, 0xa5, 0x2f, 0x30, 0x63, 0x7a, 0x0c, 0x81, 0x35, 0x67, 0x9c,
+	0xa4, 0x3e, 0x90, 0xcf, 0xe0, 0x03, 0x9a, 0xad, 0xed, 0x5a, 0xc4, 0xab, 0xde, 0x84, 0xe4, 0x9c,
+	0xff, 0xfc, 0xdf, 0x9f, 0x03, 0xaf, 0x86, 0xc8, 0x6c, 0x30, 0x33, 0xb4, 0x51, 0xce, 0x64, 0xc4,
+	0x46, 0x1a, 0x74, 0x5b, 0xa6, 0x40, 0xb2, 0x69, 0xa9, 0xad, 0xf5, 0x32, 0x1e, 0xd2, 0x23, 0x7f,
+	0x59, 0x8d, 0x9a, 0xdc, 0xa7, 0x35, 0xb2, 0x22, 0x67, 0x03, 0xb1, 0x8d, 0x43, 0x7b, 0xb5, 0x80,
+	0xd6, 0x29, 0x4a, 0xe7, 0xab, 0xb1, 0xae, 0xca, 0x39, 0x0a, 0x2a, 0x58, 0x72, 0xbe, 0xb1, 0xbd,
+	0xf9, 0x49, 0x01, 0xde, 0x0e, 0x2c, 0xa1, 0x60, 0x1a, 0xeb, 0x65, 0xad, 0x91, 0xd7, 0x25, 0xfa,
+	0x60, 0x5d, 0xa3, 0x9e, 0x25, 0xd7, 0x47, 0x8b, 0xb3, 0x87, 0xbb, 0xac, 0x4f, 0x91, 0xf5, 0x63,
+	0x83, 0xeb, 0x73, 0x3f, 0x54, 0x5c, 0x75, 0x56, 0x83, 0xa2, 0xdf, 0x21, 0x62, 0x1a, 0x5f, 0x57,
+	0x7f, 0x11, 0xe9, 0x18, 0x44, 0x67, 0x35, 0x44, 0xcc, 0xdf, 0x61, 0xfa, 0xaf, 0x5c, 0xdc, 0x83,
+	0x68, 0x17, 0x8b, 0xe5, 0x9a, 0xd1, 0x53, 0xcd, 0x1a, 0xe3, 0xdf, 0x92, 0xc5, 0xa4, 0xb8, 0x3c,
+	0x74, 0x8a, 0xb6, 0x21, 0x66, 0x70, 0x5a, 0x61, 0x60, 0xab, 0x9b, 0x70, 0x93, 0xa2, 0x7b, 0x2e,
+	0x6f, 0xe1, 0x5c, 0x53, 0x35, 0x88, 0xba, 0xbc, 0xe8, 0x89, 0xf9, 0x6e, 0xb3, 0x79, 0xf2, 0x9d,
+	0x1e, 0xbf, 0x3c, 0xe5, 0xab, 0x8f, 0x93, 0xfd, 0xa6, 0x1f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff,
+	0x1a, 0x02, 0x76, 0xbb, 0x0c, 0x02, 0x00, 0x00,
+}
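Note that the generated accessors guard against a nil receiver, so callers can read a Monitoring section that may be absent from a config without extra checks. A small sketch under the same import-path assumption as above:

    package main

    import (
    	"fmt"

    	serviceconfig "google.golang.org/genproto/googleapis/api/serviceconfig"
    )

    // producerResources collects the monitored resource types that receive
    // producer metrics; it tolerates a nil Monitoring message.
    func producerResources(m *serviceconfig.Monitoring) []string {
    	var types []string
    	for _, d := range m.GetProducerDestinations() { // returns nil when m is nil
    		types = append(types, d.MonitoredResource)
    	}
    	return types
    }

    func main() {
    	var absent *serviceconfig.Monitoring // e.g. the config has no monitoring section
    	fmt.Println(len(producerResources(absent))) // prints 0; no panic
    }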
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto
new file mode 100644
index 0000000..d29cbe5
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto
@@ -0,0 +1,88 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "MonitoringProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Monitoring configuration of the service.
+//
+// The example below shows how to configure monitored resources and metrics
+// for monitoring. In the example, a monitored resource and two metrics are
+// defined. The `library.googleapis.com/book/returned_count` metric is sent
+// to both producer and consumer projects, whereas the
+// `library.googleapis.com/book/overdue_count` metric is only sent to the
+// consumer project.
+//
+//     monitored_resources:
+//     - type: library.googleapis.com/branch
+//       labels:
+//       - key: /city
+//         description: The city where the library branch is located.
+//       - key: /name
+//         description: The name of the branch.
+//     metrics:
+//     - name: library.googleapis.com/book/returned_count
+//       metric_kind: DELTA
+//       value_type: INT64
+//       labels:
+//       - key: /customer_id
+//     - name: library.googleapis.com/book/overdue_count
+//       metric_kind: GAUGE
+//       value_type: INT64
+//       labels:
+//       - key: /customer_id
+//     monitoring:
+//       producer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         metrics:
+//         - library.googleapis.com/book/returned_count
+//       consumer_destinations:
+//       - monitored_resource: library.googleapis.com/branch
+//         metrics:
+//         - library.googleapis.com/book/returned_count
+//         - library.googleapis.com/book/overdue_count
+message Monitoring {
+  // Configuration of a specific monitoring destination (the producer project
+  // or the consumer project).
+  message MonitoringDestination {
+    // The monitored resource type. The type must be defined in the
+    // [Service.monitored_resources][google.api.Service.monitored_resources] section.
+    string monitored_resource = 1;
+
+    // Names of the metrics to report to this monitoring destination.
+    // Each name must be defined in the [Service.metrics][google.api.Service.metrics] section.
+    repeated string metrics = 2;
+  }
+
+  // Monitoring configurations for sending metrics to the producer project.
+  // There can be multiple producer destinations, each one must have a
+  // different monitored resource type. A metric can be used in at most
+  // one producer destination.
+  repeated MonitoringDestination producer_destinations = 1;
+
+  // Monitoring configurations for sending metrics to the consumer project.
+  // There can be multiple consumer destinations, each one must have a
+  // different monitored resource type. A metric can be used in at most
+  // one consumer destination.
+  repeated MonitoringDestination consumer_destinations = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go
new file mode 100644
index 0000000..ea0c9ae
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go
@@ -0,0 +1,305 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/service.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/label"
+import google_api2 "google.golang.org/genproto/googleapis/api/metric"
+import google_api3 "google.golang.org/genproto/googleapis/api/monitoredres"
+import _ "github.com/golang/protobuf/ptypes/any"
+import google_protobuf4 "google.golang.org/genproto/protobuf"
+import google_protobuf3 "google.golang.org/genproto/protobuf"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `Service` is the root object of the configuration schema. It
+// describes basic information like the name of the service and the
+// exposed API interfaces, and delegates other aspects to configuration
+// sub-sections.
+//
+// Example:
+//
+//     type: google.api.Service
+//     config_version: 1
+//     name: calendar.googleapis.com
+//     title: Google Calendar API
+//     apis:
+//     - name: google.calendar.Calendar
+//     backend:
+//       rules:
+//       - selector: "*"
+//         address: calendar.example.com
+type Service struct {
+	// The version of the service configuration. The config version may
+	// influence interpretation of the configuration, for example, to
+	// determine defaults. This is documented together with applicable
+	// options. The current default for the config version itself is `3`.
+	ConfigVersion *google_protobuf5.UInt32Value `protobuf:"bytes,20,opt,name=config_version,json=configVersion" json:"config_version,omitempty"`
+	// The DNS address at which this service is available,
+	// e.g. `calendar.googleapis.com`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// A unique ID for a specific instance of this message, typically assigned
+	// by the client for tracking purposes. If empty, the server may choose to
+	// generate one instead.
+	Id string `protobuf:"bytes,33,opt,name=id" json:"id,omitempty"`
+	// The product title associated with this service.
+	Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"`
+	// The id of the Google developer project that owns the service.
+	// Members of this project can manage the service configuration,
+	// manage consumption of the service, etc.
+	ProducerProjectId string `protobuf:"bytes,22,opt,name=producer_project_id,json=producerProjectId" json:"producer_project_id,omitempty"`
+	// A list of API interfaces exported by this service. Only the `name` field
+	// of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration
+	// author, as the remaining fields will be derived from the IDL during the
+	// normalization process. It is an error to specify an API interface here
+	// which cannot be resolved against the associated IDL files.
+	Apis []*google_protobuf4.Api `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"`
+	// A list of all proto message types included in this API service.
+	// Types referenced directly or indirectly by the `apis` are
+	// automatically included.  Messages which are not referenced but
+	// shall be included, such as types used by the `google.protobuf.Any` type,
+	// should be listed here by name. Example:
+	//
+	//     types:
+	//     - name: google.protobuf.Int32
+	Types []*google_protobuf3.Type `protobuf:"bytes,4,rep,name=types" json:"types,omitempty"`
+	// A list of all enum types included in this API service.  Enums
+	// referenced directly or indirectly by the `apis` are automatically
+	// included.  Enums which are not referenced but shall be included
+	// should be listed here by name. Example:
+	//
+	//     enums:
+	//     - name: google.someapi.v1.SomeEnum
+	Enums []*google_protobuf3.Enum `protobuf:"bytes,5,rep,name=enums" json:"enums,omitempty"`
+	// Additional API documentation.
+	Documentation *Documentation `protobuf:"bytes,6,opt,name=documentation" json:"documentation,omitempty"`
+	// API backend configuration.
+	Backend *Backend `protobuf:"bytes,8,opt,name=backend" json:"backend,omitempty"`
+	// HTTP configuration.
+	Http *Http `protobuf:"bytes,9,opt,name=http" json:"http,omitempty"`
+	// Auth configuration.
+	Authentication *Authentication `protobuf:"bytes,11,opt,name=authentication" json:"authentication,omitempty"`
+	// Context configuration.
+	Context *Context `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+	// Configuration controlling usage of this service.
+	Usage *Usage `protobuf:"bytes,15,opt,name=usage" json:"usage,omitempty"`
+	// Configuration for network endpoints.  If this is empty, then an endpoint
+	// with the same name as the service is automatically generated to service all
+	// defined APIs.
+	Endpoints []*Endpoint `protobuf:"bytes,18,rep,name=endpoints" json:"endpoints,omitempty"`
+	// Configuration for the service control plane.
+	Control *Control `protobuf:"bytes,21,opt,name=control" json:"control,omitempty"`
+	// Defines the logs used by this service.
+	Logs []*LogDescriptor `protobuf:"bytes,23,rep,name=logs" json:"logs,omitempty"`
+	// Defines the metrics used by this service.
+	Metrics []*google_api2.MetricDescriptor `protobuf:"bytes,24,rep,name=metrics" json:"metrics,omitempty"`
+	// Defines the monitored resources used by this service. This is required
+	// by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations.
+	MonitoredResources []*google_api3.MonitoredResourceDescriptor `protobuf:"bytes,25,rep,name=monitored_resources,json=monitoredResources" json:"monitored_resources,omitempty"`
+	// Logging configuration of the service.
+	Logging *Logging `protobuf:"bytes,27,opt,name=logging" json:"logging,omitempty"`
+	// Monitoring configuration of the service.
+	Monitoring *Monitoring `protobuf:"bytes,28,opt,name=monitoring" json:"monitoring,omitempty"`
+	// Configuration for system parameters.
+	SystemParameters *SystemParameters `protobuf:"bytes,29,opt,name=system_parameters,json=systemParameters" json:"system_parameters,omitempty"`
+}
+
+func (m *Service) Reset()                    { *m = Service{} }
+func (m *Service) String() string            { return proto.CompactTextString(m) }
+func (*Service) ProtoMessage()               {}
+func (*Service) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{0} }
+
+func (m *Service) GetConfigVersion() *google_protobuf5.UInt32Value {
+	if m != nil {
+		return m.ConfigVersion
+	}
+	return nil
+}
+
+func (m *Service) GetApis() []*google_protobuf4.Api {
+	if m != nil {
+		return m.Apis
+	}
+	return nil
+}
+
+func (m *Service) GetTypes() []*google_protobuf3.Type {
+	if m != nil {
+		return m.Types
+	}
+	return nil
+}
+
+func (m *Service) GetEnums() []*google_protobuf3.Enum {
+	if m != nil {
+		return m.Enums
+	}
+	return nil
+}
+
+func (m *Service) GetDocumentation() *Documentation {
+	if m != nil {
+		return m.Documentation
+	}
+	return nil
+}
+
+func (m *Service) GetBackend() *Backend {
+	if m != nil {
+		return m.Backend
+	}
+	return nil
+}
+
+func (m *Service) GetHttp() *Http {
+	if m != nil {
+		return m.Http
+	}
+	return nil
+}
+
+func (m *Service) GetAuthentication() *Authentication {
+	if m != nil {
+		return m.Authentication
+	}
+	return nil
+}
+
+func (m *Service) GetContext() *Context {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+func (m *Service) GetUsage() *Usage {
+	if m != nil {
+		return m.Usage
+	}
+	return nil
+}
+
+func (m *Service) GetEndpoints() []*Endpoint {
+	if m != nil {
+		return m.Endpoints
+	}
+	return nil
+}
+
+func (m *Service) GetControl() *Control {
+	if m != nil {
+		return m.Control
+	}
+	return nil
+}
+
+func (m *Service) GetLogs() []*LogDescriptor {
+	if m != nil {
+		return m.Logs
+	}
+	return nil
+}
+
+func (m *Service) GetMetrics() []*google_api2.MetricDescriptor {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+func (m *Service) GetMonitoredResources() []*google_api3.MonitoredResourceDescriptor {
+	if m != nil {
+		return m.MonitoredResources
+	}
+	return nil
+}
+
+func (m *Service) GetLogging() *Logging {
+	if m != nil {
+		return m.Logging
+	}
+	return nil
+}
+
+func (m *Service) GetMonitoring() *Monitoring {
+	if m != nil {
+		return m.Monitoring
+	}
+	return nil
+}
+
+func (m *Service) GetSystemParameters() *SystemParameters {
+	if m != nil {
+		return m.SystemParameters
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Service)(nil), "google.api.Service")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/service.proto", fileDescriptor13)
+}
+
+var fileDescriptor13 = []byte{
+	// 772 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x95, 0x6b, 0x4f, 0xd4, 0x4c,
+	0x14, 0xc7, 0x03, 0x2c, 0xb7, 0xe1, 0xf2, 0xc0, 0x70, 0x79, 0x86, 0x15, 0x13, 0x34, 0x26, 0x90,
+	0x18, 0xda, 0x04, 0x12, 0x13, 0x63, 0x8c, 0xd9, 0x05, 0x94, 0x8d, 0xa8, 0x9b, 0x41, 0x88, 0xef,
+	0x36, 0xdd, 0xee, 0x50, 0xaa, 0xdd, 0x99, 0xa6, 0x9d, 0xa2, 0x7c, 0x1d, 0x3f, 0x8b, 0x1f, 0xcc,
+	0xd3, 0xb9, 0xb0, 0xed, 0x2e, 0x2a, 0x16, 0x5f, 0xec, 0xb6, 0x9d, 0xf3, 0xfb, 0xff, 0xe7, 0x9c,
+	0x99, 0xcc, 0x19, 0x74, 0x14, 0x08, 0x11, 0x44, 0xcc, 0x09, 0x44, 0xe4, 0xf1, 0xc0, 0x11, 0x49,
+	0xe0, 0x06, 0x8c, 0xc7, 0x89, 0x90, 0xc2, 0xd5, 0x21, 0x2f, 0x0e, 0x53, 0x17, 0xfe, 0xdc, 0x94,
+	0x25, 0x57, 0xa1, 0xcf, 0x7c, 0xc1, 0x2f, 0xc2, 0xc0, 0x7e, 0x39, 0x0a, 0xc5, 0xc8, 0xd8, 0x00,
+	0x57, 0x6f, 0x55, 0xb5, 0xf4, 0x38, 0x17, 0xd2, 0x93, 0xa1, 0xe0, 0xa9, 0xb6, 0xad, 0x37, 0x2b,
+	0x5b, 0x65, 0xf2, 0xd2, 0x78, 0x54, 0xae, 0xb0, 0xeb, 0xf9, 0x5f, 0x18, 0xef, 0xdd, 0xd7, 0x06,
+	0x1e, 0x92, 0x7d, 0x93, 0xff, 0xc2, 0x26, 0x11, 0x91, 0xb1, 0x79, 0x5b, 0xd5, 0xa6, 0x27, 0xfc,
+	0xac, 0xcf, 0xb8, 0x5e, 0x66, 0x63, 0xf6, 0xba, 0xaa, 0x19, 0xac, 0x4e, 0x2c, 0x42, 0x2e, 0xef,
+	0xbb, 0x5b, 0x97, 0x52, 0xc6, 0xc6, 0xe3, 0xc5, 0xdd, 0x3d, 0x22, 0xaf, 0xcb, 0x22, 0xfd, 0x6f,
+	0xc4, 0x8d, 0xaa, 0x09, 0x44, 0x22, 0xb8, 0xef, 0xfe, 0x80, 0x45, 0x10, 0x72, 0x6b, 0xf3, 0xf2,
+	0xee, 0x36, 0x7d, 0x26, 0x93, 0xd0, 0x37, 0x0f, 0x23, 0xff, 0xf0, 0x17, 0x72, 0xc1, 0x43, 0x29,
+	0x12, 0xd6, 0x4b, 0x58, 0x3a, 0xf8, 0xe8, 0xc0, 0x97, 0xc8, 0x12, 0x7b, 0x3e, 0xeb, 0xc7, 0x55,
+	0xcb, 0x32, 0x8e, 0x83, 0xca, 0xde, 0x57, 0x6e, 0x18, 0xd7, 0xa9, 0x64, 0xfd, 0x4e, 0xec, 0x25,
+	0x1e, 0xd4, 0xca, 0x12, 0xe3, 0x77, 0x50, 0xd5, 0x2f, 0x4b, 0xbd, 0xc0, 0x96, 0xe7, 0x06, 0xa1,
+	0xbc, 0xcc, 0xba, 0x8e, 0x2f, 0xfa, 0xae, 0x36, 0x72, 0x55, 0xa0, 0x9b, 0x5d, 0xb8, 0xb1, 0xbc,
+	0x8e, 0x61, 0x69, 0x3c, 0x7e, 0x9d, 0xff, 0x8c, 0x60, 0xf7, 0x37, 0xb3, 0xde, 0x28, 0x61, 0x4e,
+	0x83, 0x3b, 0x77, 0xc1, 0xf3, 0x79, 0x0c, 0xff, 0xfc, 0xcf, 0xf9, 0x7c, 0x4d, 0xbc, 0x38, 0x66,
+	0xc9, 0xe0, 0x45, 0x4b, 0x1f, 0xff, 0x98, 0x41, 0xd3, 0xa7, 0xba, 0x50, 0x7c, 0x80, 0x16, 0x75,
+	0xb1, 0x9d, 0x2b, 0x00, 0xe0, 0xc0, 0x92, 0xd5, 0xad, 0xb1, 0x9d, 0xb9, 0xbd, 0x4d, 0x9b, 0x8f,
+	0x35, 0x75, 0xce, 0x5a, 0x5c, 0xee, 0xef, 0x9d, 0x7b, 0x51, 0xc6, 0xe8, 0x82, 0xd6, 0x9c, 0x6b,
+	0x09, 0xc6, 0xa8, 0xc6, 0x61, 0xc5, 0xc9, 0x18, 0x48, 0x67, 0xa9, 0x7a, 0xc7, 0x8b, 0x68, 0x3c,
+	0xec, 0x91, 0x47, 0x6a, 0x04, 0xde, 0xf0, 0x2a, 0x9a, 0x94, 0xa1, 0x8c, 0x18, 0x19, 0x57, 0x43,
+	0xfa, 0x03, 0x3b, 0x68, 0x05, 0x26, 0xe8, 0x65, 0x3e, 0x4b, 0x3a, 0xf0, 0xf2, 0x99, 0xf9, 0xb2,
+	0x03, 0xb2, 0x75, 0xc5, 0x2c, 0xdb, 0x50, 0x5b, 0x47, 0x5a, 0x3d, 0xbc, 0x83, 0x6a, 0xf9, 0x5e,
+	0x91, 0x89, 0xad, 0x09, 0x48, 0x72, 0x75, 0x24, 0xc9, 0x46, 0x1c, 0x52, 0x45, 0xe0, 0xa7, 0x30,
+	0x5f, 0xbe, 0x0a, 0xa4, 0xa6, 0xd0, 0xb5, 0x11, 0xf4, 0x23, 0x44, 0xa9, 0x66, 0x72, 0x98, 0xf1,
+	0xac, 0x9f, 0x92, 0xc9, 0x5f, 0xc0, 0x47, 0x10, 0xa5, 0x9a, 0xc1, 0xaf, 0xd0, 0x42, 0xa9, 0xc5,
+	0x91, 0x29, 0xb5, 0x62, 0x1b, 0xce, 0xe0, 0x82, 0x72, 0x0e, 0x8b, 0x00, 0x2d, 0xf3, 0x78, 0x17,
+	0x4d, 0x9b, 0xc6, 0x4f, 0x66, 0x94, 0x74, 0xa5, 0x28, 0x6d, 0xea, 0x10, 0xb5, 0x0c, 0x7e, 0x82,
+	0x6a, 0x79, 0xf7, 0x22, 0xb3, 0x8a, 0x5d, 0x2a, 0xb2, 0xc7, 0x30, 0x4e, 0x55, 0x14, 0x37, 0xd1,
+	0x62, 0x7e, 0x23, 0xc1, 0x24, 0xa1, 0xaf, 0xd3, 0x9a, 0x53, 0x7c, 0xbd, 0xc8, 0x37, 0x4a, 0x04,
+	0x1d, 0x52, 0xe4, 0x89, 0x99, 0xab, 0x84, 0xcc, 0x8f, 0x26, 0x76, 0xa0, 0x43, 0xd4, 0x32, 0x78,
+	0x1b, 0x4d, 0xaa, 0x13, 0x42, 0xfe, 0x53, 0xf0, 0x72, 0x11, 0x3e, 0xcb, 0x03, 0x54, 0xc7, 0xf1,
+	0x1e, 0x9a, 0xb5, 0x7d, 0x3c, 0x25, 0xb8, 0xbc, 0x75, 0x39, 0x7c, 0x64, 0x82, 0x74, 0x80, 0xd9,
+	0x5c, 0xe0, 0x3e, 0x22, 0x6b, 0xb7, 0xe7, 0x02, 0x21, 0x6a, 0x19, 0xc0, 0x6b, 0xd0, 0x1e, 0x53,
+	0xf2, 0xbf, 0x72, 0x2f, 0xed, 0xc5, 0x89, 0x08, 0x0e, 0x59, 0xea, 0x27, 0x61, 0x0c, 0x5d, 0x86,
+	0x2a, 0x0c, 0x3f, 0x43, 0xd3, 0xba, 0x1b, 0xa6, 0x84, 0x28, 0xc5, 0x66, 0x51, 0xf1, 0x4e, 0x85,
+	0x0a, 0x22, 0x0b, 0xe3, 0x4f, 0x68, 0x65, 0xb4, 0x01, 0xa6, 0x64, 0x43, 0x79, 0x6c, 0x97, 0x3c,
+	0x2c, 0x46, 0x0d, 0x55, 0xb0, 0xc3, 0xfd, 0xe1, 0xa0, 0xaa, 0xd7, 0xf4, 0x77, 0xf2, 0x60, 0xb4,
+	0xde, 0x13, 0x1d, 0xa2, 0x96, 0x81, 0x02, 0xd0, 0xa0, 0x6f, 0x92, 0x4d, 0xa5, 0x58, 0xbf, 0x65,
+	0xfe, 0x5c, 0x54, 0x20, 0x71, 0x0b, 0x2d, 0x0f, 0x77, 0xc9, 0x94, 0x3c, 0x2c, 0x1f, 0xf9, 0x5c,
+	0x7e, 0xaa, 0xa0, 0xf6, 0x0d, 0x43, 0x97, 0xd2, 0xa1, 0x91, 0xe6, 0x76, 0xde, 0x3a, 0xfa, 0x05,
+	0x51, 0x73, 0xde, 0x74, 0x95, 0x76, 0x7e, 0x6c, 0xda, 0x63, 0xdf, 0xc7, 0x6b, 0x6f, 0x1a, 0xed,
+	0x56, 0x77, 0x4a, 0x1d, 0xa3, 0xfd, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x22, 0x08, 0x2f,
+	0x09, 0x0a, 0x00, 0x00,
+}
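Service is the root of the whole configuration; the other vendored messages in this package hang off its fields. A hedged sketch echoing the calendar example in the comment above (again assuming the usual vendored import paths; the wrapper type comes from the ptypes/wrappers package already imported by the generated file):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/ptypes/wrappers"
    	serviceconfig "google.golang.org/genproto/googleapis/api/serviceconfig"
    )

    func main() {
    	// Only a few fields are populated; sub-sections such as backend,
    	// http, and authentication are simply left nil here.
    	svc := &serviceconfig.Service{
    		ConfigVersion: &wrappers.UInt32Value{Value: 1},
    		Name:          "calendar.googleapis.com",
    		Title:         "Google Calendar API",
    	}
    	fmt.Println(svc.Name, svc.GetConfigVersion().Value)
    }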
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.proto
new file mode 100644
index 0000000..bbcd9f1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.proto
@@ -0,0 +1,157 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/auth.proto"; // from google/api/auth.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/backend.proto"; // from google/api/backend.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/context.proto"; // from google/api/context.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/control.proto"; // from google/api/control.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/documentation.proto"; // from google/api/documentation.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.proto"; // from google/api/endpoint.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/http.proto"; // from google/api/http.proto
+import "google.golang.org/genproto/googleapis/api/label/label.proto"; // from google/api/label.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/log.proto"; // from google/api/log.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/logging.proto"; // from google/api/logging.proto
+import "google.golang.org/genproto/googleapis/api/metric/metric.proto"; // from google/api/metric.proto
+import "google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto"; // from google/api/monitored_resource.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.proto"; // from google/api/monitoring.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto"; // from google/api/system_parameter.proto
+import "google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto"; // from google/api/usage.proto
+import "github.com/golang/protobuf/ptypes/any/any.proto"; // from google/protobuf/any.proto
+import "google.golang.org/genproto/protobuf/api.proto"; // from google/protobuf/api.proto
+import "google.golang.org/genproto/protobuf/type.proto"; // from google/protobuf/type.proto
+import "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto"; // from google/protobuf/wrappers.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "ServiceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// `Service` is the root object of the configuration schema. It
+// describes basic information like the name of the service and the
+// exposed API interfaces, and delegates other aspects to configuration
+// sub-sections.
+//
+// Example:
+//
+//     type: google.api.Service
+//     config_version: 1
+//     name: calendar.googleapis.com
+//     title: Google Calendar API
+//     apis:
+//     - name: google.calendar.Calendar
+//     backend:
+//       rules:
+//       - selector: "*"
+//         address: calendar.example.com
+message Service {
+  // The version of the service configuration. The config version may
+  // influence interpretation of the configuration, for example, to
+  // determine defaults. This is documented together with applicable
+  // options. The current default for the config version itself is `3`.
+  google.protobuf.UInt32Value config_version = 20;
+
+  // The DNS address at which this service is available,
+  // e.g. `calendar.googleapis.com`.
+  string name = 1;
+
+  // A unique ID for a specific instance of this message, typically assigned
+  // by the client for tracking purposes. If empty, the server may choose to
+  // generate one instead.
+  string id = 33;
+
+  // The product title associated with this service.
+  string title = 2;
+
+  // The id of the Google developer project that owns the service.
+  // Members of this project can manage the service configuration,
+  // manage consumption of the service, etc.
+  string producer_project_id = 22;
+
+  // A list of API interfaces exported by this service. Only the `name` field
+  // of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration
+  // author, as the remaining fields will be derived from the IDL during the
+  // normalization process. It is an error to specify an API interface here
+  // which cannot be resolved against the associated IDL files.
+  repeated google.protobuf.Api apis = 3;
+
+  // A list of all proto message types included in this API service.
+  // Types referenced directly or indirectly by the `apis` are
+  // automatically included.  Messages which are not referenced but
+  // shall be included, such as types used by the `google.protobuf.Any` type,
+  // should be listed here by name. Example:
+  //
+  //     types:
+  //     - name: google.protobuf.Int32
+  repeated google.protobuf.Type types = 4;
+
+  // A list of all enum types included in this API service.  Enums
+  // referenced directly or indirectly by the `apis` are automatically
+  // included.  Enums which are not referenced but shall be included
+  // should be listed here by name. Example:
+  //
+  //     enums:
+  //     - name: google.someapi.v1.SomeEnum
+  repeated google.protobuf.Enum enums = 5;
+
+  // Additional API documentation.
+  Documentation documentation = 6;
+
+  // API backend configuration.
+  Backend backend = 8;
+
+  // HTTP configuration.
+  Http http = 9;
+
+  // Auth configuration.
+  Authentication authentication = 11;
+
+  // Context configuration.
+  Context context = 12;
+
+  // Configuration controlling usage of this service.
+  Usage usage = 15;
+
+  // Configuration for network endpoints.  If this is empty, then an endpoint
+  // with the same name as the service is automatically generated to service all
+  // defined APIs.
+  repeated Endpoint endpoints = 18;
+
+  // Configuration for the service control plane.
+  Control control = 21;
+
+  // Defines the logs used by this service.
+  repeated LogDescriptor logs = 23;
+
+  // Defines the metrics used by this service.
+  repeated MetricDescriptor metrics = 24;
+
+  // Defines the monitored resources used by this service. This is required
+  // by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations.
+  repeated MonitoredResourceDescriptor monitored_resources = 25;
+
+  // Logging configuration of the service.
+  Logging logging = 27;
+
+  // Monitoring configuration of the service.
+  Monitoring monitoring = 28;
+
+  // Configuration for system parameters.
+  SystemParameters system_parameters = 29;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go
new file mode 100644
index 0000000..f84ff75
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go
@@ -0,0 +1,146 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// ### System parameter configuration
+//
+// A system parameter is a special kind of parameter defined by the API
+// system, not by an individual API. It is typically mapped to an HTTP header
+// and/or a URL query parameter. This configuration specifies which methods
+// change the names of the system parameters.
+type SystemParameters struct {
+	// Define system parameters.
+	//
+	// The parameters defined here will override the default parameters
+	// implemented by the system. If this field is missing from the service
+	// config, default system parameters will be used. Default system parameters
+	// and names are implementation-dependent.
+	//
+	// Example: define api key and alt name for all methods
+	//
+	// system_parameters
+	//   rules:
+	//     - selector: "*"
+	//       parameters:
+	//         - name: api_key
+	//           url_query_parameter: api_key
+	//         - name: alt
+	//           http_header: Response-Content-Type
+	//
+	// Example: define 2 api key names for a specific method.
+	//
+	// system_parameters
+	//   rules:
+	//     - selector: "/ListShelves"
+	//       parameters:
+	//         - name: api_key
+	//           http_header: Api-Key1
+	//         - name: api_key
+	//           http_header: Api-Key2
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*SystemParameterRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *SystemParameters) Reset()                    { *m = SystemParameters{} }
+func (m *SystemParameters) String() string            { return proto.CompactTextString(m) }
+func (*SystemParameters) ProtoMessage()               {}
+func (*SystemParameters) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{0} }
+
+func (m *SystemParameters) GetRules() []*SystemParameterRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// Define a system parameter rule mapping system parameter definitions to
+// methods.
+type SystemParameterRule struct {
+	// Selects the methods to which this rule applies. Use '*' to indicate all
+	// methods in all APIs.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// Define parameters. Multiple names may be defined for a parameter.
+	// For a given method call, only one of them should be used. If multiple
+	// names are used the behavior is implementation-dependent.
+	// If none of the specified names are present the behavior is
+	// parameter-dependent.
+	Parameters []*SystemParameter `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty"`
+}
+
+func (m *SystemParameterRule) Reset()                    { *m = SystemParameterRule{} }
+func (m *SystemParameterRule) String() string            { return proto.CompactTextString(m) }
+func (*SystemParameterRule) ProtoMessage()               {}
+func (*SystemParameterRule) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{1} }
+
+func (m *SystemParameterRule) GetParameters() []*SystemParameter {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+// Define a parameter's name and location. The parameter may be passed as either
+// an HTTP header or a URL query parameter, and if both are passed the behavior
+// is implementation-dependent.
+type SystemParameter struct {
+	// Define the name of the parameter, such as "api_key", "alt", "callback",
+	// etc. It is case sensitive.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Define the HTTP header name to use for the parameter. It is case
+	// insensitive.
+	HttpHeader string `protobuf:"bytes,2,opt,name=http_header,json=httpHeader" json:"http_header,omitempty"`
+	// Define the URL query parameter name to use for the parameter. It is case
+	// sensitive.
+	UrlQueryParameter string `protobuf:"bytes,3,opt,name=url_query_parameter,json=urlQueryParameter" json:"url_query_parameter,omitempty"`
+}
+
+func (m *SystemParameter) Reset()                    { *m = SystemParameter{} }
+func (m *SystemParameter) String() string            { return proto.CompactTextString(m) }
+func (*SystemParameter) ProtoMessage()               {}
+func (*SystemParameter) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{2} }
+
+func init() {
+	proto.RegisterType((*SystemParameters)(nil), "google.api.SystemParameters")
+	proto.RegisterType((*SystemParameterRule)(nil), "google.api.SystemParameterRule")
+	proto.RegisterType((*SystemParameter)(nil), "google.api.SystemParameter")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto", fileDescriptor14)
+}
+
+var fileDescriptor14 = []byte{
+	// 277 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x90, 0xbf, 0x4e, 0xc3, 0x30,
+	0x10, 0xc6, 0x95, 0xb6, 0x20, 0xb8, 0x4a, 0xfc, 0x71, 0x19, 0x22, 0x18, 0x8a, 0x32, 0x75, 0xb2,
+	0x25, 0x10, 0x13, 0x13, 0x5d, 0xa0, 0x0b, 0x0a, 0xe1, 0x01, 0xa2, 0x10, 0x0e, 0x37, 0x52, 0x62,
+	0x87, 0xb3, 0x53, 0xa9, 0xaf, 0xc3, 0x93, 0xe2, 0xb8, 0x25, 0xad, 0x22, 0xd4, 0xc5, 0x3a, 0xdf,
+	0xf7, 0xbb, 0xfb, 0x4e, 0x1f, 0xbc, 0x4a, 0xad, 0x65, 0x89, 0x5c, 0xea, 0x32, 0x53, 0x92, 0x6b,
+	0x92, 0x42, 0xa2, 0xaa, 0x49, 0x5b, 0x2d, 0x36, 0x52, 0x56, 0x17, 0x46, 0xb8, 0x47, 0x18, 0xa4,
+	0x55, 0x91, 0x63, 0xae, 0xd5, 0x57, 0x21, 0x85, 0x59, 0x1b, 0x8b, 0x55, 0x5a, 0x67, 0x94, 0x55,
+	0x68, 0x91, 0xb8, 0x9f, 0x61, 0xb0, 0xdd, 0xe7, 0x06, 0xa2, 0x05, 0x5c, 0xbc, 0x7b, 0x2a, 0xfe,
+	0x83, 0x0c, 0x7b, 0x80, 0x23, 0x6a, 0x4a, 0x34, 0x61, 0x70, 0x3b, 0x9c, 0x8d, 0xef, 0xa6, 0x7c,
+	0xc7, 0xf3, 0x1e, 0x9c, 0x38, 0x2e, 0xd9, 0xd0, 0x91, 0x82, 0xc9, 0x3f, 0x2a, 0xbb, 0x86, 0x13,
+	0x83, 0x25, 0xe6, 0x56, 0x93, 0x5b, 0x18, 0xcc, 0x4e, 0x93, 0xee, 0xcf, 0x1e, 0x01, 0xba, 0xe3,
+	0x4c, 0x38, 0xf0, 0x76, 0x37, 0x87, 0xec, 0xf6, 0xf0, 0x68, 0x05, 0xe7, 0x3d, 0x99, 0x31, 0x18,
+	0x29, 0x57, 0x6e, 0x7d, 0x7c, 0xcd, 0xa6, 0x30, 0x5e, 0x5a, 0x5b, 0xa7, 0x4b, 0xcc, 0x3e, 0x91,
+	0x9c, 0x49, 0x2b, 0x41, 0xdb, 0x7a, 0xf1, 0x1d, 0xc6, 0x61, 0xd2, 0x50, 0x99, 0x7e, 0x37, 0x48,
+	0xeb, 0x5d, 0x56, 0xe1, 0xd0, 0x83, 0x97, 0x4e, 0x7a, 0x6b, 0x95, 0xce, 0x64, 0x2e, 0xe0, 0x2c,
+	0xd7, 0xd5, 0xde, 0x95, 0xf3, 0xab, 0xde, 0x1d, 0x71, 0x1b, 0x73, 0x1c, 0xfc, 0x0c, 0x46, 0xcf,
+	0x4f, 0xf1, 0xe2, 0xe3, 0xd8, 0xc7, 0x7e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x56, 0xd1, 0x77,
+	0xac, 0xc8, 0x01, 0x00, 0x00,
+}
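The rule/parameter nesting above mirrors the YAML in the message comment. As a sketch of the "api_key and alt for all methods" example, under the same import-path assumption as the earlier snippets:

    package main

    import (
    	"fmt"

    	serviceconfig "google.golang.org/genproto/googleapis/api/serviceconfig"
    )

    func main() {
    	// The "*" selector applies these parameter definitions to every method.
    	params := &serviceconfig.SystemParameters{
    		Rules: []*serviceconfig.SystemParameterRule{{
    			Selector: "*",
    			Parameters: []*serviceconfig.SystemParameter{
    				{Name: "api_key", UrlQueryParameter: "api_key"},
    				{Name: "alt", HttpHeader: "Response-Content-Type"},
    			},
    		}},
    	}
    	fmt.Println(params)
    }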
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto
new file mode 100644
index 0000000..ebd1103
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.proto
@@ -0,0 +1,97 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "SystemParameterProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// ### System parameter configuration
+//
+// A system parameter is a special kind of parameter defined by the API
+// system, not by an individual API. It is typically mapped to an HTTP header
+// and/or a URL query parameter. This configuration specifies which methods
+// change the names of the system parameters.
+message SystemParameters {
+  // Define system parameters.
+  //
+  // The parameters defined here will override the default parameters
+  // implemented by the system. If this field is missing from the service
+  // config, default system parameters will be used. Default system parameters
+  // and names are implementation-dependent.
+  //
+  // Example: define api key and alt name for all methods
+  //
+  // system_parameters
+  //   rules:
+  //     - selector: "*"
+  //       parameters:
+  //         - name: api_key
+  //           url_query_parameter: api_key
+  //         - name: alt
+  //           http_header: Response-Content-Type
+  //
+  // Example: define 2 api key names for a specific method.
+  //
+  // system_parameters
+  //   rules:
+  //     - selector: "/ListShelves"
+  //       parameters:
+  //         - name: api_key
+  //           http_header: Api-Key1
+  //         - name: api_key
+  //           http_header: Api-Key2
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated SystemParameterRule rules = 1;
+}
+
+// Define a system parameter rule mapping system parameter definitions to
+// methods.
+message SystemParameterRule {
+  // Selects the methods to which this rule applies. Use '*' to indicate all
+  // methods in all APIs.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Define parameters. Multiple names may be defined for a parameter.
+  // For a given method call, only one of them should be used. If multiple
+  // names are used the behavior is implementation-dependent.
+  // If none of the specified names are present the behavior is
+  // parameter-dependent.
+  repeated SystemParameter parameters = 2;
+}
+
+// Define a parameter's name and location. The parameter may be passed as either
+// an HTTP header or a URL query parameter, and if both are passed the behavior
+// is implementation-dependent.
+message SystemParameter {
+  // Define the name of the parameter, such as "api_key", "alt", "callback",
+  // etc. It is case sensitive.
+  string name = 1;
+
+  // Define the HTTP header name to use for the parameter. It is case
+  // insensitive.
+  string http_header = 2;
+
+  // Define the URL query parameter name to use for the parameter. It is case
+  // sensitive.
+  string url_query_parameter = 3;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go
new file mode 100644
index 0000000..24970fc
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go
@@ -0,0 +1,107 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto
+// DO NOT EDIT!
+
+package google_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Configuration controlling usage of a service.
+type Usage struct {
+	// Requirements that must be satisfied before a consumer project can use the
+	// service. Each requirement is of the form <service.name>/<requirement-id>;
+	// for example 'serviceusage.googleapis.com/billing-enabled'.
+	Requirements []string `protobuf:"bytes,1,rep,name=requirements" json:"requirements,omitempty"`
+	// A list of usage rules that apply to individual API methods.
+	//
+	// **NOTE:** All service configuration rules follow "last one wins" order.
+	Rules []*UsageRule `protobuf:"bytes,6,rep,name=rules" json:"rules,omitempty"`
+}
+
+func (m *Usage) Reset()                    { *m = Usage{} }
+func (m *Usage) String() string            { return proto.CompactTextString(m) }
+func (*Usage) ProtoMessage()               {}
+func (*Usage) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{0} }
+
+func (m *Usage) GetRules() []*UsageRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+// Usage configuration rules for the service.
+//
+// NOTE: Under development.
+//
+//
+// Use this rule to configure unregistered calls for the service. Unregistered
+// calls are calls that do not contain consumer project identity.
+// (Example: calls that do not contain an API key).
+// By default, API methods do not allow unregistered calls, and each method call
+// must be identified by a consumer project identity. Use this rule to
+// allow/disallow unregistered calls.
+//
+// Example of an API that wants to allow unregistered calls for the entire service.
+//
+//     usage:
+//       rules:
+//       - selector: "*"
+//         allow_unregistered_calls: true
+//
+// Example of a method that wants to allow unregistered calls.
+//
+//     usage:
+//       rules:
+//       - selector: "google.example.library.v1.LibraryService.CreateBook"
+//         allow_unregistered_calls: true
+type UsageRule struct {
+	// Selects the methods to which this rule applies. Use '*' to indicate all
+	// methods in all APIs.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+	Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	// True if the method allows unregistered calls; false otherwise.
+	AllowUnregisteredCalls bool `protobuf:"varint,2,opt,name=allow_unregistered_calls,json=allowUnregisteredCalls" json:"allow_unregistered_calls,omitempty"`
+}
+
+func (m *UsageRule) Reset()                    { *m = UsageRule{} }
+func (m *UsageRule) String() string            { return proto.CompactTextString(m) }
+func (*UsageRule) ProtoMessage()               {}
+func (*UsageRule) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{1} }
+
+func init() {
+	proto.RegisterType((*Usage)(nil), "google.api.Usage")
+	proto.RegisterType((*UsageRule)(nil), "google.api.UsageRule")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto", fileDescriptor15)
+}
+
+var fileDescriptor15 = []byte{
+	// 254 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x8f, 0xc1, 0x4a, 0x03, 0x31,
+	0x10, 0x86, 0xd9, 0x6a, 0x4b, 0x3b, 0x8a, 0x87, 0x05, 0x65, 0xe9, 0x49, 0x16, 0x04, 0x41, 0x48,
+	0x40, 0x2f, 0x5e, 0x6d, 0x0f, 0xd2, 0xdb, 0xb2, 0x50, 0xf0, 0x56, 0x62, 0x1c, 0x43, 0x20, 0xcd,
+	0xd4, 0x4c, 0x56, 0xdf, 0xc7, 0x27, 0x35, 0x9b, 0x95, 0x5a, 0xaf, 0xbd, 0x04, 0xf2, 0x7f, 0x3f,
+	0xdf, 0xcc, 0xc0, 0xd2, 0x10, 0x19, 0x87, 0xc2, 0x90, 0x53, 0xde, 0x08, 0x0a, 0x46, 0x1a, 0xf4,
+	0xbb, 0x40, 0x91, 0xe4, 0x80, 0xd4, 0xce, 0xb2, 0x4c, 0x8f, 0x64, 0x0c, 0x9f, 0x56, 0xa3, 0x26,
+	0xff, 0x6e, 0x8d, 0xec, 0x58, 0x19, 0x14, 0xb9, 0x58, 0xc2, 0xaf, 0x24, 0xb5, 0xe6, 0xab, 0x63,
+	0x85, 0xca, 0x7b, 0x8a, 0x2a, 0x5a, 0xf2, 0x3c, 0x68, 0xeb, 0x17, 0x18, 0xaf, 0xfb, 0x29, 0x65,
+	0x0d, 0xe7, 0x01, 0x3f, 0x3a, 0x1b, 0x70, 0x8b, 0x3e, 0x72, 0x55, 0x5c, 0x9f, 0xdc, 0xce, 0xda,
+	0x7f, 0x59, 0x79, 0x07, 0xe3, 0xd0, 0x39, 0xe4, 0x6a, 0x92, 0xe0, 0xd9, 0xfd, 0xa5, 0xf8, 0xdb,
+	0x49, 0x64, 0x4b, 0x9b, 0x68, 0x3b, 0x74, 0x6a, 0x05, 0xb3, 0x7d, 0x56, 0xce, 0x61, 0xca, 0xe8,
+	0x50, 0x47, 0x0a, 0xc9, 0x5c, 0x24, 0xf3, 0xfe, 0x5f, 0x3e, 0x42, 0xa5, 0x9c, 0xa3, 0xaf, 0x4d,
+	0xe7, 0x03, 0x1a, 0xcb, 0x11, 0x03, 0xbe, 0x6d, 0x74, 0xca, 0xb8, 0x1a, 0xa5, 0xee, 0xb4, 0xbd,
+	0xca, 0x7c, 0x7d, 0x80, 0x97, 0x3d, 0x5d, 0xdc, 0xc0, 0x85, 0xa6, 0xed, 0xc1, 0x16, 0x0b, 0xc8,
+	0x23, 0x9b, 0xfe, 0xb4, 0xa6, 0xf8, 0x1e, 0x9d, 0x3e, 0x3f, 0x35, 0xab, 0xd7, 0x49, 0x3e, 0xf5,
+	0xe1, 0x27, 0x00, 0x00, 0xff, 0xff, 0x72, 0x2d, 0x47, 0x30, 0x88, 0x01, 0x00, 0x00,
+}
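Usage follows the same pattern; the sketch below (same import-path assumption) mirrors the "allow unregistered calls for the entire service" example from the comment:

    package main

    import (
    	"fmt"

    	serviceconfig "google.golang.org/genproto/googleapis/api/serviceconfig"
    )

    func main() {
    	usage := &serviceconfig.Usage{
    		Requirements: []string{"serviceusage.googleapis.com/billing-enabled"},
    		Rules: []*serviceconfig.UsageRule{{
    			Selector:               "*",
    			AllowUnregisteredCalls: true, // opt the whole service in
    		}},
    	}
    	fmt.Println(len(usage.GetRules())) // prints 1
    }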
diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto
new file mode 100644
index 0000000..0c46d95
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.proto
@@ -0,0 +1,74 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "UsageProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Configuration controlling usage of a service.
+message Usage {
+  // Requirements that must be satisfied before a consumer project can use the
+  // service. Each requirement is of the form <service.name>/<requirement-id>;
+  // for example 'serviceusage.googleapis.com/billing-enabled'.
+  repeated string requirements = 1;
+
+  // A list of usage rules that apply to individual API methods.
+  //
+  // **NOTE:** All service configuration rules follow "last one wins" order.
+  repeated UsageRule rules = 6;
+}
+
+// Usage configuration rules for the service.
+//
+// NOTE: Under development.
+//
+//
+// Use this rule to configure unregistered calls for the service. Unregistered
+// calls are calls that do not contain consumer project identity.
+// (Example: calls that do not contain an API key).
+// By default, API methods do not allow unregistered calls, and each method call
+// must be identified by a consumer project identity. Use this rule to
+// allow/disallow unregistered calls.
+//
+// Example of an API that wants to allow unregistered calls for the entire service.
+//
+//     usage:
+//       rules:
+//       - selector: "*"
+//         allow_unregistered_calls: true
+//
+// Example of a method that wants to allow unregistered calls.
+//
+//     usage:
+//       rules:
+//       - selector: "google.example.library.v1.LibraryService.CreateBook"
+//         allow_unregistered_calls: true
+message UsageRule {
+  // Selects the methods to which this rule applies. Use '*' to indicate all
+  // methods in all APIs.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // True if the method allows unregistered calls; false otherwise.
+  bool allow_unregistered_calls = 2;
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go
new file mode 100644
index 0000000..e0676e0
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go
@@ -0,0 +1,134 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/type/http_request.proto
+// DO NOT EDIT!
+
+/*
+Package google_logging_type is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/logging/type/http_request.proto
+	google.golang.org/genproto/googleapis/logging/type/log_severity.proto
+
+It has these top-level messages:
+	HttpRequest
+*/
+package google_logging_type
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A common proto for logging HTTP requests. Only contains semantics
+// defined by the HTTP specification. Product-specific logging
+// information MUST be defined in a separate message.
+type HttpRequest struct {
+	// The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`.
+	RequestMethod string `protobuf:"bytes,1,opt,name=request_method,json=requestMethod" json:"request_method,omitempty"`
+	// The scheme (http, https), the host name, the path and the query
+	// portion of the URL that was requested.
+	// Example: `"http://example.com/some/info?color=red"`.
+	RequestUrl string `protobuf:"bytes,2,opt,name=request_url,json=requestUrl" json:"request_url,omitempty"`
+	// The size of the HTTP request message in bytes, including the request
+	// headers and the request body.
+	RequestSize int64 `protobuf:"varint,3,opt,name=request_size,json=requestSize" json:"request_size,omitempty"`
+	// The response code indicating the status of the response.
+	// Examples: 200, 404.
+	Status int32 `protobuf:"varint,4,opt,name=status" json:"status,omitempty"`
+	// The size of the HTTP response message sent back to the client, in bytes,
+	// including the response headers and the response body.
+	ResponseSize int64 `protobuf:"varint,5,opt,name=response_size,json=responseSize" json:"response_size,omitempty"`
+	// The user agent sent by the client. Example:
+	// `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)"`.
+	UserAgent string `protobuf:"bytes,6,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
+	// The IP address (IPv4 or IPv6) of the client that issued the HTTP
+	// request. Examples: `"192.168.1.1"`, `"FE80::0202:B3FF:FE1E:8329"`.
+	RemoteIp string `protobuf:"bytes,7,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"`
+	// The IP address (IPv4 or IPv6) of the origin server that the request was
+	// sent to.
+	ServerIp string `protobuf:"bytes,13,opt,name=server_ip,json=serverIp" json:"server_ip,omitempty"`
+	// The referer URL of the request, as defined in
+	// [HTTP/1.1 Header Field Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+	Referer string `protobuf:"bytes,8,opt,name=referer" json:"referer,omitempty"`
+	// The request processing latency on the server, from the time the request was
+	// received until the response was sent.
+	Latency *google_protobuf1.Duration `protobuf:"bytes,14,opt,name=latency" json:"latency,omitempty"`
+	// Whether or not a cache lookup was attempted.
+	CacheLookup bool `protobuf:"varint,11,opt,name=cache_lookup,json=cacheLookup" json:"cache_lookup,omitempty"`
+	// Whether or not an entity was served from cache
+	// (with or without validation).
+	CacheHit bool `protobuf:"varint,9,opt,name=cache_hit,json=cacheHit" json:"cache_hit,omitempty"`
+	// Whether or not the response was validated with the origin server before
+	// being served from cache. This field is only meaningful if `cache_hit` is
+	// True.
+	CacheValidatedWithOriginServer bool `protobuf:"varint,10,opt,name=cache_validated_with_origin_server,json=cacheValidatedWithOriginServer" json:"cache_validated_with_origin_server,omitempty"`
+	// The number of HTTP response bytes inserted into cache. Set only when a
+	// cache fill was attempted.
+	CacheFillBytes int64 `protobuf:"varint,12,opt,name=cache_fill_bytes,json=cacheFillBytes" json:"cache_fill_bytes,omitempty"`
+}
+
+func (m *HttpRequest) Reset()                    { *m = HttpRequest{} }
+func (m *HttpRequest) String() string            { return proto.CompactTextString(m) }
+func (*HttpRequest) ProtoMessage()               {}
+func (*HttpRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *HttpRequest) GetLatency() *google_protobuf1.Duration {
+	if m != nil {
+		return m.Latency
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*HttpRequest)(nil), "google.logging.type.HttpRequest")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/type/http_request.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 477 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x6f, 0x13, 0x31,
+	0x10, 0xc5, 0x15, 0xda, 0xe6, 0x8f, 0x37, 0x8d, 0x2a, 0x23, 0x81, 0x29, 0xe2, 0x5f, 0x11, 0x52,
+	0x2f, 0xac, 0x25, 0x7a, 0xe2, 0x48, 0x04, 0xa8, 0x45, 0x20, 0xaa, 0xad, 0x80, 0xe3, 0x6a, 0xb3,
+	0x71, 0xbc, 0x16, 0xce, 0xda, 0xd8, 0xde, 0xa0, 0xf0, 0x35, 0xf8, 0xc2, 0x8c, 0xc7, 0xbb, 0x88,
+	0x03, 0x87, 0x5e, 0xa2, 0xec, 0xef, 0xbd, 0x37, 0x1e, 0x8f, 0x87, 0xbc, 0x93, 0xc6, 0x48, 0x2d,
+	0x72, 0x69, 0x74, 0xd5, 0xca, 0xdc, 0x38, 0xc9, 0xa5, 0x68, 0xad, 0x33, 0xc1, 0xf0, 0x24, 0x55,
+	0x56, 0x79, 0xae, 0x8d, 0x94, 0xaa, 0x95, 0x3c, 0xec, 0xad, 0xe0, 0x4d, 0x08, 0xb6, 0x74, 0xe2,
+	0x47, 0x27, 0x7c, 0xc8, 0xd1, 0x4a, 0xef, 0xf6, 0x65, 0x7a, 0x5f, 0x1e, 0x7d, 0xa7, 0x57, 0xb7,
+	0xab, 0x0d, 0x3f, 0xdc, 0x0b, 0xb7, 0x53, 0xb5, 0xa8, 0x4d, 0xbb, 0x51, 0x92, 0x57, 0x6d, 0x6b,
+	0x42, 0x15, 0x94, 0x69, 0x7d, 0xaa, 0x7f, 0xfa, 0x5a, 0xaa, 0xd0, 0x74, 0xab, 0xbc, 0x36, 0x5b,
+	0x9e, 0xca, 0x71, 0x14, 0x56, 0xdd, 0x86, 0xdb, 0x78, 0x98, 0xe7, 0xeb, 0xce, 0x61, 0xe4, 0xef,
+	0x9f, 0x14, 0x3d, 0xfb, 0x7d, 0x48, 0xb2, 0x4b, 0xe8, 0xb8, 0x48, 0x0d, 0xd3, 0x17, 0x64, 0xd1,
+	0xf7, 0x5e, 0x6e, 0x45, 0x68, 0xcc, 0x9a, 0x8d, 0x9e, 0x8e, 0xce, 0x67, 0xc5, 0x71, 0x4f, 0x3f,
+	0x21, 0xa4, 0x4f, 0x48, 0x36, 0xd8, 0x3a, 0xa7, 0xd9, 0x1d, 0xf4, 0x90, 0x1e, 0x7d, 0x71, 0x9a,
+	0x3e, 0x23, 0xf3, 0xc1, 0xe0, 0xd5, 0x2f, 0xc1, 0x0e, 0xc0, 0x71, 0x50, 0x0c, 0xa1, 0x1b, 0x40,
+	0xf4, 0x1e, 0x19, 0x7b, 0xb8, 0x47, 0xe7, 0xd9, 0x21, 0x88, 0x47, 0x45, 0xff, 0x45, 0x9f, 0x13,
+	0x38, 0xcc, 0x5b, 0xb8, 0x9e, 0x48, 0xd9, 0x23, 0xcc, 0xce, 0x07, 0x88, 0xe1, 0x47, 0x84, 0x74,
+	0x30, 0x96, 0xb2, 0x82, 0x99, 0x05, 0x36, 0xc6, 0xf3, 0x67, 0x91, 0xbc, 0x89, 0x80, 0x3e, 0x24,
+	0x33, 0x27, 0xb6, 0x26, 0x88, 0x52, 0x59, 0x36, 0x41, 0x75, 0x9a, 0xc0, 0x95, 0x8d, 0x62, 0x9c,
+	0x28, 0xa4, 0x41, 0x3c, 0x4e, 0x62, 0x02, 0x20, 0x32, 0x32, 0x71, 0x62, 0x23, 0x9c, 0x70, 0x6c,
+	0x8a, 0xd2, 0xf0, 0x49, 0x2f, 0xc8, 0x44, 0x57, 0x41, 0xb4, 0xf5, 0x9e, 0x2d, 0x40, 0xc9, 0x5e,
+	0x3d, 0xc8, 0xfb, 0x27, 0x1c, 0x86, 0x9d, 0xbf, 0xed, 0x87, 0x5b, 0x0c, 0xce, 0x38, 0x87, 0xba,
+	0xaa, 0x1b, 0x51, 0x6a, 0x63, 0xbe, 0x77, 0x96, 0x65, 0x90, 0x9c, 0x16, 0x19, 0xb2, 0x8f, 0x88,
+	0x62, 0x3b, 0xc9, 0xd2, 0xa8, 0xc0, 0x66, 0xa8, 0x4f, 0x11, 0x5c, 0xaa, 0x40, 0x3f, 0x90, 0xb3,
+	0x24, 0xee, 0x2a, 0xad, 0xd6, 0x50, 0x74, 0x5d, 0xfe, 0x84, 0xc7, 0x2e, 0x8d, 0x53, 0xb0, 0x4a,
+	0x65, 0x6a, 0x9b, 0x11, 0x4c, 0x3d, 0x46, 0xe7, 0xd7, 0xc1, 0xf8, 0x0d, 0x7c, 0x9f, 0xd1, 0x76,
+	0x83, 0x2e, 0x7a, 0x4e, 0x4e, 0x52, 0xad, 0x8d, 0xd2, 0xba, 0x5c, 0xed, 0x83, 0xf0, 0x6c, 0x8e,
+	0xb3, 0x5d, 0x20, 0x7f, 0x0f, 0x78, 0x19, 0xe9, 0xf2, 0x25, 0xb9, 0x0f, 0xbb, 0x94, 0xff, 0x67,
+	0x6d, 0x97, 0x27, 0xff, 0x6c, 0xcb, 0x75, 0xbc, 0xf7, 0xf5, 0x68, 0x35, 0xc6, 0x01, 0x5c, 0xfc,
+	0x09, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x1c, 0x8f, 0x8c, 0x2f, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.proto b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.proto
new file mode 100644
index 0000000..34345b7
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.proto
@@ -0,0 +1,86 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.type;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "github.com/golang/protobuf/ptypes/duration/duration.proto"; // from google/protobuf/duration.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "HttpRequestProto";
+option java_package = "com.google.logging.type";
+
+
+// A common proto for logging HTTP requests. Only contains semantics
+// defined by the HTTP specification. Product-specific logging
+// information MUST be defined in a separate message.
+message HttpRequest {
+  // The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`.
+  string request_method = 1;
+
+  // The scheme (http, https), the host name, the path and the query
+  // portion of the URL that was requested.
+  // Example: `"http://example.com/some/info?color=red"`.
+  string request_url = 2;
+
+  // The size of the HTTP request message in bytes, including the request
+  // headers and the request body.
+  int64 request_size = 3;
+
+  // The response code indicating the status of the response.
+  // Examples: 200, 404.
+  int32 status = 4;
+
+  // The size of the HTTP response message sent back to the client, in bytes,
+  // including the response headers and the response body.
+  int64 response_size = 5;
+
+  // The user agent sent by the client. Example:
+  // `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)"`.
+  string user_agent = 6;
+
+  // The IP address (IPv4 or IPv6) of the client that issued the HTTP
+  // request. Examples: `"192.168.1.1"`, `"FE80::0202:B3FF:FE1E:8329"`.
+  string remote_ip = 7;
+
+  // The IP address (IPv4 or IPv6) of the origin server that the request was
+  // sent to.
+  string server_ip = 13;
+
+  // The referer URL of the request, as defined in
+  // [HTTP/1.1 Header Field Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+  string referer = 8;
+
+  // The request processing latency on the server, from the time the request was
+  // received until the response was sent.
+  google.protobuf.Duration latency = 14;
+
+  // Whether or not a cache lookup was attempted.
+  bool cache_lookup = 11;
+
+  // Whether or not an entity was served from cache
+  // (with or without validation).
+  bool cache_hit = 9;
+
+  // Whether or not the response was validated with the origin server before
+  // being served from cache. This field is only meaningful if `cache_hit` is
+  // True.
+  bool cache_validated_with_origin_server = 10;
+
+  // The number of HTTP response bytes inserted into cache. Set only when a
+  // cache fill was attempted.
+  int64 cache_fill_bytes = 12;
+}
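
For orientation only (the sketch below is not part of the vendored files): the generated HttpRequest struct in http_request.pb.go above can be populated directly from Go. A minimal sketch, where the import aliases (durpb, ltype) and all field values are illustrative assumptions:

package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	ltype "google.golang.org/genproto/googleapis/logging/type"
)

func main() {
	// Field names follow the generated HttpRequest struct above;
	// the values here are placeholders.
	req := &ltype.HttpRequest{
		RequestMethod: "GET",
		RequestUrl:    "http://example.com/some/info?color=red",
		Status:        200,
		ResponseSize:  512,
		RemoteIp:      "192.168.1.1",
		Latency:       &durpb.Duration{Nanos: 42000000}, // 42ms
	}
	fmt.Println(req)
}
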
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go
new file mode 100644
index 0000000..3fedd5c
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go
@@ -0,0 +1,112 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/type/log_severity.proto
+// DO NOT EDIT!
+
+package google_logging_type
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The severity of the event described in a log entry, expressed as one of the
+// standard severity levels listed below.  For your reference, the levels are
+// assigned the listed numeric values. The effect of using numeric values other
+// than those listed is undefined.
+//
+// You can filter for log entries by severity.  For example, the following
+// filter expression will match log entries with severities `INFO`, `NOTICE`,
+// and `WARNING`:
+//
+//     severity > DEBUG AND severity <= WARNING
+//
+// If you are writing log entries, you should map other severity encodings to
+// one of these standard levels. For example, you might map all of Java's FINE,
+// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the
+// original severity level in the log entry payload if you wish.
+type LogSeverity int32
+
+const (
+	// (0) The log entry has no assigned severity level.
+	LogSeverity_DEFAULT LogSeverity = 0
+	// (100) Debug or trace information.
+	LogSeverity_DEBUG LogSeverity = 100
+	// (200) Routine information, such as ongoing status or performance.
+	LogSeverity_INFO LogSeverity = 200
+	// (300) Normal but significant events, such as start up, shut down, or
+	// a configuration change.
+	LogSeverity_NOTICE LogSeverity = 300
+	// (400) Warning events might cause problems.
+	LogSeverity_WARNING LogSeverity = 400
+	// (500) Error events are likely to cause problems.
+	LogSeverity_ERROR LogSeverity = 500
+	// (600) Critical events cause more severe problems or outages.
+	LogSeverity_CRITICAL LogSeverity = 600
+	// (700) A person must take an action immediately.
+	LogSeverity_ALERT LogSeverity = 700
+	// (800) One or more systems are unusable.
+	LogSeverity_EMERGENCY LogSeverity = 800
+)
+
+var LogSeverity_name = map[int32]string{
+	0:   "DEFAULT",
+	100: "DEBUG",
+	200: "INFO",
+	300: "NOTICE",
+	400: "WARNING",
+	500: "ERROR",
+	600: "CRITICAL",
+	700: "ALERT",
+	800: "EMERGENCY",
+}
+var LogSeverity_value = map[string]int32{
+	"DEFAULT":   0,
+	"DEBUG":     100,
+	"INFO":      200,
+	"NOTICE":    300,
+	"WARNING":   400,
+	"ERROR":     500,
+	"CRITICAL":  600,
+	"ALERT":     700,
+	"EMERGENCY": 800,
+}
+
+func (x LogSeverity) String() string {
+	return proto.EnumName(LogSeverity_name, int32(x))
+}
+func (LogSeverity) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func init() {
+	proto.RegisterEnum("google.logging.type.LogSeverity", LogSeverity_name, LogSeverity_value)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/type/log_severity.proto", fileDescriptor1)
+}
+
+var fileDescriptor1 = []byte{
+	// 278 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xeb, 0xe7, 0xe4,
+	0xa7, 0xa7, 0x67, 0xe6, 0xa5, 0xeb, 0x97, 0x54, 0x16, 0xa4, 0x82, 0x38, 0xf1, 0xc5, 0xa9, 0x65,
+	0xa9, 0x45, 0x99, 0x25, 0x95, 0x7a, 0x60, 0xa5, 0x42, 0xc2, 0x50, 0x63, 0xa0, 0xea, 0xf4, 0x40,
+	0xea, 0xa4, 0x3c, 0x89, 0x33, 0x1b, 0x48, 0xe8, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x26,
+	0xe7, 0xe7, 0xa5, 0x65, 0xa6, 0xeb, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7,
+	0x15, 0x43, 0xcc, 0xd7, 0x6a, 0x62, 0xe4, 0xe2, 0xf6, 0xc9, 0x4f, 0x0f, 0x86, 0xda, 0x2a, 0xc4,
+	0xcd, 0xc5, 0xee, 0xe2, 0xea, 0xe6, 0x18, 0xea, 0x13, 0x22, 0xc0, 0x20, 0xc4, 0xc9, 0xc5, 0xea,
+	0xe2, 0xea, 0x14, 0xea, 0x2e, 0x90, 0x02, 0x64, 0xb2, 0x78, 0xfa, 0xb9, 0xf9, 0x0b, 0x9c, 0x60,
+	0x04, 0x2a, 0x61, 0xf3, 0xf3, 0x0f, 0xf1, 0x74, 0x76, 0x15, 0x58, 0xc3, 0x24, 0xc4, 0xc3, 0xc5,
+	0x1e, 0xee, 0x18, 0xe4, 0xe7, 0xe9, 0xe7, 0x2e, 0x30, 0x81, 0x59, 0x88, 0x8b, 0x8b, 0xd5, 0x35,
+	0x28, 0xc8, 0x3f, 0x48, 0xe0, 0x0b, 0xb3, 0x10, 0x2f, 0x17, 0x87, 0x73, 0x90, 0x27, 0x50, 0x9d,
+	0xa3, 0x8f, 0xc0, 0x0d, 0x16, 0x90, 0x94, 0xa3, 0x8f, 0x6b, 0x50, 0x88, 0xc0, 0x1e, 0x56, 0x21,
+	0x3e, 0x2e, 0x4e, 0x57, 0x5f, 0xd7, 0x20, 0x77, 0x57, 0x3f, 0xe7, 0x48, 0x81, 0x05, 0x6c, 0x4e,
+	0xba, 0x5c, 0xe2, 0xc9, 0xf9, 0xb9, 0x7a, 0x58, 0xbc, 0xea, 0x24, 0x80, 0xe4, 0xb8, 0x00, 0x90,
+	0x8b, 0x03, 0x18, 0x93, 0xd8, 0xc0, 0x4e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x01, 0xa8,
+	0xad, 0x71, 0x63, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.proto b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.proto
new file mode 100644
index 0000000..bcb451a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.proto
@@ -0,0 +1,69 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.type;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "LogSeverityProto";
+option java_package = "com.google.logging.type";
+
+
+// The severity of the event described in a log entry, expressed as one of the
+// standard severity levels listed below.  For your reference, the levels are
+// assigned the listed numeric values. The effect of using numeric values other
+// than those listed is undefined.
+//
+// You can filter for log entries by severity.  For example, the following
+// filter expression will match log entries with severities `INFO`, `NOTICE`,
+// and `WARNING`:
+//
+//     severity > DEBUG AND severity <= WARNING
+//
+// If you are writing log entries, you should map other severity encodings to
+// one of these standard levels. For example, you might map all of Java's FINE,
+// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the
+// original severity level in the log entry payload if you wish.
+enum LogSeverity {
+  // (0) The log entry has no assigned severity level.
+  DEFAULT = 0;
+
+  // (100) Debug or trace information.
+  DEBUG = 100;
+
+  // (200) Routine information, such as ongoing status or performance.
+  INFO = 200;
+
+  // (300) Normal but significant events, such as start up, shut down, or
+  // a configuration change.
+  NOTICE = 300;
+
+  // (400) Warning events might cause problems.
+  WARNING = 400;
+
+  // (500) Error events are likely to cause problems.
+  ERROR = 500;
+
+  // (600) Critical events cause more severe problems or outages.
+  CRITICAL = 600;
+
+  // (700) A person must take an action immediately.
+  ALERT = 700;
+
+  // (800) One or more systems are unusable.
+  EMERGENCY = 800;
+}
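
As a side note (not part of the vendored files), the generated LogSeverity constants are plain int32 values (DEBUG=100 through EMERGENCY=800), so the filter described in the comments, "severity > DEBUG AND severity <= WARNING", maps onto ordinary Go comparisons. A minimal sketch; matchesFilter is a hypothetical helper and the import alias is assumed:

package main

import (
	"fmt"

	ltype "google.golang.org/genproto/googleapis/logging/type"
)

// matchesFilter mirrors the documented filter
// "severity > DEBUG AND severity <= WARNING".
func matchesFilter(s ltype.LogSeverity) bool {
	return s > ltype.LogSeverity_DEBUG && s <= ltype.LogSeverity_WARNING
}

func main() {
	for _, s := range []ltype.LogSeverity{
		ltype.LogSeverity_DEBUG,
		ltype.LogSeverity_INFO,
		ltype.LogSeverity_WARNING,
		ltype.LogSeverity_ERROR,
	} {
		// Prints DEBUG: false, INFO: true, WARNING: true, ERROR: false.
		fmt.Printf("%s: %v\n", s, matchesFilter(s))
	}
}
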
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go
new file mode 100644
index 0000000..3c3f28a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go
@@ -0,0 +1,358 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/v2/log_entry.proto
+// DO NOT EDIT!
+
+/*
+Package v2 is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/logging/v2/log_entry.proto
+	google.golang.org/genproto/googleapis/logging/v2/logging.proto
+	google.golang.org/genproto/googleapis/logging/v2/logging_config.proto
+	google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto
+
+It has these top-level messages:
+	LogEntry
+	LogEntryOperation
+	DeleteLogRequest
+	WriteLogEntriesRequest
+	WriteLogEntriesResponse
+	ListLogEntriesRequest
+	ListLogEntriesResponse
+	ListMonitoredResourceDescriptorsRequest
+	ListMonitoredResourceDescriptorsResponse
+	LogSink
+	ListSinksRequest
+	ListSinksResponse
+	GetSinkRequest
+	CreateSinkRequest
+	UpdateSinkRequest
+	DeleteSinkRequest
+	LogMetric
+	ListLogMetricsRequest
+	ListLogMetricsResponse
+	GetLogMetricRequest
+	CreateLogMetricRequest
+	UpdateLogMetricRequest
+	DeleteLogMetricRequest
+*/
+package v2
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+import google_api3 "google.golang.org/genproto/googleapis/api/monitoredres"
+import google_logging_type "google.golang.org/genproto/googleapis/logging/type"
+import google_logging_type1 "google.golang.org/genproto/googleapis/logging/type"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/any"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/struct"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// An individual entry in a log.
+type LogEntry struct {
+	// Required. The resource name of the log to which this log entry
+	// belongs. The format of the name is
+	// `"projects/<project-id>/logs/<log-id>"`.  Examples:
+	// `"projects/my-projectid/logs/syslog"`,
+	// `"projects/my-projectid/logs/library.googleapis.com%2Fbook_log"`.
+	//
+	// The log ID part of the resource name must be less than 512 characters
+	// long and can only include the following characters: upper and
+	// lower case alphanumeric characters: [A-Za-z0-9]; and punctuation
+	// characters: forward-slash, underscore, hyphen, and period.
+	// Forward-slash (`/`) characters in the log ID must be URL-encoded.
+	LogName string `protobuf:"bytes,12,opt,name=log_name,json=logName" json:"log_name,omitempty"`
+	// Required. The monitored resource associated with this log entry.
+	// Example: a log entry that reports a database error would be
+	// associated with the monitored resource designating the particular
+	// database that reported the error.
+	Resource *google_api3.MonitoredResource `protobuf:"bytes,8,opt,name=resource" json:"resource,omitempty"`
+	// Optional. The log entry payload, which can be one of multiple types.
+	//
+	// Types that are valid to be assigned to Payload:
+	//	*LogEntry_ProtoPayload
+	//	*LogEntry_TextPayload
+	//	*LogEntry_JsonPayload
+	Payload isLogEntry_Payload `protobuf_oneof:"payload"`
+	// Optional. The time the event described by the log entry occurred.  If
+	// omitted, Stackdriver Logging will use the time the log entry is received.
+	Timestamp *google_protobuf4.Timestamp `protobuf:"bytes,9,opt,name=timestamp" json:"timestamp,omitempty"`
+	// Optional. The severity of the log entry. The default value is
+	// `LogSeverity.DEFAULT`.
+	Severity google_logging_type1.LogSeverity `protobuf:"varint,10,opt,name=severity,enum=google.logging.type.LogSeverity" json:"severity,omitempty"`
+	// Optional. A unique ID for the log entry. If you provide this
+	// field, the logging service considers other log entries in the
+	// same project with the same ID as duplicates which can be removed.  If
+	// omitted, Stackdriver Logging will generate a unique ID for this
+	// log entry.
+	InsertId string `protobuf:"bytes,4,opt,name=insert_id,json=insertId" json:"insert_id,omitempty"`
+	// Optional. Information about the HTTP request associated with this
+	// log entry, if applicable.
+	HttpRequest *google_logging_type.HttpRequest `protobuf:"bytes,7,opt,name=http_request,json=httpRequest" json:"http_request,omitempty"`
+	// Optional. A set of user-defined (key, value) data that provides additional
+	// information about the log entry.
+	Labels map[string]string `protobuf:"bytes,11,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Optional. Information about an operation associated with the log entry, if
+	// applicable.
+	Operation *LogEntryOperation `protobuf:"bytes,15,opt,name=operation" json:"operation,omitempty"`
+}
+
+func (m *LogEntry) Reset()                    { *m = LogEntry{} }
+func (m *LogEntry) String() string            { return proto.CompactTextString(m) }
+func (*LogEntry) ProtoMessage()               {}
+func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type isLogEntry_Payload interface {
+	isLogEntry_Payload()
+}
+
+type LogEntry_ProtoPayload struct {
+	ProtoPayload *google_protobuf2.Any `protobuf:"bytes,2,opt,name=proto_payload,json=protoPayload,oneof"`
+}
+type LogEntry_TextPayload struct {
+	TextPayload string `protobuf:"bytes,3,opt,name=text_payload,json=textPayload,oneof"`
+}
+type LogEntry_JsonPayload struct {
+	JsonPayload *google_protobuf3.Struct `protobuf:"bytes,6,opt,name=json_payload,json=jsonPayload,oneof"`
+}
+
+func (*LogEntry_ProtoPayload) isLogEntry_Payload() {}
+func (*LogEntry_TextPayload) isLogEntry_Payload()  {}
+func (*LogEntry_JsonPayload) isLogEntry_Payload()  {}
+
+func (m *LogEntry) GetPayload() isLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *LogEntry) GetResource() *google_api3.MonitoredResource {
+	if m != nil {
+		return m.Resource
+	}
+	return nil
+}
+
+func (m *LogEntry) GetProtoPayload() *google_protobuf2.Any {
+	if x, ok := m.GetPayload().(*LogEntry_ProtoPayload); ok {
+		return x.ProtoPayload
+	}
+	return nil
+}
+
+func (m *LogEntry) GetTextPayload() string {
+	if x, ok := m.GetPayload().(*LogEntry_TextPayload); ok {
+		return x.TextPayload
+	}
+	return ""
+}
+
+func (m *LogEntry) GetJsonPayload() *google_protobuf3.Struct {
+	if x, ok := m.GetPayload().(*LogEntry_JsonPayload); ok {
+		return x.JsonPayload
+	}
+	return nil
+}
+
+func (m *LogEntry) GetTimestamp() *google_protobuf4.Timestamp {
+	if m != nil {
+		return m.Timestamp
+	}
+	return nil
+}
+
+func (m *LogEntry) GetHttpRequest() *google_logging_type.HttpRequest {
+	if m != nil {
+		return m.HttpRequest
+	}
+	return nil
+}
+
+func (m *LogEntry) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func (m *LogEntry) GetOperation() *LogEntryOperation {
+	if m != nil {
+		return m.Operation
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*LogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _LogEntry_OneofMarshaler, _LogEntry_OneofUnmarshaler, _LogEntry_OneofSizer, []interface{}{
+		(*LogEntry_ProtoPayload)(nil),
+		(*LogEntry_TextPayload)(nil),
+		(*LogEntry_JsonPayload)(nil),
+	}
+}
+
+func _LogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*LogEntry)
+	// payload
+	switch x := m.Payload.(type) {
+	case *LogEntry_ProtoPayload:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ProtoPayload); err != nil {
+			return err
+		}
+	case *LogEntry_TextPayload:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.TextPayload)
+	case *LogEntry_JsonPayload:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.JsonPayload); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("LogEntry.Payload has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _LogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*LogEntry)
+	switch tag {
+	case 2: // payload.proto_payload
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(google_protobuf2.Any)
+		err := b.DecodeMessage(msg)
+		m.Payload = &LogEntry_ProtoPayload{msg}
+		return true, err
+	case 3: // payload.text_payload
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Payload = &LogEntry_TextPayload{x}
+		return true, err
+	case 6: // payload.json_payload
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(google_protobuf3.Struct)
+		err := b.DecodeMessage(msg)
+		m.Payload = &LogEntry_JsonPayload{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _LogEntry_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*LogEntry)
+	// payload
+	switch x := m.Payload.(type) {
+	case *LogEntry_ProtoPayload:
+		s := proto.Size(x.ProtoPayload)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *LogEntry_TextPayload:
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.TextPayload)))
+		n += len(x.TextPayload)
+	case *LogEntry_JsonPayload:
+		s := proto.Size(x.JsonPayload)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// Additional information about a potentially long-running operation with which
+// a log entry is associated.
+type LogEntryOperation struct {
+	// Optional. An arbitrary operation identifier. Log entries with the
+	// same identifier are assumed to be part of the same operation.
+	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	// Optional. An arbitrary producer identifier. The combination of
+	// `id` and `producer` must be globally unique.  Examples for `producer`:
+	// `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
+	Producer string `protobuf:"bytes,2,opt,name=producer" json:"producer,omitempty"`
+	// Optional. Set this to True if this is the first log entry in the operation.
+	First bool `protobuf:"varint,3,opt,name=first" json:"first,omitempty"`
+	// Optional. Set this to True if this is the last log entry in the operation.
+	Last bool `protobuf:"varint,4,opt,name=last" json:"last,omitempty"`
+}
+
+func (m *LogEntryOperation) Reset()                    { *m = LogEntryOperation{} }
+func (m *LogEntryOperation) String() string            { return proto.CompactTextString(m) }
+func (*LogEntryOperation) ProtoMessage()               {}
+func (*LogEntryOperation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func init() {
+	proto.RegisterType((*LogEntry)(nil), "google.logging.v2.LogEntry")
+	proto.RegisterType((*LogEntryOperation)(nil), "google.logging.v2.LogEntryOperation")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/v2/log_entry.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 617 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x54, 0x5f, 0x6f, 0xd3, 0x3e,
+	0x14, 0xfd, 0x65, 0xff, 0x9a, 0xb8, 0xdd, 0x7e, 0xcc, 0x1a, 0x22, 0x04, 0x21, 0x55, 0x03, 0x09,
+	0x9e, 0x1c, 0x54, 0x84, 0xb4, 0x31, 0x24, 0xa0, 0x68, 0xd2, 0x26, 0x0d, 0x36, 0x79, 0x3c, 0x21,
+	0xa1, 0x2a, 0x4d, 0x5d, 0xd7, 0x90, 0xda, 0xc1, 0x71, 0x2a, 0xf2, 0x81, 0xf8, 0x8e, 0x3c, 0x62,
+	0x3b, 0x4e, 0x3a, 0x51, 0xc4, 0x36, 0xf1, 0xd0, 0xf6, 0x5e, 0xfb, 0x9c, 0x73, 0x7d, 0xcf, 0xb5,
+	0x0b, 0xde, 0x50, 0x21, 0x68, 0x46, 0x10, 0x15, 0x59, 0xc2, 0x29, 0x12, 0x92, 0xc6, 0x94, 0xf0,
+	0x5c, 0x0a, 0x25, 0xe2, 0x7a, 0x2b, 0xc9, 0x59, 0x11, 0x67, 0x82, 0x52, 0xc6, 0x69, 0xbc, 0x18,
+	0x98, 0x70, 0x44, 0xb8, 0x92, 0x15, 0xb2, 0x28, 0xb8, 0xeb, 0x14, 0x1c, 0x04, 0x2d, 0x06, 0xd1,
+	0xe9, 0xcd, 0x44, 0xf5, 0x57, 0x5c, 0x10, 0xb9, 0x60, 0x29, 0x49, 0x05, 0x9f, 0x32, 0x1a, 0x27,
+	0x9c, 0x0b, 0x95, 0x28, 0x26, 0x78, 0x51, 0xab, 0x47, 0xe7, 0x37, 0x97, 0x9a, 0x0b, 0xce, 0x94,
+	0x90, 0x64, 0x22, 0x49, 0xb1, 0x4c, 0x46, 0x3a, 0x13, 0xa5, 0x4c, 0x89, 0x13, 0x3c, 0xbe, 0x5d,
+	0xc3, 0xaa, 0xca, 0x49, 0x3c, 0x53, 0x2a, 0xd7, 0x3a, 0xdf, 0x4a, 0x52, 0xa8, 0x7f, 0x90, 0x31,
+	0xce, 0x15, 0x64, 0x41, 0x24, 0x53, 0xce, 0xbc, 0x28, 0xa6, 0x4c, 0xcd, 0xca, 0x31, 0x4a, 0xc5,
+	0x3c, 0xae, 0xa5, 0x62, 0xbb, 0x31, 0x2e, 0xa7, 0x71, 0x6e, 0x48, 0xba, 0x35, 0x5e, 0x99, 0x8f,
+	0x23, 0xbc, 0xb8, 0x9e, 0x50, 0x28, 0x59, 0xa6, 0xca, 0xfd, 0x38, 0xda, 0xd1, 0xf5, 0x34, 0xc5,
+	0xe6, 0xba, 0xbd, 0x64, 0x9e, 0x2f, 0xa3, 0x9a, 0xbc, 0xff, 0x63, 0x13, 0xf8, 0x67, 0x82, 0x1e,
+	0x9b, 0xa1, 0xc3, 0xfb, 0xc0, 0x37, 0x7d, 0xf0, 0x64, 0x4e, 0xc2, 0x5e, 0xdf, 0x7b, 0x1a, 0xe0,
+	0x8e, 0xce, 0x3f, 0xe8, 0x14, 0x1e, 0x02, 0xbf, 0x31, 0x3b, 0xf4, 0xf5, 0x56, 0x77, 0xf0, 0x10,
+	0x39, 0x9b, 0xb4, 0x19, 0xe8, 0x7d, 0x33, 0x12, 0xec, 0x40, 0xb8, 0x85, 0xc3, 0x23, 0xb0, 0x6d,
+	0x6b, 0x8d, 0xf2, 0xa4, 0xca, 0x44, 0x32, 0x09, 0xd7, 0x2c, 0x7f, 0xaf, 0xe1, 0x37, 0x87, 0x45,
+	0x6f, 0x79, 0x75, 0xf2, 0x1f, 0xee, 0xd9, 0xfc, 0xa2, 0xc6, 0xc2, 0x47, 0xa0, 0xa7, 0xc8, 0x77,
+	0xd5, 0x72, 0xd7, 0xcd, 0xb1, 0x34, 0xaa, 0x6b, 0x56, 0x1b, 0xd0, 0x2b, 0xd0, 0xfb, 0x52, 0x08,
+	0xde, 0x82, 0xb6, 0x6c, 0x81, 0x7b, 0x2b, 0x05, 0x2e, 0xad, 0x6d, 0x86, 0x6d, 0xe0, 0x0d, 0xfb,
+	0x00, 0x04, 0xad, 0x2b, 0x61, 0x60, 0xa9, 0xd1, 0x0a, 0xf5, 0x63, 0x83, 0xc0, 0x4b, 0xb0, 0xae,
+	0xeb, 0x37, 0x33, 0x0f, 0x81, 0x26, 0xee, 0x0c, 0xfa, 0xe8, 0xb7, 0x17, 0x63, 0xfc, 0x47, 0xda,
+	0xe0, 0x4b, 0x87, 0xc3, 0x2d, 0x03, 0x3e, 0x00, 0x01, 0xe3, 0xfa, 0x8d, 0xa8, 0x11, 0x9b, 0x84,
+	0x1b, 0xd6, 0x6e, 0xbf, 0x5e, 0x38, 0x9d, 0xc0, 0x77, 0xa0, 0x77, 0xf5, 0x66, 0x86, 0x1d, 0x7b,
+	0xae, 0x3f, 0xcb, 0x9f, 0x68, 0x20, 0xae, 0x71, 0xb8, 0x3b, 0x5b, 0x26, 0xf0, 0x35, 0xd8, 0xca,
+	0x92, 0x31, 0xc9, 0x8a, 0xb0, 0xdb, 0x5f, 0xd7, 0xf4, 0x27, 0x68, 0xe5, 0x3d, 0xa3, 0x66, 0xf8,
+	0xe8, 0xcc, 0x22, 0x6d, 0x8c, 0x1d, 0x0d, 0x0e, 0x41, 0x20, 0x72, 0x22, 0xed, 0xab, 0x0d, 0xff,
+	0xb7, 0x47, 0x78, 0xfc, 0x17, 0x8d, 0xf3, 0x06, 0x8b, 0x97, 0xb4, 0xe8, 0x10, 0x74, 0xaf, 0x48,
+	0xc3, 0x3b, 0x60, 0xfd, 0x2b, 0xa9, 0x42, 0xcf, 0xf6, 0x6b, 0x42, 0xb8, 0x07, 0x36, 0x17, 0x49,
+	0x56, 0x12, 0x7b, 0x2f, 0x02, 0x5c, 0x27, 0x2f, 0xd7, 0x0e, 0xbc, 0x61, 0x00, 0x3a, 0x6e, 0xa4,
+	0xfb, 0x0c, 0xec, 0xae, 0x54, 0x81, 0x3b, 0x60, 0x4d, 0x5b, 0x57, 0x4b, 0xe9, 0x08, 0x46, 0xc0,
+	0xd7, 0x03, 0x9b, 0x94, 0x29, 0x91, 0x4e, 0xac, 0xcd, 0x4d, 0x95, 0x29, 0x93, 0xda, 0x49, 0x73,
+	0x83, 0x7c, 0x5c, 0x27, 0x10, 0x82, 0x8d, 0x2c, 0xd1, 0x8b, 0x1b, 0x76, 0xd1, 0xc6, 0xc3, 0xcf,
+	0xe0, 0xae, 0x7e, 0x4a, 0xab, 0x6d, 0x0e, 0xb7, 0x9b, 0x13, 0x5c, 0xd8, 0x1b, 0xea, 0x7d, 0x7a,
+	0x76, 0xdb, 0x3f, 0xd8, 0x9f, 0x9e, 0x37, 0xde, 0xb2, 0xfb, 0xcf, 0x7f, 0x05, 0x00, 0x00, 0xff,
+	0xff, 0x1e, 0x4b, 0x59, 0x17, 0x9e, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.proto b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.proto
new file mode 100644
index 0000000..709a98f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.proto
@@ -0,0 +1,115 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto"; // from google/api/monitored_resource.proto
+import "google.golang.org/genproto/googleapis/logging/type/http_request.proto"; // from google/logging/type/http_request.proto
+import "google.golang.org/genproto/googleapis/logging/type/log_severity.proto"; // from google/logging/type/log_severity.proto
+import "github.com/golang/protobuf/ptypes/any/any.proto"; // from google/protobuf/any.proto
+import "github.com/golang/protobuf/ptypes/struct/struct.proto"; // from google/protobuf/struct.proto
+import "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto"; // from google/protobuf/timestamp.proto
+
+option cc_enable_arenas = true;
+option java_multiple_files = true;
+option java_outer_classname = "LogEntryProto";
+option java_package = "com.google.logging.v2";
+
+option go_package = "google.golang.org/genproto/googleapis/logging/v2";
+
+// An individual entry in a log.
+message LogEntry {
+  // Required. The resource name of the log to which this log entry
+  // belongs. The format of the name is
+  // `"projects/<project-id>/logs/<log-id>"`.  Examples:
+  // `"projects/my-projectid/logs/syslog"`,
+  // `"projects/my-projectid/logs/library.googleapis.com%2Fbook_log"`.
+  //
+  // The log ID part of the resource name must be less than 512 characters
+  // long and can only include the following characters: upper and
+  // lower case alphanumeric characters: [A-Za-z0-9]; and punctuation
+  // characters: forward-slash, underscore, hyphen, and period.
+  // Forward-slash (`/`) characters in the log ID must be URL-encoded.
+  string log_name = 12;
+
+  // Required. The monitored resource associated with this log entry.
+  // Example: a log entry that reports a database error would be
+  // associated with the monitored resource designating the particular
+  // database that reported the error.
+  google.api.MonitoredResource resource = 8;
+
+  // Optional. The log entry payload, which can be one of multiple types.
+  oneof payload {
+    // The log entry payload, represented as a protocol buffer.  Some
+    // Google Cloud Platform services use this field for their log
+    // entry payloads.
+    google.protobuf.Any proto_payload = 2;
+
+    // The log entry payload, represented as a Unicode string (UTF-8).
+    string text_payload = 3;
+
+    // The log entry payload, represented as a structure that
+    // is expressed as a JSON object.
+    google.protobuf.Struct json_payload = 6;
+  }
+
+  // Optional. The time the event described by the log entry occurred.  If
+  // omitted, Stackdriver Logging will use the time the log entry is received.
+  google.protobuf.Timestamp timestamp = 9;
+
+  // Optional. The severity of the log entry. The default value is
+  // `LogSeverity.DEFAULT`.
+  google.logging.type.LogSeverity severity = 10;
+
+  // Optional. A unique ID for the log entry. If you provide this
+  // field, the logging service considers other log entries in the
+  // same project with the same ID as duplicates which can be removed.  If
+  // omitted, Stackdriver Logging will generate a unique ID for this
+  // log entry.
+  string insert_id = 4;
+
+  // Optional. Information about the HTTP request associated with this
+  // log entry, if applicable.
+  google.logging.type.HttpRequest http_request = 7;
+
+  // Optional. A set of user-defined (key, value) data that provides additional
+  // information about the log entry.
+  map<string, string> labels = 11;
+
+  // Optional. Information about an operation associated with the log entry, if
+  // applicable.
+  LogEntryOperation operation = 15;
+}
+
+// Additional information about a potentially long-running operation with which
+// a log entry is associated.
+message LogEntryOperation {
+  // Optional. An arbitrary operation identifier. Log entries with the
+  // same identifier are assumed to be part of the same operation.
+  string id = 1;
+
+  // Optional. An arbitrary producer identifier. The combination of
+  // `id` and `producer` must be globally unique.  Examples for `producer`:
+  // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
+  string producer = 2;
+
+  // Optional. Set this to True if this is the first log entry in the operation.
+  bool first = 3;
+
+  // Optional. Set this to True if this is the last log entry in the operation.
+  bool last = 4;
+}
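
To show how the generated pieces above fit together (illustrative only, not part of the vendored files): the LogEntry oneof payload is set through the generated wrapper structs such as LogEntry_TextPayload. A minimal sketch with assumed import aliases and placeholder values:

package main

import (
	"fmt"
	"time"

	tspb "github.com/golang/protobuf/ptypes/timestamp"
	ltype "google.golang.org/genproto/googleapis/logging/type"
	logpb "google.golang.org/genproto/googleapis/logging/v2"
)

func main() {
	now := time.Now()
	// Field and wrapper names follow the generated log_entry.pb.go above;
	// the log name, payload text, and labels are placeholders.
	entry := &logpb.LogEntry{
		LogName:   "projects/my-projectid/logs/syslog",
		Severity:  ltype.LogSeverity_NOTICE,
		Payload:   &logpb.LogEntry_TextPayload{TextPayload: "service started"},
		Timestamp: &tspb.Timestamp{Seconds: now.Unix(), Nanos: int32(now.Nanosecond())},
		Labels:    map[string]string{"env": "prod"},
	}
	fmt.Println(entry.GetTextPayload(), entry.GetLabels()["env"])
}
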
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go
new file mode 100644
index 0000000..98c0afa
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go
@@ -0,0 +1,479 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/v2/logging.proto
+// DO NOT EDIT!
+
+package v2
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+import google_api3 "google.golang.org/genproto/googleapis/api/monitoredres"
+import _ "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/golang/protobuf/ptypes/timestamp"
+import _ "google.golang.org/genproto/googleapis/rpc/status"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The parameters to DeleteLog.
+type DeleteLogRequest struct {
+	// Required. The resource name of the log to delete.  Example:
+	// `"projects/my-project/logs/syslog"`.
+	LogName string `protobuf:"bytes,1,opt,name=log_name,json=logName" json:"log_name,omitempty"`
+}
+
+func (m *DeleteLogRequest) Reset()                    { *m = DeleteLogRequest{} }
+func (m *DeleteLogRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteLogRequest) ProtoMessage()               {}
+func (*DeleteLogRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+// The parameters to WriteLogEntries.
+type WriteLogEntriesRequest struct {
+	// Optional. A default log resource name that is assigned to all log entries
+	// in `entries` that do not specify a value for `log_name`.  Example:
+	// `"projects/my-project/logs/syslog"`.  See
+	// [LogEntry][google.logging.v2.LogEntry].
+	LogName string `protobuf:"bytes,1,opt,name=log_name,json=logName" json:"log_name,omitempty"`
+	// Optional. A default monitored resource object that is assigned to all log
+	// entries in `entries` that do not specify a value for `resource`. Example:
+	//
+	//     { "type": "gce_instance",
+	//       "labels": {
+	//         "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+	//
+	// See [LogEntry][google.logging.v2.LogEntry].
+	Resource *google_api3.MonitoredResource `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"`
+	// Optional. Default labels that are added to the `labels` field of all log
+	// entries in `entries`. If a log entry already has a label with the same key
+	// as a label in this parameter, then the log entry's label is not changed.
+	// See [LogEntry][google.logging.v2.LogEntry].
+	Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Required. The log entries to write. Values supplied for the fields
+	// `log_name`, `resource`, and `labels` in this `entries.write` request are
+	// added to those log entries that do not provide their own values for the
+	// fields.
+	//
+	// To improve throughput and to avoid exceeding the
+	// [quota limit](/logging/quota-policy) for calls to `entries.write`,
+	// you should write multiple log entries at once rather than
+	// calling this method for each individual log entry.
+	Entries []*LogEntry `protobuf:"bytes,4,rep,name=entries" json:"entries,omitempty"`
+	// Optional. Whether valid entries should be written even if some other
+	// entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
+	// entry is not written, the response status will be the error associated
+	// with one of the failed entries and include error details in the form of
+	// WriteLogEntriesPartialErrors.
+	PartialSuccess bool `protobuf:"varint,5,opt,name=partial_success,json=partialSuccess" json:"partial_success,omitempty"`
+}
+
+func (m *WriteLogEntriesRequest) Reset()                    { *m = WriteLogEntriesRequest{} }
+func (m *WriteLogEntriesRequest) String() string            { return proto.CompactTextString(m) }
+func (*WriteLogEntriesRequest) ProtoMessage()               {}
+func (*WriteLogEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *WriteLogEntriesRequest) GetResource() *google_api3.MonitoredResource {
+	if m != nil {
+		return m.Resource
+	}
+	return nil
+}
+
+func (m *WriteLogEntriesRequest) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func (m *WriteLogEntriesRequest) GetEntries() []*LogEntry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+// Result returned from WriteLogEntries.
+// empty
+type WriteLogEntriesResponse struct {
+}
+
+func (m *WriteLogEntriesResponse) Reset()                    { *m = WriteLogEntriesResponse{} }
+func (m *WriteLogEntriesResponse) String() string            { return proto.CompactTextString(m) }
+func (*WriteLogEntriesResponse) ProtoMessage()               {}
+func (*WriteLogEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+// The parameters to `ListLogEntries`.
+type ListLogEntriesRequest struct {
+	// Deprecated. One or more project identifiers or project numbers from which
+	// to retrieve log entries.  Examples: `"my-project-1A"`, `"1234567890"`. If
+	// present, these project identifiers are converted to resource format and
+	// added to the list of resources in `resourceNames`. Callers should use
+	// `resourceNames` rather than this parameter.
+	ProjectIds []string `protobuf:"bytes,1,rep,name=project_ids,json=projectIds" json:"project_ids,omitempty"`
+	// Optional. One or more cloud resources from which to retrieve log entries.
+	// Example: `"projects/my-project-1A"`, `"projects/1234567890"`.  Projects
+	// listed in `projectIds` are added to this list.
+	ResourceNames []string `protobuf:"bytes,8,rep,name=resource_names,json=resourceNames" json:"resource_names,omitempty"`
+	// Optional. A filter that chooses which log entries to return.  See [Advanced
+	// Logs Filters](/logging/docs/view/advanced_filters).  Only log entries that
+	// match the filter are returned.  An empty filter matches all log entries.
+	Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"`
+	// Optional. How the results should be sorted.  Presently, the only permitted
+	// values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
+	// option returns entries in order of increasing values of
+	// `LogEntry.timestamp` (oldest first), and the second option returns entries
+	// in order of decreasing timestamps (newest first).  Entries with equal
+	// timestamps are returned in order of `LogEntry.insertId`.
+	OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy" json:"order_by,omitempty"`
+	// Optional. The maximum number of results to return from this request.
+	// Non-positive values are ignored.  The presence of `nextPageToken` in the
+	// response indicates that more results might be available.
+	PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// Optional. If present, then retrieve the next batch of results from the
+	// preceding call to this method.  `pageToken` must be the value of
+	// `nextPageToken` from the previous response.  The values of other method
+	// parameters should be identical to those in the previous call.
+	PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListLogEntriesRequest) Reset()                    { *m = ListLogEntriesRequest{} }
+func (m *ListLogEntriesRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListLogEntriesRequest) ProtoMessage()               {}
+func (*ListLogEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+// Result returned from `ListLogEntries`.
+type ListLogEntriesResponse struct {
+	// A list of log entries.
+	Entries []*LogEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
+	// If there might be more results than appear in this response, then
+	// `nextPageToken` is included.  To get the next set of results, call this
+	// method again using the value of `nextPageToken` as `pageToken`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListLogEntriesResponse) Reset()                    { *m = ListLogEntriesResponse{} }
+func (m *ListLogEntriesResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListLogEntriesResponse) ProtoMessage()               {}
+func (*ListLogEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
+
+func (m *ListLogEntriesResponse) GetEntries() []*LogEntry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+// The parameters to ListMonitoredResourceDescriptors
+type ListMonitoredResourceDescriptorsRequest struct {
+	// Optional. The maximum number of results to return from this request.
+	// Non-positive values are ignored.  The presence of `nextPageToken` in the
+	// response indicates that more results might be available.
+	PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// Optional. If present, then retrieve the next batch of results from the
+	// preceding call to this method.  `pageToken` must be the value of
+	// `nextPageToken` from the previous response.  The values of other method
+	// parameters should be identical to those in the previous call.
+	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) Reset() {
+	*m = ListMonitoredResourceDescriptorsRequest{}
+}
+func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage()    {}
+func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{5}
+}
+
+// Result returned from ListMonitoredResourceDescriptors.
+type ListMonitoredResourceDescriptorsResponse struct {
+	// A list of resource descriptors.
+	ResourceDescriptors []*google_api3.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors" json:"resource_descriptors,omitempty"`
+	// If there might be more results than appear in this response, then
+	// `nextPageToken` is included.  To get the next set of results, call this
+	// method again using the value of `nextPageToken` as `pageToken`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListMonitoredResourceDescriptorsResponse) Reset() {
+	*m = ListMonitoredResourceDescriptorsResponse{}
+}
+func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage()    {}
+func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{6}
+}
+
+func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*google_api3.MonitoredResourceDescriptor {
+	if m != nil {
+		return m.ResourceDescriptors
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*DeleteLogRequest)(nil), "google.logging.v2.DeleteLogRequest")
+	proto.RegisterType((*WriteLogEntriesRequest)(nil), "google.logging.v2.WriteLogEntriesRequest")
+	proto.RegisterType((*WriteLogEntriesResponse)(nil), "google.logging.v2.WriteLogEntriesResponse")
+	proto.RegisterType((*ListLogEntriesRequest)(nil), "google.logging.v2.ListLogEntriesRequest")
+	proto.RegisterType((*ListLogEntriesResponse)(nil), "google.logging.v2.ListLogEntriesResponse")
+	proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.logging.v2.ListMonitoredResourceDescriptorsRequest")
+	proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.logging.v2.ListMonitoredResourceDescriptorsResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion3
+
+// Client API for LoggingServiceV2 service
+
+type LoggingServiceV2Client interface {
+	// Deletes a log and all its log entries.
+	// The log will reappear if it receives new entries.
+	DeleteLog(ctx context.Context, in *DeleteLogRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+	// Writes log entries to Stackdriver Logging.  All log entries are
+	// written by this method.
+	WriteLogEntries(ctx context.Context, in *WriteLogEntriesRequest, opts ...grpc.CallOption) (*WriteLogEntriesResponse, error)
+	// Lists log entries.  Use this method to retrieve log entries from Cloud
+	// Logging.  For ways to export log entries, see
+	// [Exporting Logs](/logging/docs/export).
+	ListLogEntries(ctx context.Context, in *ListLogEntriesRequest, opts ...grpc.CallOption) (*ListLogEntriesResponse, error)
+	// Lists the monitored resource descriptors used by Stackdriver Logging.
+	ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error)
+}
+
+type loggingServiceV2Client struct {
+	cc *grpc.ClientConn
+}
+
+func NewLoggingServiceV2Client(cc *grpc.ClientConn) LoggingServiceV2Client {
+	return &loggingServiceV2Client{cc}
+}
+
+func (c *loggingServiceV2Client) DeleteLog(ctx context.Context, in *DeleteLogRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+	out := new(google_protobuf5.Empty)
+	err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/DeleteLog", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *loggingServiceV2Client) WriteLogEntries(ctx context.Context, in *WriteLogEntriesRequest, opts ...grpc.CallOption) (*WriteLogEntriesResponse, error) {
+	out := new(WriteLogEntriesResponse)
+	err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/WriteLogEntries", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *loggingServiceV2Client) ListLogEntries(ctx context.Context, in *ListLogEntriesRequest, opts ...grpc.CallOption) (*ListLogEntriesResponse, error) {
+	out := new(ListLogEntriesResponse)
+	err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/ListLogEntries", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *loggingServiceV2Client) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
+	out := new(ListMonitoredResourceDescriptorsResponse)
+	err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for LoggingServiceV2 service
+
+type LoggingServiceV2Server interface {
+	// Deletes a log and all its log entries.
+	// The log will reappear if it receives new entries.
+	DeleteLog(context.Context, *DeleteLogRequest) (*google_protobuf5.Empty, error)
+	// Writes log entries to Stackdriver Logging.  All log entries are
+	// written by this method.
+	WriteLogEntries(context.Context, *WriteLogEntriesRequest) (*WriteLogEntriesResponse, error)
+	// Lists log entries.  Use this method to retrieve log entries from Cloud
+	// Logging.  For ways to export log entries, see
+	// [Exporting Logs](/logging/docs/export).
+	ListLogEntries(context.Context, *ListLogEntriesRequest) (*ListLogEntriesResponse, error)
+	// Lists the monitored resource descriptors used by Stackdriver Logging.
+	ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error)
+}
+
+func RegisterLoggingServiceV2Server(s *grpc.Server, srv LoggingServiceV2Server) {
+	s.RegisterService(&_LoggingServiceV2_serviceDesc, srv)
+}
+
+func _LoggingServiceV2_DeleteLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteLogRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LoggingServiceV2Server).DeleteLog(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.LoggingServiceV2/DeleteLog",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LoggingServiceV2Server).DeleteLog(ctx, req.(*DeleteLogRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _LoggingServiceV2_WriteLogEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(WriteLogEntriesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LoggingServiceV2Server).WriteLogEntries(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.LoggingServiceV2/WriteLogEntries",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LoggingServiceV2Server).WriteLogEntries(ctx, req.(*WriteLogEntriesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _LoggingServiceV2_ListLogEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListLogEntriesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LoggingServiceV2Server).ListLogEntries(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.LoggingServiceV2/ListLogEntries",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LoggingServiceV2Server).ListLogEntries(ctx, req.(*ListLogEntriesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _LoggingServiceV2_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListMonitoredResourceDescriptorsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LoggingServiceV2Server).ListMonitoredResourceDescriptors(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LoggingServiceV2Server).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _LoggingServiceV2_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.logging.v2.LoggingServiceV2",
+	HandlerType: (*LoggingServiceV2Server)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "DeleteLog",
+			Handler:    _LoggingServiceV2_DeleteLog_Handler,
+		},
+		{
+			MethodName: "WriteLogEntries",
+			Handler:    _LoggingServiceV2_WriteLogEntries_Handler,
+		},
+		{
+			MethodName: "ListLogEntries",
+			Handler:    _LoggingServiceV2_ListLogEntries_Handler,
+		},
+		{
+			MethodName: "ListMonitoredResourceDescriptors",
+			Handler:    _LoggingServiceV2_ListMonitoredResourceDescriptors_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptor1,
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/v2/logging.proto", fileDescriptor1)
+}
+
+var fileDescriptor1 = []byte{
+	// 846 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xeb, 0x44,
+	0x10, 0x96, 0x93, 0xd7, 0xbe, 0x64, 0xcb, 0x6b, 0xf3, 0x96, 0xd7, 0x90, 0x97, 0xf2, 0x44, 0x64,
+	0x54, 0x92, 0x46, 0xaa, 0x0d, 0xa9, 0x2a, 0xd1, 0x54, 0x45, 0xa8, 0x6a, 0x0f, 0x95, 0x52, 0xa8,
+	0x5c, 0x04, 0x52, 0x85, 0x14, 0x39, 0xce, 0xd6, 0x2c, 0x75, 0xbc, 0x66, 0x77, 0x9d, 0x12, 0x10,
+	0x97, 0x1e, 0xf8, 0x03, 0xfc, 0x0f, 0xfe, 0x05, 0x57, 0x2e, 0x5c, 0xb8, 0x22, 0xf1, 0x23, 0x38,
+	0xb2, 0xbb, 0x5e, 0x27, 0x69, 0x92, 0xd7, 0xb8, 0xbd, 0x24, 0x3b, 0xb3, 0x33, 0xdf, 0xcc, 0x37,
+	0x33, 0x3b, 0x06, 0x9f, 0xf9, 0x84, 0xf8, 0x01, 0xb2, 0x7c, 0x12, 0xb8, 0xa1, 0x6f, 0x11, 0xea,
+	0xdb, 0x3e, 0x0a, 0x23, 0x4a, 0x38, 0xb1, 0x93, 0x2b, 0x37, 0xc2, 0xcc, 0x0e, 0x88, 0xef, 0xe3,
+	0xd0, 0xb7, 0x87, 0xad, 0xf4, 0x68, 0x29, 0x1b, 0xf8, 0x52, 0xfb, 0xa7, 0xda, 0x61, 0xab, 0x7a,
+	0x96, 0x0d, 0x52, 0xfc, 0xd8, 0x0c, 0xd1, 0x21, 0xf6, 0x90, 0x47, 0xc2, 0x6b, 0xec, 0xdb, 0x6e,
+	0x18, 0x12, 0xee, 0x72, 0x4c, 0x42, 0x96, 0xa0, 0x57, 0xbf, 0xcc, 0x0e, 0x35, 0x20, 0x21, 0xe6,
+	0x84, 0xa2, 0x3e, 0x45, 0x6c, 0x22, 0x74, 0x85, 0x44, 0x62, 0xea, 0x21, 0x0d, 0xf8, 0xf9, 0x53,
+	0xe8, 0x76, 0x51, 0xc8, 0xe9, 0x48, 0x23, 0x1c, 0xf8, 0x98, 0x7f, 0x17, 0xf7, 0x2c, 0x8f, 0x0c,
+	0xec, 0x04, 0xc5, 0x56, 0x17, 0xbd, 0xf8, 0xda, 0x8e, 0xf8, 0x28, 0x12, 0xd1, 0xfb, 0x31, 0x55,
+	0x2c, 0xc6, 0x07, 0xed, 0xba, 0xb7, 0xdc, 0x15, 0x0d, 0xc4, 0x21, 0xf9, 0xd5, 0x4e, 0x87, 0xcb,
+	0x9d, 0x38, 0x1e, 0x20, 0xc6, 0xdd, 0x41, 0x34, 0x39, 0x69, 0xe7, 0xa3, 0x6c, 0x74, 0x69, 0xe4,
+	0xd9, 0xc2, 0x8d, 0xc7, 0x4c, 0xff, 0x25, 0xee, 0xe6, 0x2e, 0x28, 0x9d, 0xa0, 0x00, 0x71, 0xd4,
+	0x21, 0xbe, 0x83, 0x7e, 0x88, 0x05, 0x36, 0x7c, 0x0d, 0x0a, 0xb2, 0x24, 0xa1, 0x3b, 0x40, 0x15,
+	0xa3, 0x66, 0x34, 0x8a, 0xce, 0x73, 0x21, 0x7f, 0x21, 0x44, 0xf3, 0xef, 0x1c, 0x28, 0x7f, 0x43,
+	0xb1, 0x32, 0x3f, 0x15, 0x25, 0xc3, 0x88, 0x2d, 0xf7, 0x82, 0x07, 0xa0, 0x90, 0x36, 0xa9, 0x92,
+	0x13, 0x57, 0x6b, 0xad, 0x37, 0x96, 0x4e, 0x5b, 0x24, 0x67, 0x9d, 0xa7, 0xad, 0x74, 0xb4, 0x91,
+	0x33, 0x36, 0x87, 0xe7, 0x60, 0x35, 0x70, 0x7b, 0x28, 0x60, 0x95, 0x7c, 0x2d, 0x2f, 0x1c, 0xf7,
+	0xad, 0xb9, 0x69, 0xb4, 0x16, 0x27, 0x64, 0x75, 0x94, 0x9f, 0x54, 0x8e, 0x1c, 0x0d, 0x02, 0xf7,
+	0xc1, 0x73, 0x94, 0x58, 0x55, 0x9e, 0x29, 0xbc, 0xad, 0x05, 0x78, 0x1a, 0x6a, 0xe4, 0xa4, 0xb6,
+	0xb0, 0x0e, 0x36, 0x22, 0x97, 0x72, 0xec, 0x06, 0x5d, 0x16, 0x7b, 0x1e, 0x62, 0xac, 0xb2, 0x22,
+	0x78, 0x14, 0x9c, 0x75, 0xad, 0xbe, 0x4c, 0xb4, 0xd5, 0x03, 0xb0, 0x36, 0x15, 0x16, 0x96, 0x40,
+	0xfe, 0x06, 0x8d, 0x74, 0x39, 0xe4, 0x11, 0xbe, 0x02, 0x2b, 0x43, 0x37, 0x88, 0x93, 0x3a, 0x14,
+	0x9d, 0x44, 0x68, 0xe7, 0x3e, 0x35, 0xcc, 0xd7, 0xe0, 0xbd, 0x39, 0x22, 0x2c, 0x12, 0x0f, 0x05,
+	0x99, 0x7f, 0x1a, 0x60, 0xb3, 0x83, 0x19, 0x9f, 0x2f, 0xfa, 0x07, 0x60, 0x4d, 0xf4, 0xf1, 0x7b,
+	0xe4, 0xf1, 0x2e, 0xee, 0x33, 0x11, 0x28, 0x2f, 0x40, 0x81, 0x56, 0x9d, 0xf5, 0x19, 0xdc, 0x06,
+	0xeb, 0x69, 0x2d, 0x55, 0x6b, 0x58, 0xa5, 0xa0, 0x6c, 0x5e, 0xa4, 0x5a, 0xd9, 0x20, 0x06, 0xcb,
+	0x60, 0xf5, 0x1a, 0x07, 0x1c, 0x51, 0x9d, 0x97, 0x96, 0x64, 0x53, 0x09, 0xed, 0x23, 0xda, 0xed,
+	0x8d, 0x44, 0x03, 0x54, 0x53, 0x95, 0x7c, 0x3c, 0x82, 0x5b, 0xa0, 0x18, 0xb9, 0x3e, 0xea, 0x32,
+	0xfc, 0x13, 0x12, 0xc5, 0x34, 0x1a, 0x2b, 0x4e, 0x41, 0x2a, 0x2e, 0x85, 0x0c, 0xdf, 0x00, 0xa0,
+	0x2e, 0x39, 0xb9, 0x41, 0xa1, 0xaa, 0x55, 0xd1, 0x51, 0xe6, 0x5f, 0x49, 0x85, 0x79, 0x0b, 0xca,
+	0xb3, 0x7c, 0x12, 0xaa, 0xd3, 0x0d, 0x32, 0x1e, 0xd1, 0xa0, 0x8f, 0xc0, 0x46, 0x88, 0x7e, 0xe4,
+	0xdd, 0xa9, 0xa0, 0x09, 0x91, 0x17, 0x52, 0x7d, 0x31, 0x0e, 0x8c, 0x40, 0x5d, 0x06, 0x9e, 0x9b,
+	0xb8, 0x13, 0xc4, 0x3c, 0x8a, 0x23, 0xa1, 0x1b, 0x97, 0xf6, 0x1e, 0x3f, 0xe3, 0x41, 0x7e, 0xb9,
+	0x59, 0x7e, 0xbf, 0x1b, 0xa0, 0xb1, 0x3c, 0x8e, 0xa6, 0x7c, 0x05, 0x5e, 0x8d, 0x5b, 0xd4, 0x9f,
+	0xdc, 0x6b, 0xfe, 0xf5, 0x07, 0x5f, 0xca, 0x04, 0xcf, 0x79, 0x97, 0xce, 0xc7, 0xc8, 0x5a, 0x97,
+	0xd6, 0x3f, 0xcf, 0x40, 0xa9, 0x93, 0x14, 0xf8, 0x32, 0x59, 0xd8, 0x5f, 0xb7, 0xe0, 0x2d, 0x28,
+	0x8e, 0x77, 0x03, 0xfc, 0x70, 0x41, 0x1f, 0x66, 0x37, 0x47, 0xb5, 0x9c, 0x1a, 0xa5, 0xfb, 0xcb,
+	0x3a, 0x95, 0x7b, 0xce, 0xdc, 0xbd, 0xfb, 0xeb, 0xdf, 0xdf, 0x72, 0xf5, 0xe6, 0xb6, 0x58, 0xb7,
+	0x3d, 0xc4, 0xdd, 0x4f, 0xec, 0x9f, 0xd3, 0x5d, 0x71, 0xa4, 0x87, 0x95, 0xd9, 0x4d, 0xb9, 0x88,
+	0xc5, 0xdf, 0x2f, 0xf0, 0x57, 0x03, 0x6c, 0xcc, 0xbc, 0x05, 0xb8, 0x93, 0xf9, 0xe1, 0x57, 0x9b,
+	0x59, 0x4c, 0xf5, 0xd3, 0x7a, 0x5f, 0x65, 0x56, 0x36, 0x5f, 0xca, 0x0f, 0x81, 0x9e, 0xa6, 0xf6,
+	0xad, 0x34, 0x6e, 0x1b, 0x4d, 0x78, 0x67, 0x80, 0xf5, 0xfb, 0x83, 0x0a, 0x1b, 0x8b, 0xe6, 0x71,
+	0xd1, 0xdb, 0xac, 0xee, 0x64, 0xb0, 0xd4, 0x59, 0x6c, 0xa9, 0x2c, 0x36, 0xcd, 0xd2, 0x74, 0x16,
+	0x81, 0xb0, 0x95, 0x49, 0xfc, 0x61, 0x80, 0xda, 0xb2, 0x61, 0x82, 0xed, 0xb7, 0x04, 0xcb, 0x30,
+	0xe9, 0xd5, 0xc3, 0x27, 0xf9, 0xea, 0xd4, 0x1b, 0x2a, 0x75, 0x13, 0xd6, 0x64, 0xea, 0x83, 0x07,
+	0x3c, 0x8e, 0xbf, 0x05, 0x9b, 0xe2, 0x0b, 0x37, 0x1f, 0xeb, 0xf8, 0x1d, 0x3d, 0x79, 0x17, 0x72,
+	0x68, 0x2e, 0x8c, 0xab, 0x8f, 0x1f, 0xfb, 0x05, 0xff, 0xcf, 0x30, 0x7a, 0xab, 0xea, 0x7e, 0xef,
+	0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x93, 0x51, 0x60, 0xee, 0x08, 0x00, 0x00,
+}
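
For reference, a minimal sketch of driving the generated LoggingServiceV2 client above to page through entries. It assumes the caller has already dialed an authenticated *grpc.ClientConn to the logging endpoint (connection and credential setup are not shown), aliases this vendored package as logpb, and relies on the standard protoc-gen-go field names for the request and response types.

package loggingexample

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	logpb "google.golang.org/genproto/googleapis/logging/v2"
)

// listRecentEntries pages through a project's log entries, newest first,
// following nextPageToken until the server reports no further results.
func listRecentEntries(ctx context.Context, cc *grpc.ClientConn, project string) error {
	client := logpb.NewLoggingServiceV2Client(cc)
	req := &logpb.ListLogEntriesRequest{
		ResourceNames: []string{"projects/" + project},
		OrderBy:       "timestamp desc",
		PageSize:      100,
	}
	for {
		resp, err := client.ListLogEntries(ctx, req)
		if err != nil {
			return err
		}
		log.Printf("fetched %d entries", len(resp.Entries))
		if resp.NextPageToken == "" {
			return nil
		}
		req.PageToken = resp.NextPageToken
	}
}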
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.proto b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.proto
new file mode 100644
index 0000000..9258cf7
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.proto
@@ -0,0 +1,190 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.proto"; // from google/api/monitored_resource.proto
+import "google.golang.org/genproto/googleapis/logging/v2/log_entry.proto"; // from google/logging/v2/log_entry.proto
+import "github.com/golang/protobuf/ptypes/duration/duration.proto"; // from google/protobuf/duration.proto
+import "github.com/golang/protobuf/ptypes/empty/empty.proto"; // from google/protobuf/empty.proto
+import "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto"; // from google/protobuf/timestamp.proto
+import "google.golang.org/genproto/googleapis/rpc/status/status.proto"; // from google/rpc/status.proto
+
+option cc_enable_arenas = true;
+option java_multiple_files = true;
+option java_outer_classname = "LoggingProto";
+option java_package = "com.google.logging.v2";
+
+option go_package = "google.golang.org/genproto/googleapis/logging/v2";
+
+// Service for ingesting and querying logs.
+service LoggingServiceV2 {
+  // Deletes a log and all its log entries.
+  // The log will reappear if it receives new entries.
+  rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { delete: "/v2beta1/{log_name=projects/*/logs/*}" };
+  }
+
+  // Writes log entries to Stackdriver Logging.  All log entries are
+  // written by this method.
+  rpc WriteLogEntries(WriteLogEntriesRequest) returns (WriteLogEntriesResponse) {
+    option (google.api.http) = { post: "/v2/entries:write" body: "*" };
+  }
+
+  // Lists log entries.  Use this method to retrieve log entries from Cloud
+  // Logging.  For ways to export log entries, see
+  // [Exporting Logs](/logging/docs/export).
+  rpc ListLogEntries(ListLogEntriesRequest) returns (ListLogEntriesResponse) {
+    option (google.api.http) = { post: "/v2/entries:list" body: "*" };
+  }
+
+  // Lists the monitored resource descriptors used by Stackdriver Logging.
+  rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) {
+    option (google.api.http) = { get: "/v2/monitoredResourceDescriptors" };
+  }
+}
+
+// The parameters to DeleteLog.
+message DeleteLogRequest {
+  // Required. The resource name of the log to delete.  Example:
+  // `"projects/my-project/logs/syslog"`.
+  string log_name = 1;
+}
+
+// The parameters to WriteLogEntries.
+message WriteLogEntriesRequest {
+  // Optional. A default log resource name that is assigned to all log entries
+  // in `entries` that do not specify a value for `log_name`.  Example:
+  // `"projects/my-project/logs/syslog"`.  See
+  // [LogEntry][google.logging.v2.LogEntry].
+  string log_name = 1;
+
+  // Optional. A default monitored resource object that is assigned to all log
+  // entries in `entries` that do not specify a value for `resource`. Example:
+  //
+  //     { "type": "gce_instance",
+  //       "labels": {
+  //         "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+  //
+  // See [LogEntry][google.logging.v2.LogEntry].
+  google.api.MonitoredResource resource = 2;
+
+  // Optional. Default labels that are added to the `labels` field of all log
+  // entries in `entries`. If a log entry already has a label with the same key
+  // as a label in this parameter, then the log entry's label is not changed.
+  // See [LogEntry][google.logging.v2.LogEntry].
+  map<string, string> labels = 3;
+
+  // Required. The log entries to write. Values supplied for the fields
+  // `log_name`, `resource`, and `labels` in this `entries.write` request are
+  // added to those log entries that do not provide their own values for the
+  // fields.
+  //
+  // To improve throughput and to avoid exceeding the
+  // [quota limit](/logging/quota-policy) for calls to `entries.write`,
+  // you should write multiple log entries at once rather than
+  // calling this method for each individual log entry.
+  repeated LogEntry entries = 4;
+
+  // Optional. Whether valid entries should be written even if some other
+  // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
+  // entry is not written, the response status will be the error associated
+  // with one of the failed entries and include error details in the form of
+  // WriteLogEntriesPartialErrors.
+  bool partial_success = 5;
+}
+
+// Result returned from WriteLogEntries.
+// empty
+message WriteLogEntriesResponse {
+
+}
+
+// The parameters to `ListLogEntries`.
+message ListLogEntriesRequest {
+  // Deprecated. One or more project identifiers or project numbers from which
+  // to retrieve log entries.  Examples: `"my-project-1A"`, `"1234567890"`. If
+  // present, these project identifiers are converted to resource format and
+  // added to the list of resources in `resourceNames`. Callers should use
+  // `resourceNames` rather than this parameter.
+  repeated string project_ids = 1;
+
+  // Optional. One or more cloud resources from which to retrieve log entries.
+  // Example: `"projects/my-project-1A"`, `"projects/1234567890"`.  Projects
+  // listed in `projectIds` are added to this list.
+  repeated string resource_names = 8;
+
+  // Optional. A filter that chooses which log entries to return.  See [Advanced
+  // Logs Filters](/logging/docs/view/advanced_filters).  Only log entries that
+  // match the filter are returned.  An empty filter matches all log entries.
+  string filter = 2;
+
+  // Optional. How the results should be sorted.  Presently, the only permitted
+  // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
+  // option returns entries in order of increasing values of
+  // `LogEntry.timestamp` (oldest first), and the second option returns entries
+  // in order of decreasing timestamps (newest first).  Entries with equal
+  // timestamps are returned in order of `LogEntry.insertId`.
+  string order_by = 3;
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored.  The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 4;
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method.  `pageToken` must be the value of
+  // `nextPageToken` from the previous response.  The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 5;
+}
+
+// Result returned from `ListLogEntries`.
+message ListLogEntriesResponse {
+  // A list of log entries.
+  repeated LogEntry entries = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included.  To get the next set of results, call this
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to ListMonitoredResourceDescriptors.
+message ListMonitoredResourceDescriptorsRequest {
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored.  The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 1;
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method.  `pageToken` must be the value of
+  // `nextPageToken` from the previous response.  The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2;
+}
+
+// Result returned from ListMonitoredResourceDescriptors.
+message ListMonitoredResourceDescriptorsResponse {
+  // A list of resource descriptors.
+  repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included.  To get the next set of results, call this
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
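
As the quota note for `entries` recommends, callers should batch entries into a single entries:write call rather than calling the method once per entry. A rough sketch against the generated Go types (continuing with the logpb alias and imports from the sketch above; building the LogEntry values themselves is assumed to happen elsewhere, and the log name and labels are illustrative):

// writeBatch writes pre-built entries in one call, letting the
// request-level log_name and labels act as defaults for entries that
// do not set their own values.
func writeBatch(ctx context.Context, client logpb.LoggingServiceV2Client, project string, entries []*logpb.LogEntry) error {
	_, err := client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
		LogName:        "projects/" + project + "/logs/syslog",
		Labels:         map[string]string{"env": "prod"},
		Entries:        entries,
		PartialSuccess: true,
	})
	return err
}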
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go
new file mode 100644
index 0000000..6a16908
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go
@@ -0,0 +1,494 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/v2/logging_config.proto
+// DO NOT EDIT!
+
+package v2
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/golang/protobuf/ptypes/timestamp"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Available log entry formats. Log entries can be written to Cloud
+// Logging in either format and can be exported in either format.
+// Version 2 is the preferred format.
+type LogSink_VersionFormat int32
+
+const (
+	// An unspecified version format will default to V2.
+	LogSink_VERSION_FORMAT_UNSPECIFIED LogSink_VersionFormat = 0
+	// `LogEntry` version 2 format.
+	LogSink_V2 LogSink_VersionFormat = 1
+	// `LogEntry` version 1 format.
+	LogSink_V1 LogSink_VersionFormat = 2
+)
+
+var LogSink_VersionFormat_name = map[int32]string{
+	0: "VERSION_FORMAT_UNSPECIFIED",
+	1: "V2",
+	2: "V1",
+}
+var LogSink_VersionFormat_value = map[string]int32{
+	"VERSION_FORMAT_UNSPECIFIED": 0,
+	"V2": 1,
+	"V1": 2,
+}
+
+func (x LogSink_VersionFormat) String() string {
+	return proto.EnumName(LogSink_VersionFormat_name, int32(x))
+}
+func (LogSink_VersionFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} }
+
+// Describes a sink used to export log entries outside Stackdriver Logging.
+type LogSink struct {
+	// Required. The client-assigned sink identifier, unique within the
+	// project. Example: `"my-syslog-errors-to-pubsub"`.  Sink identifiers are
+	// limited to 1000 characters and can include only the following characters:
+	// `A-Z`, `a-z`, `0-9`, and the special characters `_-.`.  The maximum length
+	// of the name is 100 characters.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Required. The export destination. See
+	// [Exporting Logs With Sinks](/logging/docs/api/tasks/exporting-logs).
+	// Examples:
+	//
+	//     "storage.googleapis.com/my-gcs-bucket"
+	//     "bigquery.googleapis.com/projects/my-project-id/datasets/my-dataset"
+	//     "pubsub.googleapis.com/projects/my-project/topics/my-topic"
+	Destination string `protobuf:"bytes,3,opt,name=destination" json:"destination,omitempty"`
+	// Optional. An [advanced logs filter](/logging/docs/view/advanced_filters).
+	// Only log entries matching the filter are exported. The filter
+	// must be consistent with the log entry format specified by the
+	// `outputVersionFormat` parameter, regardless of the format of the
+	// log entry that was originally written to Stackdriver Logging.
+	// Example filter (V2 format):
+	//
+	//     logName=projects/my-projectid/logs/syslog AND severity>=ERROR
+	Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"`
+	// Optional. The log entry version to use for this sink's exported log
+	// entries.  This version does not have to correspond to the version of the
+	// log entry that was written to Stackdriver Logging. If omitted, the
+	// v2 format is used.
+	OutputVersionFormat LogSink_VersionFormat `protobuf:"varint,6,opt,name=output_version_format,json=outputVersionFormat,enum=google.logging.v2.LogSink_VersionFormat" json:"output_version_format,omitempty"`
+	// Output only. The iam identity to which the destination needs to grant write
+	// access.  This may be a service account or a group.
+	// Examples (Do not assume these specific values):
+	//    "serviceAccount:cloud-logs@system.gserviceaccount.com"
+	//    "group:cloud-logs@google.com"
+	//
+	//   For GCS destinations, the role "roles/owner" is required on the bucket
+	//   For Cloud Pubsub destinations, the role "roles/pubsub.publisher" is
+	//     required on the topic
+	//   For BigQuery, the role "roles/editor" is required on the dataset
+	WriterIdentity string `protobuf:"bytes,8,opt,name=writer_identity,json=writerIdentity" json:"writer_identity,omitempty"`
+}
+
+func (m *LogSink) Reset()                    { *m = LogSink{} }
+func (m *LogSink) String() string            { return proto.CompactTextString(m) }
+func (*LogSink) ProtoMessage()               {}
+func (*LogSink) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+
+// The parameters to `ListSinks`.
+type ListSinksRequest struct {
+	// Required. The cloud resource containing the sinks.
+	// Example: `"projects/my-logging-project"`.
+	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
+	// Optional. If present, then retrieve the next batch of results from the
+	// preceding call to this method.  `pageToken` must be the value of
+	// `nextPageToken` from the previous response.  The values of other method
+	// parameters should be identical to those in the previous call.
+	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+	// Optional. The maximum number of results to return from this request.
+	// Non-positive values are ignored.  The presence of `nextPageToken` in the
+	// response indicates that more results might be available.
+	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+}
+
+func (m *ListSinksRequest) Reset()                    { *m = ListSinksRequest{} }
+func (m *ListSinksRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListSinksRequest) ProtoMessage()               {}
+func (*ListSinksRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
+
+// Result returned from `ListSinks`.
+type ListSinksResponse struct {
+	// A list of sinks.
+	Sinks []*LogSink `protobuf:"bytes,1,rep,name=sinks" json:"sinks,omitempty"`
+	// If there might be more results than appear in this response, then
+	// `nextPageToken` is included.  To get the next set of results, call the same
+	// method again using the value of `nextPageToken` as `pageToken`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListSinksResponse) Reset()                    { *m = ListSinksResponse{} }
+func (m *ListSinksResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListSinksResponse) ProtoMessage()               {}
+func (*ListSinksResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
+
+func (m *ListSinksResponse) GetSinks() []*LogSink {
+	if m != nil {
+		return m.Sinks
+	}
+	return nil
+}
+
+// The parameters to `GetSink`.
+type GetSinkRequest struct {
+	// Required. The resource name of the sink to return.
+	// Example: `"projects/my-project-id/sinks/my-sink-id"`.
+	SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"`
+}
+
+func (m *GetSinkRequest) Reset()                    { *m = GetSinkRequest{} }
+func (m *GetSinkRequest) String() string            { return proto.CompactTextString(m) }
+func (*GetSinkRequest) ProtoMessage()               {}
+func (*GetSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
+
+// The parameters to `CreateSink`.
+type CreateSinkRequest struct {
+	// Required. The resource in which to create the sink.
+	// Example: `"projects/my-project-id"`.
+	// The new sink must be provided in the request.
+	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
+	// Required. The new sink, whose `name` parameter is a sink identifier that
+	// is not already in use.
+	Sink *LogSink `protobuf:"bytes,2,opt,name=sink" json:"sink,omitempty"`
+}
+
+func (m *CreateSinkRequest) Reset()                    { *m = CreateSinkRequest{} }
+func (m *CreateSinkRequest) String() string            { return proto.CompactTextString(m) }
+func (*CreateSinkRequest) ProtoMessage()               {}
+func (*CreateSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
+
+func (m *CreateSinkRequest) GetSink() *LogSink {
+	if m != nil {
+		return m.Sink
+	}
+	return nil
+}
+
+// The parameters to `UpdateSink`.
+type UpdateSinkRequest struct {
+	// Required. The resource name of the sink to update, including the parent
+	// resource and the sink identifier.  If the sink does not exist, this method
+	// creates the sink.  Example: `"projects/my-project-id/sinks/my-sink-id"`.
+	SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"`
+	// Required. The updated sink, whose name is the same identifier that appears
+	// as part of `sinkName`.  If `sinkName` does not exist, then
+	// this method creates a new sink.
+	Sink *LogSink `protobuf:"bytes,2,opt,name=sink" json:"sink,omitempty"`
+}
+
+func (m *UpdateSinkRequest) Reset()                    { *m = UpdateSinkRequest{} }
+func (m *UpdateSinkRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateSinkRequest) ProtoMessage()               {}
+func (*UpdateSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
+
+func (m *UpdateSinkRequest) GetSink() *LogSink {
+	if m != nil {
+		return m.Sink
+	}
+	return nil
+}
+
+// The parameters to `DeleteSink`.
+type DeleteSinkRequest struct {
+	// Required. The resource name of the sink to delete, including the parent
+	// resource and the sink identifier.  Example:
+	// `"projects/my-project-id/sinks/my-sink-id"`.  It is an error if the sink
+	// does not exist.
+	SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"`
+}
+
+func (m *DeleteSinkRequest) Reset()                    { *m = DeleteSinkRequest{} }
+func (m *DeleteSinkRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSinkRequest) ProtoMessage()               {}
+func (*DeleteSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} }
+
+func init() {
+	proto.RegisterType((*LogSink)(nil), "google.logging.v2.LogSink")
+	proto.RegisterType((*ListSinksRequest)(nil), "google.logging.v2.ListSinksRequest")
+	proto.RegisterType((*ListSinksResponse)(nil), "google.logging.v2.ListSinksResponse")
+	proto.RegisterType((*GetSinkRequest)(nil), "google.logging.v2.GetSinkRequest")
+	proto.RegisterType((*CreateSinkRequest)(nil), "google.logging.v2.CreateSinkRequest")
+	proto.RegisterType((*UpdateSinkRequest)(nil), "google.logging.v2.UpdateSinkRequest")
+	proto.RegisterType((*DeleteSinkRequest)(nil), "google.logging.v2.DeleteSinkRequest")
+	proto.RegisterEnum("google.logging.v2.LogSink_VersionFormat", LogSink_VersionFormat_name, LogSink_VersionFormat_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion3
+
+// Client API for ConfigServiceV2 service
+
+type ConfigServiceV2Client interface {
+	// Lists sinks.
+	ListSinks(ctx context.Context, in *ListSinksRequest, opts ...grpc.CallOption) (*ListSinksResponse, error)
+	// Gets a sink.
+	GetSink(ctx context.Context, in *GetSinkRequest, opts ...grpc.CallOption) (*LogSink, error)
+	// Creates a sink.
+	CreateSink(ctx context.Context, in *CreateSinkRequest, opts ...grpc.CallOption) (*LogSink, error)
+	// Updates or creates a sink.
+	UpdateSink(ctx context.Context, in *UpdateSinkRequest, opts ...grpc.CallOption) (*LogSink, error)
+	// Deletes a sink.
+	DeleteSink(ctx context.Context, in *DeleteSinkRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+}
+
+type configServiceV2Client struct {
+	cc *grpc.ClientConn
+}
+
+func NewConfigServiceV2Client(cc *grpc.ClientConn) ConfigServiceV2Client {
+	return &configServiceV2Client{cc}
+}
+
+func (c *configServiceV2Client) ListSinks(ctx context.Context, in *ListSinksRequest, opts ...grpc.CallOption) (*ListSinksResponse, error) {
+	out := new(ListSinksResponse)
+	err := grpc.Invoke(ctx, "/google.logging.v2.ConfigServiceV2/ListSinks", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *configServiceV2Client) GetSink(ctx context.Context, in *GetSinkRequest, opts ...grpc.CallOption) (*LogSink, error) {
+	out := new(LogSink)
+	err := grpc.Invoke(ctx, "/google.logging.v2.ConfigServiceV2/GetSink", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *configServiceV2Client) CreateSink(ctx context.Context, in *CreateSinkRequest, opts ...grpc.CallOption) (*LogSink, error) {
+	out := new(LogSink)
+	err := grpc.Invoke(ctx, "/google.logging.v2.ConfigServiceV2/CreateSink", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *configServiceV2Client) UpdateSink(ctx context.Context, in *UpdateSinkRequest, opts ...grpc.CallOption) (*LogSink, error) {
+	out := new(LogSink)
+	err := grpc.Invoke(ctx, "/google.logging.v2.ConfigServiceV2/UpdateSink", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *configServiceV2Client) DeleteSink(ctx context.Context, in *DeleteSinkRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+	out := new(google_protobuf5.Empty)
+	err := grpc.Invoke(ctx, "/google.logging.v2.ConfigServiceV2/DeleteSink", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for ConfigServiceV2 service
+
+type ConfigServiceV2Server interface {
+	// Lists sinks.
+	ListSinks(context.Context, *ListSinksRequest) (*ListSinksResponse, error)
+	// Gets a sink.
+	GetSink(context.Context, *GetSinkRequest) (*LogSink, error)
+	// Creates a sink.
+	CreateSink(context.Context, *CreateSinkRequest) (*LogSink, error)
+	// Updates or creates a sink.
+	UpdateSink(context.Context, *UpdateSinkRequest) (*LogSink, error)
+	// Deletes a sink.
+	DeleteSink(context.Context, *DeleteSinkRequest) (*google_protobuf5.Empty, error)
+}
+
+func RegisterConfigServiceV2Server(s *grpc.Server, srv ConfigServiceV2Server) {
+	s.RegisterService(&_ConfigServiceV2_serviceDesc, srv)
+}
+
+func _ConfigServiceV2_ListSinks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSinksRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConfigServiceV2Server).ListSinks(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.ConfigServiceV2/ListSinks",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConfigServiceV2Server).ListSinks(ctx, req.(*ListSinksRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _ConfigServiceV2_GetSink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetSinkRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConfigServiceV2Server).GetSink(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.ConfigServiceV2/GetSink",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConfigServiceV2Server).GetSink(ctx, req.(*GetSinkRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _ConfigServiceV2_CreateSink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSinkRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConfigServiceV2Server).CreateSink(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.ConfigServiceV2/CreateSink",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConfigServiceV2Server).CreateSink(ctx, req.(*CreateSinkRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _ConfigServiceV2_UpdateSink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSinkRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConfigServiceV2Server).UpdateSink(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.ConfigServiceV2/UpdateSink",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConfigServiceV2Server).UpdateSink(ctx, req.(*UpdateSinkRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _ConfigServiceV2_DeleteSink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSinkRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConfigServiceV2Server).DeleteSink(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.ConfigServiceV2/DeleteSink",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConfigServiceV2Server).DeleteSink(ctx, req.(*DeleteSinkRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _ConfigServiceV2_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.logging.v2.ConfigServiceV2",
+	HandlerType: (*ConfigServiceV2Server)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "ListSinks",
+			Handler:    _ConfigServiceV2_ListSinks_Handler,
+		},
+		{
+			MethodName: "GetSink",
+			Handler:    _ConfigServiceV2_GetSink_Handler,
+		},
+		{
+			MethodName: "CreateSink",
+			Handler:    _ConfigServiceV2_CreateSink_Handler,
+		},
+		{
+			MethodName: "UpdateSink",
+			Handler:    _ConfigServiceV2_UpdateSink_Handler,
+		},
+		{
+			MethodName: "DeleteSink",
+			Handler:    _ConfigServiceV2_DeleteSink_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptor2,
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/v2/logging_config.proto", fileDescriptor2)
+}
+
+var fileDescriptor2 = []byte{
+	// 716 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x95, 0xdf, 0x4e, 0x13, 0x4d,
+	0x14, 0xc0, 0xbf, 0x16, 0x28, 0x70, 0x08, 0xd0, 0xce, 0x17, 0x48, 0xb3, 0x06, 0xc5, 0x15, 0x94,
+	0xd4, 0xb8, 0x5b, 0x97, 0x3b, 0x8d, 0x31, 0x02, 0xc5, 0x34, 0x41, 0x20, 0x5b, 0xe8, 0x05, 0x9a,
+	0xac, 0x4b, 0x99, 0xae, 0x23, 0xdd, 0x99, 0x75, 0x77, 0x5a, 0x45, 0x42, 0x62, 0x7c, 0x05, 0x1f,
+	0xc0, 0x87, 0xf2, 0x15, 0x7c, 0x0e, 0xe3, 0xec, 0xcc, 0x96, 0x16, 0x5a, 0xd6, 0x72, 0xb3, 0x9d,
+	0x39, 0xff, 0x7e, 0x67, 0xce, 0x39, 0x33, 0x85, 0x8a, 0xc7, 0x98, 0xd7, 0xc2, 0x86, 0xc7, 0x5a,
+	0x2e, 0xf5, 0x0c, 0x16, 0x7a, 0xa6, 0x87, 0x69, 0x10, 0x32, 0xce, 0x4c, 0xa5, 0x72, 0x03, 0x12,
+	0x99, 0x2d, 0xe6, 0x79, 0x84, 0x7a, 0x66, 0xc7, 0xea, 0x2e, 0x9d, 0x06, 0xa3, 0x4d, 0xe2, 0x19,
+	0xd2, 0x14, 0x15, 0x92, 0x30, 0x89, 0xd2, 0xe8, 0x58, 0x5a, 0x75, 0xb4, 0xc8, 0xe2, 0x63, 0x46,
+	0x38, 0xec, 0x90, 0x06, 0x56, 0x11, 0x4d, 0x97, 0x52, 0xc6, 0x5d, 0x4e, 0x18, 0x8d, 0x54, 0x74,
+	0x6d, 0xdd, 0x23, 0xfc, 0x43, 0xfb, 0xd8, 0x68, 0x30, 0xdf, 0x54, 0xe1, 0x4c, 0xa9, 0x38, 0x6e,
+	0x37, 0xcd, 0x80, 0x9f, 0x05, 0x38, 0x32, 0xb1, 0x2f, 0x16, 0xea, 0x9b, 0x38, 0x3d, 0xff, 0xb7,
+	0x13, 0x27, 0x3e, 0x8e, 0xb8, 0xeb, 0x07, 0xbd, 0x95, 0x72, 0xd6, 0x7f, 0x66, 0x61, 0x72, 0x87,
+	0x79, 0x35, 0x42, 0x4f, 0x11, 0x82, 0x71, 0xea, 0xfa, 0xb8, 0x98, 0x59, 0xce, 0xac, 0x4d, 0xdb,
+	0x72, 0x8d, 0x96, 0x61, 0xe6, 0x44, 0x38, 0x10, 0x2a, 0xf3, 0x2c, 0x8e, 0x49, 0x55, 0xbf, 0x08,
+	0x2d, 0x42, 0xae, 0x49, 0x5a, 0x1c, 0x87, 0xc5, 0x09, 0xa9, 0x4c, 0x76, 0xe8, 0x1d, 0x2c, 0xb0,
+	0x36, 0x0f, 0xda, 0xdc, 0xe9, 0xe0, 0x30, 0x12, 0x96, 0x4e, 0x93, 0x85, 0xbe, 0xcb, 0x8b, 0x39,
+	0x61, 0x36, 0x67, 0xad, 0x19, 0x03, 0x95, 0x34, 0x92, 0x44, 0x8c, 0xba, 0x72, 0xd8, 0x96, 0xf6,
+	0xf6, 0xff, 0x2a, 0xcc, 0x15, 0x21, 0x7a, 0x04, 0xf3, 0x9f, 0x43, 0x22, 0x38, 0x0e, 0x39, 0xc1,
+	0x94, 0x13, 0x7e, 0x56, 0x9c, 0x92, 0xf8, 0x39, 0x25, 0xae, 0x26, 0x52, 0xfd, 0x25, 0xcc, 0x5e,
+	0xf5, 0xbc, 0x0b, 0x5a, 0xbd, 0x62, 0xd7, 0xaa, 0x7b, 0xbb, 0xce, 0xf6, 0x9e, 0xfd, 0xe6, 0xd5,
+	0x81, 0x73, 0xb8, 0x5b, 0xdb, 0xaf, 0x6c, 0x56, 0xb7, 0xab, 0x95, 0xad, 0xfc, 0x7f, 0x28, 0x07,
+	0xd9, 0xba, 0x95, 0xcf, 0xc8, 0xdf, 0xa7, 0xf9, 0xac, 0xde, 0x84, 0xfc, 0x0e, 0x89, 0x78, 0x9c,
+	0x58, 0x64, 0xe3, 0x4f, 0x6d, 0x71, 0xf4, 0xf8, 0xcc, 0x81, 0x1b, 0x0a, 0x42, 0x52, 0xab, 0x64,
+	0x87, 0x96, 0x00, 0x02, 0xd7, 0xc3, 0x0e, 0x67, 0xa7, 0x98, 0x16, 0xb3, 0x52, 0x37, 0x1d, 0x4b,
+	0x0e, 0x62, 0x01, 0xba, 0x03, 0x72, 0xe3, 0x44, 0xe4, 0x2b, 0x96, 0xa5, 0x9c, 0xb0, 0xa7, 0x62,
+	0x41, 0x4d, 0xec, 0x75, 0x1f, 0x0a, 0x7d, 0x9c, 0x28, 0x10, 0x53, 0x81, 0x51, 0x19, 0x26, 0xa2,
+	0x58, 0x20, 0x38, 0x63, 0x6b, 0x33, 0x96, 0x76, 0x73, 0xd1, 0x6c, 0x65, 0x88, 0x1e, 0xc2, 0x3c,
+	0xc5, 0x5f, 0xb8, 0x33, 0x90, 0xc7, 0x6c, 0x2c, 0xde, 0xef, 0xe6, 0xa2, 0x3f, 0x81, 0xb9, 0xd7,
+	0x58, 0xd2, 0xba, 0x87, 0x12, 0xd9, 0xc5, 0x21, 0x9c, 0xbe, 0x19, 0x98, 0x8a, 0x05, 0xbb, 0x62,
+	0xaf, 0xbf, 0x85, 0xc2, 0x66, 0x88, 0x5d, 0x8e, 0xfb, 0x3d, 0x6e, 0x2a, 0x83, 0x01, 0xe3, 0xb1,
+	0xa3, 0x04, 0xa7, 0x27, 0x2d, 0xed, 0xf4, 0xf7, 0x50, 0x38, 0x0c, 0x4e, 0xae, 0x05, 0x4f, 0x4b,
+	0xe7, 0xd6, 0x84, 0x32, 0x14, 0xb6, 0x70, 0x0b, 0x8f, 0x4e, 0xb0, 0xfe, 0x8c, 0xc3, 0xfc, 0xa6,
+	0xbc, 0xa7, 0x35, 0x75, 0x69, 0xeb, 0x16, 0xba, 0x80, 0xe9, 0xcb, 0x16, 0xa1, 0x07, 0xc3, 0xa0,
+	0xd7, 0x06, 0x45, 0x5b, 0x49, 0x37, 0x52, 0x5d, 0xd6, 0x57, 0xbf, 0xff, 0xfa, 0xfd, 0x23, 0x7b,
+	0x0f, 0x2d, 0xc5, 0xcf, 0xce, 0xb9, 0x2a, 0xe2, 0x0b, 0x71, 0x3f, 0x3f, 0xe2, 0x06, 0x8f, 0xcc,
+	0xd2, 0x85, 0xa9, 0x5a, 0xcb, 0x61, 0x32, 0x69, 0x19, 0xba, 0x3f, 0x24, 0xee, 0xd5, 0x76, 0x6a,
+	0x29, 0x45, 0xd1, 0x4b, 0x12, 0xb8, 0x82, 0x74, 0x09, 0xbc, 0x2c, 0x42, 0x1f, 0x53, 0x21, 0x05,
+	0x1b, 0x9d, 0x03, 0xf4, 0x3a, 0x8f, 0x86, 0x1d, 0x68, 0x60, 0x30, 0x52, 0xd9, 0x8f, 0x25, 0x7b,
+	0x55, 0x4f, 0x3f, 0xec, 0x33, 0xd9, 0x37, 0xf4, 0x2d, 0x03, 0xd0, 0x1b, 0x8d, 0xa1, 0xf4, 0x81,
+	0xc9, 0x49, 0xa5, 0x97, 0x25, 0xbd, 0xa4, 0x8d, 0x70, 0xf2, 0x24, 0x85, 0x0e, 0x40, 0x6f, 0x74,
+	0x86, 0x66, 0x30, 0x30, 0x59, 0xda, 0x62, 0xd7, 0xaa, 0xfb, 0x10, 0x1b, 0x95, 0xf8, 0xc1, 0xee,
+	0xd6, 0xbd, 0x34, 0x02, 0x7d, 0xe3, 0x08, 0x16, 0xc4, 0x8b, 0x3e, 0x88, 0xdb, 0x98, 0xdd, 0x51,
+	0x6b, 0x35, 0x9d, 0xfb, 0x99, 0xa3, 0xf2, 0x6d, 0xff, 0xda, 0x8e, 0x73, 0x52, 0xb9, 0xfe, 0x37,
+	0x00, 0x00, 0xff, 0xff, 0x52, 0x86, 0xdf, 0xea, 0x15, 0x07, 0x00, 0x00,
+}
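
A similar sketch for the ConfigServiceV2 client generated above, creating an export sink to a Cloud Storage bucket. It reuses the logpb alias and again assumes an authenticated *grpc.ClientConn; the sink name, filter, and destination are illustrative values only.

// createBucketSink registers a sink that exports matching entries to a
// Cloud Storage bucket, requesting the V2 output format.
func createBucketSink(ctx context.Context, cc *grpc.ClientConn, project, bucket string) (*logpb.LogSink, error) {
	client := logpb.NewConfigServiceV2Client(cc)
	return client.CreateSink(ctx, &logpb.CreateSinkRequest{
		Parent: "projects/" + project,
		Sink: &logpb.LogSink{
			Name:                "errors-to-gcs",
			Destination:         "storage.googleapis.com/" + bucket,
			Filter:              "severity>=ERROR",
			OutputVersionFormat: logpb.LogSink_V2,
		},
	})
}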
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.proto b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.proto
new file mode 100644
index 0000000..fb9df62
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.proto
@@ -0,0 +1,187 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "github.com/golang/protobuf/ptypes/empty/empty.proto"; // from google/protobuf/empty.proto
+import "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto"; // from google/protobuf/timestamp.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "LoggingConfig";
+option java_package = "com.google.logging.v2";
+
+option go_package = "google.golang.org/genproto/googleapis/logging/v2";
+
+// Service for configuring sinks used to export log entries outside Stackdriver
+// Logging.
+service ConfigServiceV2 {
+  // Lists sinks.
+  rpc ListSinks(ListSinksRequest) returns (ListSinksResponse) {
+    option (google.api.http) = { get: "/v2/{parent=projects/*}/sinks" };
+  }
+
+  // Gets a sink.
+  rpc GetSink(GetSinkRequest) returns (LogSink) {
+    option (google.api.http) = { get: "/v2/{sink_name=projects/*/sinks/*}" };
+  }
+
+  // Creates a sink.
+  rpc CreateSink(CreateSinkRequest) returns (LogSink) {
+    option (google.api.http) = { post: "/v2/{parent=projects/*}/sinks" body: "sink" };
+  }
+
+  // Updates or creates a sink.
+  rpc UpdateSink(UpdateSinkRequest) returns (LogSink) {
+    option (google.api.http) = { put: "/v2/{sink_name=projects/*/sinks/*}" body: "sink" };
+  }
+
+  // Deletes a sink.
+  rpc DeleteSink(DeleteSinkRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { delete: "/v2/{sink_name=projects/*/sinks/*}" };
+  }
+}
+
+// Describes a sink used to export log entries outside Stackdriver Logging.
+message LogSink {
+  // Available log entry formats. Log entries can be written to Cloud
+  // Logging in either format and can be exported in either format.
+  // Version 2 is the preferred format.
+  enum VersionFormat {
+    // An unspecified version format will default to V2.
+    VERSION_FORMAT_UNSPECIFIED = 0;
+
+    // `LogEntry` version 2 format.
+    V2 = 1;
+
+    // `LogEntry` version 1 format.
+    V1 = 2;
+  }
+
+  // Required. The client-assigned sink identifier, unique within the
+  // project. Example: `"my-syslog-errors-to-pubsub"`.  Sink identifiers are
+  // limited to 1000 characters and can include only the following characters:
+  // `A-Z`, `a-z`, `0-9`, and the special characters `_-.`.  The maximum length
+  // of the name is 100 characters.
+  string name = 1;
+
+  // Required. The export destination. See
+  // [Exporting Logs With Sinks](/logging/docs/api/tasks/exporting-logs).
+  // Examples:
+  //
+  //     "storage.googleapis.com/my-gcs-bucket"
+  //     "bigquery.googleapis.com/projects/my-project-id/datasets/my-dataset"
+  //     "pubsub.googleapis.com/projects/my-project/topics/my-topic"
+  string destination = 3;
+
+  // Optional. An [advanced logs filter](/logging/docs/view/advanced_filters).
+  // Only log entries matching the filter are exported. The filter
+  // must be consistent with the log entry format specified by the
+  // `outputVersionFormat` parameter, regardless of the format of the
+  // log entry that was originally written to Stackdriver Logging.
+  // Example filter (V2 format):
+  //
+  //     logName=projects/my-projectid/logs/syslog AND severity>=ERROR
+  string filter = 5;
+
+  // Optional. The log entry version to use for this sink's exported log
+  // entries.  This version does not have to correspond to the version of the
+  // log entry that was written to Stackdriver Logging. If omitted, the
+  // v2 format is used.
+  VersionFormat output_version_format = 6;
+
+  // Output only. The iam identity to which the destination needs to grant write
+  // access.  This may be a service account or a group.
+  // Examples (Do not assume these specific values):
+  //    "serviceAccount:cloud-logs@system.gserviceaccount.com"
+  //    "group:cloud-logs@google.com"
+  //
+  //   For GCS destinations, the role "roles/owner" is required on the bucket
+  //   For Cloud Pubsub destinations, the role "roles/pubsub.publisher" is
+  //     required on the topic
+  //   For BigQuery, the role "roles/editor" is required on the dataset
+  string writer_identity = 8;
+}
+
+// The parameters to `ListSinks`.
+message ListSinksRequest {
+  // Required. The cloud resource containing the sinks.
+  // Example: `"projects/my-logging-project"`.
+  string parent = 1;
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method.  `pageToken` must be the value of
+  // `nextPageToken` from the previous response.  The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2;
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored.  The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 3;
+}
+
+// Result returned from `ListSinks`.
+message ListSinksResponse {
+  // A list of sinks.
+  repeated LogSink sinks = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included.  To get the next set of results, call the same
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to `GetSink`.
+message GetSinkRequest {
+  // Required. The resource name of the sink to return.
+  // Example: `"projects/my-project-id/sinks/my-sink-id"`.
+  string sink_name = 1;
+}
+
+// The parameters to `CreateSink`.
+message CreateSinkRequest {
+  // Required. The resource in which to create the sink.
+  // Example: `"projects/my-project-id"`.
+  // The new sink must be provided in the request.
+  string parent = 1;
+
+  // Required. The new sink, whose `name` parameter is a sink identifier that
+  // is not already in use.
+  LogSink sink = 2;
+}
+
+// The parameters to `UpdateSink`.
+message UpdateSinkRequest {
+  // Required. The resource name of the sink to update, including the parent
+  // resource and the sink identifier.  If the sink does not exist, this method
+  // creates the sink.  Example: `"projects/my-project-id/sinks/my-sink-id"`.
+  string sink_name = 1;
+
+  // Required. The updated sink, whose name is the same identifier that appears
+  // as part of `sinkName`.  If `sinkName` does not exist, then
+  // this method creates a new sink.
+  LogSink sink = 2;
+}
+
+// The parameters to `DeleteSink`.
+message DeleteSinkRequest {
+  // Required. The resource name of the sink to delete, including the parent
+  // resource and the sink identifier.  Example:
+  // `"projects/my-project-id/sinks/my-sink-id"`.  It is an error if the sink
+  // does not exist.
+  string sink_name = 1;
+}
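
The UpdateSink comments above describe upsert semantics: if the named sink does not exist, the call creates it. A small sketch of that call, using the same logpb alias as the earlier sketches:

// upsertSink updates the named sink in place, or creates it if it does
// not yet exist, per the UpdateSink semantics described above.
func upsertSink(ctx context.Context, client logpb.ConfigServiceV2Client, project string, sink *logpb.LogSink) (*logpb.LogSink, error) {
	return client.UpdateSink(ctx, &logpb.UpdateSinkRequest{
		SinkName: "projects/" + project + "/sinks/" + sink.Name,
		Sink:     sink,
	})
}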
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.pb.go
new file mode 100644
index 0000000..8eb079e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.pb.go
@@ -0,0 +1,465 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto
+// DO NOT EDIT!
+
+package v2
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Stackdriver Logging API version.
+type LogMetric_ApiVersion int32
+
+const (
+	// Stackdriver Logging API v2.
+	LogMetric_V2 LogMetric_ApiVersion = 0
+	// Stackdriver Logging API v1.
+	LogMetric_V1 LogMetric_ApiVersion = 1
+)
+
+var LogMetric_ApiVersion_name = map[int32]string{
+	0: "V2",
+	1: "V1",
+}
+var LogMetric_ApiVersion_value = map[string]int32{
+	"V2": 0,
+	"V1": 1,
+}
+
+func (x LogMetric_ApiVersion) String() string {
+	return proto.EnumName(LogMetric_ApiVersion_name, int32(x))
+}
+func (LogMetric_ApiVersion) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} }
+
+// Describes a logs-based metric.  The value of the metric is the
+// number of log entries that match a logs filter.
+type LogMetric struct {
+	// Required. The client-assigned metric identifier. Example:
+	// `"severe_errors"`.  Metric identifiers are limited to 100
+	// characters and can include only the following characters: `A-Z`,
+	// `a-z`, `0-9`, and the special characters `_-.,+!*',()%/`.  The
+	// forward-slash character (`/`) denotes a hierarchy of name pieces,
+	// and it cannot be the first character of the name.  The '%' character
+	// is used to URL encode unsafe and reserved characters and must be
+	// followed by two hexadecimal digits according to RFC 1738.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Optional. A description of this metric, which is used in documentation.
+	Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+	// Required. An [advanced logs filter](/logging/docs/view/advanced_filters).
+	// Example: `"resource.type=gae_app AND severity>=ERROR"`.
+	Filter string `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"`
+	// Output only. The API version that created or updated this metric.
+	// The version also dictates the syntax of the filter expression. When a value
+	// for this field is missing, the default value of V2 should be assumed.
+	Version LogMetric_ApiVersion `protobuf:"varint,4,opt,name=version,enum=google.logging.v2.LogMetric_ApiVersion" json:"version,omitempty"`
+}
+
+func (m *LogMetric) Reset()                    { *m = LogMetric{} }
+func (m *LogMetric) String() string            { return proto.CompactTextString(m) }
+func (*LogMetric) ProtoMessage()               {}
+func (*LogMetric) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
+
+// The parameters to ListLogMetrics.
+type ListLogMetricsRequest struct {
+	// Required. The resource name containing the metrics.
+	// Example: `"projects/my-project-id"`.
+	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
+	// Optional. If present, then retrieve the next batch of results from the
+	// preceding call to this method.  `pageToken` must be the value of
+	// `nextPageToken` from the previous response.  The values of other method
+	// parameters should be identical to those in the previous call.
+	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+	// Optional. The maximum number of results to return from this request.
+	// Non-positive values are ignored.  The presence of `nextPageToken` in the
+	// response indicates that more results might be available.
+	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+}
+
+func (m *ListLogMetricsRequest) Reset()                    { *m = ListLogMetricsRequest{} }
+func (m *ListLogMetricsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListLogMetricsRequest) ProtoMessage()               {}
+func (*ListLogMetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
+
+// Result returned from ListLogMetrics.
+type ListLogMetricsResponse struct {
+	// A list of logs-based metrics.
+	Metrics []*LogMetric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
+	// If there might be more results than appear in this response, then
+	// `nextPageToken` is included.  To get the next set of results, call this
+	// method again using the value of `nextPageToken` as `pageToken`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListLogMetricsResponse) Reset()                    { *m = ListLogMetricsResponse{} }
+func (m *ListLogMetricsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListLogMetricsResponse) ProtoMessage()               {}
+func (*ListLogMetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
+
+func (m *ListLogMetricsResponse) GetMetrics() []*LogMetric {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+// The parameters to GetLogMetric.
+type GetLogMetricRequest struct {
+	// The resource name of the desired metric.
+	// Example: `"projects/my-project-id/metrics/my-metric-id"`.
+	MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"`
+}
+
+func (m *GetLogMetricRequest) Reset()                    { *m = GetLogMetricRequest{} }
+func (m *GetLogMetricRequest) String() string            { return proto.CompactTextString(m) }
+func (*GetLogMetricRequest) ProtoMessage()               {}
+func (*GetLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} }
+
+// The parameters to CreateLogMetric.
+type CreateLogMetricRequest struct {
+	// The resource name of the project in which to create the metric.
+	// Example: `"projects/my-project-id"`.
+	//
+	// The new metric must be provided in the request.
+	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
+	// The new logs-based metric, which must not have an identifier that
+	// already exists.
+	Metric *LogMetric `protobuf:"bytes,2,opt,name=metric" json:"metric,omitempty"`
+}
+
+func (m *CreateLogMetricRequest) Reset()                    { *m = CreateLogMetricRequest{} }
+func (m *CreateLogMetricRequest) String() string            { return proto.CompactTextString(m) }
+func (*CreateLogMetricRequest) ProtoMessage()               {}
+func (*CreateLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} }
+
+func (m *CreateLogMetricRequest) GetMetric() *LogMetric {
+	if m != nil {
+		return m.Metric
+	}
+	return nil
+}
+
+// The parameters to UpdateLogMetric.
+type UpdateLogMetricRequest struct {
+	// The resource name of the metric to update.
+	// Example: `"projects/my-project-id/metrics/my-metric-id"`.
+	//
+	// The updated metric must be provided in the request and have the
+	// same identifier that is specified in `metricName`.
+	// If the metric does not exist, it is created.
+	MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"`
+	// The updated metric, whose name must be the same as the
+	// metric identifier in `metricName`. If `metricName` does not
+	// exist, then a new metric is created.
+	Metric *LogMetric `protobuf:"bytes,2,opt,name=metric" json:"metric,omitempty"`
+}
+
+func (m *UpdateLogMetricRequest) Reset()                    { *m = UpdateLogMetricRequest{} }
+func (m *UpdateLogMetricRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateLogMetricRequest) ProtoMessage()               {}
+func (*UpdateLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} }
+
+func (m *UpdateLogMetricRequest) GetMetric() *LogMetric {
+	if m != nil {
+		return m.Metric
+	}
+	return nil
+}
+
+// The parameters to DeleteLogMetric.
+type DeleteLogMetricRequest struct {
+	// The resource name of the metric to delete.
+	// Example: `"projects/my-project-id/metrics/my-metric-id"`.
+	MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"`
+}
+
+func (m *DeleteLogMetricRequest) Reset()                    { *m = DeleteLogMetricRequest{} }
+func (m *DeleteLogMetricRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteLogMetricRequest) ProtoMessage()               {}
+func (*DeleteLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} }
+
+func init() {
+	proto.RegisterType((*LogMetric)(nil), "google.logging.v2.LogMetric")
+	proto.RegisterType((*ListLogMetricsRequest)(nil), "google.logging.v2.ListLogMetricsRequest")
+	proto.RegisterType((*ListLogMetricsResponse)(nil), "google.logging.v2.ListLogMetricsResponse")
+	proto.RegisterType((*GetLogMetricRequest)(nil), "google.logging.v2.GetLogMetricRequest")
+	proto.RegisterType((*CreateLogMetricRequest)(nil), "google.logging.v2.CreateLogMetricRequest")
+	proto.RegisterType((*UpdateLogMetricRequest)(nil), "google.logging.v2.UpdateLogMetricRequest")
+	proto.RegisterType((*DeleteLogMetricRequest)(nil), "google.logging.v2.DeleteLogMetricRequest")
+	proto.RegisterEnum("google.logging.v2.LogMetric_ApiVersion", LogMetric_ApiVersion_name, LogMetric_ApiVersion_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion3
+
+// Client API for MetricsServiceV2 service
+
+type MetricsServiceV2Client interface {
+	// Lists logs-based metrics.
+	ListLogMetrics(ctx context.Context, in *ListLogMetricsRequest, opts ...grpc.CallOption) (*ListLogMetricsResponse, error)
+	// Gets a logs-based metric.
+	GetLogMetric(ctx context.Context, in *GetLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error)
+	// Creates a logs-based metric.
+	CreateLogMetric(ctx context.Context, in *CreateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error)
+	// Creates or updates a logs-based metric.
+	UpdateLogMetric(ctx context.Context, in *UpdateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error)
+	// Deletes a logs-based metric.
+	DeleteLogMetric(ctx context.Context, in *DeleteLogMetricRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+}
+
+type metricsServiceV2Client struct {
+	cc *grpc.ClientConn
+}
+
+func NewMetricsServiceV2Client(cc *grpc.ClientConn) MetricsServiceV2Client {
+	return &metricsServiceV2Client{cc}
+}
+
+func (c *metricsServiceV2Client) ListLogMetrics(ctx context.Context, in *ListLogMetricsRequest, opts ...grpc.CallOption) (*ListLogMetricsResponse, error) {
+	out := new(ListLogMetricsResponse)
+	err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/ListLogMetrics", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *metricsServiceV2Client) GetLogMetric(ctx context.Context, in *GetLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) {
+	out := new(LogMetric)
+	err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/GetLogMetric", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *metricsServiceV2Client) CreateLogMetric(ctx context.Context, in *CreateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) {
+	out := new(LogMetric)
+	err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/CreateLogMetric", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *metricsServiceV2Client) UpdateLogMetric(ctx context.Context, in *UpdateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) {
+	out := new(LogMetric)
+	err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/UpdateLogMetric", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *metricsServiceV2Client) DeleteLogMetric(ctx context.Context, in *DeleteLogMetricRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+	out := new(google_protobuf5.Empty)
+	err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/DeleteLogMetric", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for MetricsServiceV2 service
+
+type MetricsServiceV2Server interface {
+	// Lists logs-based metrics.
+	ListLogMetrics(context.Context, *ListLogMetricsRequest) (*ListLogMetricsResponse, error)
+	// Gets a logs-based metric.
+	GetLogMetric(context.Context, *GetLogMetricRequest) (*LogMetric, error)
+	// Creates a logs-based metric.
+	CreateLogMetric(context.Context, *CreateLogMetricRequest) (*LogMetric, error)
+	// Creates or updates a logs-based metric.
+	UpdateLogMetric(context.Context, *UpdateLogMetricRequest) (*LogMetric, error)
+	// Deletes a logs-based metric.
+	DeleteLogMetric(context.Context, *DeleteLogMetricRequest) (*google_protobuf5.Empty, error)
+}
+
+func RegisterMetricsServiceV2Server(s *grpc.Server, srv MetricsServiceV2Server) {
+	s.RegisterService(&_MetricsServiceV2_serviceDesc, srv)
+}
+
+func _MetricsServiceV2_ListLogMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListLogMetricsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceV2Server).ListLogMetrics(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.MetricsServiceV2/ListLogMetrics",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceV2Server).ListLogMetrics(ctx, req.(*ListLogMetricsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _MetricsServiceV2_GetLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetLogMetricRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceV2Server).GetLogMetric(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.MetricsServiceV2/GetLogMetric",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceV2Server).GetLogMetric(ctx, req.(*GetLogMetricRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _MetricsServiceV2_CreateLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateLogMetricRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceV2Server).CreateLogMetric(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.MetricsServiceV2/CreateLogMetric",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceV2Server).CreateLogMetric(ctx, req.(*CreateLogMetricRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _MetricsServiceV2_UpdateLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateLogMetricRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceV2Server).UpdateLogMetric(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.MetricsServiceV2/UpdateLogMetric",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceV2Server).UpdateLogMetric(ctx, req.(*UpdateLogMetricRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _MetricsServiceV2_DeleteLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteLogMetricRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceV2Server).DeleteLogMetric(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.logging.v2.MetricsServiceV2/DeleteLogMetric",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceV2Server).DeleteLogMetric(ctx, req.(*DeleteLogMetricRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _MetricsServiceV2_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.logging.v2.MetricsServiceV2",
+	HandlerType: (*MetricsServiceV2Server)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "ListLogMetrics",
+			Handler:    _MetricsServiceV2_ListLogMetrics_Handler,
+		},
+		{
+			MethodName: "GetLogMetric",
+			Handler:    _MetricsServiceV2_GetLogMetric_Handler,
+		},
+		{
+			MethodName: "CreateLogMetric",
+			Handler:    _MetricsServiceV2_CreateLogMetric_Handler,
+		},
+		{
+			MethodName: "UpdateLogMetric",
+			Handler:    _MetricsServiceV2_UpdateLogMetric_Handler,
+		},
+		{
+			MethodName: "DeleteLogMetric",
+			Handler:    _MetricsServiceV2_DeleteLogMetric_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptor3,
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto", fileDescriptor3)
+}
+
+var fileDescriptor3 = []byte{
+	// 648 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x4e, 0x14, 0x4f,
+	0x10, 0xfe, 0x0d, 0x7f, 0x96, 0x1f, 0x85, 0x02, 0xb6, 0x61, 0x43, 0x46, 0x0c, 0x38, 0x07, 0x58,
+	0x38, 0x4c, 0xe3, 0x60, 0x48, 0x34, 0xf1, 0x00, 0xfe, 0x8b, 0x09, 0x1a, 0xb2, 0x28, 0x07, 0x3d,
+	0x6c, 0x86, 0xa1, 0xb6, 0x6d, 0xd9, 0x9d, 0x1e, 0xa7, 0x9b, 0x0d, 0x6a, 0xbc, 0x18, 0x6f, 0x26,
+	0x1e, 0xf4, 0x6d, 0x7c, 0x0d, 0x7d, 0x04, 0x1f, 0xc4, 0x9e, 0xee, 0x19, 0x58, 0x77, 0x47, 0x76,
+	0xe5, 0x32, 0xdb, 0x5d, 0x55, 0x5d, 0xdf, 0x57, 0x55, 0x5f, 0xf7, 0xc2, 0x43, 0x26, 0x04, 0x6b,
+	0xa1, 0xcf, 0x44, 0x2b, 0x8c, 0x99, 0x2f, 0x52, 0x46, 0x19, 0xc6, 0x49, 0x2a, 0x94, 0xa0, 0xd6,
+	0x15, 0x26, 0x5c, 0xd2, 0x96, 0x60, 0x8c, 0xc7, 0x8c, 0x76, 0x82, 0x62, 0xd9, 0x68, 0xa3, 0x4a,
+	0x79, 0x24, 0x7d, 0x13, 0x4b, 0xae, 0xe4, 0x79, 0x72, 0xaf, 0xdf, 0x09, 0xdc, 0xc7, 0xc3, 0xa5,
+	0xd6, 0x1f, 0x2a, 0x31, 0xed, 0xf0, 0x08, 0x23, 0x11, 0x37, 0x39, 0xa3, 0x61, 0x1c, 0x0b, 0x15,
+	0x2a, 0x2e, 0xe2, 0x3c, 0xbb, 0xbb, 0xc1, 0xb8, 0x7a, 0x75, 0x7c, 0xe0, 0x47, 0xa2, 0x4d, 0x6d,
+	0x3a, 0x6a, 0x1c, 0x07, 0xc7, 0x4d, 0x9a, 0xa8, 0xb7, 0x09, 0x4a, 0x8a, 0x6d, 0xbd, 0xb0, 0x5f,
+	0x7b, 0xc8, 0xfb, 0xee, 0xc0, 0xe4, 0x8e, 0x60, 0x4f, 0x0c, 0x4f, 0x42, 0x60, 0x2c, 0x0e, 0xdb,
+	0x38, 0xef, 0x2c, 0x39, 0xb5, 0xc9, 0xba, 0x59, 0x93, 0x25, 0x98, 0x3a, 0x44, 0x19, 0xa5, 0x3c,
+	0xc9, 0xc0, 0xe6, 0x47, 0x8c, 0xab, 0xdb, 0x44, 0xaa, 0x50, 0x69, 0xf2, 0x96, 0xc2, 0x74, 0x7e,
+	0xd4, 0x38, 0xf3, 0x1d, 0xd9, 0x82, 0x89, 0x0e, 0xa6, 0x32, 0x3b, 0x35, 0xa6, 0x1d, 0xd3, 0xc1,
+	0x8a, 0xdf, 0xd7, 0x00, 0xff, 0x14, 0xdc, 0xdf, 0x4a, 0xf8, 0xbe, 0x0d, 0xaf, 0x17, 0xe7, 0xbc,
+	0x05, 0x80, 0x33, 0x33, 0xa9, 0xc0, 0xc8, 0x7e, 0x30, 0xfb, 0x9f, 0xf9, 0xbd, 0x39, 0xeb, 0x78,
+	0x47, 0x30, 0xb7, 0xc3, 0xa5, 0x3a, 0x4d, 0x21, 0xeb, 0xf8, 0xe6, 0x18, 0xa5, 0xca, 0x18, 0x25,
+	0x61, 0x8a, 0xb1, 0xca, 0x2b, 0xc9, 0x77, 0xe4, 0x3a, 0x40, 0x12, 0x32, 0x6c, 0x28, 0x71, 0x84,
+	0x45, 0x29, 0x93, 0x99, 0xe5, 0x59, 0x66, 0x20, 0xd7, 0xc0, 0x6c, 0x1a, 0x92, 0xbf, 0x43, 0x53,
+	0xcb, 0x78, 0xfd, 0xff, 0xcc, 0xb0, 0xa7, 0xf7, 0xde, 0x09, 0x54, 0x7b, 0xc1, 0x64, 0xa2, 0xbb,
+	0x8f, 0x64, 0x13, 0x26, 0xf2, 0x39, 0x6b, 0xb8, 0xd1, 0xda, 0x54, 0xb0, 0x70, 0x5e, 0x9d, 0xf5,
+	0x22, 0x98, 0x2c, 0xc3, 0x4c, 0x8c, 0x27, 0xaa, 0xd1, 0x47, 0xe9, 0x72, 0x66, 0xde, 0x2d, 0x68,
+	0x79, 0x9b, 0x70, 0xf5, 0x11, 0x9e, 0x01, 0x17, 0x45, 0x2e, 0xc2, 0x94, 0xcd, 0xd4, 0xe8, 0x9a,
+	0x19, 0x58, 0xd3, 0x53, 0x6d, 0xf1, 0x9a, 0x50, 0xbd, 0x97, 0x62, 0xa8, 0xb0, 0xef, 0xe8, 0xdf,
+	0xfa, 0x73, 0x0b, 0x2a, 0xf6, 0xbc, 0x21, 0x32, 0xa8, 0x90, 0x3c, 0xd6, 0x13, 0x50, 0x7d, 0x9e,
+	0x1c, 0x96, 0xe1, 0x0c, 0xa2, 0x78, 0x41, 0xc0, 0xdb, 0x50, 0xbd, 0x8f, 0x2d, 0xbc, 0x00, 0x60,
+	0xf0, 0x73, 0x1c, 0x66, 0xf3, 0xf9, 0xed, 0xd9, 0xfb, 0xb4, 0x1f, 0x90, 0x2f, 0x0e, 0x4c, 0xff,
+	0x39, 0x5b, 0x52, 0x2b, 0x23, 0x52, 0xa6, 0x35, 0x77, 0x75, 0x88, 0x48, 0x2b, 0x14, 0x6f, 0xe5,
+	0xe3, 0x8f, 0x5f, 0xdf, 0x46, 0x6e, 0x90, 0xc5, 0xec, 0x89, 0x78, 0x6f, 0x7b, 0x7e, 0x57, 0xdf,
+	0xc3, 0xd7, 0x18, 0x29, 0x49, 0xd7, 0x3e, 0xd0, 0x42, 0x19, 0x9f, 0x1c, 0xb8, 0xd4, 0x3d, 0x72,
+	0xb2, 0x5c, 0x02, 0x52, 0xa2, 0x09, 0xf7, 0xdc, 0xfe, 0x79, 0xbe, 0xc1, 0xaf, 0x91, 0x65, 0x83,
+	0xdf, 0xd5, 0xa8, 0x2e, 0x12, 0x05, 0x07, 0x4d, 0x87, 0x7c, 0x76, 0x60, 0xa6, 0x47, 0x41, 0xa4,
+	0xac, 0xdc, 0x72, 0x95, 0x0d, 0x20, 0x43, 0x0d, 0x99, 0x55, 0x6f, 0x50, 0x33, 0xee, 0xe4, 0x53,
+	0x27, 0x5f, 0x35, 0x9b, 0x1e, 0x9d, 0x95, 0xb2, 0x29, 0xd7, 0xe2, 0x00, 0x36, 0x9b, 0x86, 0xcd,
+	0xba, 0x3b, 0x64, 0x6b, 0x4e, 0x49, 0xe9, 0x49, 0xcd, 0xf4, 0x68, 0xb1, 0x94, 0x54, 0xb9, 0x5e,
+	0xdd, 0x6a, 0x11, 0x5a, 0xbc, 0xd4, 0xfe, 0x83, 0xec, 0x71, 0x2e, 0x26, 0xb5, 0x36, 0x24, 0x9d,
+	0xed, 0x97, 0x30, 0xa7, 0x9f, 0xfd, 0x7e, 0xdc, 0xed, 0xe9, 0x1d, 0xbb, 0xce, 0xa5, 0xb8, 0xeb,
+	0xbc, 0x58, 0xff, 0xd7, 0x3f, 0xb3, 0x83, 0x8a, 0x71, 0x6e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff,
+	0x46, 0x24, 0x21, 0x19, 0x07, 0x07, 0x00, 0x00,
+}
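
A minimal usage sketch of the generated client above, paging through ListLogMetrics with nextPageToken as the comments describe. This is illustrative only and not part of the vendored file: the import alias loggingpb, the endpoint address, and the insecure dial option are assumptions; a real caller would attach transport security and OAuth credentials.

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
)

func main() {
	// Dial the Stackdriver Logging endpoint. WithInsecure is only for the sketch;
	// production code needs TLS and credentials.
	conn, err := grpc.Dial("logging.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := loggingpb.NewMetricsServiceV2Client(conn)
	req := &loggingpb.ListLogMetricsRequest{Parent: "projects/my-project-id", PageSize: 100}
	for {
		resp, err := client.ListLogMetrics(context.Background(), req)
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range resp.Metrics {
			log.Printf("metric %q filter %q", m.Name, m.Filter)
		}
		// An empty nextPageToken signals the last page.
		if resp.NextPageToken == "" {
			break
		}
		req.PageToken = resp.NextPageToken
	}
}
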
diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto
new file mode 100644
index 0000000..3ef6190
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.proto
@@ -0,0 +1,161 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.v2;
+
+import "google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto"; // from google/api/annotations.proto
+import "github.com/golang/protobuf/ptypes/empty/empty.proto"; // from google/protobuf/empty.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "LoggingMetrics";
+option java_package = "com.google.logging.v2";
+
+option go_package = "google.golang.org/genproto/googleapis/logging/v2";
+
+// Service for configuring logs-based metrics.
+service MetricsServiceV2 {
+  // Lists logs-based metrics.
+  rpc ListLogMetrics(ListLogMetricsRequest) returns (ListLogMetricsResponse) {
+    option (google.api.http) = { get: "/v2/{parent=projects/*}/metrics" };
+  }
+
+  // Gets a logs-based metric.
+  rpc GetLogMetric(GetLogMetricRequest) returns (LogMetric) {
+    option (google.api.http) = { get: "/v2/{metric_name=projects/*/metrics/*}" };
+  }
+
+  // Creates a logs-based metric.
+  rpc CreateLogMetric(CreateLogMetricRequest) returns (LogMetric) {
+    option (google.api.http) = { post: "/v2/{parent=projects/*}/metrics" body: "metric" };
+  }
+
+  // Creates or updates a logs-based metric.
+  rpc UpdateLogMetric(UpdateLogMetricRequest) returns (LogMetric) {
+    option (google.api.http) = { put: "/v2/{metric_name=projects/*/metrics/*}" body: "metric" };
+  }
+
+  // Deletes a logs-based metric.
+  rpc DeleteLogMetric(DeleteLogMetricRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { delete: "/v2/{metric_name=projects/*/metrics/*}" };
+  }
+}
+
+// Describes a logs-based metric.  The value of the metric is the
+// number of log entries that match a logs filter.
+message LogMetric {
+  // Stackdriver Logging API version.
+  enum ApiVersion {
+    // Stackdriver Logging API v2.
+    V2 = 0;
+
+    // Stackdriver Logging API v1.
+    V1 = 1;
+  }
+
+  // Required. The client-assigned metric identifier. Example:
+  // `"severe_errors"`.  Metric identifiers are limited to 100
+  // characters and can include only the following characters: `A-Z`,
+  // `a-z`, `0-9`, and the special characters `_-.,+!*',()%/`.  The
+  // forward-slash character (`/`) denotes a hierarchy of name pieces,
+  // and it cannot be the first character of the name.  The '%' character
+  // is used to URL encode unsafe and reserved characters and must be
+  // followed by two hexadecimal digits according to RFC 1738.
+  string name = 1;
+
+  // Optional. A description of this metric, which is used in documentation.
+  string description = 2;
+
+  // Required. An [advanced logs filter](/logging/docs/view/advanced_filters).
+  // Example: `"resource.type=gae_app AND severity>=ERROR"`.
+  string filter = 3;
+
+  // Output only. The API version that created or updated this metric.
+  // The version also dictates the syntax of the filter expression. When a value
+  // for this field is missing, the default value of V2 should be assumed.
+  ApiVersion version = 4;
+}
+
+// The parameters to ListLogMetrics.
+message ListLogMetricsRequest {
+  // Required. The resource name containing the metrics.
+  // Example: `"projects/my-project-id"`.
+  string parent = 1;
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method.  `pageToken` must be the value of
+  // `nextPageToken` from the previous response.  The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2;
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored.  The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 3;
+}
+
+// Result returned from ListLogMetrics.
+message ListLogMetricsResponse {
+  // A list of logs-based metrics.
+  repeated LogMetric metrics = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included.  To get the next set of results, call this
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to GetLogMetric.
+message GetLogMetricRequest {
+  // The resource name of the desired metric.
+  // Example: `"projects/my-project-id/metrics/my-metric-id"`.
+  string metric_name = 1;
+}
+
+// The parameters to CreateLogMetric.
+message CreateLogMetricRequest {
+  // The resource name of the project in which to create the metric.
+  // Example: `"projects/my-project-id"`.
+  //
+  // The new metric must be provided in the request.
+  string parent = 1;
+
+  // The new logs-based metric, which must not have an identifier that
+  // already exists.
+  LogMetric metric = 2;
+}
+
+// The parameters to UpdateLogMetric.
+message UpdateLogMetricRequest {
+  // The resource name of the metric to update.
+  // Example: `"projects/my-project-id/metrics/my-metric-id"`.
+  //
+  // The updated metric must be provided in the request and have the
+  // same identifier that is specified in `metricName`.
+  // If the metric does not exist, it is created.
+  string metric_name = 1;
+
+  // The updated metric, whose name must be the same as the
+  // metric identifier in `metricName`. If `metricName` does not
+  // exist, then a new metric is created.
+  LogMetric metric = 2;
+}
+
+// The parameters to DeleteLogMetric.
+message DeleteLogMetricRequest {
+  // The resource name of the metric to delete.
+  // Example: `"projects/my-project-id/metrics/my-metric-id"`.
+  string metric_name = 1;
+}
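
A rough server-side sketch to go with the service definition above: an in-memory fake satisfying the generated MetricsServiceV2Server interface, the kind of stub a test might register with grpc.NewServer(). The type name fakeMetrics, the map-backed storage, and the omitted pagination are assumptions of this sketch; it also assumes the generated file's google_protobuf5 alias refers to github.com/golang/protobuf/ptypes/empty.

package fake

import (
	"github.com/golang/protobuf/ptypes/empty"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"

	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
)

// fakeMetrics stores logs-based metrics keyed by resource name, e.g.
// "projects/my-project-id/metrics/my-metric-id".
type fakeMetrics struct {
	metrics map[string]*loggingpb.LogMetric
}

// Compile-time check that the fake implements the generated interface.
var _ loggingpb.MetricsServiceV2Server = (*fakeMetrics)(nil)

func (f *fakeMetrics) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) (*loggingpb.ListLogMetricsResponse, error) {
	resp := &loggingpb.ListLogMetricsResponse{}
	for _, m := range f.metrics {
		resp.Metrics = append(resp.Metrics, m)
	}
	return resp, nil // pagination omitted in this sketch
}

func (f *fakeMetrics) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) {
	if m, ok := f.metrics[req.MetricName]; ok {
		return m, nil
	}
	return nil, grpc.Errorf(codes.NotFound, "metric %q not found", req.MetricName)
}

func (f *fakeMetrics) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) {
	name := req.Parent + "/metrics/" + req.Metric.Name
	f.metrics[name] = req.Metric
	return req.Metric, nil
}

func (f *fakeMetrics) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) {
	// UpdateLogMetric creates the metric if it does not already exist.
	f.metrics[req.MetricName] = req.Metric
	return req.Metric, nil
}

func (f *fakeMetrics) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) (*empty.Empty, error) {
	delete(f.metrics, req.MetricName)
	return &empty.Empty{}, nil
}
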
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
new file mode 100644
index 0000000..cc09adb
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -0,0 +1,131 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/googleapis/rpc/status/status.proto
+// DO NOT EDIT!
+
+/*
+Package google_rpc is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/googleapis/rpc/status/status.proto
+
+It has these top-level messages:
+	Status
+*/
+package google_rpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` which can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may

+//     have a `Status` message for error reporting purposes.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+//     `Status` message should be used directly inside batch response, one for
+//     each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+type Status struct {
+	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+	Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
+	// A developer-facing error message, which should be in English. Any
+	// user-facing error message should be localized and sent in the
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+	Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
+	// A list of messages that carry the error details.  There will be a
+	// common set of message types for APIs to use.
+	Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+func (m *Status) Reset()                    { *m = Status{} }
+func (m *Status) String() string            { return proto.CompactTextString(m) }
+func (*Status) ProtoMessage()               {}
+func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Status) GetDetails() []*google_protobuf.Any {
+	if m != nil {
+		return m.Details
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Status)(nil), "google.rpc.Status")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/googleapis/rpc/status/status.proto", fileDescriptor0)
+}
+
+var fileDescriptor0 = []byte{
+	// 208 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8e, 0xcd, 0x4e, 0x84, 0x30,
+	0x10, 0xc7, 0x83, 0xab, 0xbb, 0xb1, 0x9b, 0x78, 0x68, 0x3c, 0x34, 0x5e, 0xdc, 0x78, 0xe2, 0x34,
+	0x93, 0xe8, 0xd9, 0x83, 0x3c, 0x01, 0xc1, 0x27, 0x28, 0x50, 0x46, 0x12, 0xe8, 0x34, 0xb4, 0x1c,
+	0x78, 0x7b, 0xa1, 0x85, 0xec, 0xa1, 0x69, 0x3b, 0xf3, 0xfb, 0x7f, 0x88, 0x6f, 0x62, 0xa6, 0xc1,
+	0x00, 0xf1, 0xa0, 0x2d, 0x01, 0x4f, 0x84, 0x64, 0xac, 0x9b, 0x38, 0x30, 0xa6, 0x95, 0x76, 0xbd,
+	0xc7, 0xc9, 0x35, 0xe8, 0x83, 0x0e, 0xb3, 0xdf, 0x2f, 0x88, 0x88, 0x14, 0xbb, 0x7c, 0xdd, 0xbf,
+	0x21, 0xf5, 0xe1, 0x6f, 0xae, 0xa1, 0xe1, 0x11, 0x93, 0x1d, 0x46, 0xa8, 0x9e, 0x3b, 0x74, 0x61,
+	0x71, 0xc6, 0xa3, 0xb6, 0xcb, 0x76, 0x92, 0xf8, 0xa3, 0x13, 0xe7, 0xdf, 0x68, 0x26, 0xa5, 0x78,
+	0x6c, 0xb8, 0x35, 0x2a, 0xbb, 0x65, 0xf9, 0x53, 0x15, 0xdf, 0x52, 0x89, 0xcb, 0x68, 0xbc, 0xd7,
+	0x64, 0xd4, 0xc3, 0x3a, 0x7e, 0xae, 0x8e, 0xaf, 0x04, 0x71, 0x69, 0x4d, 0xd0, 0xfd, 0xe0, 0xd5,
+	0xe9, 0x76, 0xca, 0xaf, 0x9f, 0xaf, 0xb0, 0xd7, 0x38, 0xf2, 0xe0, 0xc7, 0x2e, 0xd5, 0x01, 0x15,
+	0xef, 0xe2, 0x65, 0xed, 0x04, 0xf7, 0xaa, 0xc5, 0x35, 0xe5, 0x96, 0x1b, 0x5e, 0x66, 0xf5, 0x39,
+	0xea, 0xbe, 0xfe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x73, 0x63, 0xb7, 0xba, 0x0d, 0x01, 0x00, 0x00,
+}
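
The generated Status type above is the generic error payload shared by Google's REST and RPC APIs. A small sketch of constructing and serializing one; the numeric code, the message text, and the rpcpb import alias are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	rpcpb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	// 5 is the gRPC NotFound code (codes.NotFound in google.golang.org/grpc/codes).
	st := &rpcpb.Status{
		Code:    5,
		Message: "metric not found",
	}
	b, err := proto.Marshal(st)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes on the wire: %s\n", len(b), proto.CompactTextString(st))
}
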
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.proto b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.proto
new file mode 100644
index 0000000..c1d69e9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.proto
@@ -0,0 +1,90 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "github.com/golang/protobuf/ptypes/any/any.proto"; // from google/protobuf/any.proto
+
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` which can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+//     have a `Status` message for error reporting purposes.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+//     `Status` message should be used directly inside batch response, one for
+//     each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+message Status {
+  // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+  int32 code = 1;
+
+  // A developer-facing error message, which should be in English. Any
+  // user-facing error message should be localized and sent in the
+  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+  string message = 2;
+
+  // A list of messages that carry the error details.  There will be a
+  // common set of message types for APIs to use.
+  repeated google.protobuf.Any details = 3;
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/api.pb.go b/vendor/google.golang.org/genproto/protobuf/api.pb.go
new file mode 100644
index 0000000..00e5f6c
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/api.pb.go
@@ -0,0 +1,295 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/protobuf/api.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/genproto/protobuf/api.proto
+	google.golang.org/genproto/protobuf/descriptor.proto
+	google.golang.org/genproto/protobuf/field_mask.proto
+	google.golang.org/genproto/protobuf/source_context.proto
+	google.golang.org/genproto/protobuf/type.proto
+
+It has these top-level messages:
+	Api
+	Method
+	Mixin
+	FileDescriptorSet
+	FileDescriptorProto
+	DescriptorProto
+	FieldDescriptorProto
+	OneofDescriptorProto
+	EnumDescriptorProto
+	EnumValueDescriptorProto
+	ServiceDescriptorProto
+	MethodDescriptorProto
+	FileOptions
+	MessageOptions
+	FieldOptions
+	OneofOptions
+	EnumOptions
+	EnumValueOptions
+	ServiceOptions
+	MethodOptions
+	UninterpretedOption
+	SourceCodeInfo
+	GeneratedCodeInfo
+	FieldMask
+	SourceContext
+	Type
+	Field
+	Enum
+	EnumValue
+	Option
+*/
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Api is a light-weight descriptor for a protocol buffer service.
+type Api struct {
+	// The fully qualified name of this api, including package name
+	// followed by the api's simple name.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The methods of this api, in unspecified order.
+	Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"`
+	// Any metadata attached to the API.
+	Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
+	// A version string for this api. If specified, must have the form
+	// `major-version.minor-version`, as in `1.10`. If the minor version
+	// is omitted, it defaults to zero. If the entire version field is
+	// empty, the major version is derived from the package name, as
+	// outlined below. If the field is not empty, the version in the
+	// package name will be verified to be consistent with what is
+	// provided here.
+	//
+	// The versioning schema uses [semantic
+	// versioning](http://semver.org) where the major version number
+	// indicates a breaking change and the minor version an additive,
+	// non-breaking change. Both version numbers are signals to users
+	// what to expect from different versions, and should be carefully
+	// chosen based on the product plan.
+	//
+	// The major version is also reflected in the package name of the
+	// API, which must end in `v<major-version>`, as in
+	// `google.feature.v1`. For major versions 0 and 1, the suffix can
+	// be omitted. Zero major versions must only be used for
+	// experimental, non-GA apis.
+	//
+	//
+	Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"`
+	// Source context for the protocol buffer service represented by this
+	// message.
+	SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
+	// Included APIs. See [Mixin][].
+	Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"`
+	// The source syntax of the service.
+	Syntax Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
+}
+
+func (m *Api) Reset()                    { *m = Api{} }
+func (m *Api) String() string            { return proto.CompactTextString(m) }
+func (*Api) ProtoMessage()               {}
+func (*Api) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Api) GetMethods() []*Method {
+	if m != nil {
+		return m.Methods
+	}
+	return nil
+}
+
+func (m *Api) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *Api) GetSourceContext() *SourceContext {
+	if m != nil {
+		return m.SourceContext
+	}
+	return nil
+}
+
+func (m *Api) GetMixins() []*Mixin {
+	if m != nil {
+		return m.Mixins
+	}
+	return nil
+}
+
+// Method represents a method of an api.
+type Method struct {
+	// The simple name of this method.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// A URL of the input message type.
+	RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl" json:"request_type_url,omitempty"`
+	// If true, the request is streamed.
+	RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming" json:"request_streaming,omitempty"`
+	// The URL of the output message type.
+	ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl" json:"response_type_url,omitempty"`
+	// If true, the response is streamed.
+	ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming" json:"response_streaming,omitempty"`
+	// Any metadata attached to the method.
+	Options []*Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"`
+	// The source syntax of this method.
+	Syntax Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
+}
+
+func (m *Method) Reset()                    { *m = Method{} }
+func (m *Method) String() string            { return proto.CompactTextString(m) }
+func (*Method) ProtoMessage()               {}
+func (*Method) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Method) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Declares an API to be included in this API. The including API must
+// redeclare all the methods from the included API, but documentation
+// and options are inherited as follows:
+//
+// - If after comment and whitespace stripping, the documentation
+//   string of the redeclared method is empty, it will be inherited
+//   from the original method.
+//
+// - Each annotation belonging to the service config (http,
+//   visibility) which is not set in the redeclared method will be
+//   inherited.
+//
+// - If an http annotation is inherited, the path pattern will be
+//   modified as follows. Any version prefix will be replaced by the
+//   version of the including API plus the [root][] path if specified.
+//
+// Example of a simple mixin:
+//
+//     package google.acl.v1;
+//     service AccessControl {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v1/{resource=**}:getAcl";
+//       }
+//     }
+//
+//     package google.storage.v2;
+//     service Storage {
+//       rpc GetAcl(GetAclRequest) returns (Acl);
+//
+//       // Get a data record.
+//       rpc GetData(GetDataRequest) returns (Data) {
+//         option (google.api.http).get = "/v2/{resource=**}";
+//       }
+//     }
+//
+// Example of a mixin configuration:
+//
+//     apis:
+//     - name: google.storage.v2.Storage
+//       mixins:
+//       - name: google.acl.v1.AccessControl
+//
+// The mixin construct implies that all methods in `AccessControl` are
+// also declared with the same name and request/response types in
+// `Storage`. A documentation generator or annotation processor will
+// see the effective `Storage.GetAcl` method after inheriting
+// documentation and annotations as follows:
+//
+//     service Storage {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v2/{resource=**}:getAcl";
+//       }
+//       ...
+//     }
+//
+// Note how the version in the path pattern changed from `v1` to `v2`.
+//
+// If the `root` field in the mixin is specified, it should be a
+// relative path under which inherited HTTP paths are placed. Example:
+//
+//     apis:
+//     - name: google.storage.v2.Storage
+//       mixins:
+//       - name: google.acl.v1.AccessControl
+//         root: acls
+//
+// This implies the following inherited HTTP annotation:
+//
+//     service Storage {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
+//       }
+//       ...
+//     }
+type Mixin struct {
+	// The fully qualified name of the API which is included.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// If non-empty specifies a path under which inherited HTTP paths
+	// are rooted.
+	Root string `protobuf:"bytes,2,opt,name=root" json:"root,omitempty"`
+}
+
+func (m *Mixin) Reset()                    { *m = Mixin{} }
+func (m *Mixin) String() string            { return proto.CompactTextString(m) }
+func (*Mixin) ProtoMessage()               {}
+func (*Mixin) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func init() {
+	proto.RegisterType((*Api)(nil), "google.protobuf.Api")
+	proto.RegisterType((*Method)(nil), "google.protobuf.Method")
+	proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin")
+}
+
+func init() { proto.RegisterFile("google.golang.org/genproto/protobuf/api.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 424 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x52, 0x4f, 0x4f, 0xe2, 0x40,
+	0x14, 0x4f, 0x5b, 0x28, 0xec, 0x90, 0x85, 0xdd, 0xd9, 0x64, 0xb7, 0xe1, 0x40, 0x08, 0xa7, 0x66,
+	0x37, 0xb4, 0x59, 0xbc, 0x78, 0x15, 0x63, 0x38, 0x10, 0x63, 0x53, 0x34, 0x1e, 0x49, 0xc1, 0xb1,
+	0x36, 0x69, 0x67, 0xea, 0xcc, 0x54, 0xe1, 0xdb, 0x18, 0x8f, 0x1e, 0xfd, 0x06, 0x7e, 0x33, 0xa7,
+	0xd3, 0x0e, 0x20, 0x60, 0x82, 0x97, 0x66, 0xde, 0xfb, 0xfd, 0x79, 0xf3, 0x7e, 0x53, 0xd0, 0x0f,
+	0x09, 0x09, 0x63, 0xe4, 0x84, 0x24, 0x0e, 0x70, 0xe8, 0x10, 0x1a, 0xba, 0x21, 0xc2, 0x29, 0x25,
+	0x9c, 0xb8, 0xf2, 0x3b, 0xcb, 0x6e, 0xdd, 0x20, 0x8d, 0x1c, 0x59, 0xc0, 0x56, 0x49, 0x57, 0x50,
+	0xfb, 0xf8, 0x10, 0x3d, 0x23, 0x19, 0x9d, 0xa3, 0xe9, 0x9c, 0x60, 0x8e, 0x16, 0xbc, 0x10, 0xb7,
+	0x9d, 0x43, 0x94, 0x7c, 0x99, 0x96, 0xc3, 0x7a, 0x6f, 0x3a, 0x30, 0x4e, 0xd2, 0x08, 0x42, 0x50,
+	0xc1, 0x41, 0x82, 0x2c, 0xad, 0xab, 0xd9, 0xdf, 0x7c, 0x79, 0x86, 0xff, 0x41, 0x2d, 0x41, 0xfc,
+	0x8e, 0xdc, 0x30, 0x4b, 0xef, 0x1a, 0x76, 0x63, 0xf0, 0xc7, 0xd9, 0xba, 0xa8, 0x73, 0x2e, 0x71,
+	0x5f, 0xf1, 0x72, 0x09, 0x49, 0x79, 0x44, 0x30, 0xb3, 0x8c, 0x4f, 0x24, 0x17, 0x12, 0xf7, 0x15,
+	0x0f, 0x5a, 0xa0, 0xf6, 0x80, 0x28, 0x13, 0x67, 0xab, 0x22, 0x87, 0xab, 0x12, 0x9e, 0x81, 0xe6,
+	0xc7, 0x1d, 0xad, 0xaa, 0x20, 0x34, 0x06, 0x9d, 0x1d, 0xcf, 0x89, 0xa4, 0x9d, 0x16, 0x2c, 0xff,
+	0x3b, 0xdb, 0x2c, 0xa1, 0x03, 0xcc, 0x24, 0x5a, 0x44, 0xe2, 0x4a, 0xa6, 0xbc, 0xd2, 0xef, 0xdd,
+	0x2d, 0x72, 0xd8, 0x2f, 0x59, 0xd0, 0x05, 0x26, 0x5b, 0x62, 0x1e, 0x2c, 0xac, 0x9a, 0x18, 0xd7,
+	0xdc, 0xb3, 0xc2, 0x44, 0xc2, 0x7e, 0x49, 0xeb, 0xbd, 0xea, 0xc0, 0x2c, 0x82, 0xd8, 0x1b, 0xa3,
+	0x0d, 0x7e, 0x50, 0x74, 0x9f, 0x21, 0xc6, 0xa7, 0x79, 0xf0, 0xd3, 0x8c, 0xc6, 0x22, 0xcf, 0x1c,
+	0x6f, 0x96, 0xfd, 0x4b, 0xd1, 0xbe, 0xa2, 0x31, 0xfc, 0x07, 0x7e, 0x2a, 0x26, 0xe3, 0x14, 0x05,
+	0x49, 0x84, 0x43, 0x91, 0xa3, 0x66, 0xd7, 0x7d, 0x65, 0x31, 0x51, 0x7d, 0xf8, 0x37, 0x27, 0xb3,
+	0x54, 0x44, 0x88, 0xd6, 0xbe, 0x45, 0x82, 0x2d, 0x05, 0x28, 0xe3, 0x3e, 0x80, 0x2b, 0xee, 0xda,
+	0xb9, 0x2a, 0x9d, 0x57, 0x2e, 0x6b, 0xeb, 0x8d, 0x57, 0x34, 0x0f, 0x7c, 0xc5, 0x2f, 0x87, 0xe6,
+	0x82, 0xaa, 0x8c, 0x7d, 0x6f, 0x64, 0xa2, 0x47, 0x09, 0xe1, 0x65, 0x4c, 0xf2, 0x3c, 0x1c, 0x83,
+	0x5f, 0x73, 0x92, 0x6c, 0xdb, 0x0e, 0xeb, 0xe2, 0xef, 0xf5, 0xf2, 0xc2, 0xd3, 0x9e, 0x34, 0xed,
+	0x59, 0x37, 0x46, 0xde, 0xf0, 0x45, 0xef, 0x8c, 0x0a, 0x9a, 0xa7, 0xa6, 0x5f, 0xa3, 0x38, 0x1e,
+	0x63, 0xf2, 0x88, 0xf3, 0x48, 0xd8, 0xcc, 0x94, 0xfa, 0xa3, 0xf7, 0x00, 0x00, 0x00, 0xff, 0xff,
+	0x97, 0x07, 0xcf, 0x1c, 0xa9, 0x03, 0x00, 0x00,
+}
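
The Api, Method, and Mixin messages above are light-weight service descriptors rather than request types. A tiny hand-built example follows; all field values and the apipb import alias are made up for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	apipb "google.golang.org/genproto/protobuf"
)

func main() {
	api := &apipb.Api{
		Name:    "google.logging.v2.MetricsServiceV2",
		Version: "2.0",
		Methods: []*apipb.Method{{
			Name:            "GetLogMetric",
			RequestTypeUrl:  "type.googleapis.com/google.logging.v2.GetLogMetricRequest",
			ResponseTypeUrl: "type.googleapis.com/google.logging.v2.LogMetric",
		}},
	}
	// Print the descriptor in proto text format.
	fmt.Println(proto.MarshalTextString(api))
}
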
diff --git a/vendor/google.golang.org/genproto/protobuf/api.proto b/vendor/google.golang.org/genproto/protobuf/api.proto
new file mode 100644
index 0000000..f08a7f8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/api.proto
@@ -0,0 +1,202 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+import "google.golang.org/genproto/protobuf/source_context.proto"; // from google/protobuf/source_context.proto
+import "google.golang.org/genproto/protobuf/type.proto"; // from google/protobuf/type.proto
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "ApiProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// Api is a light-weight descriptor for a protocol buffer service.
+message Api {
+
+  // The fully qualified name of this api, including package name
+  // followed by the api's simple name.
+  string name = 1;
+
+  // The methods of this api, in unspecified order.
+  repeated Method methods = 2;
+
+  // Any metadata attached to the API.
+  repeated Option options = 3;
+
+  // A version string for this api. If specified, must have the form
+  // `major-version.minor-version`, as in `1.10`. If the minor version
+  // is omitted, it defaults to zero. If the entire version field is
+  // empty, the major version is derived from the package name, as
+  // outlined below. If the field is not empty, the version in the
+  // package name will be verified to be consistent with what is
+  // provided here.
+  //
+  // The versioning schema uses [semantic
+  // versioning](http://semver.org) where the major version number
+  // indicates a breaking change and the minor version an additive,
+  // non-breaking change. Both version numbers are signals to users
+  // what to expect from different versions, and should be carefully
+  // chosen based on the product plan.
+  //
+  // The major version is also reflected in the package name of the
+  // API, which must end in `v<major-version>`, as in
+  // `google.feature.v1`. For major versions 0 and 1, the suffix can
+  // be omitted. Zero major versions must only be used for
+  // experimental, non-GA apis.
+  //
+  //
+  string version = 4;
+
+  // Source context for the protocol buffer service represented by this
+  // message.
+  SourceContext source_context = 5;
+
+  // Included APIs. See [Mixin][].
+  repeated Mixin mixins = 6;
+
+  // The source syntax of the service.
+  Syntax syntax = 7;
+}
+
+// Method represents a method of an api.
+message Method {
+
+  // The simple name of this method.
+  string name = 1;
+
+  // A URL of the input message type.
+  string request_type_url = 2;
+
+  // If true, the request is streamed.
+  bool request_streaming = 3;
+
+  // The URL of the output message type.
+  string response_type_url = 4;
+
+  // If true, the response is streamed.
+  bool response_streaming = 5;
+
+  // Any metadata attached to the method.
+  repeated Option options = 6;
+
+  // The source syntax of this method.
+  Syntax syntax = 7;
+}
+
+// Declares an API to be included in this API. The including API must
+// redeclare all the methods from the included API, but documentation
+// and options are inherited as follows:
+//
+// - If after comment and whitespace stripping, the documentation
+//   string of the redeclared method is empty, it will be inherited
+//   from the original method.
+//
+// - Each annotation belonging to the service config (http,
+//   visibility) which is not set in the redeclared method will be
+//   inherited.
+//
+// - If an http annotation is inherited, the path pattern will be
+//   modified as follows. Any version prefix will be replaced by the
+//   version of the including API plus the [root][] path if specified.
+//
+// Example of a simple mixin:
+//
+//     package google.acl.v1;
+//     service AccessControl {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v1/{resource=**}:getAcl";
+//       }
+//     }
+//
+//     package google.storage.v2;
+//     service Storage {
+//       rpc GetAcl(GetAclRequest) returns (Acl);
+//
+//       // Get a data record.
+//       rpc GetData(GetDataRequest) returns (Data) {
+//         option (google.api.http).get = "/v2/{resource=**}";
+//       }
+//     }
+//
+// Example of a mixin configuration:
+//
+//     apis:
+//     - name: google.storage.v2.Storage
+//       mixins:
+//       - name: google.acl.v1.AccessControl
+//
+// The mixin construct implies that all methods in `AccessControl` are
+// also declared with the same name and request/response types in
+// `Storage`. A documentation generator or annotation processor will
+// see the effective `Storage.GetAcl` method after inheriting
+// documentation and annotations as follows:
+//
+//     service Storage {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v2/{resource=**}:getAcl";
+//       }
+//       ...
+//     }
+//
+// Note how the version in the path pattern changed from `v1` to `v2`.
+//
+// If the `root` field in the mixin is specified, it should be a
+// relative path under which inherited HTTP paths are placed. Example:
+//
+//     apis:
+//     - name: google.storage.v2.Storage
+//       mixins:
+//       - name: google.acl.v1.AccessControl
+//         root: acls
+//
+// This implies the following inherited HTTP annotation:
+//
+//     service Storage {
+//       // Get the underlying ACL object.
+//       rpc GetAcl(GetAclRequest) returns (Acl) {
+//         option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
+//       }
+//       ...
+//     }
+message Mixin {
+  // The fully qualified name of the API which is included.
+  string name = 1;
+
+  // If non-empty specifies a path under which inherited HTTP paths
+  // are rooted.
+  string root = 2;
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go b/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go
new file mode 100644
index 0000000..ec27f87
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go
@@ -0,0 +1,2044 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/protobuf/descriptor.proto
+// DO NOT EDIT!
+
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} }
+
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{3, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// etc.
+	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} }
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} }
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 1} }
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File             []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()                    { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string            { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()               {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax           *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()                    { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()               {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName     []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()                    { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()               {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start            *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End              *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start            *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End              *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName         *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options          *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()                    { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()               {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name             *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options          *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()                    { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()               {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name             *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value            []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options          *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_unrecognized []byte                      `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()                    { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()               {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name             *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number           *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options          *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()                    { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()               {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name             *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method           []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options          *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()                    { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()               {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} }
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies if client streams multiple client messages
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies if server streams multiple server messages
+	ServerStreaming  *bool  `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()                    { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string            { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()               {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} }
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// If set true, then the Java code generator will generate equals() and
+	// hashCode() methods for all messages defined in the .proto file.
+	// This increases generated code size, potentially substantially for large
+	// protos, which may harm a memory-constrained application.
+	// - In the full runtime this is a speed optimization, as the
+	// AbstractMessage base class includes reflection-based implementations of
+	// these methods.
+	// - In the lite runtime, setting this option changes the semantics of
+	// equals() and hashCode() to more closely match those of the full runtime;
+	// the generated methods compute their results based on field values rather
+	// than object identity. (Implementations should not assume that hashcodes
+	// will be consistent across runtimes or versions of the protocol compiler.)
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"`
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset()                    { *m = FileOptions{} }
+func (m *FileOptions) String() string            { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()               {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return Default_FileOptions_JavaGenerateEqualsAndHash
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset()                    { *m = MessageOptions{} }
+func (m *MessageOptions) String() string            { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()               {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  By default these types are
+	// represented as JavaScript strings.  This avoids loss of precision that can
+	// happen when a large value is converted to a floating point JavaScript
+	// number.  Specifying JS_NUMBER for the jstype causes the generated
+	// JavaScript code to use the JavaScript "number" type instead of strings.
+	// This option is an enum to permit additional types to be added,
+	// e.g. goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *FieldOptions) Reset()                    { *m = FieldOptions{} }
+func (m *FieldOptions) String() string            { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()               {}
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} }
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *OneofOptions) Reset()                    { *m = OneofOptions{} }
+func (m *OneofOptions) String() string            { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()               {}
+func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} }
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *EnumOptions) Reset()                    { *m = EnumOptions{} }
+func (m *EnumOptions) String() string            { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()               {}
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} }
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()                    { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string            { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()               {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} }
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()                    { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string            { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()               {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset()                    { *m = MethodOptions{} }
+func (m *MethodOptions) String() string            { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()               {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue  *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue      *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue      []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue   *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()                    { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string            { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()               {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} }
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart         *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension      *bool   `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{17, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
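+
+// For illustration, the name "foo.(bar.baz).qux" described above could be
+// assembled as follows (a minimal sketch; opt is a hypothetical variable):
+//
+//	opt := &UninterpretedOption{
+//		Name: []*UninterpretedOption_NamePart{
+//			{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
+//			{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
+//			{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
+//		},
+//	}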
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements are
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location         []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_unrecognized []byte                     `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()                    { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string            { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()               {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_unrecognized        []byte   `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()                    { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string            { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()               {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18, 0} }
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
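+
+// For illustration, the "optional string foo = 1;" walkthrough in the
+// SourceCodeInfo comment above could be captured as a Location like this
+// (the span values are hypothetical, zero-based offsets):
+//
+//	loc := &SourceCodeInfo_Location{
+//		Path: []int32{4, 0, 2, 0, 1}, // message_type(0).field(0).name()
+//		Span: []int32{2, 20, 23},     // start line, start column, end column
+//	}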
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation       []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_unrecognized []byte                          `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()                    { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string            { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()               {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} }
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified offset. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End              *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor1, []int{19, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
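+
+// For illustration, Begin and End are byte offsets into the generated file,
+// with End one past the last relevant byte. Assuming ann is an Annotation and
+// generatedSource holds that file's contents as a []byte, the annotated span
+// could be recovered with:
+//
+//	snippet := generatedSource[ann.GetBegin():ann.GetEnd()]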
+
+func init() {
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/protobuf/descriptor.proto", fileDescriptor1)
+}
+
+var fileDescriptor1 = []byte{
+	// 2282 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x8f, 0xdb, 0xc6,
+	0x15, 0x2f, 0xf5, 0xb5, 0xd2, 0xd3, 0xae, 0xc4, 0x9d, 0xdd, 0xd8, 0xf2, 0x26, 0x8e, 0x63, 0xc5,
+	0x8e, 0x1d, 0xa7, 0xd5, 0x06, 0x6e, 0x3e, 0xdc, 0x4d, 0x91, 0x42, 0x2b, 0xd1, 0x1b, 0x19, 0xd2,
+	0x4a, 0xa5, 0xa4, 0x36, 0x49, 0x0f, 0x04, 0x97, 0x1a, 0x69, 0x69, 0x53, 0xa4, 0x4a, 0x52, 0xb6,
+	0x37, 0xa7, 0x02, 0x3d, 0xf5, 0xd8, 0x5b, 0xd1, 0x16, 0x45, 0x91, 0x4b, 0x80, 0xfe, 0x01, 0x3d,
+	0xf4, 0xde, 0x6b, 0x81, 0xde, 0x7b, 0x2c, 0xd0, 0xfe, 0x07, 0xbd, 0xf6, 0xcd, 0x0c, 0x49, 0x91,
+	0xfa, 0x88, 0xb7, 0x01, 0x92, 0xd4, 0x07, 0x5b, 0xf3, 0xe6, 0xf7, 0xde, 0xbc, 0x79, 0xf3, 0x9b,
+	0xf7, 0x1e, 0xc7, 0xf0, 0xce, 0xc4, 0x71, 0x26, 0x16, 0xad, 0x4d, 0x1c, 0x4b, 0xb7, 0x27, 0x35,
+	0xc7, 0x9d, 0x1c, 0x4e, 0xa8, 0x3d, 0x73, 0x1d, 0xdf, 0x39, 0xe4, 0x7f, 0x9f, 0xcd, 0xc7, 0x87,
+	0x23, 0xea, 0x19, 0xae, 0x39, 0xf3, 0x1d, 0xb7, 0xc6, 0x65, 0xa4, 0x1c, 0x68, 0x85, 0x88, 0x6a,
+	0x07, 0x76, 0x1f, 0x9a, 0x16, 0x6d, 0x46, 0xc0, 0x3e, 0xf5, 0xc9, 0x03, 0xc8, 0x8c, 0x51, 0x58,
+	0x91, 0x5e, 0x4b, 0xdf, 0x2d, 0xde, 0xbf, 0x55, 0x5b, 0x52, 0xaa, 0x25, 0x35, 0x7a, 0x4c, 0xac,
+	0x72, 0x8d, 0xea, 0x3f, 0x33, 0xb0, 0xb7, 0x66, 0x96, 0x10, 0xc8, 0xd8, 0xfa, 0x94, 0x59, 0x94,
+	0xee, 0x16, 0x54, 0xfe, 0x9b, 0x54, 0x60, 0x6b, 0xa6, 0x1b, 0x4f, 0xf4, 0x09, 0xad, 0xa4, 0xb8,
+	0x38, 0x1c, 0x92, 0x57, 0x01, 0x46, 0x74, 0x46, 0xed, 0x11, 0xb5, 0x8d, 0x8b, 0x4a, 0x1a, 0xbd,
+	0x28, 0xa8, 0x31, 0x09, 0x79, 0x0b, 0x76, 0x67, 0xf3, 0x33, 0xcb, 0x34, 0xb4, 0x18, 0x0c, 0x10,
+	0x96, 0x55, 0x65, 0x31, 0xd1, 0x5c, 0x80, 0xef, 0x40, 0xf9, 0x19, 0xd5, 0x9f, 0xc4, 0xa1, 0x45,
+	0x0e, 0x2d, 0x31, 0x71, 0x0c, 0xd8, 0x80, 0xed, 0x29, 0xf5, 0x3c, 0x74, 0x40, 0xf3, 0x2f, 0x66,
+	0xb4, 0x92, 0xe1, 0xbb, 0x7f, 0x6d, 0x65, 0xf7, 0xcb, 0x3b, 0x2f, 0x06, 0x5a, 0x03, 0x54, 0x22,
+	0x75, 0x28, 0x50, 0x7b, 0x3e, 0x15, 0x16, 0xb2, 0x1b, 0xe2, 0xa7, 0x20, 0x62, 0xd9, 0x4a, 0x9e,
+	0xa9, 0x05, 0x26, 0xb6, 0x3c, 0xea, 0x3e, 0x35, 0x0d, 0x5a, 0xc9, 0x71, 0x03, 0x77, 0x56, 0x0c,
+	0xf4, 0xc5, 0xfc, 0xb2, 0x8d, 0x50, 0x0f, 0xb7, 0x52, 0xa0, 0xcf, 0x7d, 0x6a, 0x7b, 0xa6, 0x63,
+	0x57, 0xb6, 0xb8, 0x91, 0xdb, 0x6b, 0x4e, 0x91, 0x5a, 0xa3, 0x65, 0x13, 0x0b, 0x3d, 0xf2, 0x1e,
+	0x6c, 0x39, 0x33, 0x1f, 0x7f, 0x79, 0x95, 0x3c, 0x9e, 0x4f, 0xf1, 0xfe, 0x2b, 0x6b, 0x89, 0xd0,
+	0x15, 0x18, 0x35, 0x04, 0x93, 0x16, 0xc8, 0x9e, 0x33, 0x77, 0x0d, 0xaa, 0x19, 0xce, 0x88, 0x6a,
+	0xa6, 0x3d, 0x76, 0x2a, 0x05, 0x6e, 0xe0, 0xc6, 0xea, 0x46, 0x38, 0xb0, 0x81, 0xb8, 0x16, 0xc2,
+	0xd4, 0x92, 0x97, 0x18, 0x93, 0x2b, 0x90, 0xf3, 0x2e, 0x6c, 0x5f, 0x7f, 0x5e, 0xd9, 0xe6, 0x0c,
+	0x09, 0x46, 0xd5, 0xff, 0x64, 0xa1, 0x7c, 0x19, 0x8a, 0x7d, 0x00, 0xd9, 0x31, 0xdb, 0x25, 0x12,
+	0xec, 0x7f, 0x88, 0x81, 0xd0, 0x49, 0x06, 0x31, 0xf7, 0x15, 0x83, 0x58, 0x87, 0xa2, 0x4d, 0x3d,
+	0x9f, 0x8e, 0x04, 0x23, 0xd2, 0x97, 0xe4, 0x14, 0x08, 0xa5, 0x55, 0x4a, 0x65, 0xbe, 0x12, 0xa5,
+	0x3e, 0x86, 0x72, 0xe4, 0x92, 0xe6, 0x62, 0xbe, 0x08, 0xb9, 0x79, 0xf8, 0x22, 0x4f, 0x6a, 0x4a,
+	0xa8, 0xa7, 0x32, 0x35, 0xb5, 0x44, 0x13, 0x63, 0xd2, 0x04, 0x70, 0x6c, 0xea, 0x8c, 0xf1, 0x7a,
+	0x19, 0x16, 0xf2, 0x64, 0x7d, 0x94, 0xba, 0x0c, 0xb2, 0x12, 0x25, 0x47, 0x48, 0x0d, 0x8b, 0xfc,
+	0x60, 0x41, 0xb5, 0xad, 0x0d, 0x4c, 0xe9, 0x88, 0x4b, 0xb6, 0xc2, 0xb6, 0x21, 0x94, 0x5c, 0xca,
+	0x78, 0x8f, 0x21, 0x16, 0x3b, 0x2b, 0x70, 0x27, 0x6a, 0x2f, 0xdc, 0x99, 0x1a, 0xa8, 0x89, 0x8d,
+	0xed, 0xb8, 0xf1, 0x21, 0x79, 0x1d, 0x22, 0x81, 0xc6, 0x69, 0x05, 0x3c, 0x0b, 0x6d, 0x87, 0xc2,
+	0x53, 0x94, 0x1d, 0x3c, 0x80, 0x52, 0x32, 0x3c, 0x64, 0x1f, 0xb2, 0x9e, 0xaf, 0xbb, 0x3e, 0x67,
+	0x61, 0x56, 0x15, 0x03, 0x22, 0x43, 0x1a, 0x93, 0x0c, 0xcf, 0x72, 0x59, 0x95, 0xfd, 0x3c, 0x78,
+	0x1f, 0x76, 0x12, 0xcb, 0x5f, 0x56, 0xb1, 0xfa, 0x9b, 0x1c, 0xec, 0xaf, 0xe3, 0xdc, 0x5a, 0xfa,
+	0xe3, 0xf5, 0x41, 0x06, 0x9c, 0x51, 0x17, 0x79, 0xc7, 0x2c, 0x04, 0x23, 0x64, 0x54, 0xd6, 0xd2,
+	0xcf, 0xa8, 0x85, 0x6c, 0x92, 0xee, 0x96, 0xee, 0xbf, 0x75, 0x29, 0x56, 0xd7, 0xda, 0x4c, 0x45,
+	0x15, 0x9a, 0xe4, 0x43, 0xc8, 0x04, 0x29, 0x8e, 0x59, 0xb8, 0x77, 0x39, 0x0b, 0x8c, 0x8b, 0x2a,
+	0xd7, 0x23, 0x2f, 0x43, 0x81, 0xfd, 0x2b, 0x62, 0x9b, 0xe3, 0x3e, 0xe7, 0x99, 0x80, 0xc5, 0x95,
+	0x1c, 0x40, 0x9e, 0xd3, 0x6c, 0x44, 0xc3, 0xd2, 0x10, 0x8d, 0xd9, 0xc1, 0x8c, 0xe8, 0x58, 0x9f,
+	0x5b, 0xbe, 0xf6, 0x54, 0xb7, 0xe6, 0x94, 0x13, 0x06, 0x0f, 0x26, 0x10, 0xfe, 0x84, 0xc9, 0xc8,
+	0x0d, 0x28, 0x0a, 0x56, 0x9a, 0xa8, 0xf3, 0x9c, 0x67, 0x9f, 0xac, 0x2a, 0x88, 0xda, 0x62, 0x12,
+	0xb6, 0xfc, 0x63, 0x0f, 0xef, 0x42, 0x70, 0xb4, 0x7c, 0x09, 0x26, 0xe0, 0xcb, 0xbf, 0xbf, 0x9c,
+	0xf8, 0xae, 0xaf, 0xdf, 0xde, 0x32, 0x17, 0xab, 0x7f, 0x4e, 0x41, 0x86, 0xdf, 0xb7, 0x32, 0x14,
+	0x07, 0x9f, 0xf4, 0x14, 0xad, 0xd9, 0x1d, 0x1e, 0xb7, 0x15, 0x59, 0x22, 0x25, 0x00, 0x2e, 0x78,
+	0xd8, 0xee, 0xd6, 0x07, 0x72, 0x2a, 0x1a, 0xb7, 0x4e, 0x07, 0xef, 0xbd, 0x23, 0xa7, 0x23, 0x85,
+	0xa1, 0x10, 0x64, 0xe2, 0x80, 0xef, 0xdf, 0x97, 0xb3, 0xc8, 0x84, 0x6d, 0x61, 0xa0, 0xf5, 0xb1,
+	0xd2, 0x44, 0x44, 0x2e, 0x29, 0x41, 0xcc, 0x16, 0xd9, 0x81, 0x02, 0x97, 0x1c, 0x77, 0xbb, 0x6d,
+	0x39, 0x1f, 0xd9, 0xec, 0x0f, 0xd4, 0xd6, 0xe9, 0x89, 0x5c, 0x88, 0x6c, 0x9e, 0xa8, 0xdd, 0x61,
+	0x4f, 0x86, 0xc8, 0x42, 0x47, 0xe9, 0xf7, 0xeb, 0x27, 0x8a, 0x5c, 0x8c, 0x10, 0xc7, 0x9f, 0x0c,
+	0x94, 0xbe, 0xbc, 0x9d, 0x70, 0x0b, 0x97, 0xd8, 0x89, 0x96, 0x50, 0x4e, 0x87, 0x1d, 0xb9, 0x44,
+	0x76, 0x61, 0x47, 0x2c, 0x11, 0x3a, 0x51, 0x5e, 0x12, 0xa1, 0xa7, 0xf2, 0xc2, 0x11, 0x61, 0x65,
+	0x37, 0x21, 0x40, 0x04, 0xa9, 0x36, 0x20, 0xcb, 0xd9, 0x85, 0x2c, 0x2e, 0xb5, 0xeb, 0xc7, 0x4a,
+	0x5b, 0xeb, 0xf6, 0x06, 0xad, 0xee, 0x69, 0xbd, 0x8d, 0xb1, 0x8b, 0x64, 0xaa, 0xf2, 0xe3, 0x61,
+	0x4b, 0x55, 0x9a, 0x18, 0xbf, 0x98, 0xac, 0xa7, 0xd4, 0x07, 0x28, 0x4b, 0x57, 0x0d, 0xd8, 0x5f,
+	0x97, 0x67, 0xd6, 0xde, 0x8c, 0xd8, 0x11, 0xa7, 0x36, 0x1c, 0x31, 0xb7, 0xb5, 0x72, 0xc4, 0x9f,
+	0x4b, 0xb0, 0xb7, 0x26, 0xd7, 0xae, 0x5d, 0xe4, 0x47, 0x90, 0x15, 0x14, 0x15, 0xd5, 0xe7, 0xcd,
+	0xb5, 0x49, 0x9b, 0x13, 0x76, 0xa5, 0x02, 0x71, 0xbd, 0x78, 0x05, 0x4e, 0x6f, 0xa8, 0xc0, 0xcc,
+	0xc4, 0x8a, 0x93, 0xbf, 0x94, 0xa0, 0xb2, 0xc9, 0xf6, 0x0b, 0x12, 0x45, 0x2a, 0x91, 0x28, 0x3e,
+	0x58, 0x76, 0xe0, 0xe6, 0xe6, 0x3d, 0xac, 0x78, 0xf1, 0x85, 0x04, 0x57, 0xd6, 0x37, 0x2a, 0x6b,
+	0x7d, 0xf8, 0x10, 0x72, 0x53, 0xea, 0x9f, 0x3b, 0x61, 0xb1, 0x7e, 0x63, 0x4d, 0x09, 0x60, 0xd3,
+	0xcb, 0xb1, 0x0a, 0xb4, 0xe2, 0x35, 0x24, 0xbd, 0xa9, 0xdb, 0x10, 0xde, 0xac, 0x78, 0xfa, 0xab,
+	0x14, 0xbc, 0xb4, 0xd6, 0xf8, 0x5a, 0x47, 0xaf, 0x03, 0x98, 0xf6, 0x6c, 0xee, 0x8b, 0x82, 0x2c,
+	0xf2, 0x53, 0x81, 0x4b, 0xf8, 0xdd, 0x67, 0xb9, 0x67, 0xee, 0x47, 0xf3, 0x69, 0x3e, 0x0f, 0x42,
+	0xc4, 0x01, 0x0f, 0x16, 0x8e, 0x66, 0xb8, 0xa3, 0xaf, 0x6e, 0xd8, 0xe9, 0x4a, 0xad, 0x7b, 0x1b,
+	0x64, 0xc3, 0x32, 0xa9, 0xed, 0x6b, 0x9e, 0xef, 0x52, 0x7d, 0x6a, 0xda, 0x13, 0x9e, 0x80, 0xf3,
+	0x47, 0xd9, 0xb1, 0x6e, 0x79, 0x54, 0x2d, 0x8b, 0xe9, 0x7e, 0x38, 0xcb, 0x34, 0x78, 0x95, 0x71,
+	0x63, 0x1a, 0xb9, 0x84, 0x86, 0x98, 0x8e, 0x34, 0xaa, 0xbf, 0xde, 0x82, 0x62, 0xac, 0xad, 0x23,
+	0x37, 0x61, 0xfb, 0xb1, 0xfe, 0x54, 0xd7, 0xc2, 0x56, 0x5d, 0x44, 0xa2, 0xc8, 0x64, 0xbd, 0xa0,
+	0x5d, 0x7f, 0x1b, 0xf6, 0x39, 0x04, 0xf7, 0x88, 0x0b, 0x19, 0x96, 0xee, 0x79, 0x3c, 0x68, 0x79,
+	0x0e, 0x25, 0x6c, 0xae, 0xcb, 0xa6, 0x1a, 0xe1, 0x0c, 0x79, 0x17, 0xf6, 0xb8, 0xc6, 0x14, 0x33,
+	0xb6, 0x39, 0xb3, 0xa8, 0xc6, 0x3e, 0x1e, 0x3c, 0x9e, 0x88, 0x23, 0xcf, 0x76, 0x19, 0xa2, 0x13,
+	0x00, 0x98, 0x47, 0x1e, 0x39, 0x81, 0xeb, 0x5c, 0x0d, 0x3f, 0x74, 0xa8, 0xab, 0xfb, 0x54, 0xa3,
+	0x3f, 0x9f, 0x23, 0x56, 0xd3, 0xed, 0x91, 0x76, 0xae, 0x7b, 0xe7, 0x95, 0xfd, 0xb8, 0x81, 0x6b,
+	0x0c, 0x7b, 0x12, 0x40, 0x15, 0x8e, 0xac, 0xdb, 0xa3, 0x8f, 0x10, 0x47, 0x8e, 0xe0, 0x0a, 0x37,
+	0x84, 0x41, 0xc1, 0x3d, 0x6b, 0xc6, 0x39, 0x35, 0x9e, 0x68, 0x73, 0x7f, 0xfc, 0xa0, 0xf2, 0x72,
+	0xdc, 0x02, 0x77, 0xb2, 0xcf, 0x31, 0x0d, 0x06, 0x19, 0x22, 0x82, 0xf4, 0x61, 0x9b, 0x9d, 0xc7,
+	0xd4, 0xfc, 0x0c, 0xdd, 0x76, 0x5c, 0x5e, 0x5c, 0x4a, 0x6b, 0x2e, 0x77, 0x2c, 0x88, 0xb5, 0x6e,
+	0xa0, 0xd0, 0xc1, 0xc6, 0xf6, 0x28, 0xdb, 0xef, 0x29, 0x4a, 0x53, 0x2d, 0x86, 0x56, 0x1e, 0x3a,
+	0x2e, 0xe3, 0xd4, 0xc4, 0x89, 0x62, 0x5c, 0x14, 0x9c, 0x9a, 0x38, 0x61, 0x84, 0x31, 0x5e, 0x86,
+	0x21, 0xb6, 0x8d, 0x1f, 0x3d, 0x41, 0x97, 0xef, 0x55, 0xe4, 0x44, 0xbc, 0x0c, 0xe3, 0x44, 0x00,
+	0x02, 0x9a, 0x7b, 0x78, 0x25, 0x5e, 0x5a, 0xc4, 0x2b, 0xae, 0xb8, 0xbb, 0xb2, 0xcb, 0x65, 0x55,
+	0x5c, 0x71, 0x76, 0xb1, 0xaa, 0x48, 0x12, 0x2b, 0xce, 0x2e, 0x96, 0xd5, 0x6e, 0xf3, 0x2f, 0x37,
+	0x97, 0x1a, 0x18, 0xf2, 0x51, 0xe5, 0x6a, 0x1c, 0x1d, 0x9b, 0x20, 0x87, 0x48, 0x64, 0x43, 0xa3,
+	0xb6, 0x7e, 0x86, 0x67, 0xaf, 0xbb, 0xf8, 0xc3, 0xab, 0xdc, 0x88, 0x83, 0x4b, 0x86, 0xa1, 0xf0,
+	0xd9, 0x3a, 0x9f, 0x24, 0xf7, 0x60, 0xd7, 0x39, 0x7b, 0x6c, 0x08, 0x72, 0x69, 0x68, 0x67, 0x6c,
+	0x3e, 0xaf, 0xdc, 0xe2, 0x61, 0x2a, 0xb3, 0x09, 0x4e, 0xad, 0x1e, 0x17, 0x93, 0x37, 0xd1, 0xb8,
+	0x77, 0xae, 0xbb, 0x33, 0x5e, 0xdd, 0x3d, 0x0c, 0x2a, 0xad, 0xdc, 0x16, 0x50, 0x21, 0x3f, 0x0d,
+	0xc5, 0xd8, 0x17, 0xef, 0xcf, 0x6d, 0xd3, 0x46, 0x6e, 0xa2, 0x49, 0xd6, 0xa4, 0x8b, 0x9b, 0x56,
+	0xf9, 0xd7, 0xd6, 0x86, 0x36, 0x7b, 0x18, 0x47, 0x8b, 0xd3, 0x55, 0xf7, 0xe6, 0xab, 0xc2, 0xea,
+	0x11, 0x6c, 0xc7, 0x0f, 0x9d, 0x14, 0x40, 0x1c, 0x3b, 0xd6, 0x33, 0xac, 0xa1, 0x8d, 0x6e, 0x93,
+	0x55, 0xbf, 0x4f, 0x15, 0x2c, 0x65, 0x58, 0x85, 0xdb, 0xad, 0x81, 0xa2, 0xa9, 0xc3, 0xd3, 0x41,
+	0xab, 0xa3, 0xc8, 0xe9, 0x7b, 0x85, 0xfc, 0xbf, 0xb7, 0xe4, 0x5f, 0xe0, 0x9f, 0xd4, 0xa3, 0x4c,
+	0xfe, 0x0d, 0xf9, 0x4e, 0xf5, 0xaf, 0x29, 0x28, 0x25, 0xfb, 0x5f, 0xf2, 0x43, 0xb8, 0x1a, 0x7e,
+	0xac, 0x7a, 0xd4, 0xd7, 0x9e, 0x99, 0x2e, 0x67, 0xe3, 0x54, 0x17, 0x1d, 0x64, 0x14, 0xc8, 0xfd,
+	0x00, 0x85, 0x9f, 0xf5, 0x3f, 0x45, 0xcc, 0x43, 0x0e, 0x21, 0x6d, 0xb8, 0x61, 0x3b, 0xc8, 0x7e,
+	0xbc, 0x38, 0xba, 0x3b, 0xd2, 0x16, 0xcf, 0x04, 0x9a, 0x6e, 0xe0, 0x31, 0x7a, 0x8e, 0x28, 0x04,
+	0x91, 0x95, 0x57, 0x6c, 0xa7, 0x1f, 0x80, 0x17, 0x19, 0xb2, 0x1e, 0x40, 0x97, 0x0e, 0x3d, 0xbd,
+	0xe9, 0xd0, 0xb1, 0xe7, 0x9a, 0xea, 0x33, 0x3c, 0x75, 0xdf, 0xbd, 0xe0, 0x5d, 0x5b, 0x5e, 0xcd,
+	0xa3, 0x40, 0x61, 0xe3, 0xaf, 0xef, 0x24, 0x62, 0xd1, 0xac, 0xfe, 0x23, 0x0d, 0xdb, 0xf1, 0xce,
+	0x8d, 0x35, 0xc2, 0x06, 0xcf, 0xd2, 0x12, 0xbf, 0xc4, 0xaf, 0x7f, 0x69, 0x9f, 0x57, 0x6b, 0xb0,
+	0xf4, 0x7d, 0x94, 0x13, 0xfd, 0x94, 0x2a, 0x34, 0x59, 0xe9, 0x64, 0xd7, 0x96, 0x8a, 0x2e, 0x3d,
+	0xaf, 0x06, 0x23, 0xcc, 0x55, 0xb9, 0xc7, 0x1e, 0xb7, 0x9d, 0xe3, 0xb6, 0x6f, 0x7d, 0xb9, 0xed,
+	0x47, 0x7d, 0x6e, 0xbc, 0xf0, 0xa8, 0xaf, 0x9d, 0x76, 0xd5, 0x4e, 0xbd, 0xad, 0x06, 0xea, 0xe4,
+	0x1a, 0x64, 0x2c, 0xfd, 0xb3, 0x8b, 0x64, 0xa2, 0xe7, 0xa2, 0xcb, 0x06, 0x1e, 0x2d, 0xb0, 0xa7,
+	0x8e, 0x64, 0x7a, 0xe5, 0xa2, 0xaf, 0xf1, 0x02, 0x1c, 0x42, 0x96, 0xc7, 0x8b, 0x00, 0x04, 0x11,
+	0x93, 0xbf, 0x43, 0xf2, 0x90, 0x69, 0x74, 0x55, 0x76, 0x09, 0x90, 0xf5, 0x42, 0xaa, 0xf5, 0x5a,
+	0x4a, 0x03, 0xef, 0x41, 0xf5, 0x5d, 0xc8, 0x89, 0x20, 0xb0, 0x0b, 0x12, 0x85, 0x01, 0x95, 0xc4,
+	0x30, 0xb0, 0x21, 0x85, 0xb3, 0xc3, 0xce, 0xb1, 0xa2, 0xca, 0xa9, 0xf8, 0xf1, 0x7a, 0x78, 0xe7,
+	0x62, 0x4d, 0xdb, 0x37, 0xc3, 0xa9, 0xbf, 0x48, 0x50, 0x8c, 0x35, 0x61, 0xac, 0xfc, 0xeb, 0x96,
+	0xe5, 0x3c, 0xd3, 0x74, 0xcb, 0xd4, 0xbd, 0x80, 0x14, 0xc0, 0x45, 0x75, 0x26, 0xb9, 0xec, 0xa1,
+	0x7d, 0x23, 0xce, 0xff, 0x41, 0x02, 0x79, 0xb9, 0x81, 0x5b, 0x72, 0x50, 0xfa, 0x56, 0x1d, 0xfc,
+	0xbd, 0x04, 0xa5, 0x64, 0xd7, 0xb6, 0xe4, 0xde, 0xcd, 0x6f, 0xd5, 0xbd, 0xdf, 0x49, 0xb0, 0x93,
+	0xe8, 0xd5, 0xfe, 0xaf, 0xbc, 0xfb, 0x6d, 0x1a, 0xf6, 0xd6, 0xe8, 0x61, 0xd6, 0x13, 0x4d, 0xad,
+	0xe8, 0xb3, 0xbf, 0x77, 0x99, 0xb5, 0x6a, 0xac, 0x66, 0xf6, 0x74, 0xd7, 0x0f, 0x7a, 0x60, 0xac,
+	0xb1, 0xe6, 0x08, 0x33, 0xb9, 0x39, 0x36, 0xb1, 0xe5, 0x13, 0x5f, 0x39, 0xa2, 0xd3, 0x2d, 0x2f,
+	0xe4, 0xe2, 0x5b, 0xfc, 0xbb, 0x40, 0x66, 0x8e, 0x67, 0xfa, 0xe6, 0x53, 0xf6, 0x16, 0x18, 0x7e,
+	0xb5, 0xb3, 0xce, 0x37, 0xa3, 0xca, 0xe1, 0x4c, 0xcb, 0xf6, 0x23, 0xb4, 0x4d, 0x27, 0xfa, 0x12,
+	0x9a, 0xe5, 0xbe, 0xb4, 0x2a, 0x87, 0x33, 0x11, 0x1a, 0x9b, 0xd3, 0x91, 0x33, 0x67, 0x4d, 0x84,
+	0xc0, 0xb1, 0x54, 0x2b, 0xa9, 0x45, 0x21, 0x8b, 0x20, 0x41, 0x97, 0xb7, 0x78, 0x2e, 0xd8, 0x56,
+	0x8b, 0x42, 0x26, 0x20, 0x77, 0xa0, 0xac, 0x4f, 0x26, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0xad, 0x6b,
+	0x29, 0x12, 0x73, 0xe0, 0xc1, 0x23, 0xc8, 0x87, 0x71, 0x60, 0xd5, 0x8c, 0x45, 0x02, 0x7b, 0x36,
+	0xfe, 0x68, 0x93, 0x62, 0x2f, 0x08, 0x76, 0x38, 0x89, 0x8b, 0x9a, 0x9e, 0xb6, 0x78, 0x3d, 0x4c,
+	0xe1, 0x7c, 0x5e, 0x2d, 0x9a, 0x5e, 0xf4, 0x5c, 0x54, 0xfd, 0x02, 0x6b, 0x7a, 0xf2, 0xf5, 0x93,
+	0x34, 0x21, 0x6f, 0x39, 0xc8, 0x0f, 0xa6, 0x21, 0x9e, 0xde, 0xef, 0xbe, 0xe0, 0xc1, 0xb4, 0xd6,
+	0x0e, 0xf0, 0x6a, 0xa4, 0x79, 0xf0, 0x37, 0x09, 0xf2, 0xa1, 0x18, 0xab, 0x53, 0x66, 0xa6, 0xfb,
+	0xe7, 0xdc, 0x5c, 0xf6, 0x38, 0x25, 0x4b, 0x2a, 0x1f, 0x33, 0x39, 0x76, 0x40, 0x36, 0xa7, 0x40,
+	0x20, 0x67, 0x63, 0x76, 0xae, 0x16, 0xd5, 0x47, 0xbc, 0x29, 0x76, 0xa6, 0x53, 0x3c, 0x49, 0x2f,
+	0x3c, 0xd7, 0x40, 0xde, 0x08, 0xc4, 0xec, 0x11, 0xde, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0x33, 0x1c,
+	0x2b, 0x87, 0x13, 0x11, 0xf8, 0x08, 0xae, 0x85, 0x76, 0x47, 0xd4, 0xd7, 0xb1, 0xe1, 0x1e, 0x2d,
+	0x94, 0x72, 0xfc, 0x69, 0xed, 0x6a, 0x00, 0x68, 0x06, 0xf3, 0xa1, 0x6e, 0xf5, 0xef, 0x12, 0xec,
+	0x86, 0x6d, 0xfc, 0x28, 0x0a, 0x56, 0x07, 0x40, 0xb7, 0x6d, 0xc7, 0x8f, 0x87, 0x6b, 0x95, 0xca,
+	0x2b, 0x7a, 0xb5, 0x7a, 0xa4, 0xa4, 0xc6, 0x0c, 0x1c, 0x4c, 0x01, 0x16, 0x33, 0x1b, 0xc3, 0x86,
+	0xc9, 0x3d, 0x78, 0xda, 0xe6, 0xff, 0x3f, 0x22, 0xbe, 0xfd, 0x40, 0x88, 0x58, 0xbf, 0xcf, 0x9e,
+	0xf1, 0xce, 0xe8, 0xc4, 0xb4, 0x83, 0x07, 0x37, 0x31, 0x08, 0x9f, 0xf1, 0x32, 0xd1, 0x33, 0xde,
+	0xf1, 0xcf, 0xb0, 0xa1, 0x77, 0xa6, 0xcb, 0xee, 0x1e, 0xcb, 0x4b, 0xdf, 0x9f, 0xde, 0x47, 0xd2,
+	0xa7, 0xb0, 0xe8, 0xce, 0xfe, 0x28, 0x49, 0x9f, 0xa7, 0xd2, 0x27, 0xbd, 0xe3, 0x3f, 0xa5, 0x0e,
+	0x4e, 0x84, 0x6a, 0x2f, 0xdc, 0xa9, 0x4a, 0xc7, 0x16, 0x35, 0x98, 0xf7, 0xff, 0x0d, 0x00, 0x00,
+	0xff, 0xff, 0x4c, 0x8f, 0xed, 0xda, 0x1b, 0x1a, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/descriptor.proto b/vendor/google.golang.org/genproto/protobuf/descriptor.proto
new file mode 100644
index 0000000..28410d4
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/descriptor.proto
@@ -0,0 +1,813 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option java_generate_equals_and_hash = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
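+
+// For example, a serialized FileDescriptorSet for a file and its imports can
+// typically be produced with a command along the lines of:
+//   protoc --include_imports --descriptor_set_out=out.pb foo.proto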
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    TYPE_GROUP          = 10;  // Tag-delimited aggregate.
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+    // TODO(sanjay): Should we add LABEL_MAP?
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by the protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies if client streams multiple client messages
+  optional bool client_streaming = 5 [default=false];
+  // Identifies if server streams multiple server messages
+  optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
+
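+// For illustration, a custom option in the 50000-99999 range might be declared
+// and used roughly like this (my_option and MyMessage are made-up names):
+//
+//   import "google/protobuf/descriptor.proto";
+//
+//   extend google.protobuf.MessageOptions {
+//     optional string my_option = 51234;
+//   }
+//
+//   message MyMessage {
+//     option (my_option) = "Hello world!";
+//   }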
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // If set true, then the Java code generator will generate equals() and
+  // hashCode() methods for all messages defined in the .proto file.
+  // This increases generated code size, potentially substantially for large
+  // protos, which may harm a memory-constrained application.
+  // - In the full runtime this is a speed optimization, as the
+  // AbstractMessage base class includes reflection-based implementations of
+  // these methods.
+  // - In the lite runtime, setting this option changes the semantics of
+  // equals() and hashCode() to more closely match those of the full runtime;
+  // the generated methods compute their results based on field values rather
+  // than object identity. (Implementations should not assume that hashcodes
+  // will be consistent across runtimes or versions of the protocol compiler.)
+  optional bool java_generate_equals_and_hash = 20 [default=false];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; at the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
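+  // For example (an illustrative field, not one defined in this file):
+  //   repeated int32 samples = 4 [packed = true];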
+  optional bool packed = 2;
+
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  By default these types are
+  // represented as JavaScript strings.  This avoids loss of precision that can
+  // happen when a large value is converted to a floating point JavaScript
+  // number.  Specifying JS_NUMBER for the jstype causes the generated
+  // JavaScript code to use the JavaScript "number" type instead of strings.
+  // This option is an enum to permit additional types to be added,
+  // e.g. goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements is
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition occurs.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified object. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
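As an aside for readers of this vendored descriptor.proto: the Location.path encoding documented above can be made concrete with a short sketch. The FileDesc/MsgDesc/FieldDesc types below are hypothetical stand-ins rather than the real descriptor messages; only the field-number/index alternation mirrors the comment.

```go
package main

import "fmt"

// Hypothetical, simplified descriptor shapes used only to illustrate how a
// SourceCodeInfo.Location path such as [4, 3, 2, 7, 1] is walked.
type FieldDesc struct{ Name string }

type MsgDesc struct {
	Name   string
	Fields []FieldDesc // FieldDescriptorProto field = 2 in DescriptorProto
}

type FileDesc struct {
	MessageType []MsgDesc // DescriptorProto message_type = 4 in FileDescriptorProto
}

func main() {
	file := FileDesc{MessageType: make([]MsgDesc, 4)}
	file.MessageType[3] = MsgDesc{Name: "M", Fields: make([]FieldDesc, 8)}
	file.MessageType[3].Fields[7] = FieldDesc{Name: "foo"}

	// Path [4, 3, 2, 7, 1] reads as:
	//   4 -> FileDescriptorProto.message_type, 3 -> element at index 3,
	//   2 -> DescriptorProto.field,            7 -> element at index 7,
	//   1 -> FieldDescriptorProto.name.
	msg := file.MessageType[3]
	field := msg.Fields[7]
	fmt.Println(field.Name) // "foo" -- the element the path points at
}
```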
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask.pb.go
new file mode 100644
index 0000000..aec0172
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask.pb.go
@@ -0,0 +1,248 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/protobuf/field_mask.proto
+// DO NOT EDIT!
+
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//     paths: "f.a"
+//     paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       y : 13
+//     }
+//     z: 8
+//
+// The result will not contain specific values for fields x, y and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//       }
+//     }
+//
+// A repeated field is not allowed except at the last position of a
+// field mask.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API.  In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, the existing
+// repeated values in the target resource will be overwritten by the new values.
+// Note that a repeated field is only allowed in the last position of a field
+// mask.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the existing sub-message in the target resource is
+// overwritten. Given the target message:
+//
+//     f {
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       c : 1
+//     }
+//
+// And an update message:
+//
+//     f {
+//       b {
+//         d : 10
+//       }
+//     }
+//
+// then if the field mask is:
+//
+//  paths: "f.b"
+//
+// then the result will be:
+//
+//     f {
+//       b {
+//         d : 10
+//       }
+//       c : 1
+//     }
+//
+// However, if the update mask was:
+//
+//  paths: "f.b.d"
+//
+// then the result would be:
+//
+//     f {
+//       b {
+//         d : 10
+//         x : 2
+//       }
+//       c : 1
+//     }
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//     message Profile {
+//       User user = 1;
+//       Photo photo = 2;
+//     }
+//     message User {
+//       string display_name = 1;
+//       string address = 2;
+//     }
+//
+// In proto a field mask for `Profile` may look like this:
+//
+//     mask {
+//       paths: "user.display_name"
+//       paths: "photo"
+//     }
+//
+// In JSON, the same mask is represented as below:
+//
+//     {
+//       mask: "user.displayName,photo"
+//     }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//     message SampleMessage {
+//       oneof test_oneof {
+//         string name = 4;
+//         SubMessage sub_message = 9;
+//       }
+//     }
+//
+// The field mask can be:
+//
+//     mask {
+//       paths: "name"
+//     }
+//
+// Or:
+//
+//     mask {
+//       paths: "sub_message"
+//     }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+type FieldMask struct {
+	// The set of field mask paths.
+	Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
+}
+
+func (m *FieldMask) Reset()                    { *m = FieldMask{} }
+func (m *FieldMask) String() string            { return proto.CompactTextString(m) }
+func (*FieldMask) ProtoMessage()               {}
+func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+
+func init() {
+	proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/protobuf/field_mask.proto", fileDescriptor2)
+}
+
+var fileDescriptor2 = []byte{
+	// 163 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x49, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x07, 0x93, 0x49, 0xa5, 0x69, 0xfa, 0x69, 0x99, 0xa9,
+	0x39, 0x29, 0xf1, 0xb9, 0x89, 0xc5, 0xd9, 0x7a, 0x60, 0x31, 0x21, 0x7e, 0xa8, 0x2e, 0x98, 0x0a,
+	0x25, 0x45, 0x2e, 0x4e, 0x37, 0x90, 0x22, 0x5f, 0xa0, 0x1a, 0x21, 0x11, 0x2e, 0xd6, 0x82, 0xc4,
+	0x92, 0x8c, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d, 0xce, 0x20, 0x08, 0xc7, 0x29, 0x90, 0x4b, 0x38,
+	0x39, 0x3f, 0x57, 0x0f, 0x4d, 0xa7, 0x13, 0x1f, 0x5c, 0x5f, 0x00, 0x48, 0x28, 0x80, 0x71, 0x01,
+	0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc5, 0x01, 0x50,
+	0xc5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x95, 0x05, 0xa9, 0xc5,
+	0x49, 0x6c, 0x60, 0x53, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x4c, 0x96, 0xee, 0xc5,
+	0x00, 0x00, 0x00,
+}
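The JSON encoding rules in the FieldMask comment above (paths joined by commas, snake_case segments rendered as lowerCamelCase) can be sketched in a few lines. This is an illustrative helper only; toLowerCamel and encodeMask are made-up names, not part of the generated package.

```go
package main

import (
	"fmt"
	"strings"
)

// toLowerCamel converts a single snake_case identifier such as
// "display_name" to "displayName".
func toLowerCamel(s string) string {
	parts := strings.Split(s, "_")
	for i := 1; i < len(parts); i++ {
		if parts[i] != "" {
			parts[i] = strings.ToUpper(parts[i][:1]) + parts[i][1:]
		}
	}
	return strings.Join(parts, "")
}

// encodeMask renders a set of field mask paths in the single-string JSON form.
func encodeMask(paths []string) string {
	out := make([]string, len(paths))
	for i, p := range paths {
		segs := strings.Split(p, ".")
		for j, seg := range segs {
			segs[j] = toLowerCamel(seg)
		}
		out[i] = strings.Join(segs, ".")
	}
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(encodeMask([]string{"user.display_name", "photo"}))
	// Output: user.displayName,photo
}
```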
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask.proto b/vendor/google.golang.org/genproto/protobuf/field_mask.proto
new file mode 100644
index 0000000..c51de09
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask.proto
@@ -0,0 +1,246 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "FieldMaskProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option java_generate_equals_and_hash = true;
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//     paths: "f.a"
+//     paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       y : 13
+//     }
+//     z: 8
+//
+// The result will not contain specific values for fields x, y and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//       }
+//     }
+//
+// A repeated field is not allowed except at the last position of a
+// field mask.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API.  In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, the existing
+// repeated values in the target resource will be overwritten by the new values.
+// Note that a repeated field is only allowed in the last position of a field
+// mask.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the existing sub-message in the target resource is
+// overwritten. Given the target message:
+//
+//     f {
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       c : 1
+//     }
+//
+// And an update message:
+//
+//     f {
+//       b {
+//         d : 10
+//       }
+//     }
+//
+// then if the field mask is:
+//
+//  paths: "f.b"
+//
+// then the result will be:
+//
+//     f {
+//       b {
+//         d : 10
+//       }
+//       c : 1
+//     }
+//
+// However, if the update mask was:
+//
+//  paths: "f.b.d"
+//
+// then the result would be:
+//
+//     f {
+//       b {
+//         d : 10
+//         x : 2
+//       }
+//       c : 1
+//     }
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//     message Profile {
+//       User user = 1;
+//       Photo photo = 2;
+//     }
+//     message User {
+//       string display_name = 1;
+//       string address = 2;
+//     }
+//
+// In proto a field mask for `Profile` may look like this:
+//
+//     mask {
+//       paths: "user.display_name"
+//       paths: "photo"
+//     }
+//
+// In JSON, the same mask is represented as below:
+//
+//     {
+//       mask: "user.displayName,photo"
+//     }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//     message SampleMessage {
+//       oneof test_oneof {
+//         string name = 4;
+//         SubMessage sub_message = 9;
+//       }
+//     }
+//
+// The field mask can be:
+//
+//     mask {
+//       paths: "name"
+//     }
+//
+// Or:
+//
+//     mask {
+//       paths: "sub_message"
+//     }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+message FieldMask {
+  // The set of field mask paths.
+  repeated string paths = 1;
+}
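The update semantics spelled out above -- `paths: "f.b"` replaces the whole sub-message while `paths: "f.b.d"` touches only the leaf -- can be modeled with nested maps. A rough sketch under that assumption; obj and applyPath are hypothetical helpers, not APIs of this package.

```go
package main

import (
	"fmt"
	"strings"
)

// obj stands in for a proto message purely for illustration.
type obj map[string]interface{}

// applyPath copies the value at one mask path from src into dst. A path that
// ends at a sub-message copies -- and therefore replaces -- the whole
// sub-message, matching the "f.b" example above.
func applyPath(dst, src obj, path string) {
	segs := strings.Split(path, ".")
	for i, seg := range segs {
		if i == len(segs)-1 {
			dst[seg] = src[seg]
			return
		}
		next, ok := dst[seg].(obj)
		if !ok {
			next = obj{}
			dst[seg] = next
		}
		srcNext, ok := src[seg].(obj)
		if !ok {
			return // nothing to copy below this point
		}
		dst, src = next, srcNext
	}
}

func main() {
	target := obj{"f": obj{"b": obj{"d": 1, "x": 2}, "c": 1}}
	update := obj{"f": obj{"b": obj{"d": 10}}}

	// With paths: "f.b" the whole sub-message b is replaced, dropping x.
	applyPath(target, update, "f.b")
	fmt.Println(target) // map[f:map[b:map[d:10] c:1]]

	// With paths: "f.b.d" only the leaf d would change and x would be kept.
}
```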
diff --git a/vendor/google.golang.org/genproto/protobuf/source_context.pb.go b/vendor/google.golang.org/genproto/protobuf/source_context.pb.go
new file mode 100644
index 0000000..7f468a4
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/source_context.pb.go
@@ -0,0 +1,50 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/protobuf/source_context.proto
+// DO NOT EDIT!
+
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// `SourceContext` represents information about the source of a
+// protobuf element, like the file in which it is defined.
+type SourceContext struct {
+	// The path-qualified name of the .proto file that contained the associated
+	// protobuf element.  For example: `"google/protobuf/source_context.proto"`.
+	FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName" json:"file_name,omitempty"`
+}
+
+func (m *SourceContext) Reset()                    { *m = SourceContext{} }
+func (m *SourceContext) String() string            { return proto.CompactTextString(m) }
+func (*SourceContext) ProtoMessage()               {}
+func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
+
+func init() {
+	proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
+}
+
+func init() {
+	proto.RegisterFile("google.golang.org/genproto/protobuf/source_context.proto", fileDescriptor3)
+}
+
+var fileDescriptor3 = []byte{
+	// 175 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x4b, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f,
+	0xcd, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x07, 0x93, 0x49, 0xa5, 0x69, 0xfa, 0xc5, 0xf9, 0xa5,
+	0x45, 0xc9, 0xa9, 0xf1, 0xc9, 0xf9, 0x79, 0x25, 0xa9, 0x15, 0x25, 0x7a, 0x60, 0x71, 0x21, 0x7e,
+	0xa8, 0x4e, 0x98, 0x2a, 0x25, 0x1d, 0x2e, 0xde, 0x60, 0xb0, 0x42, 0x67, 0x88, 0x3a, 0x21, 0x69,
+	0x2e, 0xce, 0xb4, 0xcc, 0x9c, 0xd4, 0xf8, 0xbc, 0xc4, 0xdc, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
+	0xce, 0x20, 0x0e, 0x90, 0x80, 0x1f, 0x90, 0xef, 0x14, 0xca, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87,
+	0x66, 0x88, 0x93, 0x10, 0x8a, 0x11, 0x01, 0x20, 0xe1, 0x00, 0xc6, 0x05, 0x8c, 0x8c, 0x8b, 0x98,
+	0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x34, 0x04, 0x40, 0x35, 0xe8, 0x85, 0xa7,
+	0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x4d,
+	0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x31, 0x36, 0x7e, 0xd8, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/source_context.proto b/vendor/google.golang.org/genproto/protobuf/source_context.proto
new file mode 100644
index 0000000..a2c08e2
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/source_context.proto
@@ -0,0 +1,48 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "SourceContextProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// `SourceContext` represents information about the source of a
+// protobuf element, like the file in which it is defined.
+message SourceContext {
+  // The path-qualified name of the .proto file that contained the associated
+  // protobuf element.  For example: `"google/protobuf/source_context.proto"`.
+  string file_name = 1;
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/type.pb.go b/vendor/google.golang.org/genproto/protobuf/type.pb.go
new file mode 100644
index 0000000..336ae3d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/type.pb.go
@@ -0,0 +1,393 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/genproto/protobuf/type.proto
+// DO NOT EDIT!
+
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The syntax in which a protocol buffer element is defined.
+type Syntax int32
+
+const (
+	// Syntax `proto2`.
+	Syntax_SYNTAX_PROTO2 Syntax = 0
+	// Syntax `proto3`.
+	Syntax_SYNTAX_PROTO3 Syntax = 1
+)
+
+var Syntax_name = map[int32]string{
+	0: "SYNTAX_PROTO2",
+	1: "SYNTAX_PROTO3",
+}
+var Syntax_value = map[string]int32{
+	"SYNTAX_PROTO2": 0,
+	"SYNTAX_PROTO3": 1,
+}
+
+func (x Syntax) String() string {
+	return proto.EnumName(Syntax_name, int32(x))
+}
+func (Syntax) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
+
+// Basic field types.
+type Field_Kind int32
+
+const (
+	// Field type unknown.
+	Field_TYPE_UNKNOWN Field_Kind = 0
+	// Field type double.
+	Field_TYPE_DOUBLE Field_Kind = 1
+	// Field type float.
+	Field_TYPE_FLOAT Field_Kind = 2
+	// Field type int64.
+	Field_TYPE_INT64 Field_Kind = 3
+	// Field type uint64.
+	Field_TYPE_UINT64 Field_Kind = 4
+	// Field type int32.
+	Field_TYPE_INT32 Field_Kind = 5
+	// Field type fixed64.
+	Field_TYPE_FIXED64 Field_Kind = 6
+	// Field type fixed32.
+	Field_TYPE_FIXED32 Field_Kind = 7
+	// Field type bool.
+	Field_TYPE_BOOL Field_Kind = 8
+	// Field type string.
+	Field_TYPE_STRING Field_Kind = 9
+	// Field type group. Proto2 syntax only, and deprecated.
+	Field_TYPE_GROUP Field_Kind = 10
+	// Field type message.
+	Field_TYPE_MESSAGE Field_Kind = 11
+	// Field type bytes.
+	Field_TYPE_BYTES Field_Kind = 12
+	// Field type uint32.
+	Field_TYPE_UINT32 Field_Kind = 13
+	// Field type enum.
+	Field_TYPE_ENUM Field_Kind = 14
+	// Field type sfixed32.
+	Field_TYPE_SFIXED32 Field_Kind = 15
+	// Field type sfixed64.
+	Field_TYPE_SFIXED64 Field_Kind = 16
+	// Field type sint32.
+	Field_TYPE_SINT32 Field_Kind = 17
+	// Field type sint64.
+	Field_TYPE_SINT64 Field_Kind = 18
+)
+
+var Field_Kind_name = map[int32]string{
+	0:  "TYPE_UNKNOWN",
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+var Field_Kind_value = map[string]int32{
+	"TYPE_UNKNOWN":  0,
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x Field_Kind) String() string {
+	return proto.EnumName(Field_Kind_name, int32(x))
+}
+func (Field_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{1, 0} }
+
+// Whether a field is optional, required, or repeated.
+type Field_Cardinality int32
+
+const (
+	// For fields with unknown cardinality.
+	Field_CARDINALITY_UNKNOWN Field_Cardinality = 0
+	// For optional fields.
+	Field_CARDINALITY_OPTIONAL Field_Cardinality = 1
+	// For required fields. Proto2 syntax only.
+	Field_CARDINALITY_REQUIRED Field_Cardinality = 2
+	// For repeated fields.
+	Field_CARDINALITY_REPEATED Field_Cardinality = 3
+)
+
+var Field_Cardinality_name = map[int32]string{
+	0: "CARDINALITY_UNKNOWN",
+	1: "CARDINALITY_OPTIONAL",
+	2: "CARDINALITY_REQUIRED",
+	3: "CARDINALITY_REPEATED",
+}
+var Field_Cardinality_value = map[string]int32{
+	"CARDINALITY_UNKNOWN":  0,
+	"CARDINALITY_OPTIONAL": 1,
+	"CARDINALITY_REQUIRED": 2,
+	"CARDINALITY_REPEATED": 3,
+}
+
+func (x Field_Cardinality) String() string {
+	return proto.EnumName(Field_Cardinality_name, int32(x))
+}
+func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{1, 1} }
+
+// A protocol buffer message type.
+type Type struct {
+	// The fully qualified message name.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The list of fields.
+	Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"`
+	// The list of types appearing in `oneof` definitions in this type.
+	Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"`
+	// The protocol buffer options.
+	Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
+	// The source context.
+	SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
+	// The source syntax.
+	Syntax Syntax `protobuf:"varint,6,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
+}
+
+func (m *Type) Reset()                    { *m = Type{} }
+func (m *Type) String() string            { return proto.CompactTextString(m) }
+func (*Type) ProtoMessage()               {}
+func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
+
+func (m *Type) GetFields() []*Field {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+func (m *Type) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *Type) GetSourceContext() *SourceContext {
+	if m != nil {
+		return m.SourceContext
+	}
+	return nil
+}
+
+// A single field of a message type.
+type Field struct {
+	// The field type.
+	Kind Field_Kind `protobuf:"varint,1,opt,name=kind,enum=google.protobuf.Field_Kind" json:"kind,omitempty"`
+	// The field cardinality.
+	Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"`
+	// The field number.
+	Number int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	// The field name.
+	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	// The field type URL, without the scheme, for message or enumeration
+	// types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`.
+	TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+	// The index of the field type in `Type.oneofs`, for message or enumeration
+	// types. The first type has index 1; zero means the type is not in the list.
+	OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// Whether to use alternative packed wire representation.
+	Packed bool `protobuf:"varint,8,opt,name=packed" json:"packed,omitempty"`
+	// The protocol buffer options.
+	Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"`
+	// The field JSON name.
+	JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	// The string value of the default value of this field. Proto2 syntax only.
+	DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+}
+
+func (m *Field) Reset()                    { *m = Field{} }
+func (m *Field) String() string            { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage()               {}
+func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
+
+func (m *Field) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Enum type definition.
+type Enum struct {
+	// Enum type name.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Enum value definitions.
+	Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"`
+	// Protocol buffer options.
+	Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
+	// The source context.
+	SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
+	// The source syntax.
+	Syntax Syntax `protobuf:"varint,5,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
+}
+
+func (m *Enum) Reset()                    { *m = Enum{} }
+func (m *Enum) String() string            { return proto.CompactTextString(m) }
+func (*Enum) ProtoMessage()               {}
+func (*Enum) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
+
+func (m *Enum) GetEnumvalue() []*EnumValue {
+	if m != nil {
+		return m.Enumvalue
+	}
+	return nil
+}
+
+func (m *Enum) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *Enum) GetSourceContext() *SourceContext {
+	if m != nil {
+		return m.SourceContext
+	}
+	return nil
+}
+
+// Enum value definition.
+type EnumValue struct {
+	// Enum value name.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Enum value number.
+	Number int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	// Protocol buffer options.
+	Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
+}
+
+func (m *EnumValue) Reset()                    { *m = EnumValue{} }
+func (m *EnumValue) String() string            { return proto.CompactTextString(m) }
+func (*EnumValue) ProtoMessage()               {}
+func (*EnumValue) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
+
+func (m *EnumValue) GetOptions() []*Option {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// A protocol buffer option, which can be attached to a message, field,
+// enumeration, etc.
+type Option struct {
+	// The option's name. For example, `"java_package"`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The option's value. For example, `"com.google.protobuf"`.
+	Value *google_protobuf.Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Option) Reset()                    { *m = Option{} }
+func (m *Option) String() string            { return proto.CompactTextString(m) }
+func (*Option) ProtoMessage()               {}
+func (*Option) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} }
+
+func (m *Option) GetValue() *google_protobuf.Any {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Type)(nil), "google.protobuf.Type")
+	proto.RegisterType((*Field)(nil), "google.protobuf.Field")
+	proto.RegisterType((*Enum)(nil), "google.protobuf.Enum")
+	proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue")
+	proto.RegisterType((*Option)(nil), "google.protobuf.Option")
+	proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value)
+	proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value)
+	proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value)
+}
+
+func init() { proto.RegisterFile("google.golang.org/genproto/protobuf/type.proto", fileDescriptor4) }
+
+var fileDescriptor4 = []byte{
+	// 800 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x6e, 0xf2, 0x46,
+	0x14, 0xad, 0xc1, 0x18, 0x7c, 0x1d, 0x88, 0xbf, 0x49, 0x94, 0xb8, 0x89, 0x94, 0x46, 0x74, 0x13,
+	0x65, 0x61, 0x54, 0x52, 0x55, 0xd9, 0x42, 0x70, 0xa8, 0x15, 0x62, 0xbb, 0x83, 0x69, 0xc2, 0x0a,
+	0x19, 0x30, 0x94, 0xc4, 0xb1, 0x11, 0x36, 0x6d, 0x78, 0x88, 0xbe, 0x43, 0xd5, 0x65, 0xd7, 0x7d,
+	0x88, 0xbe, 0x55, 0x3b, 0x33, 0x06, 0x63, 0x7e, 0x2a, 0xa5, 0xfd, 0x16, 0x09, 0xbe, 0xe7, 0x9e,
+	0x7b, 0xee, 0xcf, 0x5c, 0x8f, 0x41, 0x1d, 0x07, 0xc1, 0xd8, 0x73, 0xc9, 0x8f, 0xe7, 0xf8, 0x63,
+	0x35, 0x98, 0x8d, 0x2b, 0x63, 0xd7, 0x9f, 0xce, 0x82, 0x28, 0xa8, 0xb0, 0xff, 0xfd, 0xf9, 0xa8,
+	0x12, 0x2d, 0xa6, 0xae, 0xca, 0x2c, 0x74, 0xb8, 0xe4, 0xaf, 0x7c, 0x67, 0x95, 0xf1, 0x24, 0xfa,
+	0x69, 0xde, 0x57, 0x07, 0xc1, 0x5b, 0x25, 0x16, 0x59, 0xc7, 0x4d, 0x69, 0x60, 0x58, 0x71, 0xfc,
+	0x05, 0xfd, 0x8b, 0x63, 0xce, 0x6e, 0x3f, 0x92, 0x31, 0x0c, 0xe6, 0xb3, 0x81, 0xdb, 0x1b, 0x04,
+	0x7e, 0xe4, 0xbe, 0x47, 0x71, 0x64, 0xf9, 0xd7, 0x0c, 0xf0, 0x36, 0x51, 0x44, 0x08, 0x78, 0xdf,
+	0x79, 0x73, 0x15, 0xee, 0x92, 0xbb, 0x12, 0x31, 0x7b, 0x46, 0x2a, 0x08, 0xa3, 0x89, 0xeb, 0x0d,
+	0x43, 0x25, 0x73, 0x99, 0xbd, 0x92, 0xaa, 0x27, 0xea, 0x56, 0xa5, 0xea, 0x3d, 0x75, 0xe3, 0x25,
+	0x0b, 0x9d, 0x80, 0x10, 0xf8, 0x6e, 0x30, 0x0a, 0x95, 0x2c, 0xe1, 0x8b, 0x78, 0x69, 0xa1, 0x6f,
+	0x20, 0x1f, 0x4c, 0xa3, 0x49, 0xe0, 0x87, 0x0a, 0xcf, 0x84, 0x4e, 0x77, 0x84, 0x4c, 0xe6, 0xc7,
+	0x2b, 0x1e, 0xd2, 0xa0, 0xb4, 0x59, 0xaf, 0x92, 0x23, 0x85, 0x49, 0xd5, 0x8b, 0x9d, 0xc8, 0x36,
+	0xa3, 0xdd, 0xc5, 0x2c, 0x5c, 0x0c, 0xd3, 0x26, 0xaa, 0x80, 0x10, 0x2e, 0xfc, 0xc8, 0x79, 0x57,
+	0x04, 0x12, 0x5e, 0xda, 0x93, 0xb8, 0xcd, 0xdc, 0x78, 0x49, 0x2b, 0xff, 0x29, 0x40, 0x8e, 0x35,
+	0x45, 0x42, 0xf9, 0xd7, 0x89, 0x3f, 0x64, 0x03, 0x29, 0x55, 0xcf, 0xf7, 0xb7, 0xae, 0x3e, 0x10,
+	0x0a, 0x66, 0x44, 0xd4, 0x00, 0x69, 0xe0, 0xcc, 0x86, 0x13, 0xdf, 0xf1, 0x26, 0xd1, 0x82, 0x8c,
+	0x8c, 0xc6, 0x95, 0xff, 0x25, 0xee, 0x6e, 0xcd, 0xc4, 0xe9, 0x30, 0x3a, 0x43, 0x7f, 0xfe, 0xd6,
+	0x77, 0x67, 0x64, 0x86, 0xdc, 0x55, 0x0e, 0x2f, 0xad, 0xe4, 0x7c, 0xf8, 0xd4, 0xf9, 0x7c, 0x09,
+	0x05, 0xba, 0x0d, 0xbd, 0xf9, 0xcc, 0x63, 0xfd, 0x89, 0x38, 0x4f, 0xed, 0xce, 0xcc, 0x43, 0x5f,
+	0x81, 0xc4, 0x86, 0xdf, 0x23, 0x95, 0xb9, 0xef, 0x4a, 0x9e, 0x69, 0x01, 0x83, 0x74, 0x8a, 0xd0,
+	0x3c, 0x53, 0x67, 0xf0, 0xea, 0x0e, 0x95, 0x02, 0xf1, 0x15, 0xf0, 0xd2, 0x4a, 0x9f, 0x95, 0xf8,
+	0xc1, 0xb3, 0x3a, 0x07, 0xf1, 0x25, 0x0c, 0xfc, 0x1e, 0xab, 0x0f, 0x58, 0x1d, 0x05, 0x0a, 0x18,
+	0xb4, 0xc6, 0xaf, 0xa1, 0x38, 0x74, 0x47, 0xce, 0xdc, 0x8b, 0x7a, 0x3f, 0x3b, 0xde, 0xdc, 0x55,
+	0x24, 0x46, 0x38, 0x58, 0x82, 0x3f, 0x52, 0xac, 0xfc, 0x17, 0xd9, 0x42, 0x3a, 0x49, 0x24, 0xc3,
+	0x81, 0xdd, 0xb5, 0xb4, 0x5e, 0xc7, 0x78, 0x30, 0xcc, 0x27, 0x43, 0xfe, 0x02, 0x1d, 0x82, 0xc4,
+	0x90, 0x86, 0xd9, 0xa9, 0xb7, 0x34, 0x99, 0x43, 0x25, 0x00, 0x06, 0xdc, 0xb7, 0xcc, 0x9a, 0x2d,
+	0x67, 0x12, 0x5b, 0x37, 0xec, 0xef, 0xbe, 0x95, 0xb3, 0x49, 0x40, 0x27, 0x06, 0xf8, 0x34, 0xe1,
+	0xa6, 0x2a, 0xe7, 0x92, 0x1c, 0xf7, 0xfa, 0xb3, 0xd6, 0x20, 0x0c, 0x61, 0x13, 0x21, 0x9c, 0x3c,
+	0x2a, 0x82, 0xc8, 0x90, 0xba, 0x69, 0xb6, 0xe4, 0x42, 0xa2, 0xd9, 0xb6, 0xb1, 0x6e, 0x34, 0x65,
+	0x31, 0xd1, 0x6c, 0x62, 0xb3, 0x63, 0xc9, 0x90, 0x28, 0x3c, 0x6a, 0xed, 0x76, 0xad, 0xa9, 0xc9,
+	0x52, 0xc2, 0xa8, 0x77, 0x6d, 0xad, 0x2d, 0x1f, 0x6c, 0x94, 0x45, 0x52, 0x14, 0x93, 0x14, 0x9a,
+	0xd1, 0x79, 0x94, 0x4b, 0xe8, 0x13, 0x14, 0xe3, 0x14, 0xab, 0x22, 0x0e, 0xb7, 0x20, 0x52, 0xa9,
+	0xbc, 0x2e, 0x24, 0x56, 0xf9, 0xb4, 0x01, 0x10, 0x06, 0x2a, 0x47, 0x20, 0xa5, 0x76, 0x0b, 0x9d,
+	0xc2, 0xd1, 0x5d, 0x0d, 0x37, 0x74, 0xa3, 0xd6, 0xd2, 0xed, 0x6e, 0x6a, 0xae, 0x0a, 0x1c, 0xa7,
+	0x1d, 0xa6, 0x65, 0xeb, 0x26, 0x79, 0x26, 0x03, 0xde, 0xf2, 0x60, 0xed, 0x87, 0x8e, 0x8e, 0xb5,
+	0x06, 0x19, 0xf5, 0x8e, 0xc7, 0xd2, 0x6a, 0x36, 0xf1, 0x64, 0xcb, 0x7f, 0x73, 0xc0, 0x6b, 0x64,
+	0x53, 0xf7, 0x5e, 0x23, 0xb7, 0x20, 0xba, 0xc4, 0x17, 0x1f, 0x7f, 0x7c, 0x93, 0x9c, 0xed, 0x2c,
+	0x15, 0x8d, 0x66, 0xcb, 0x80, 0xd7, 0xe4, 0xf4, 0x32, 0x66, 0xff, 0xf7, 0xc5, 0xc1, 0x7f, 0xde,
+	0xc5, 0x91, 0xfb, 0xd8, 0xc5, 0xf1, 0x02, 0x62, 0xd2, 0xc2, 0xde, 0x29, 0xac, 0x5f, 0xec, 0xcc,
+	0xc6, 0x8b, 0xfd, 0xdf, 0x7b, 0x2c, 0x7f, 0x0f, 0x42, 0x0c, 0xed, 0x4d, 0x74, 0x0d, 0xb9, 0xd5,
+	0xa8, 0x69, 0xe3, 0xc7, 0x3b, 0x72, 0x35, 0x7f, 0x81, 0x63, 0xca, 0x35, 0xb9, 0xe1, 0xe3, 0x3e,
+	0xe8, 0xb2, 0xb5, 0xbb, 0x86, 0x5d, 0x7b, 0xee, 0x59, 0xd8, 0xb4, 0xcd, 0x2a, 0x59, 0x91, 0x2d,
+	0xe8, 0x46, 0xe6, 0xea, 0x2d, 0x38, 0x22, 0x1f, 0xa5, 0x6d, 0xc5, 0xba, 0x48, 0x3f, 0x21, 0x16,
+	0xb5, 0x2c, 0xee, 0x37, 0x8e, 0xfb, 0x3d, 0x93, 0x6d, 0x5a, 0xf5, 0x3f, 0x32, 0x17, 0xcd, 0x98,
+	0x67, 0xad, 0x32, 0x3f, 0xb9, 0x9e, 0xf7, 0xe0, 0x07, 0xbf, 0xf8, 0x94, 0x1f, 0xf6, 0x05, 0x26,
+	0x70, 0xf3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x19, 0x59, 0x69, 0x31, 0x07, 0x00, 0x00,
+}
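As a usage sketch for the generated types above, the snippet below builds a google.protobuf.Type describing a single string field. The import path reflects where this package is vendored (google.golang.org/genproto/protobuf, Go package descriptor); in other trees it may differ.

```go
package main

import (
	"fmt"

	// Import path assumed from the vendored location; adjust for your tree.
	descriptor "google.golang.org/genproto/protobuf"
)

func main() {
	// Describe a hypothetical message "example.Profile" with one optional
	// proto3 string field named "name".
	t := &descriptor.Type{
		Name: "example.Profile",
		Fields: []*descriptor.Field{
			{
				Kind:        descriptor.Field_TYPE_STRING,
				Cardinality: descriptor.Field_CARDINALITY_OPTIONAL,
				Number:      1,
				Name:        "name",
				JsonName:    "name",
			},
		},
		SourceContext: &descriptor.SourceContext{FileName: "example/profile.proto"},
		Syntax:        descriptor.Syntax_SYNTAX_PROTO3,
	}
	fmt.Println(t.Name, len(t.Fields), t.Fields[0].Kind)
}
```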
diff --git a/vendor/google.golang.org/genproto/protobuf/type.proto b/vendor/google.golang.org/genproto/protobuf/type.proto
new file mode 100644
index 0000000..7d94312
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/type.proto
@@ -0,0 +1,180 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+import "github.com/golang/protobuf/ptypes/any/any.proto"; // from google/protobuf/any.proto
+import "google.golang.org/genproto/protobuf/source_context.proto"; // from google/protobuf/source_context.proto
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TypeProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// A protocol buffer message type.
+message Type {
+  // The fully qualified message name.
+  string name = 1;
+  // The list of fields.
+  repeated Field fields = 2;
+  // The list of types appearing in `oneof` definitions in this type.
+  repeated string oneofs = 3;
+  // The protocol buffer options.
+  repeated Option options = 4;
+  // The source context.
+  SourceContext source_context = 5;
+  // The source syntax.
+  Syntax syntax = 6;
+}
+
+// A single field of a message type.
+message Field {
+  // Basic field types.
+  enum Kind {
+    // Field type unknown.
+    TYPE_UNKNOWN        = 0;
+    // Field type double.
+    TYPE_DOUBLE         = 1;
+    // Field type float.
+    TYPE_FLOAT          = 2;
+    // Field type int64.
+    TYPE_INT64          = 3;
+    // Field type uint64.
+    TYPE_UINT64         = 4;
+    // Field type int32.
+    TYPE_INT32          = 5;
+    // Field type fixed64.
+    TYPE_FIXED64        = 6;
+    // Field type fixed32.
+    TYPE_FIXED32        = 7;
+    // Field type bool.
+    TYPE_BOOL           = 8;
+    // Field type string.
+    TYPE_STRING         = 9;
+    // Field type group. Proto2 syntax only, and deprecated.
+    TYPE_GROUP          = 10;
+    // Field type message.
+    TYPE_MESSAGE        = 11;
+    // Field type bytes.
+    TYPE_BYTES          = 12;
+    // Field type uint32.
+    TYPE_UINT32         = 13;
+    // Field type enum.
+    TYPE_ENUM           = 14;
+    // Field type sfixed32.
+    TYPE_SFIXED32       = 15;
+    // Field type sfixed64.
+    TYPE_SFIXED64       = 16;
+    // Field type sint32.
+    TYPE_SINT32         = 17;
+    // Field type sint64.
+    TYPE_SINT64         = 18;
+  };
+
+  // Whether a field is optional, required, or repeated.
+  enum Cardinality {
+    // For fields with unknown cardinality.
+    CARDINALITY_UNKNOWN = 0;
+    // For optional fields.
+    CARDINALITY_OPTIONAL = 1;
+    // For required fields. Proto2 syntax only.
+    CARDINALITY_REQUIRED = 2;
+    // For repeated fields.
+    CARDINALITY_REPEATED = 3;
+  };
+
+  // The field type.
+  Kind kind = 1;
+  // The field cardinality.
+  Cardinality cardinality = 2;
+  // The field number.
+  int32 number = 3;
+  // The field name.
+  string name = 4;
+  // The field type URL, without the scheme, for message or enumeration
+  // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`.
+  string type_url = 6;
+  // The index of the field type in `Type.oneofs`, for message or enumeration
+  // types. The first type has index 1; zero means the type is not in the list.
+  int32 oneof_index = 7;
+  // Whether to use alternative packed wire representation.
+  bool packed = 8;
+  // The protocol buffer options.
+  repeated Option options = 9;
+  // The field JSON name.
+  string json_name = 10;
+  // The string value of the default value of this field. Proto2 syntax only.
+  string default_value = 11;
+}
+
+// Enum type definition.
+message Enum {
+  // Enum type name.
+  string name = 1;
+  // Enum value definitions.
+  repeated EnumValue enumvalue = 2;
+  // Protocol buffer options.
+  repeated Option options = 3;
+  // The source context.
+  SourceContext source_context = 4;
+  // The source syntax.
+  Syntax syntax = 5;
+}
+
+// Enum value definition.
+message EnumValue {
+  // Enum value name.
+  string name = 1;
+  // Enum value number.
+  int32 number = 2;
+  // Protocol buffer options.
+  repeated Option options = 3;
+}
+
+// A protocol buffer option, which can be attached to a message, field,
+// enumeration, etc.
+message Option {
+  // The option's name. For example, `"java_package"`.
+  string name = 1;
+  // The option's value. For example, `"com.google.protobuf"`.
+  Any value = 2;
+}
+
+// The syntax in which a protocol buffer element is defined.
+enum Syntax {
+  // Syntax `proto2`.
+  SYNTAX_PROTO2 = 0;
+  // Syntax `proto3`.
+  SYNTAX_PROTO3 = 1;
+}
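Option carries its value as a google.protobuf.Any. Assuming the vendored ptypes and wrappers packages from Godeps.json are used, packing a string-valued option might look like the sketch below; this is illustrative only and not code used by gddo-server.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"

	// Import path assumed from the vendored location; adjust for your tree.
	descriptor "google.golang.org/genproto/protobuf"
)

func main() {
	// Wrap the option value in an Any via a StringValue wrapper message.
	val, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "com.google.protobuf"})
	if err != nil {
		log.Fatal(err)
	}
	opt := &descriptor.Option{
		Name:  "java_package",
		Value: val,
	}
	fmt.Println(opt.Name, opt.Value.TypeUrl)
}
```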
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
new file mode 100644
index 0000000..0c1a96c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+go:
+  - 1.5.4
+  - 1.6.3
+  - 1.7
+
+go_import_path: google.golang.org/grpc
+
+before_install:
+  - if [[ $TRAVIS_GO_VERSION != 1.5* ]]; then go get github.com/golang/lint/golint; fi
+  - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover
+
+script:
+  - '! gofmt -s -d -l . 2>&1 | read'
+  - '! goimports -l . | read'
+  - 'if [[ $TRAVIS_GO_VERSION != 1.5* ]]; then ! golint ./... | grep -vE "(_string|\.pb)\.go:"; fi'
+  - '! go tool vet -all . 2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214
+  - make test testrace
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 0000000..36cd6f7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,46 @@
+# How to contribute
+
+We definitely welcome patches and contributions to gRPC! Here are some guidelines
+and information about how to do so.
+
+## Sending patches
+
+### Getting started
+
+1. Check out the code:
+
+        $ go get google.golang.org/grpc
+        $ cd $GOPATH/src/google.golang.org/grpc
+
+1. Create a fork of the grpc-go repository.
+1. Add your fork as a remote:
+
+        $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
+
+1. Make changes, commit them.
+1. Run the test suite:
+
+        $ make test
+
+1. Push your changes to your fork:
+
+        $ git push fork ...
+
+1. Open a pull request.
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+## Filing Issues
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+### Contributing code
+Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 0000000..f4988b4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2014, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
new file mode 100644
index 0000000..03bb01f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -0,0 +1,52 @@
+all: test testrace
+
+deps:
+	go get -d -v google.golang.org/grpc/...
+
+updatedeps:
+	go get -d -v -u -f google.golang.org/grpc/...
+
+testdeps:
+	go get -d -v -t google.golang.org/grpc/...
+
+updatetestdeps:
+	go get -d -v -t -u -f google.golang.org/grpc/...
+
+build: deps
+	go build google.golang.org/grpc/...
+
+proto:
+	@ if ! which protoc > /dev/null; then \
+		echo "error: protoc not installed" >&2; \
+		exit 1; \
+	fi
+	go get -u -v github.com/golang/protobuf/protoc-gen-go
+	# use $$dir as the root for all proto files in the same directory
+	for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
+		protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
+	done
+
+test: testdeps
+	go test -v -cpu 1,4 google.golang.org/grpc/...
+
+testrace: testdeps
+	go test -v -race -cpu 1,4 google.golang.org/grpc/...
+
+clean:
+	go clean -i google.golang.org/grpc/...
+
+coverage: testdeps
+	./coverage.sh --coveralls
+
+.PHONY: \
+	all \
+	deps \
+	updatedeps \
+	testdeps \
+	updatetestdeps \
+	build \
+	proto \
+	test \
+	testrace \
+	clean \
+	coverage
diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS
new file mode 100644
index 0000000..69b4795
--- /dev/null
+++ b/vendor/google.golang.org/grpc/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the gRPC project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of gRPC, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of gRPC.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of gRPC or any code incorporated within this
+implementation of gRPC constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of gRPC
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 0000000..660658b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,32 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+
+The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+
+Installation
+------------
+
+To install this package, you need to install Go and set up your Go workspace on your computer. The simplest way to install the library is to run:
+
+```
+$ go get google.golang.org/grpc
+```
+
+Prerequisites
+-------------
+
+This requires Go 1.5 or later.
+
+Constraints
+-----------
+The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies that are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need to discuss them with the gRPC-Go authors and consultants first.
+
+Documentation
+-------------
+See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
+
+Status
+------
+GA
+
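+Usage sketch
+------------
+
+As a minimal, illustrative sketch (not part of the upstream documentation), a client
+connection can be dialed and later closed as follows. The address is a placeholder,
+imports are omitted, and `WithInsecure` disables transport security, so this is only
+suitable for local experiments:
+
+```
+conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
+if err != nil {
+	log.Fatalf("did not connect: %v", err)
+}
+defer conn.Close()
+// Construct a generated client stub on top of conn and issue RPCs.
+```
+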
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 0000000..52f4f10
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,80 @@
+package grpc
+
+import (
+	"math/rand"
+	"time"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var (
+	DefaultBackoffConfig = BackoffConfig{
+		MaxDelay:  120 * time.Second,
+		baseDelay: 1.0 * time.Second,
+		factor:    1.6,
+		jitter:    0.2,
+	}
+)
+
+// backoffStrategy defines the methodology for backing off after a grpc
+// connection failure.
+//
+// This is unexported until the gRPC project decides whether or not to allow
+// alternative backoff strategies. Once a decision is made, this type and its
+// method may be exported.
+type backoffStrategy interface {
+	// backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	backoff(retries int) time.Duration
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+type BackoffConfig struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+
+	// TODO(stevvooe): The following fields are not exported, as allowing
+	// changes would violate the current gRPC specification for backoff. If
+	// gRPC decides to allow more interesting backoff strategies, these fields
+	// may be opened up in the future.
+
+	// baseDelay is the amount of time to wait before retrying after the first
+	// failure.
+	baseDelay time.Duration
+
+	// factor is applied to the backoff after each retry.
+	factor float64
+
+	// jitter provides a range to randomize backoff delays.
+	jitter float64
+}
+
+func setDefaults(bc *BackoffConfig) {
+	md := bc.MaxDelay
+	*bc = DefaultBackoffConfig
+
+	if md > 0 {
+		bc.MaxDelay = md
+	}
+}
+
+func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
+	if retries == 0 {
+		return bc.baseDelay
+	}
+	backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.factor
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
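+
+// As an illustration of the schedule above: with DefaultBackoffConfig
+// (baseDelay 1s, factor 1.6, MaxDelay 120s, jitter 0.2), retries == 0 returns
+// exactly 1s, and the pre-jitter delay for retries == 1, 2, 3 is 1.6s, 2.56s
+// and ~4.1s respectively, growing geometrically until it is capped at 120s;
+// every value after the first is then randomized by up to ±20%.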
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
new file mode 100644
index 0000000..e217a20
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -0,0 +1,399 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/naming"
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	Metadata interface{}
+}
+
+// BalancerConfig specifies the configurations for Balancer.
+type BalancerConfig struct {
+	// DialCreds is the transport credential the Balancer implementation can
+	// use to dial a remote load balancer server. A Balancer implementation
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+}
+
+// BalancerGetOptions configures a Get call.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type BalancerGetOptions struct {
+	// BlockingWait specifies whether Get should block when there is no
+	// connected address.
+	BlockingWait bool
+}
+
+// Balancer chooses network addresses for RPCs.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Balancer interface {
+	// Start does the initialization work to bootstrap a Balancer. For example,
+	// this function may start the name resolution and watch the updates. It will
+	// be called when dialing.
+	Start(target string, config BalancerConfig) error
+	// Up informs the Balancer that gRPC has a connection to the server at
+	// addr. It returns down which is called once the connection to addr gets
+	// lost or closed.
+	// TODO: It is not clear how to construct and take advantage of the meaningful error
+	// parameter for down. Need realistic demands to guide.
+	Up(addr Address) (down func(error))
+	// Get gets the address of a server for the RPC corresponding to ctx.
+	// i) If it returns a connected address, gRPC internals issues the RPC on the
+	// connection to this address;
+	// ii) If it returns an address on which the connection is under construction
+	// (initiated by Notify(...)) but not connected, gRPC internals
+	//  * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
+	//  Shutdown state;
+	//  or
+	//  * issues RPC on the connection otherwise.
+	// iii) If it returns an address on which the connection does not exist, gRPC
+	// internals treats it as an error and will fail the corresponding RPC.
+	//
+	// Therefore, the following is the recommended rule when writing a custom Balancer.
+	// If opts.BlockingWait is true, it should return a connected address or
+	// block if there is no connected address. It should respect the timeout or
+	// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
+	// RPCs), it should return an address it has notified via Notify(...) immediately
+	// instead of blocking.
+	//
+	// The function returns put which is called once the rpc has completed or failed.
+	// put can collect and report RPC stats to a remote load balancer.
+	//
+	// This function should only return the errors Balancer cannot recover by itself.
+	// gRPC internals will fail the RPC if an error is returned.
+	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
+	// Notify returns a channel that is used by gRPC internals to watch the addresses
+	// gRPC needs to connect. The addresses might be from a name resolver or remote
+	// load balancer. gRPC internals will compare it with the existing connected
+	// addresses. If the address Balancer notified is not in the existing connected
+	// addresses, gRPC starts to connect the address. If an address in the existing
+	// connected addresses is not in the notification list, the corresponding connection
+	// is shut down gracefully. Otherwise, no action is taken. Note that
+	// the Address slice must be the full list of the Addresses which should be connected.
+	// It is NOT delta.
+	Notify() <-chan []Address
+	// Close shuts down the balancer.
+	Close() error
+}
+
+// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
+// call of Balancer.
+type downErr struct {
+	timeout   bool
+	temporary bool
+	desc      string
+}
+
+func (e downErr) Error() string   { return e.desc }
+func (e downErr) Timeout() bool   { return e.timeout }
+func (e downErr) Temporary() bool { return e.temporary }
+
+func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
+	return downErr{
+		timeout:   timeout,
+		temporary: temporary,
+		desc:      fmt.Sprintf(format, a...),
+	}
+}
+
+// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
+// the name resolution updates and updates the addresses available correspondingly.
+func RoundRobin(r naming.Resolver) Balancer {
+	return &roundRobin{r: r}
+}
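+
+// As an illustrative sketch only, a client could be dialed with round-robin
+// balancing over a naming.Resolver r like this:
+//
+//	conn, err := grpc.Dial(target, grpc.WithBalancer(grpc.RoundRobin(r)))
+//
+// where target and r are supplied by the caller.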
+
+type addrInfo struct {
+	addr      Address
+	connected bool
+}
+
+type roundRobin struct {
+	r      naming.Resolver
+	w      naming.Watcher
+	addrs  []*addrInfo // all the addresses the client should potentially connect
+	mu     sync.Mutex
+	addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
+	next   int            // index of the next address to return for Get()
+	waitCh chan struct{}  // the channel to block when there is no connected address available
+	done   bool           // The Balancer is closed.
+}
+
+func (rr *roundRobin) watchAddrUpdates() error {
+	updates, err := rr.w.Next()
+	if err != nil {
+		grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
+		return err
+	}
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	for _, update := range updates {
+		addr := Address{
+			Addr:     update.Addr,
+			Metadata: update.Metadata,
+		}
+		switch update.Op {
+		case naming.Add:
+			var exist bool
+			for _, v := range rr.addrs {
+				if addr == v.addr {
+					exist = true
+					grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
+					break
+				}
+			}
+			if exist {
+				continue
+			}
+			rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
+		case naming.Delete:
+			for i, v := range rr.addrs {
+				if addr == v.addr {
+					copy(rr.addrs[i:], rr.addrs[i+1:])
+					rr.addrs = rr.addrs[:len(rr.addrs)-1]
+					break
+				}
+			}
+		default:
+			grpclog.Println("Unknown update.Op ", update.Op)
+		}
+	}
+	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
+	open := make([]Address, len(rr.addrs))
+	for i, v := range rr.addrs {
+		open[i] = v.addr
+	}
+	if rr.done {
+		return ErrClientConnClosing
+	}
+	rr.addrCh <- open
+	return nil
+}
+
+func (rr *roundRobin) Start(target string, config BalancerConfig) error {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	if rr.done {
+		return ErrClientConnClosing
+	}
+	if rr.r == nil {
+		// If there is no name resolver installed, it is not needed to
+		// do name resolution. In this case, target is added into rr.addrs
+		// as the only address available and rr.addrCh stays nil.
+		rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
+		return nil
+	}
+	w, err := rr.r.Resolve(target)
+	if err != nil {
+		return err
+	}
+	rr.w = w
+	rr.addrCh = make(chan []Address)
+	go func() {
+		for {
+			if err := rr.watchAddrUpdates(); err != nil {
+				return
+			}
+		}
+	}()
+	return nil
+}
+
+// Up sets the connected state of addr and sends notification if there are pending
+// Get() calls.
+func (rr *roundRobin) Up(addr Address) func(error) {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	var cnt int
+	for _, a := range rr.addrs {
+		if a.addr == addr {
+			if a.connected {
+				return nil
+			}
+			a.connected = true
+		}
+		if a.connected {
+			cnt++
+		}
+	}
+	// addr is the only one that is connected. Notify the Get() callers who are blocking.
+	if cnt == 1 && rr.waitCh != nil {
+		close(rr.waitCh)
+		rr.waitCh = nil
+	}
+	return func(err error) {
+		rr.down(addr, err)
+	}
+}
+
+// down unsets the connected state of addr.
+func (rr *roundRobin) down(addr Address, err error) {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	for _, a := range rr.addrs {
+		if addr == a.addr {
+			a.connected = false
+			break
+		}
+	}
+}
+
+// Get returns the next addr in the rotation.
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
+	var ch chan struct{}
+	rr.mu.Lock()
+	if rr.done {
+		rr.mu.Unlock()
+		err = ErrClientConnClosing
+		return
+	}
+
+	if len(rr.addrs) > 0 {
+		if rr.next >= len(rr.addrs) {
+			rr.next = 0
+		}
+		next := rr.next
+		for {
+			a := rr.addrs[next]
+			next = (next + 1) % len(rr.addrs)
+			if a.connected {
+				addr = a.addr
+				rr.next = next
+				rr.mu.Unlock()
+				return
+			}
+			if next == rr.next {
+				// Has iterated all the possible addresses but none is connected.
+				break
+			}
+		}
+	}
+	if !opts.BlockingWait {
+		if len(rr.addrs) == 0 {
+			rr.mu.Unlock()
+			err = fmt.Errorf("there is no address available")
+			return
+		}
+		// Returns the next addr on rr.addrs for failfast RPCs.
+		addr = rr.addrs[rr.next].addr
+		rr.next++
+		rr.mu.Unlock()
+		return
+	}
+	// Wait on rr.waitCh for non-failfast RPCs.
+	if rr.waitCh == nil {
+		ch = make(chan struct{})
+		rr.waitCh = ch
+	} else {
+		ch = rr.waitCh
+	}
+	rr.mu.Unlock()
+	for {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+			return
+		case <-ch:
+			rr.mu.Lock()
+			if rr.done {
+				rr.mu.Unlock()
+				err = ErrClientConnClosing
+				return
+			}
+
+			if len(rr.addrs) > 0 {
+				if rr.next >= len(rr.addrs) {
+					rr.next = 0
+				}
+				next := rr.next
+				for {
+					a := rr.addrs[next]
+					next = (next + 1) % len(rr.addrs)
+					if a.connected {
+						addr = a.addr
+						rr.next = next
+						rr.mu.Unlock()
+						return
+					}
+					if next == rr.next {
+						// Has iterated all the possible addresses but none is connected.
+						break
+					}
+				}
+			}
+			// The newly added addr got removed by Down() again.
+			if rr.waitCh == nil {
+				ch = make(chan struct{})
+				rr.waitCh = ch
+			} else {
+				ch = rr.waitCh
+			}
+			rr.mu.Unlock()
+		}
+	}
+}
+
+func (rr *roundRobin) Notify() <-chan []Address {
+	return rr.addrCh
+}
+
+func (rr *roundRobin) Close() error {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	rr.done = true
+	if rr.w != nil {
+		rr.w.Close()
+	}
+	if rr.waitCh != nil {
+		close(rr.waitCh)
+		rr.waitCh = nil
+	}
+	if rr.addrCh != nil {
+		close(rr.addrCh)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
new file mode 100644
index 0000000..788b3d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/call.go
@@ -0,0 +1,233 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/transport"
+)
+
+// recvResponse receives and parses an RPC response.
+// On error, it returns the error and indicates whether the call should be retried.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+	// Try to acquire header metadata from the server if there is any.
+	var err error
+	defer func() {
+		if err != nil {
+			if _, ok := err.(transport.ConnectionError); !ok {
+				t.CloseStream(stream, err)
+			}
+		}
+	}()
+	c.headerMD, err = stream.Header()
+	if err != nil {
+		return err
+	}
+	p := &parser{r: stream}
+	for {
+		if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+	}
+	c.trailerMD = stream.Trailer()
+	return nil
+}
+
+// sendRequest writes out various information of an RPC such as Context and Message.
+func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
+	stream, err := t.NewStream(ctx, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			// If err is connection error, t will be closed, no need to close stream here.
+			if _, ok := err.(transport.ConnectionError); !ok {
+				t.CloseStream(stream, err)
+			}
+		}
+	}()
+	var cbuf *bytes.Buffer
+	if compressor != nil {
+		cbuf = new(bytes.Buffer)
+	}
+	outBuf, err := encode(codec, args, compressor, cbuf)
+	if err != nil {
+		return nil, Errorf(codes.Internal, "grpc: %v", err)
+	}
+	err = t.Write(stream, outBuf, opts)
+	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
+	// does not exist) so that t.Write could get io.EOF from wait(...). Leave the following
+	// recvResponse to get the final status.
+	if err != nil && err != io.EOF {
+		return nil, err
+	}
+	// Sent successfully.
+	return stream, nil
+}
+
+// Invoke sends the RPC request on the wire and returns after response is received.
+// Invoke is called by generated code. Also users can call Invoke directly when it
+// is really needed in their use cases.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	if cc.dopts.unaryInt != nil {
+		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+	}
+	return invoke(ctx, method, args, reply, cc, opts...)
+}
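+
+// For illustration, a direct call (what generated code normally does for you)
+// looks roughly like:
+//
+//	err := grpc.Invoke(ctx, "/package.Service/Method", req, reply, conn)
+//
+// where "/package.Service/Method" stands in for a real full method name and
+// req and reply are the request and response messages for that method.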
+
+func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
+	c := defaultCallInfo
+	for _, o := range opts {
+		if err := o.before(&c); err != nil {
+			return toRPCErr(err)
+		}
+	}
+	defer func() {
+		for _, o := range opts {
+			o.after(&c)
+		}
+	}()
+	if EnableTracing {
+		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+		defer c.traceInfo.tr.Finish()
+		c.traceInfo.firstLine.client = true
+		if deadline, ok := ctx.Deadline(); ok {
+			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
+		}
+		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
+		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
+		defer func() {
+			if err != nil {
+				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				c.traceInfo.tr.SetError()
+			}
+		}()
+	}
+	topts := &transport.Options{
+		Last:  true,
+		Delay: false,
+	}
+	for {
+		var (
+			err    error
+			t      transport.ClientTransport
+			stream *transport.Stream
+			// Record the put handler from Balancer.Get(...). It is called once the
+			// RPC has completed or failed.
+			put func()
+		)
+		// TODO(zhaoq): Need a formal spec of fail-fast.
+		callHdr := &transport.CallHdr{
+			Host:   cc.authority,
+			Method: method,
+		}
+		if cc.dopts.cp != nil {
+			callHdr.SendCompress = cc.dopts.cp.Type()
+		}
+		gopts := BalancerGetOptions{
+			BlockingWait: !c.failFast,
+		}
+		t, put, err = cc.getTransport(ctx, gopts)
+		if err != nil {
+			// TODO(zhaoq): Probably revisit the error handling.
+			if _, ok := err.(*rpcError); ok {
+				return err
+			}
+			if err == errConnClosing || err == errConnUnavailable {
+				if c.failFast {
+					return Errorf(codes.Unavailable, "%v", err)
+				}
+				continue
+			}
+			// All the other errors are treated as Internal errors.
+			return Errorf(codes.Internal, "%v", err)
+		}
+		if c.traceInfo.tr != nil {
+			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
+		}
+		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
+		if err != nil {
+			if put != nil {
+				put()
+				put = nil
+			}
+			// Retry a non-failfast RPC when
+			// i) there is a connection error; or
+			// ii) the server started to drain before this RPC was initiated.
+			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
+				if c.failFast {
+					return toRPCErr(err)
+				}
+				continue
+			}
+			return toRPCErr(err)
+		}
+		err = recvResponse(cc.dopts, t, &c, stream, reply)
+		if err != nil {
+			if put != nil {
+				put()
+				put = nil
+			}
+			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
+				if c.failFast {
+					return toRPCErr(err)
+				}
+				continue
+			}
+			return toRPCErr(err)
+		}
+		if c.traceInfo.tr != nil {
+			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
+		}
+		t.CloseStream(stream, nil)
+		if put != nil {
+			put()
+			put = nil
+		}
+		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
+	}
+}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
new file mode 100644
index 0000000..11dce44
--- /dev/null
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -0,0 +1,884 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/transport"
+)
+
+var (
+	// ErrClientConnClosing indicates that the operation is illegal because
+	// the ClientConn is closing.
+	ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+	// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+	// underlying connections within the specified timeout.
+	ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+	// errNoTransportSecurity indicates that there is no transport security
+	// being set for ClientConn. Users should either set one or explicitly
+	// call WithInsecure DialOption to disable security.
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+	// errTransportCredentialsMissing indicates that users want to transmit security
+	// information (e.g., oauth2 token) which requires secure connection on an insecure
+	// connection.
+	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
+	// and grpc.WithInsecure() are both called for a connection.
+	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
+	// errNetworkIO indicates that the connection is down due to some network I/O error.
+	errNetworkIO = errors.New("grpc: failed with network I/O error")
+	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+	errConnDrain = errors.New("grpc: the connection is drained")
+	// errConnClosing indicates that the connection is closing.
+	errConnClosing = errors.New("grpc: the connection is closing")
+	// errConnUnavailable indicates that the connection is unavailable.
+	errConnUnavailable = errors.New("grpc: the connection is unavailable")
+	errNoAddr          = errors.New("grpc: there is no address available to dial")
+	// minimum time to give a connection to complete
+	minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	unaryInt  UnaryClientInterceptor
+	streamInt StreamClientInterceptor
+	codec     Codec
+	cp        Compressor
+	dc        Decompressor
+	bs        backoffStrategy
+	balancer  Balancer
+	block     bool
+	insecure  bool
+	timeout   time.Duration
+	copts     transport.ConnectOptions
+}
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+	return func(o *dialOptions) {
+		o.codec = c
+	}
+}
+
+// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
+// compressor.
+func WithCompressor(cp Compressor) DialOption {
+	return func(o *dialOptions) {
+		o.cp = cp
+	}
+}
+
+// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
+// message decompressor.
+func WithDecompressor(dc Decompressor) DialOption {
+	return func(o *dialOptions) {
+		o.dc = dc
+	}
+}
+
+// WithBalancer returns a DialOption which sets a load balancer.
+func WithBalancer(b Balancer) DialOption {
+	return func(o *dialOptions) {
+		o.balancer = b
+	}
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
+// for use.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	// Set defaults to ensure that provided BackoffConfig is valid and
+	// unexported fields get default values.
+	setDefaults(&b)
+	return withBackoff(b)
+}
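+
+// For example (illustrative sketch only), the maximum backoff delay can be
+// lowered with:
+//
+//	conn, err := grpc.Dial(target, grpc.WithBackoffMaxDelay(30*time.Second))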
+
+// withBackoff sets the backoff strategy used for retries after a
+// failed connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoffStrategy) DialOption {
+	return func(o *dialOptions) {
+		o.bs = bs
+	}
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and connecting
+// to the server happens in the background.
+func WithBlock() DialOption {
+	return func(o *dialOptions) {
+		o.block = true
+	}
+}
+
+// WithInsecure returns a DialOption which disables transport security for this ClientConn.
+// Note that transport security is required unless WithInsecure is set.
+func WithInsecure() DialOption {
+	return func(o *dialOptions) {
+		o.insecure = true
+	}
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	}
+}
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials which will place auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	}
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
+// initially. This is valid if and only if WithBlock() is present.
+func WithTimeout(d time.Duration) DialOption {
+	return func(o *dialOptions) {
+		o.timeout = d
+	}
+}
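+
+// For example (illustrative sketch only), to make Dial block and give up after
+// five seconds, WithBlock and WithTimeout can be combined:
+//
+//	conn, err := grpc.Dial(target, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(5*time.Second))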
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, deadline.Sub(time.Now()))
+			}
+			return f(addr, 0)
+		}
+	}
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+	return func(o *dialOptions) {
+		o.copts.UserAgent = s
+	}
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+	return func(o *dialOptions) {
+		o.unaryInt = f
+	}
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return func(o *dialOptions) {
+		o.streamInt = f
+	}
+}
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext creates a client connection to the given target. ctx can be used to
+// cancel or expire the pending connecting. Once this function returns, the
+// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
+// to terminate all the pending operations after this function returns.
+// This is the EXPERIMENTAL API.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+	cc := &ClientConn{
+		target: target,
+		conns:  make(map[Address]*addrConn),
+	}
+	cc.ctx, cc.cancel = context.WithCancel(context.Background())
+	defer func() {
+		select {
+		case <-ctx.Done():
+			conn, err = nil, ctx.Err()
+		default:
+		}
+
+		if err != nil {
+			cc.Close()
+		}
+	}()
+
+	for _, opt := range opts {
+		opt(&cc.dopts)
+	}
+
+	// Set defaults.
+	if cc.dopts.codec == nil {
+		cc.dopts.codec = protoCodec{}
+	}
+	if cc.dopts.bs == nil {
+		cc.dopts.bs = DefaultBackoffConfig
+	}
+	creds := cc.dopts.copts.TransportCredentials
+	if creds != nil && creds.Info().ServerName != "" {
+		cc.authority = creds.Info().ServerName
+	} else {
+		colonPos := strings.LastIndex(target, ":")
+		if colonPos == -1 {
+			colonPos = len(target)
+		}
+		cc.authority = target[:colonPos]
+	}
+	var ok bool
+	waitC := make(chan error, 1)
+	go func() {
+		var addrs []Address
+		if cc.dopts.balancer == nil {
+			// Connect to target directly if balancer is nil.
+			addrs = append(addrs, Address{Addr: target})
+		} else {
+			var credsClone credentials.TransportCredentials
+			if creds != nil {
+				credsClone = creds.Clone()
+			}
+			config := BalancerConfig{
+				DialCreds: credsClone,
+			}
+			if err := cc.dopts.balancer.Start(target, config); err != nil {
+				waitC <- err
+				return
+			}
+			ch := cc.dopts.balancer.Notify()
+			if ch == nil {
+				// There is no name resolver installed.
+				addrs = append(addrs, Address{Addr: target})
+			} else {
+				addrs, ok = <-ch
+				if !ok || len(addrs) == 0 {
+					waitC <- errNoAddr
+					return
+				}
+			}
+		}
+		for _, a := range addrs {
+			if err := cc.resetAddrConn(a, false, nil); err != nil {
+				waitC <- err
+				return
+			}
+		}
+		close(waitC)
+	}()
+	var timeoutCh <-chan time.Time
+	if cc.dopts.timeout > 0 {
+		timeoutCh = time.After(cc.dopts.timeout)
+	}
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case err := <-waitC:
+		if err != nil {
+			return nil, err
+		}
+	case <-timeoutCh:
+		return nil, ErrClientConnTimeout
+	}
+	// If balancer is nil or balancer.Notify() is nil, ok will be false here.
+	// The lbWatcher goroutine will not be created.
+	if ok {
+		go cc.lbWatcher()
+	}
+	return cc, nil
+}
+
+// ConnectivityState indicates the state of a client connection.
+type ConnectivityState int
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle ConnectivityState = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+func (s ConnectivityState) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		panic(fmt.Sprintf("unknown connectivity state: %d", s))
+	}
+}
+
+// ClientConn represents a client connection to an RPC server.
+type ClientConn struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	target    string
+	authority string
+	dopts     dialOptions
+
+	mu    sync.RWMutex
+	conns map[Address]*addrConn
+}
+
+func (cc *ClientConn) lbWatcher() {
+	for addrs := range cc.dopts.balancer.Notify() {
+		var (
+			add []Address   // Addresses need to setup connections.
+			del []*addrConn // Connections need to tear down.
+		)
+		cc.mu.Lock()
+		for _, a := range addrs {
+			if _, ok := cc.conns[a]; !ok {
+				add = append(add, a)
+			}
+		}
+		for k, c := range cc.conns {
+			var keep bool
+			for _, a := range addrs {
+				if k == a {
+					keep = true
+					break
+				}
+			}
+			if !keep {
+				del = append(del, c)
+				delete(cc.conns, c.addr)
+			}
+		}
+		cc.mu.Unlock()
+		for _, a := range add {
+			cc.resetAddrConn(a, true, nil)
+		}
+		for _, c := range del {
+			c.tearDown(errConnDrain)
+		}
+	}
+}
+
+// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
+// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
+// If tearDownErr is nil, errConnDrain will be used instead.
+func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
+	ac := &addrConn{
+		cc:    cc,
+		addr:  addr,
+		dopts: cc.dopts,
+	}
+	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+	ac.stateCV = sync.NewCond(&ac.mu)
+	if EnableTracing {
+		ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
+	}
+	if !ac.dopts.insecure {
+		if ac.dopts.copts.TransportCredentials == nil {
+			return errNoTransportSecurity
+		}
+	} else {
+		if ac.dopts.copts.TransportCredentials != nil {
+			return errCredentialsConflict
+		}
+		for _, cd := range ac.dopts.copts.PerRPCCredentials {
+			if cd.RequireTransportSecurity() {
+				return errTransportCredentialsMissing
+			}
+		}
+	}
+	// Track ac in cc. This needs to be done before any getTransport(...) is called.
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return ErrClientConnClosing
+	}
+	stale := cc.conns[ac.addr]
+	cc.conns[ac.addr] = ac
+	cc.mu.Unlock()
+	if stale != nil {
+		// There is an addrConn alive on ac.addr already. This could be due to
+		// 1) a buggy Balancer notifies duplicated Addresses;
+		// 2) goaway was received, a new ac will replace the old ac.
+		//    The old ac should be deleted from cc.conns, but the
+		//    underlying transport should drain rather than close.
+		if tearDownErr == nil {
+			// tearDownErr is nil if resetAddrConn is called by
+			// 1) Dial
+			// 2) lbWatcher
+			// In both cases, the stale ac should drain, not close.
+			stale.tearDown(errConnDrain)
+		} else {
+			stale.tearDown(tearDownErr)
+		}
+	}
+	// skipWait may overwrite the decision in ac.dopts.block.
+	if ac.dopts.block && !skipWait {
+		if err := ac.resetTransport(false); err != nil {
+			if err != errConnClosing {
+				// Tear down ac and delete it from cc.conns.
+				cc.mu.Lock()
+				delete(cc.conns, ac.addr)
+				cc.mu.Unlock()
+				ac.tearDown(err)
+			}
+			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+				return e.Origin()
+			}
+			return err
+		}
+		// Start to monitor the error status of transport.
+		go ac.transportMonitor()
+	} else {
+		// Start a goroutine connecting to the server asynchronously.
+		go func() {
+			if err := ac.resetTransport(false); err != nil {
+				grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
+				if err != errConnClosing {
+					// Keep this ac in cc.conns, to get the reason it's torn down.
+					ac.tearDown(err)
+				}
+				return
+			}
+			ac.transportMonitor()
+		}()
+	}
+	return nil
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
+	var (
+		ac  *addrConn
+		ok  bool
+		put func()
+	)
+	if cc.dopts.balancer == nil {
+		// If balancer is nil, there should be only one addrConn available.
+		cc.mu.RLock()
+		if cc.conns == nil {
+			cc.mu.RUnlock()
+			return nil, nil, toRPCErr(ErrClientConnClosing)
+		}
+		for _, ac = range cc.conns {
+			// Break after the first iteration to get the first addrConn.
+			ok = true
+			break
+		}
+		cc.mu.RUnlock()
+	} else {
+		var (
+			addr Address
+			err  error
+		)
+		addr, put, err = cc.dopts.balancer.Get(ctx, opts)
+		if err != nil {
+			return nil, nil, toRPCErr(err)
+		}
+		cc.mu.RLock()
+		if cc.conns == nil {
+			cc.mu.RUnlock()
+			return nil, nil, toRPCErr(ErrClientConnClosing)
+		}
+		ac, ok = cc.conns[addr]
+		cc.mu.RUnlock()
+	}
+	if !ok {
+		if put != nil {
+			put()
+		}
+		return nil, nil, errConnClosing
+	}
+	t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
+	if err != nil {
+		if put != nil {
+			put()
+		}
+		return nil, nil, err
+	}
+	return t, put, nil
+}
+
+// Close tears down the ClientConn and all underlying connections.
+func (cc *ClientConn) Close() error {
+	cc.cancel()
+
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return ErrClientConnClosing
+	}
+	conns := cc.conns
+	cc.conns = nil
+	cc.mu.Unlock()
+	if cc.dopts.balancer != nil {
+		cc.dopts.balancer.Close()
+	}
+	for _, ac := range conns {
+		ac.tearDown(ErrClientConnClosing)
+	}
+	return nil
+}
+
+// addrConn is a network connection to a given address.
+type addrConn struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	cc     *ClientConn
+	addr   Address
+	dopts  dialOptions
+	events trace.EventLog
+
+	mu      sync.Mutex
+	state   ConnectivityState
+	stateCV *sync.Cond
+	down    func(error) // the handler called when a connection is down.
+	// ready is closed and becomes nil when a new transport is up or failed
+	// due to timeout.
+	ready     chan struct{}
+	transport transport.ClientTransport
+
+	// The reason this addrConn is torn down.
+	tearDownErr error
+}
+
+// printf records an event in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) printf(format string, a ...interface{}) {
+	if ac.events != nil {
+		ac.events.Printf(format, a...)
+	}
+}
+
+// errorf records an error in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) errorf(format string, a ...interface{}) {
+	if ac.events != nil {
+		ac.events.Errorf(format, a...)
+	}
+}
+
+// getState returns the connectivity state of the Conn
+func (ac *addrConn) getState() ConnectivityState {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	return ac.state
+}
+
+// waitForStateChange blocks until the state changes to something other than the sourceState.
+func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if sourceState != ac.state {
+		return ac.state, nil
+	}
+	done := make(chan struct{})
+	var err error
+	go func() {
+		select {
+		case <-ctx.Done():
+			ac.mu.Lock()
+			err = ctx.Err()
+			ac.stateCV.Broadcast()
+			ac.mu.Unlock()
+		case <-done:
+		}
+	}()
+	defer close(done)
+	for sourceState == ac.state {
+		ac.stateCV.Wait()
+		if err != nil {
+			return ac.state, err
+		}
+	}
+	return ac.state, nil
+}
+
+func (ac *addrConn) resetTransport(closeTransport bool) error {
+	for retries := 0; ; retries++ {
+		ac.mu.Lock()
+		ac.printf("connecting")
+		if ac.state == Shutdown {
+			// ac.tearDown(...) has been invoked.
+			ac.mu.Unlock()
+			return errConnClosing
+		}
+		if ac.down != nil {
+			ac.down(downErrorf(false, true, "%v", errNetworkIO))
+			ac.down = nil
+		}
+		ac.state = Connecting
+		ac.stateCV.Broadcast()
+		t := ac.transport
+		ac.mu.Unlock()
+		if closeTransport && t != nil {
+			t.Close()
+		}
+		sleepTime := ac.dopts.bs.backoff(retries)
+		timeout := minConnectTimeout
+		if timeout < sleepTime {
+			timeout = sleepTime
+		}
+		ctx, cancel := context.WithTimeout(ac.ctx, timeout)
+		connectTime := time.Now()
+		newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts)
+		if err != nil {
+			cancel()
+
+			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+				return err
+			}
+			grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr)
+			ac.mu.Lock()
+			if ac.state == Shutdown {
+				// ac.tearDown(...) has been invoked.
+				ac.mu.Unlock()
+				return errConnClosing
+			}
+			ac.errorf("transient failure: %v", err)
+			ac.state = TransientFailure
+			ac.stateCV.Broadcast()
+			if ac.ready != nil {
+				close(ac.ready)
+				ac.ready = nil
+			}
+			ac.mu.Unlock()
+			closeTransport = false
+			select {
+			case <-time.After(sleepTime - time.Since(connectTime)):
+			case <-ac.ctx.Done():
+				return ac.ctx.Err()
+			}
+			continue
+		}
+		ac.mu.Lock()
+		ac.printf("ready")
+		if ac.state == Shutdown {
+			// ac.tearDown(...) has been invoked.
+			ac.mu.Unlock()
+			newTransport.Close()
+			return errConnClosing
+		}
+		ac.state = Ready
+		ac.stateCV.Broadcast()
+		ac.transport = newTransport
+		if ac.ready != nil {
+			close(ac.ready)
+			ac.ready = nil
+		}
+		if ac.cc.dopts.balancer != nil {
+			ac.down = ac.cc.dopts.balancer.Up(ac.addr)
+		}
+		ac.mu.Unlock()
+		return nil
+	}
+}
+
+// Run in a goroutine to track the error in transport and create the
+// new transport if an error happens. It returns when the channel is closing.
+func (ac *addrConn) transportMonitor() {
+	for {
+		ac.mu.Lock()
+		t := ac.transport
+		ac.mu.Unlock()
+		select {
+		// This is needed to detect the teardown when
+		// the addrConn is idle (i.e., no RPC in flight).
+		case <-ac.ctx.Done():
+			select {
+			case <-t.Error():
+				t.Close()
+			default:
+			}
+			return
+		case <-t.GoAway():
+			// If GoAway happens without any network I/O error, ac is closed without shutting down the
+			// underlying transport (the transport will be closed when all the pending RPCs finished or
+			// failed.).
+			// If GoAway and some network I/O error happen concurrently, ac and its underlying transport
+			// are closed.
+			// In both cases, a new ac is created.
+			select {
+			case <-t.Error():
+				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
+			default:
+				ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
+			}
+			return
+		case <-t.Error():
+			select {
+			case <-ac.ctx.Done():
+				t.Close()
+				return
+			case <-t.GoAway():
+				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
+				return
+			default:
+			}
+			ac.mu.Lock()
+			if ac.state == Shutdown {
+				// ac has been shutdown.
+				ac.mu.Unlock()
+				return
+			}
+			ac.state = TransientFailure
+			ac.stateCV.Broadcast()
+			ac.mu.Unlock()
+			if err := ac.resetTransport(true); err != nil {
+				ac.mu.Lock()
+				ac.printf("transport exiting: %v", err)
+				ac.mu.Unlock()
+				grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
+				if err != errConnClosing {
+					// Keep this ac in cc.conns, to get the reason it's torn down.
+					ac.tearDown(err)
+				}
+				return
+			}
+		}
+	}
+}
+
+// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
+// iv) transport is in TransientFailure and there's no balancer/failfast is true.
+func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
+	for {
+		ac.mu.Lock()
+		switch {
+		case ac.state == Shutdown:
+			if failfast || !hasBalancer {
+				// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
+				err := ac.tearDownErr
+				ac.mu.Unlock()
+				return nil, err
+			}
+			ac.mu.Unlock()
+			return nil, errConnClosing
+		case ac.state == Ready:
+			ct := ac.transport
+			ac.mu.Unlock()
+			return ct, nil
+		case ac.state == TransientFailure:
+			if failfast || hasBalancer {
+				ac.mu.Unlock()
+				return nil, errConnUnavailable
+			}
+		}
+		ready := ac.ready
+		if ready == nil {
+			ready = make(chan struct{})
+			ac.ready = ready
+		}
+		ac.mu.Unlock()
+		select {
+		case <-ctx.Done():
+			return nil, toRPCErr(ctx.Err())
+		// Wait until the new transport is ready or failed.
+		case <-ready:
+		}
+	}
+}
+
+// tearDown starts to tear down the addrConn.
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many addrConn's in a
+// tight loop).
+// tearDown doesn't remove ac from ac.cc.conns.
+func (ac *addrConn) tearDown(err error) {
+	ac.cancel()
+
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ac.down != nil {
+		ac.down(downErrorf(false, false, "%v", err))
+		ac.down = nil
+	}
+	if err == errConnDrain && ac.transport != nil {
+		// GracefulClose(...) may be executed multiple times when
+		// i) receiving multiple GoAway frames from the server; or
+		// ii) there are concurrent name resolver/Balancer triggered
+		// address removal and GoAway.
+		ac.transport.GracefulClose()
+	}
+	if ac.state == Shutdown {
+		return
+	}
+	ac.state = Shutdown
+	ac.tearDownErr = err
+	ac.stateCV.Broadcast()
+	if ac.events != nil {
+		ac.events.Finish()
+		ac.events = nil
+	}
+	if ac.ready != nil {
+		close(ac.ready)
+		ac.ready = nil
+	}
+	if ac.transport != nil && err != errConnDrain {
+		ac.transport.Close()
+	}
+	return
+}
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
new file mode 100755
index 0000000..b009488
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
+proto=$1
+protoc --go_out=plugins=grpc:. $proto
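+
+# Example invocation (the .proto file name is only a placeholder):
+#   ./codegen.sh helloworld.proto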
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 0000000..e6762d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=Code; DO NOT EDIT
+
+package codes
+
+import "fmt"
+
+const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
+
+var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
+
+func (i Code) String() string {
+	if i+1 >= Code(len(_Code_index)) {
+		return fmt.Sprintf("Code(%d)", i)
+	}
+	return _Code_name[_Code_index[i]:_Code_index[i+1]]
+}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
new file mode 100644
index 0000000..37c5b86
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package codes defines the canonical error codes used by gRPC. It is
+// consistent across various languages.
+package codes
+
+// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+type Code uint32
+
+//go:generate stringer -type=Code
+
+const (
+	// OK is returned on success.
+	OK Code = 0
+
+	// Canceled indicates the operation was cancelled (typically by the caller).
+	Canceled Code = 1
+
+	// Unknown error.  An example of where this error may be returned is
+	// if a Status value received from another address space belongs to
+	// an error-space that is not known in this address space.  Also
+	// errors raised by APIs that do not return enough error information
+	// may be converted to this error.
+	Unknown Code = 2
+
+	// InvalidArgument indicates client specified an invalid argument.
+	// Note that this differs from FailedPrecondition. It indicates arguments
+	// that are problematic regardless of the state of the system
+	// (e.g., a malformed file name).
+	InvalidArgument Code = 3
+
+	// DeadlineExceeded means operation expired before completion.
+	// For operations that change the state of the system, this error may be
+	// returned even if the operation has completed successfully. For
+	// example, a successful response from a server could have been delayed
+	// long enough for the deadline to expire.
+	DeadlineExceeded Code = 4
+
+	// NotFound means some requested entity (e.g., file or directory) was
+	// not found.
+	NotFound Code = 5
+
+	// AlreadyExists means an attempt to create an entity failed because one
+	// already exists.
+	AlreadyExists Code = 6
+
+	// PermissionDenied indicates the caller does not have permission to
+	// execute the specified operation. It must not be used for rejections
+	// caused by exhausting some resource (use ResourceExhausted
+	// instead for those errors).  It must not be
+	// used if the caller cannot be identified (use Unauthenticated
+	// instead for those errors).
+	PermissionDenied Code = 7
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	Unauthenticated Code = 16
+
+	// ResourceExhausted indicates some resource has been exhausted, perhaps
+	// a per-user quota, or perhaps the entire file system is out of space.
+	ResourceExhausted Code = 8
+
+	// FailedPrecondition indicates operation was rejected because the
+	// system is not in a state required for the operation's execution.
+	// For example, the directory to be deleted may be non-empty, an rmdir
+	// operation may be applied to a non-directory, etc.
+	//
+	// A litmus test that may help a service implementor in deciding
+	// between FailedPrecondition, Aborted, and Unavailable:
+	//  (a) Use Unavailable if the client can retry just the failing call.
+	//  (b) Use Aborted if the client should retry at a higher-level
+	//      (e.g., restarting a read-modify-write sequence).
+	//  (c) Use FailedPrecondition if the client should not retry until
+	//      the system state has been explicitly fixed.  E.g., if an "rmdir"
+	//      fails because the directory is non-empty, FailedPrecondition
+	//      should be returned since the client should not retry unless
+	//      they have first fixed up the directory by deleting files from it.
+	//  (d) Use FailedPrecondition if the client performs conditional
+	//      REST Get/Update/Delete on a resource and the resource on the
+	//      server does not match the condition. E.g., conflicting
+	//      read-modify-write on the same resource.
+	FailedPrecondition Code = 9
+
+	// Aborted indicates the operation was aborted, typically due to a
+	// concurrency issue like sequencer check failures, transaction aborts,
+	// etc.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	Aborted Code = 10
+
+	// OutOfRange means operation was attempted past the valid range.
+	// E.g., seeking or reading past end of file.
+	//
+	// Unlike InvalidArgument, this error indicates a problem that may
+	// be fixed if the system state changes. For example, a 32-bit file
+	// system will generate InvalidArgument if asked to read at an
+	// offset that is not in the range [0,2^32-1], but it will generate
+	// OutOfRange if asked to read from an offset past the current
+	// file size.
+	//
+	// There is a fair bit of overlap between FailedPrecondition and
+	// OutOfRange.  We recommend using OutOfRange (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an OutOfRange error to detect when
+	// they are done.
+	OutOfRange Code = 11
+
+	// Unimplemented indicates operation is not implemented or not
+	// supported/enabled in this service.
+	Unimplemented Code = 12
+
+	// Internal errors.  Means some invariants expected by the underlying
+	// system have been broken.  If you see one of these errors,
+	// something is very broken.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	DataLoss Code = 15
+)
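
As a quick orientation for how callers consume this package, the sketch below inspects the code attached to an RPC error and decides whether to retry. It assumes the grpc.Code and grpc.Errorf helpers from this vintage of the top-level grpc package (also vendored in this change); treat it as an illustrative sketch, not part of the vendored code.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// shouldRetry reports whether an RPC error looks transient enough to retry.
func shouldRetry(err error) bool {
	switch grpc.Code(err) {
	case codes.Unavailable, codes.DeadlineExceeded, codes.Aborted:
		return true
	default:
		return false
	}
}

func main() {
	err := grpc.Errorf(codes.Unavailable, "backend is draining")
	fmt.Println(shouldRetry(err))                 // true
	fmt.Println(shouldRetry(errors.New("other"))) // false: plain errors map to codes.Unknown
}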
diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh
new file mode 100755
index 0000000..1202353
--- /dev/null
+++ b/vendor/google.golang.org/grpc/coverage.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -e
+
+workdir=.cover
+profile="$workdir/cover.out"
+mode=set
+end2endtest="google.golang.org/grpc/test"
+
+generate_cover_data() {
+    rm -rf "$workdir"
+    mkdir "$workdir"
+
+    for pkg in "$@"; do
+        if [ "$pkg" == "google.golang.org/grpc" -o "$pkg" == "google.golang.org/grpc/transport" -o "$pkg" == "google.golang.org/grpc/metadata" -o "$pkg" == "google.golang.org/grpc/credentials" ]
+            then
+                f="$workdir/$(echo $pkg | tr / -)"
+                go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
+                go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
+        fi
+    done
+
+    echo "mode: $mode" >"$profile"
+    grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
+}
+
+show_cover_report() {
+    go tool cover -${1}="$profile"
+}
+
+push_to_coveralls() {
+    goveralls -coverprofile="$profile"
+}
+
+generate_cover_data $(go list ./...)
+show_cover_report func
+case "$1" in
+"")
+    ;;
+--html)
+    show_cover_report html ;;
+--coveralls)
+    push_to_coveralls ;;
+*)
+    echo >&2 "error: invalid option: $1" ;;
+esac
+rm -rf "$workdir"
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 0000000..4154f1e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,232 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package credentials implements various credentials supported by the gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	// alpnProtoStr are the specified application level protocols for gRPC.
+	alpnProtoStr = []string{"h2"}
+)
+
+// PerRPCCredentials defines the common interface for the credentials which need to
+// attach security information to every RPC (e.g., oauth2).
+type PerRPCCredentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing
+	// tokens if required. This should be called by the transport layer on
+	// each request, and the data should be populated in headers or other
+	// context. uri is the URI of the entry point for the request. When
+	// supported by the underlying implementation, ctx can be used for
+	// timeout and cancellation.
+	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
+	// it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials require
+	// transport security.
+	RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, server name, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.
+	SecurityVersion string
+	// ServerName is the user-configured server name.
+	ServerName string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+type AuthInfo interface {
+	AuthType() string
+}
+
+var (
+	// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+	// and the caller should not close rawConn.
+	ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+)
+
+// TransportCredentials defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportCredentials interface {
+	// ClientHandshake does the authentication handshake specified by the corresponding
+	// authentication protocol on rawConn for clients. It returns the authenticated
+	// connection and the corresponding auth information about the connection.
+	// Implementations must use the provided context to implement timely cancellation.
+	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
+	// ServerHandshake does the authentication handshake for servers. It returns
+	// the authenticated connection and the corresponding auth information about
+	// the connection.
+	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
+	// Info provides the ProtocolInfo of this TransportCredentials.
+	Info() ProtocolInfo
+	// Clone makes a copy of this TransportCredentials.
+	Clone() TransportCredentials
+	// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
+	// gRPC internals also use it to override the virtual hosting name if it is set.
+	// It must be called before dialing. Currently, this is only used by grpclb.
+	OverrideServerName(string) error
+}
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := cloneTLSConfig(c.config)
+	if cfg.ServerName == "" {
+		colonPos := strings.LastIndex(addr, ":")
+		if colonPos == -1 {
+			colonPos = len(addr)
+		}
+		cfg.ServerName = addr[:colonPos]
+	}
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		return nil, nil, ctx.Err()
+	}
+	// TODO(zhaoq): Omit the auth info for client now. It is more for
+	// information than anything else.
+	return conn, nil, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		return nil, nil, err
+	}
+	return conn, TLSInfo{conn.ConnectionState()}, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	tc := &tlsCreds{cloneTLSConfig(c)}
+	tc.config.NextProtos = alpnProtoStr
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS transport credentials from the input certificate for the client.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. the :authority header field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS transport credentials from the input certificate file for the client.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. the :authority header field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS transport credentials from the input certificate for the server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS transport credentials from the input certificate file and key
+// file for the server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
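
For orientation, here is a minimal sketch of how a client wires these transport credentials into a connection. It assumes grpc.Dial and grpc.WithTransportCredentials from the top-level grpc package (not part of this file) and a hypothetical ca.pem path and host name.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Build TransportCredentials from a CA certificate file; the empty
	// serverNameOverride means the dialed host name is verified as-is.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load credentials: %v", err)
	}
	conn, err := grpc.Dial("greeter.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
	_ = conn // pass conn to a generated client constructor
}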
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
new file mode 100644
index 0000000..9647b9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
@@ -0,0 +1,76 @@
+// +build go1.7
+
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO replace this function with official clone function.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return &tls.Config{
+		Rand:                        cfg.Rand,
+		Time:                        cfg.Time,
+		Certificates:                cfg.Certificates,
+		NameToCertificate:           cfg.NameToCertificate,
+		GetCertificate:              cfg.GetCertificate,
+		RootCAs:                     cfg.RootCAs,
+		NextProtos:                  cfg.NextProtos,
+		ServerName:                  cfg.ServerName,
+		ClientAuth:                  cfg.ClientAuth,
+		ClientCAs:                   cfg.ClientCAs,
+		InsecureSkipVerify:          cfg.InsecureSkipVerify,
+		CipherSuites:                cfg.CipherSuites,
+		PreferServerCipherSuites:    cfg.PreferServerCipherSuites,
+		SessionTicketsDisabled:      cfg.SessionTicketsDisabled,
+		SessionTicketKey:            cfg.SessionTicketKey,
+		ClientSessionCache:          cfg.ClientSessionCache,
+		MinVersion:                  cfg.MinVersion,
+		MaxVersion:                  cfg.MaxVersion,
+		CurvePreferences:            cfg.CurvePreferences,
+		DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+		Renegotiation:               cfg.Renegotiation,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
new file mode 100644
index 0000000..09b8d12
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
@@ -0,0 +1,74 @@
+// +build !go1.7
+
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO replace this function with official clone function.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		SessionTicketsDisabled:   cfg.SessionTicketsDisabled,
+		SessionTicketKey:         cfg.SessionTicketKey,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
new file mode 100644
index 0000000..8e68c4d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package oauth implements gRPC credentials using OAuth.
+package oauth
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/grpc/credentials"
+)
+
+// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource.
+type TokenSource struct {
+	oauth2.TokenSource
+}
+
+// GetRequestMetadata gets the request metadata as a map from a TokenSource.
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	token, err := ts.Token()
+	if err != nil {
+		return nil, err
+	}
+	return map[string]string{
+		"authorization": token.Type() + " " + token.AccessToken,
+	}, nil
+}
+
+// RequireTransportSecurity indicates whether the credentials require transport security.
+func (ts TokenSource) RequireTransportSecurity() bool {
+	return true
+}
+
+type jwtAccess struct {
+	jsonKey []byte
+}
+
+// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile.
+func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) {
+	jsonKey, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+	}
+	return NewJWTAccessFromKey(jsonKey)
+}
+
+// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey.
+func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) {
+	return jwtAccess{jsonKey}, nil
+}
+
+func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
+	if err != nil {
+		return nil, err
+	}
+	token, err := ts.Token()
+	if err != nil {
+		return nil, err
+	}
+	return map[string]string{
+		"authorization": token.TokenType + " " + token.AccessToken,
+	}, nil
+}
+
+func (j jwtAccess) RequireTransportSecurity() bool {
+	return true
+}
+
+// oauthAccess supplies PerRPCCredentials from a given token.
+type oauthAccess struct {
+	token oauth2.Token
+}
+
+// NewOauthAccess constructs the PerRPCCredentials using a given token.
+func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
+	return oauthAccess{token: *token}
+}
+
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	return map[string]string{
+		"authorization": oa.token.TokenType + " " + oa.token.AccessToken,
+	}, nil
+}
+
+func (oa oauthAccess) RequireTransportSecurity() bool {
+	return true
+}
+
+// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from
+// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
+// if your program is running on a GCE instance.
+// TODO(dsymonds): Deprecate and remove this.
+func NewComputeEngine() credentials.PerRPCCredentials {
+	return TokenSource{google.ComputeTokenSource("")}
+}
+
+// serviceAccount represents PerRPCCredentials via JWT signing key.
+type serviceAccount struct {
+	config *jwt.Config
+}
+
+func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	token, err := s.config.TokenSource(ctx).Token()
+	if err != nil {
+		return nil, err
+	}
+	return map[string]string{
+		"authorization": token.TokenType + " " + token.AccessToken,
+	}, nil
+}
+
+func (s serviceAccount) RequireTransportSecurity() bool {
+	return true
+}
+
+// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice
+// from a Google Developers service account.
+func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) {
+	config, err := google.JWTConfigFromJSON(jsonKey, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return serviceAccount{config: config}, nil
+}
+
+// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file
+// of a Google Developers service account.
+func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) {
+	jsonKey, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+	}
+	return NewServiceAccountFromKey(jsonKey, scope...)
+}
+
+// NewApplicationDefault returns "Application Default Credentials". For more
+// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
+func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) {
+	t, err := google.DefaultTokenSource(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return TokenSource{t}, nil
+}
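
A minimal sketch of attaching these per-RPC credentials to a client connection, assuming the grpc.WithTransportCredentials and grpc.WithPerRPCCredentials dial options from the top-level package; the key file, CA file, scope, and host name are hypothetical placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Per-RPC OAuth tokens require transport security, so pair them with TLS.
	perRPC, err := oauth.NewServiceAccountFromFile("key.json", "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("failed to create OAuth credentials: %v", err)
	}
	tcreds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	conn, err := grpc.Dial("api.example.com:443",
		grpc.WithTransportCredentials(tcreds),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
	_ = conn // pass conn to a generated client constructor
}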
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000..b4c0e74
--- /dev/null
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,6 @@
+/*
+Package grpc implements an RPC system called gRPC.
+
+See www.grpc.io for more information about gRPC.
+*/
+package grpc
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 0000000..2cc09be
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package grpclog defines logging for grpc.
+*/
+package grpclog
+
+import (
+	"log"
+	"os"
+)
+
+// Use golang's standard logger by default.
+// Access is not mutex-protected: do not modify except in init()
+// functions.
+var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger mimics golang's standard Logger as an interface.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+func SetLogger(l Logger) {
+	logger = l
+}
+
+// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+func Print(args ...interface{}) {
+	logger.Print(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, args ...interface{}) {
+	logger.Printf(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+func Println(args ...interface{}) {
+	logger.Println(args...)
+}
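
Since SetLogger must only be called from init() functions, a typical override looks like the sketch below; the prefix and flags are arbitrary examples, and *log.Logger satisfies the Logger interface directly.

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route gRPC-internal logging through the standard library logger
	// with a recognizable prefix and microsecond timestamps.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags|log.Lmicroseconds))
}

func main() {
	grpclog.Println("logger installed")
}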
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 0000000..8d932ef
--- /dev/null
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"golang.org/x/net/context"
+)
+
+// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
+type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC,
+// and it is the responsibility of the interceptor to call it.
+// This is the EXPERIMENTAL API.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+
+// Streamer is called by StreamClientInterceptor to create a ClientStream.
+type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
+
+// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
+// operations. streamer is the handler to create a ClientStream, and it is the responsibility of the interceptor to call it.
+// This is the EXPERIMENTAL API.
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
+
+// UnaryServerInfo consists of various information about a unary RPC on
+// the server side. All per-RPC information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+	// Server is the service implementation the user provides. This is read-only.
+	Server interface{}
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC.
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information of this RPC the interceptor can operate on, and handler is the wrapper
+// of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// the server side. All per-RPC information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on, and handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
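
As a sketch of how these hooks are used, the following UnaryServerInterceptor logs each method and any resulting error before returning the handler's result. It assumes the grpc.NewServer constructor and the grpc.UnaryInterceptor server option from the top-level package of this vintage.

package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

// loggingInterceptor wraps every unary handler with simple request logging.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	resp, err := handler(ctx, req)
	grpclog.Printf("method=%s err=%v", info.FullMethod, err)
	return resp, err
}

func main() {
	srv := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	_ = srv // register services and call srv.Serve(lis) as usual
}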
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 0000000..5489143
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package internal contains gRPC-internal code for testing, to avoid polluting
+// the godoc of the top-level grpc package.
+package internal
+
+// TestingCloseConns closes all existing transports but keeps
+// grpcServer.lis accepting new connections.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingCloseConns func(grpcServer interface{})
+
+// TestingUseHandlerImpl enables the http.Handler-based server implementation.
+// It must be called before Serve and requires TLS credentials.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingUseHandlerImpl func(grpcServer interface{})
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 0000000..cb29bf9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,147 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+package metadata
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	binHdrSuffix = "-bin"
+)
+
+// encodeKeyValue encodes key and value qualified for transmission via gRPC.
+// Transmitting binary headers violates HTTP/2 spec.
+// TODO(zhaoq): Maybe check if k is ASCII also.
+func encodeKeyValue(k, v string) (string, string) {
+	k = strings.ToLower(k)
+	if strings.HasSuffix(k, binHdrSuffix) {
+		val := base64.StdEncoding.EncodeToString([]byte(v))
+		v = string(val)
+	}
+	return k, v
+}
+
+// DecodeKeyValue returns the original key and value corresponding to the
+// encoded data in k, v.
+// If k is a binary header and v contains commas, v is split on commas, each part is
+// decoded, and the decoded parts are joined with commas before being returned.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	if !strings.HasSuffix(k, binHdrSuffix) {
+		return k, v, nil
+	}
+	vvs := strings.Split(v, ",")
+	for i, vv := range vvs {
+		val, err := base64.StdEncoding.DecodeString(vv)
+		if err != nil {
+			return "", "", err
+		}
+		vvs[i] = string(val)
+	}
+	return k, strings.Join(vvs, ","), nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+func New(m map[string]string) MD {
+	md := MD{}
+	for k, v := range m {
+		key, val := encodeKeyValue(k, v)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := MD{}
+	var k string
+	for i, s := range kv {
+		if i%2 == 0 {
+			k = s
+			continue
+		}
+		key, val := encodeKeyValue(k, s)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	return Join(md)
+}
+
+// Join joins any number of MDs into a single MD.
+// The order of values for each key is determined by the order in which
+// the MDs containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return out
+}
+
+type mdKey struct{}
+
+// NewContext creates a new context with md attached.
+func NewContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdKey{}, md)
+}
+
+// FromContext returns the MD in ctx if it exists.
+func FromContext(ctx context.Context) (md MD, ok bool) {
+	md, ok = ctx.Value(mdKey{}).(MD)
+	return
+}
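
A small sketch of the intended usage: the client attaches metadata to the context with Pairs and NewContext, and a server handler reads it back with FromContext. The key names are arbitrary examples; both sides are shown in one process only for brevity.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach request metadata to the context passed to an RPC.
	md := metadata.Pairs("request-id", "42", "user", "alice")
	ctx := metadata.NewContext(context.Background(), md)

	// Server side: read the metadata back from the RPC's context.
	if got, ok := metadata.FromContext(ctx); ok {
		fmt.Println(got["request-id"]) // [42]
	}
}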
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
new file mode 100644
index 0000000..c2e0871
--- /dev/null
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package naming defines the naming API and related data structures for gRPC.
+// The interface is EXPERIMENTAL and may be subject to change.
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+type Operation uint8
+
+const (
+	// Add indicates a new address is added.
+	Add Operation = iota
+	// Delete indicates an existing address is deleted.
+	Delete
+)
+
+// Update defines a name resolution update. Note that it is not valid for an Update
+// to have both an empty string Addr and nil Metadata.
+type Update struct {
+	// Op indicates the operation of the update.
+	Op Operation
+	// Addr is the updated address. It is empty string if there is no address update.
+	Addr string
+	// Metadata is the updated metadata. It is nil if there is no metadata update.
+	// Metadata is not required for a custom naming implementation.
+	Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+type Resolver interface {
+	// Resolve creates a Watcher for target.
+	Resolve(target string) (Watcher, error)
+}
+
+// Watcher watches for the updates on the specified target.
+type Watcher interface {
+	// Next blocks until an update or error happens. It may return one or more
+	// updates. The first call should get the full set of the results. It should
+	// return an error if and only if Watcher cannot recover.
+	Next() ([]*Update, error)
+	// Close closes the Watcher.
+	Close()
+}
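
Because the interfaces are small, a toy Resolver that reports a fixed address list once and then blocks until closed can serve as a sketch of the contract. The addresses and target name are placeholders, not part of this change.

package main

import (
	"errors"

	"google.golang.org/grpc/naming"
)

// staticResolver resolves every target to the same fixed set of addresses.
type staticResolver struct{ addrs []string }

func (r staticResolver) Resolve(target string) (naming.Watcher, error) {
	return &staticWatcher{addrs: r.addrs, stop: make(chan struct{})}, nil
}

type staticWatcher struct {
	addrs []string
	sent  bool
	stop  chan struct{}
}

// Next returns the full address set on the first call, then blocks until Close.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		updates := make([]*naming.Update, 0, len(w.addrs))
		for _, a := range w.addrs {
			updates = append(updates, &naming.Update{Op: naming.Add, Addr: a})
		}
		return updates, nil
	}
	<-w.stop
	return nil, errors.New("naming: watcher closed")
}

func (w *staticWatcher) Close() { close(w.stop) }

func main() {
	w, _ := staticResolver{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}}.Resolve("my-service")
	updates, _ := w.Next()
	for _, u := range updates {
		_ = u // each update carries Op == naming.Add and one address
	}
	w.Close()
}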
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 0000000..bfa6205
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
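
A short sketch of how server-side code inspects the caller: peer.FromContext pulls the Peer that the transport attached to the RPC's context. The helper below is arbitrary; any code holding that context can do the same.

package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/peer"
)

// logCaller records the remote address and auth type of the current RPC, if known.
func logCaller(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		grpclog.Println("no peer information in context")
		return
	}
	authType := "none"
	if p.AuthInfo != nil {
		authType = p.AuthInfo.AuthType()
	}
	grpclog.Printf("caller=%v auth=%s", p.Addr, authType)
}

func main() {
	logCaller(context.Background()) // prints the fallback: no peer info outside of an RPC
}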
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 0000000..6b60095
--- /dev/null
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,457 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation. The returned
+	// string will be used as part of content type in transmission.
+	String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct{}
+
+func (protoCodec) Marshal(v interface{}) ([]byte, error) {
+	return proto.Marshal(v.(proto.Message))
+}
+
+func (protoCodec) Unmarshal(data []byte, v interface{}) error {
+	return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (protoCodec) String() string {
+	return "proto"
+}
+
+// Compressor defines the interface gRPC uses to compress a message.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+func NewGZIPCompressor() Compressor {
+	return &gzipCompressor{}
+}
+
+type gzipCompressor struct {
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := gzip.NewWriter(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	z, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	defer z.Close()
+	return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	failFast  bool
+	headerMD  metadata.MD
+	trailerMD metadata.MD
+	traceInfo traceInfo // in trace.go
+}
+
+var defaultCallInfo = callInfo{failFast: true}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server.  If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed.  after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo)
+}
+
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo)        {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo)        { o(c) }
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.headerMD
+	})
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.trailerMD
+	})
+}
+
+// FailFast configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If failfast is true, the RPC will fail
+// immediately. Otherwise, the RPC client will block the call until a
+// connection is available (or the call is canceled or times out) and will retry
+// the call if it fails due to a transient error. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md
+func FailFast(failFast bool) CallOption {
+	return beforeCall(func(c *callInfo) error {
+		c.failFast = failFast
+		return nil
+	})
+}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = iota // no compression
+	compressionMade
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	// r is the underlying reader.
+	// See the comment on recvMsg for the permissible
+	// error types.
+	r io.Reader
+
+	// The header of a gRPC message. Find more detail
+	// at http://www.grpc.io/docs/guides/wire.html.
+	header [5]byte
+}
+
+// recvMsg reads a complete gRPC message from the stream.
+//
+// It returns the message and its payload (compression/encoding)
+// format. The caller owns the returned msg memory.
+//
+// If there is an error, possible values are:
+//   * io.EOF, when no messages remain
+//   * io.ErrUnexpectedEOF
+//   * of type transport.ConnectionError
+//   * of type transport.StreamError
+// No other error values or types must be returned, which also means
+// that the underlying io.Reader must not return an incompatible
+// error.
+func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
+	if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
+		return 0, nil, err
+	}
+
+	pf = payloadFormat(p.header[0])
+	length := binary.BigEndian.Uint32(p.header[1:])
+
+	if length == 0 {
+		return pf, nil, nil
+	}
+	if length > uint32(maxMsgSize) {
+		return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
+	}
+	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
+	// of making it for each message:
+	msg = make([]byte, int(length))
+	if _, err := io.ReadFull(p.r, msg); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return pf, msg, nil
+}
+
+// encode serializes msg and prepends the message header. If msg is nil, it
+// generates a header describing a zero-length message.
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
+	var b []byte
+	var length uint
+	if msg != nil {
+		var err error
+		// TODO(zhaoq): optimize to reduce memory alloc and copying.
+		b, err = c.Marshal(msg)
+		if err != nil {
+			return nil, err
+		}
+		if cp != nil {
+			if err := cp.Do(cbuf, b); err != nil {
+				return nil, err
+			}
+			b = cbuf.Bytes()
+		}
+		length = uint(len(b))
+	}
+	if length > math.MaxUint32 {
+		return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
+	}
+
+	const (
+		payloadLen = 1
+		sizeLen    = 4
+	)
+
+	var buf = make([]byte, payloadLen+sizeLen+len(b))
+
+	// Write payload format
+	if cp == nil {
+		buf[0] = byte(compressionNone)
+	} else {
+		buf[0] = byte(compressionMade)
+	}
+	// Write length of b into buf
+	binary.BigEndian.PutUint32(buf[1:], uint32(length))
+	// Copy encoded msg to buf
+	copy(buf[5:], b)
+
+	return buf, nil
+}
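+
+// Worked example (informational comment only): for an uncompressed message
+// whose marshaled form is 5 bytes long, encode produces
+//
+//	buf[0]   = 0x00                // payloadFormat: compressionNone
+//	buf[1:5] = 0x00 0x00 0x00 0x05 // big-endian uint32 message length
+//	buf[5:]  = <5 message bytes>
+//
+// which is exactly the 5-byte header recvMsg parses on the receiving side.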
+
+func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
+	switch pf {
+	case compressionNone:
+	case compressionMade:
+		if dc == nil || recvCompress != dc.Type() {
+			return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		}
+	default:
+		return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+	}
+	return nil
+}
+
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error {
+	pf, d, err := p.recvMsg(maxMsgSize)
+	if err != nil {
+		return err
+	}
+	if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
+		return err
+	}
+	if pf == compressionMade {
+		d, err = dc.Do(bytes.NewReader(d))
+		if err != nil {
+			return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+		}
+	}
+	if len(d) > maxMsgSize {
+		// TODO: Revisit the error code. Currently keep it consistent with java
+		// implementation.
+		return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
+	}
+	if err := c.Unmarshal(d, m); err != nil {
+		return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+	}
+	return nil
+}
+
+// rpcError defines the status from an RPC.
+type rpcError struct {
+	code codes.Code
+	desc string
+}
+
+func (e *rpcError) Error() string {
+	return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+func Code(err error) codes.Code {
+	if err == nil {
+		return codes.OK
+	}
+	if e, ok := err.(*rpcError); ok {
+		return e.code
+	}
+	return codes.Unknown
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+func ErrorDesc(err error) string {
+	if err == nil {
+		return ""
+	}
+	if e, ok := err.(*rpcError); ok {
+		return e.desc
+	}
+	return err.Error()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	if c == codes.OK {
+		return nil
+	}
+	return &rpcError{
+		code: c,
+		desc: fmt.Sprintf(format, a...),
+	}
+}
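+
+// Illustrative sketch (not part of the vendored code): constructing and
+// inspecting an rpcError with the helpers above.
+//
+//	err := Errorf(codes.NotFound, "no such key %q", "k1")
+//	Code(err)      // codes.NotFound
+//	ErrorDesc(err) // `no such key "k1"`
+//	Errorf(codes.OK, "ignored") // nil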
+
+// toRPCErr converts an error into an rpcError.
+func toRPCErr(err error) error {
+	switch e := err.(type) {
+	case *rpcError:
+		return err
+	case transport.StreamError:
+		return &rpcError{
+			code: e.Code,
+			desc: e.Desc,
+		}
+	case transport.ConnectionError:
+		return &rpcError{
+			code: codes.Internal,
+			desc: e.Desc,
+		}
+	default:
+		switch err {
+		case context.DeadlineExceeded:
+			return &rpcError{
+				code: codes.DeadlineExceeded,
+				desc: err.Error(),
+			}
+		case context.Canceled:
+			return &rpcError{
+				code: codes.Canceled,
+				desc: err.Error(),
+			}
+		case ErrClientConnClosing:
+			return &rpcError{
+				code: codes.FailedPrecondition,
+				desc: err.Error(),
+			}
+		}
+
+	}
+	return Errorf(codes.Unknown, "%v", err)
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate errors returned by server applications.
+func convertCode(err error) codes.Code {
+	switch err {
+	case nil:
+		return codes.OK
+	case io.EOF:
+		return codes.OutOfRange
+	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+		return codes.FailedPrecondition
+	case os.ErrInvalid:
+		return codes.InvalidArgument
+	case context.Canceled:
+		return codes.Canceled
+	case context.DeadlineExceeded:
+		return codes.DeadlineExceeded
+	}
+	switch {
+	case os.IsExist(err):
+		return codes.AlreadyExists
+	case os.IsNotExist(err):
+		return codes.NotFound
+	case os.IsPermission(err):
+		return codes.PermissionDenied
+	}
+	return codes.Unknown
+}
+
+// SupportPackageIsVersion3 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the grpc package.
+//
+// This constant may be renamed in the future if a change in the generated code
+// requires a synchronised update of grpc-go and protoc-gen-go. This constant
+// should not be referenced from any other code.
+const SupportPackageIsVersion3 = true
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 0000000..debbd79
--- /dev/null
+++ b/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,900 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+	MethodName string
+	Handler    methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+	ServiceName string
+	// The pointer to the service interface. Used to check whether the user
+	// provided implementation satisfies the interface requirements.
+	HandlerType interface{}
+	Methods     []MethodDesc
+	Streams     []StreamDesc
+	Metadata    interface{}
+}
+
+// service holds the server implementation that serves this service and the
+// methods registered for it.
+type service struct {
+	server interface{} // the server for service methods
+	md     map[string]*MethodDesc
+	sd     map[string]*StreamDesc
+	mdata  interface{}
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+	opts options
+
+	mu    sync.Mutex // guards following
+	lis   map[net.Listener]bool
+	conns map[io.Closer]bool
+	drain bool
+	// A CondVar to let GracefulStop() block until all pending RPCs are finished
+	// and all transports go away.
+	cv     *sync.Cond
+	m      map[string]*service // service name -> service info
+	events trace.EventLog
+}
+
+type options struct {
+	creds                credentials.TransportCredentials
+	codec                Codec
+	cp                   Compressor
+	dc                   Decompressor
+	maxMsgSize           int
+	unaryInt             UnaryServerInterceptor
+	streamInt            StreamServerInterceptor
+	maxConcurrentStreams uint32
+	useHandlerImpl       bool // use http.Handler-based server
+}
+
+var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
+
+// A ServerOption sets options.
+type ServerOption func(*options)
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+func CustomCodec(codec Codec) ServerOption {
+	return func(o *options) {
+		o.codec = codec
+	}
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
+func RPCCompressor(cp Compressor) ServerOption {
+	return func(o *options) {
+		o.cp = cp
+	}
+}
+
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
+func RPCDecompressor(dc Decompressor) ServerOption {
+	return func(o *options) {
+		o.dc = dc
+	}
+}
+
+// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound messages.
+// If this is not set, gRPC uses the default 4MB.
+func MaxMsgSize(m int) ServerOption {
+	return func(o *options) {
+		o.maxMsgSize = m
+	}
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+	return func(o *options) {
+		o.maxConcurrentStreams = n
+	}
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.TransportCredentials) ServerOption {
+	return func(o *options) {
+		o.creds = c
+	}
+}
+
+// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
+// server. Only one unary interceptor can be installed. The construction of multiple
+// interceptors (e.g., chaining) can be implemented at the caller.
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
+	return func(o *options) {
+		if o.unaryInt != nil {
+			panic("The unary server interceptor has been set.")
+		}
+		o.unaryInt = i
+	}
+}
+
+// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
+// server. Only one stream interceptor can be installed.
+func StreamInterceptor(i StreamServerInterceptor) ServerOption {
+	return func(o *options) {
+		if o.streamInt != nil {
+			panic("The stream server interceptor has been set.")
+		}
+		o.streamInt = i
+	}
+}
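+
+// Illustrative sketch (not part of the vendored code): installing a simple
+// logging unary interceptor. It assumes the package's UnaryServerInfo and
+// UnaryHandler types; the log line is a stand-in for whatever the
+// application uses.
+//
+//	logging := func(ctx context.Context, req interface{},
+//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+//		resp, err := handler(ctx, req)
+//		grpclog.Printf("method=%s err=%v", info.FullMethod, err)
+//		return resp, err
+//	}
+//	s := grpc.NewServer(grpc.UnaryInterceptor(logging))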
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+	var opts options
+	opts.maxMsgSize = defaultMaxMsgSize
+	for _, o := range opt {
+		o(&opts)
+	}
+	if opts.codec == nil {
+		// Set the default codec.
+		opts.codec = protoCodec{}
+	}
+	s := &Server{
+		lis:   make(map[net.Listener]bool),
+		opts:  opts,
+		conns: make(map[io.Closer]bool),
+		m:     make(map[string]*service),
+	}
+	s.cv = sync.NewCond(&s.mu)
+	if EnableTracing {
+		_, file, line, _ := runtime.Caller(1)
+		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+	}
+	return s
+}
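+
+// Illustrative sketch (not part of the vendored code): a typical server
+// setup. The listen address, pb.RegisterFooServer and fooServer are
+// hypothetical; RegisterService is normally reached through such generated
+// registration code.
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		grpclog.Fatalf("listen: %v", err)
+//	}
+//	s := grpc.NewServer(grpc.MaxMsgSize(8 << 20))
+//	pb.RegisterFooServer(s, &fooServer{})
+//	if err := s.Serve(lis); err != nil {
+//		grpclog.Fatalf("serve: %v", err)
+//	}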
+
+// printf records an event in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) printf(format string, a ...interface{}) {
+	if s.events != nil {
+		s.events.Printf(format, a...)
+	}
+}
+
+// errorf records an error in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) errorf(format string, a ...interface{}) {
+	if s.events != nil {
+		s.events.Errorf(format, a...)
+	}
+}
+
+// RegisterService registers a service and its implementation with the gRPC
+// server. It is called from the IDL generated code. This must be called
+// before invoking Serve.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+	ht := reflect.TypeOf(sd.HandlerType).Elem()
+	st := reflect.TypeOf(ss)
+	if !st.Implements(ht) {
+		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+	}
+	s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.printf("RegisterService(%q)", sd.ServiceName)
+	if _, ok := s.m[sd.ServiceName]; ok {
+		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	}
+	srv := &service{
+		server: ss,
+		md:     make(map[string]*MethodDesc),
+		sd:     make(map[string]*StreamDesc),
+		mdata:  sd.Metadata,
+	}
+	for i := range sd.Methods {
+		d := &sd.Methods[i]
+		srv.md[d.MethodName] = d
+	}
+	for i := range sd.Streams {
+		d := &sd.Streams[i]
+		srv.sd[d.StreamName] = d
+	}
+	s.m[sd.ServiceName] = srv
+}
+
+// MethodInfo contains information about an RPC, including its method name and type.
+type MethodInfo struct {
+	// Name is the method name only, without the service name or package name.
+	Name string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info, and metadata for a service.
+type ServiceInfo struct {
+	Methods []MethodInfo
+	// Metadata is the metadata specified in ServiceDesc when registering service.
+	Metadata interface{}
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+	ret := make(map[string]ServiceInfo)
+	for n, srv := range s.m {
+		methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
+		for m := range srv.md {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: false,
+				IsServerStream: false,
+			})
+		}
+		for m, d := range srv.sd {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: d.ClientStreams,
+				IsServerStream: d.ServerStreams,
+			})
+		}
+
+		ret[n] = ServiceInfo{
+			Methods:  methods,
+			Metadata: srv.mdata,
+		}
+	}
+	return ret
+}
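+
+// Illustrative sketch (not part of the vendored code): listing every
+// registered method after registration via GetServiceInfo.
+//
+//	for svc, info := range s.GetServiceInfo() {
+//		for _, m := range info.Methods {
+//			grpclog.Printf("%s/%s client-stream=%v server-stream=%v",
+//				svc, m.Name, m.IsClientStream, m.IsServerStream)
+//		}
+//	}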
+
+var (
+	// ErrServerStopped indicates that the operation is now illegal because of
+	// the server being stopped.
+	ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
+
+func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	if s.opts.creds == nil {
+		return rawConn, nil, nil
+	}
+	return s.opts.creds.ServerHandshake(rawConn)
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails. lis will be closed when
+// this method returns.
+func (s *Server) Serve(lis net.Listener) error {
+	s.mu.Lock()
+	s.printf("serving")
+	if s.lis == nil {
+		s.mu.Unlock()
+		lis.Close()
+		return ErrServerStopped
+	}
+	s.lis[lis] = true
+	s.mu.Unlock()
+	defer func() {
+		s.mu.Lock()
+		if s.lis != nil && s.lis[lis] {
+			lis.Close()
+			delete(s.lis, lis)
+		}
+		s.mu.Unlock()
+	}()
+	for {
+		rawConn, err := lis.Accept()
+		if err != nil {
+			s.mu.Lock()
+			s.printf("done serving; Accept = %v", err)
+			s.mu.Unlock()
+			return err
+		}
+		// Start a new goroutine to deal with rawConn
+		// so we don't stall this Accept loop goroutine.
+		go s.handleRawConn(rawConn)
+	}
+}
+
+// handleRawConn is run in its own goroutine and handles a just-accepted
+// connection that has not had any I/O performed on it yet.
+func (s *Server) handleRawConn(rawConn net.Conn) {
+	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
+	if err != nil {
+		s.mu.Lock()
+		s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		s.mu.Unlock()
+		grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
+		// If ServerHandshake returns ErrConnDispatched, keep rawConn open.
+		if err != credentials.ErrConnDispatched {
+			rawConn.Close()
+		}
+		return
+	}
+
+	s.mu.Lock()
+	if s.conns == nil {
+		s.mu.Unlock()
+		conn.Close()
+		return
+	}
+	s.mu.Unlock()
+
+	if s.opts.useHandlerImpl {
+		s.serveUsingHandler(conn)
+	} else {
+		s.serveNewHTTP2Transport(conn, authInfo)
+	}
+}
+
+// serveNewHTTP2Transport sets up a new http/2 transport (using the
+// gRPC http2 server transport in transport/http2_server.go) and
+// serves streams on it.
+// This is run in its own goroutine (it does network I/O in
+// transport.NewServerTransport).
+func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
+	st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
+	if err != nil {
+		s.mu.Lock()
+		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+		s.mu.Unlock()
+		c.Close()
+		grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
+		return
+	}
+	if !s.addConn(st) {
+		st.Close()
+		return
+	}
+	s.serveStreams(st)
+}
+
+func (s *Server) serveStreams(st transport.ServerTransport) {
+	defer s.removeConn(st)
+	defer st.Close()
+	var wg sync.WaitGroup
+	st.HandleStreams(func(stream *transport.Stream) {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}()
+	})
+	wg.Wait()
+}
+
+var _ http.Handler = (*Server)(nil)
+
+// serveUsingHandler is called from handleRawConn when s is configured
+// to handle requests via the http.Handler interface. It sets up a
+// net/http.Server to handle the just-accepted conn. The http.Server
+// is configured to route all incoming requests (all HTTP/2 streams)
+// to ServeHTTP, which creates a new ServerTransport for each stream.
+// serveUsingHandler blocks until conn closes.
+//
+// This codepath is only used when Server.TestingUseHandlerImpl has
+// been configured. This lets the end2end tests exercise the ServeHTTP
+// method as one of the environment types.
+//
+// conn is the *tls.Conn that's already been authenticated.
+func (s *Server) serveUsingHandler(conn net.Conn) {
+	if !s.addConn(conn) {
+		conn.Close()
+		return
+	}
+	defer s.removeConn(conn)
+	h2s := &http2.Server{
+		MaxConcurrentStreams: s.opts.maxConcurrentStreams,
+	}
+	h2s.ServeConn(conn, &http2.ServeConnOpts{
+		Handler: s,
+	})
+}
+
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	st, err := transport.NewServerHandlerTransport(w, r)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	if !s.addConn(st) {
+		st.Close()
+		return
+	}
+	defer s.removeConn(st)
+	s.serveStreams(st)
+}
+
+// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
+// If tracing is not enabled, it returns nil.
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
+	if !EnableTracing {
+		return nil
+	}
+	trInfo = &traceInfo{
+		tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
+	}
+	trInfo.firstLine.client = false
+	trInfo.firstLine.remoteAddr = st.RemoteAddr()
+	stream.TraceContext(trInfo.tr)
+	if dl, ok := stream.Context().Deadline(); ok {
+		trInfo.firstLine.deadline = dl.Sub(time.Now())
+	}
+	return trInfo
+}
+
+func (s *Server) addConn(c io.Closer) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conns == nil || s.drain {
+		return false
+	}
+	s.conns[c] = true
+	return true
+}
+
+func (s *Server) removeConn(c io.Closer) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conns != nil {
+		delete(s.conns, c)
+		s.cv.Signal()
+	}
+}
+
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
+	var cbuf *bytes.Buffer
+	if cp != nil {
+		cbuf = new(bytes.Buffer)
+	}
+	p, err := encode(s.opts.codec, msg, cp, cbuf)
+	if err != nil {
+		// This typically indicates a fatal issue (e.g., memory
+		// corruption or hardware faults) the application program
+		// cannot handle.
+		//
+		// TODO(zhaoq): There exist other options also such as only closing the
+		// faulty stream locally and remotely (Other streams can keep going). Find
+		// the optimal option.
+		grpclog.Fatalf("grpc: Server failed to encode response %v", err)
+	}
+	return t.Write(stream, p, opts)
+}
+
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+	if trInfo != nil {
+		defer trInfo.tr.Finish()
+		trInfo.firstLine.client = false
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		defer func() {
+			if err != nil && err != io.EOF {
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.SetError()
+			}
+		}()
+	}
+	if s.opts.cp != nil {
+		// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+		stream.SetSendCompress(s.opts.cp.Type())
+	}
+	p := &parser{r: stream}
+	for {
+		pf, req, err := p.recvMsg(s.opts.maxMsgSize)
+		if err == io.EOF {
+			// The entire stream is done (for unary RPC only).
+			return err
+		}
+		if err == io.ErrUnexpectedEOF {
+			err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		}
+		if err != nil {
+			switch err := err.(type) {
+			case *rpcError:
+				if err := t.WriteStatus(stream, err.code, err.desc); err != nil {
+					grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+				}
+			case transport.ConnectionError:
+				// Nothing to do here.
+			case transport.StreamError:
+				if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
+					grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+				}
+			default:
+				panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
+			}
+			return err
+		}
+
+		if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
+			switch err := err.(type) {
+			case *rpcError:
+				if err := t.WriteStatus(stream, err.code, err.desc); err != nil {
+					grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+				}
+			default:
+				if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
+					grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+				}
+
+			}
+			return err
+		}
+		statusCode := codes.OK
+		statusDesc := ""
+		df := func(v interface{}) error {
+			if pf == compressionMade {
+				var err error
+				req, err = s.opts.dc.Do(bytes.NewReader(req))
+				if err != nil {
+					if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
+						grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+					}
+					return err
+				}
+			}
+			if len(req) > s.opts.maxMsgSize {
+				// TODO: Revisit the error code. Currently keep it consistent with
+				// java implementation.
+				statusCode = codes.Internal
+				statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
+			}
+			if err := s.opts.codec.Unmarshal(req, v); err != nil {
+				return err
+			}
+			if trInfo != nil {
+				trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+			}
+			return nil
+		}
+		reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
+		if appErr != nil {
+			if err, ok := appErr.(*rpcError); ok {
+				statusCode = err.code
+				statusDesc = err.desc
+			} else {
+				statusCode = convertCode(appErr)
+				statusDesc = appErr.Error()
+			}
+			if trInfo != nil && statusCode != codes.OK {
+				trInfo.tr.LazyLog(stringer(statusDesc), true)
+				trInfo.tr.SetError()
+			}
+			if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
+				grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
+				return err
+			}
+			return nil
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(stringer("OK"), false)
+		}
+		opts := &transport.Options{
+			Last:  true,
+			Delay: false,
+		}
+		if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
+			switch err := err.(type) {
+			case transport.ConnectionError:
+				// Nothing to do here.
+			case transport.StreamError:
+				statusCode = err.Code
+				statusDesc = err.Desc
+			default:
+				statusCode = codes.Unknown
+				statusDesc = err.Error()
+			}
+			return err
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+		}
+		return t.WriteStatus(stream, statusCode, statusDesc)
+	}
+}
+
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+	if s.opts.cp != nil {
+		stream.SetSendCompress(s.opts.cp.Type())
+	}
+	ss := &serverStream{
+		t:          t,
+		s:          stream,
+		p:          &parser{r: stream},
+		codec:      s.opts.codec,
+		cp:         s.opts.cp,
+		dc:         s.opts.dc,
+		maxMsgSize: s.opts.maxMsgSize,
+		trInfo:     trInfo,
+	}
+	if ss.cp != nil {
+		ss.cbuf = new(bytes.Buffer)
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		defer func() {
+			ss.mu.Lock()
+			if err != nil && err != io.EOF {
+				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				ss.trInfo.tr.SetError()
+			}
+			ss.trInfo.tr.Finish()
+			ss.trInfo.tr = nil
+			ss.mu.Unlock()
+		}()
+	}
+	var appErr error
+	if s.opts.streamInt == nil {
+		appErr = sd.Handler(srv.server, ss)
+	} else {
+		info := &StreamServerInfo{
+			FullMethod:     stream.Method(),
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
+		}
+		appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
+	}
+	if appErr != nil {
+		if err, ok := appErr.(*rpcError); ok {
+			ss.statusCode = err.code
+			ss.statusDesc = err.desc
+		} else if err, ok := appErr.(transport.StreamError); ok {
+			ss.statusCode = err.Code
+			ss.statusDesc = err.Desc
+		} else {
+			ss.statusCode = convertCode(appErr)
+			ss.statusDesc = appErr.Error()
+		}
+	}
+	if trInfo != nil {
+		ss.mu.Lock()
+		if ss.statusCode != codes.OK {
+			ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
+			ss.trInfo.tr.SetError()
+		} else {
+			ss.trInfo.tr.LazyLog(stringer("OK"), false)
+		}
+		ss.mu.Unlock()
+	}
+	return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
+
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+	sm := stream.Method()
+	if sm != "" && sm[0] == '/' {
+		sm = sm[1:]
+	}
+	pos := strings.LastIndex(sm, "/")
+	if pos == -1 {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+			trInfo.tr.SetError()
+		}
+		if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
+			if trInfo != nil {
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.SetError()
+			}
+			grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if trInfo != nil {
+			trInfo.tr.Finish()
+		}
+		return
+	}
+	service := sm[:pos]
+	method := sm[pos+1:]
+	srv, ok := s.m[service]
+	if !ok {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
+			trInfo.tr.SetError()
+		}
+		if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
+			if trInfo != nil {
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.SetError()
+			}
+			grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if trInfo != nil {
+			trInfo.tr.Finish()
+		}
+		return
+	}
+	// Unary RPC or Streaming RPC?
+	if md, ok := srv.md[method]; ok {
+		s.processUnaryRPC(t, stream, srv, md, trInfo)
+		return
+	}
+	if sd, ok := srv.sd[method]; ok {
+		s.processStreamingRPC(t, stream, srv, sd, trInfo)
+		return
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
+		trInfo.tr.SetError()
+	}
+	if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.SetError()
+		}
+		grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+	}
+	if trInfo != nil {
+		trInfo.tr.Finish()
+	}
+}
+
+// Stop stops the gRPC server. It immediately closes all open
+// connections and listeners.
+// It cancels all active RPCs on the server side and the corresponding
+// pending RPCs on the client side will get notified by connection
+// errors.
+func (s *Server) Stop() {
+	s.mu.Lock()
+	listeners := s.lis
+	s.lis = nil
+	st := s.conns
+	s.conns = nil
+	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
+	s.cv.Signal()
+	s.mu.Unlock()
+
+	for lis := range listeners {
+		lis.Close()
+	}
+	for c := range st {
+		c.Close()
+	}
+
+	s.mu.Lock()
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
+
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all pending RPCs are
+// finished.
+func (s *Server) GracefulStop() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.drain || s.conns == nil {
+		return
+	}
+	s.drain = true
+	for lis := range s.lis {
+		lis.Close()
+	}
+	s.lis = nil
+	for c := range s.conns {
+		c.(transport.ServerTransport).Drain()
+	}
+	for len(s.conns) != 0 {
+		s.cv.Wait()
+	}
+	s.conns = nil
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+}
+
+func init() {
+	internal.TestingCloseConns = func(arg interface{}) {
+		arg.(*Server).testingCloseConns()
+	}
+	internal.TestingUseHandlerImpl = func(arg interface{}) {
+		arg.(*Server).opts.useHandlerImpl = true
+	}
+}
+
+// testingCloseConns closes all existing transports but keeps s.lis
+// accepting new connections.
+func (s *Server) testingCloseConns() {
+	s.mu.Lock()
+	for c := range s.conns {
+		c.Close()
+		delete(s.conns, c)
+	}
+	s.mu.Unlock()
+}
+
+// SendHeader sends header metadata. It may be called at most once from a unary
+// RPC handler. The ctx is the RPC handler's Context or one derived from it.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream, ok := transport.StreamFromContext(ctx)
+	if !ok {
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	t := stream.ServerTransport()
+	if t == nil {
+		grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+	}
+	if err := t.WriteHeader(stream, md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// When called more than once, all the provided metadata will be merged.
+// The ctx is the RPC handler's Context or one derived from it.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream, ok := transport.StreamFromContext(ctx)
+	if !ok {
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetTrailer(md)
+}
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
new file mode 100644
index 0000000..68d777b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -0,0 +1,512 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"math"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+// StreamHandler defines the handler called by gRPC server to complete the
+// execution of a streaming RPC.
+type StreamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.
+type StreamDesc struct {
+	StreamName string
+	Handler    StreamHandler
+
+	// At least one of these is true.
+	ServerStreams bool
+	ClientStreams bool
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+type Stream interface {
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg blocks until it sends m, the stream is done, or the stream
+	// breaks.
+	// On error, it aborts the stream and returns an RPC status on the client
+	// side. On the server side, it simply returns the error to the caller.
+	// SendMsg is called by generated code; users can also call SendMsg
+	// directly when their use case requires it.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message or the stream is
+	// done. On the client side, it returns io.EOF when the stream is done. On
+	// any other error, it aborts the stream and returns an RPC status. On
+	// the server side, it simply returns the error to the caller.
+	RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the interface a client stream has to satisfy.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered.
+	CloseSend() error
+	Stream
+}
+
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	if cc.dopts.streamInt != nil {
+		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
+	}
+	return newClientStream(ctx, desc, cc, method, opts...)
+}
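+
+// Illustrative sketch (not part of the vendored code): how generated code
+// typically drives a server-streaming call through NewClientStream. The
+// stream descriptor, connection, request and reply type (desc, conn, req,
+// pb.FooReply) are hypothetical.
+//
+//	cs, err := grpc.NewClientStream(ctx, desc, conn, "/pkg.Service/ListFoo")
+//	if err != nil { /* handle error */ }
+//	if err := cs.SendMsg(req); err != nil { /* handle error */ }
+//	if err := cs.CloseSend(); err != nil { /* handle error */ }
+//	for {
+//		var reply pb.FooReply
+//		if err := cs.RecvMsg(&reply); err == io.EOF {
+//			break // server finished the stream
+//		} else if err != nil { /* handle error */ }
+//	}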
+
+func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	var (
+		t   transport.ClientTransport
+		s   *transport.Stream
+		put func()
+	)
+	c := defaultCallInfo
+	for _, o := range opts {
+		if err := o.before(&c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	callHdr := &transport.CallHdr{
+		Host:   cc.authority,
+		Method: method,
+		Flush:  desc.ServerStreams && desc.ClientStreams,
+	}
+	if cc.dopts.cp != nil {
+		callHdr.SendCompress = cc.dopts.cp.Type()
+	}
+	var trInfo traceInfo
+	if EnableTracing {
+		trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+		trInfo.firstLine.client = true
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = deadline.Sub(time.Now())
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = trace.NewContext(ctx, trInfo.tr)
+		defer func() {
+			if err != nil {
+				// tr.Finish() must be called if an error is returned,
+				// because tr will not be returned to the caller.
+				trInfo.tr.LazyPrintf("RPC: [%v]", err)
+				trInfo.tr.SetError()
+				trInfo.tr.Finish()
+			}
+		}()
+	}
+	gopts := BalancerGetOptions{
+		BlockingWait: !c.failFast,
+	}
+	for {
+		t, put, err = cc.getTransport(ctx, gopts)
+		if err != nil {
+			// TODO(zhaoq): Probably revisit the error handling.
+			if _, ok := err.(*rpcError); ok {
+				return nil, err
+			}
+			if err == errConnClosing || err == errConnUnavailable {
+				if c.failFast {
+					return nil, Errorf(codes.Unavailable, "%v", err)
+				}
+				continue
+			}
+			// All the other errors are treated as Internal errors.
+			return nil, Errorf(codes.Internal, "%v", err)
+		}
+
+		s, err = t.NewStream(ctx, callHdr)
+		if err != nil {
+			if put != nil {
+				put()
+				put = nil
+			}
+			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
+				if c.failFast {
+					return nil, toRPCErr(err)
+				}
+				continue
+			}
+			return nil, toRPCErr(err)
+		}
+		break
+	}
+	cs := &clientStream{
+		opts:  opts,
+		c:     c,
+		desc:  desc,
+		codec: cc.dopts.codec,
+		cp:    cc.dopts.cp,
+		dc:    cc.dopts.dc,
+
+		put: put,
+		t:   t,
+		s:   s,
+		p:   &parser{r: s},
+
+		tracing: EnableTracing,
+		trInfo:  trInfo,
+	}
+	if cc.dopts.cp != nil {
+		cs.cbuf = new(bytes.Buffer)
+	}
+	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
+	// when there are no pending I/O operations on this stream.
+	go func() {
+		select {
+		case <-t.Error():
+			// A transport error occurred; simply exit.
+		case <-s.Done():
+			// TODO: The trace of the RPC is terminated here when there is no pending
+			// I/O, which is probably not the optimal solution.
+			if s.StatusCode() == codes.OK {
+				cs.finish(nil)
+			} else {
+				cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
+			}
+			cs.closeTransportStream(nil)
+		case <-s.GoAway():
+			cs.finish(errConnDrain)
+			cs.closeTransportStream(errConnDrain)
+		case <-s.Context().Done():
+			err := s.Context().Err()
+			cs.finish(err)
+			cs.closeTransportStream(transport.ContextErr(err))
+		}
+	}()
+	return cs, nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+	opts  []CallOption
+	c     callInfo
+	t     transport.ClientTransport
+	s     *transport.Stream
+	p     *parser
+	desc  *StreamDesc
+	codec Codec
+	cp    Compressor
+	cbuf  *bytes.Buffer
+	dc    Decompressor
+
+	tracing bool // set to EnableTracing when the clientStream is created.
+
+	mu     sync.Mutex
+	put    func()
+	closed bool
+	// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
+	// and is set to nil when the clientStream's finish method is called.
+	trInfo traceInfo
+}
+
+func (cs *clientStream) Context() context.Context {
+	return cs.s.Context()
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	m, err := cs.s.Header()
+	if err != nil {
+		if _, ok := err.(transport.ConnectionError); !ok {
+			cs.closeTransportStream(err)
+		}
+	}
+	return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	return cs.s.Trailer()
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+	if cs.tracing {
+		cs.mu.Lock()
+		if cs.trInfo.tr != nil {
+			cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		cs.mu.Unlock()
+	}
+	defer func() {
+		if err != nil {
+			cs.finish(err)
+		}
+		if err == nil {
+			return
+		}
+		if err == io.EOF {
+			// Specialize the process for server streaming. SendMsg is only called
+			// once when creating the stream object. io.EOF needs to be skipped when
+			// the rpc finishes early (before the stream object is created).
+			// TODO: It is probably better to move this into the generated code.
+			if !cs.desc.ClientStreams && cs.desc.ServerStreams {
+				err = nil
+			}
+			return
+		}
+		if _, ok := err.(transport.ConnectionError); !ok {
+			cs.closeTransportStream(err)
+		}
+		err = toRPCErr(err)
+	}()
+	out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
+	defer func() {
+		if cs.cbuf != nil {
+			cs.cbuf.Reset()
+		}
+	}()
+	if err != nil {
+		return Errorf(codes.Internal, "grpc: %v", err)
+	}
+	return cs.t.Write(cs.s, out, &transport.Options{Last: false})
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+	err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
+	defer func() {
+		// err != nil indicates the termination of the stream.
+		if err != nil {
+			cs.finish(err)
+		}
+	}()
+	if err == nil {
+		if cs.tracing {
+			cs.mu.Lock()
+			if cs.trInfo.tr != nil {
+				cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+			}
+			cs.mu.Unlock()
+		}
+		if !cs.desc.ClientStreams || cs.desc.ServerStreams {
+			return
+		}
+		// Special handling for client streaming rpc.
+		err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
+		cs.closeTransportStream(err)
+		if err == nil {
+			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+		}
+		if err == io.EOF {
+			if cs.s.StatusCode() == codes.OK {
+				cs.finish(err)
+				return nil
+			}
+			return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+		}
+		return toRPCErr(err)
+	}
+	if _, ok := err.(transport.ConnectionError); !ok {
+		cs.closeTransportStream(err)
+	}
+	if err == io.EOF {
+		if cs.s.StatusCode() == codes.OK {
+			// Returns io.EOF to indicate the end of the stream.
+			return
+		}
+		return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+	}
+	return toRPCErr(err)
+}
+
+func (cs *clientStream) CloseSend() (err error) {
+	err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+	defer func() {
+		if err != nil {
+			cs.finish(err)
+		}
+	}()
+	if err == nil || err == io.EOF {
+		return nil
+	}
+	if _, ok := err.(transport.ConnectionError); !ok {
+		cs.closeTransportStream(err)
+	}
+	err = toRPCErr(err)
+	return
+}
+
+func (cs *clientStream) closeTransportStream(err error) {
+	cs.mu.Lock()
+	if cs.closed {
+		cs.mu.Unlock()
+		return
+	}
+	cs.closed = true
+	cs.mu.Unlock()
+	cs.t.CloseStream(cs.s, err)
+}
+
+func (cs *clientStream) finish(err error) {
+	cs.mu.Lock()
+	defer cs.mu.Unlock()
+	for _, o := range cs.opts {
+		o.after(&cs.c)
+	}
+	if cs.put != nil {
+		cs.put()
+		cs.put = nil
+	}
+	if !cs.tracing {
+		return
+	}
+	if cs.trInfo.tr != nil {
+		if err == nil || err == io.EOF {
+			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			cs.trInfo.tr.SetError()
+		}
+		cs.trInfo.tr.Finish()
+		cs.trInfo.tr = nil
+	}
+}
+
+// ServerStream defines the interface a server stream has to satisfy.
+type ServerStream interface {
+	// SendHeader sends the header metadata. It should not be called
+	// after SendProto. It fails if called multiple times or if
+	// called after SendProto.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	Stream
+}
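+
+// Illustrative sketch (not part of the vendored code): a server-streaming
+// handler that uses the ServerStream interface directly. The server type,
+// its items field and the generated stream type (fooServer, s.items,
+// pb.Foo_ListFooServer) are hypothetical.
+//
+//	func (s *fooServer) ListFoo(req *pb.ListFooRequest, stream pb.Foo_ListFooServer) error {
+//		for _, item := range s.items {
+//			if err := stream.SendMsg(item); err != nil {
+//				return err
+//			}
+//		}
+//		stream.SetTrailer(metadata.Pairs("count", fmt.Sprint(len(s.items))))
+//		return nil
+//	}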
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	t          transport.ServerTransport
+	s          *transport.Stream
+	p          *parser
+	codec      Codec
+	cp         Compressor
+	dc         Decompressor
+	cbuf       *bytes.Buffer
+	maxMsgSize int
+	statusCode codes.Code
+	statusDesc string
+	trInfo     *traceInfo
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+	return ss.s.Context()
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+	return ss.t.WriteHeader(ss.s, md)
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+	if md.Len() == 0 {
+		return
+	}
+	ss.s.SetTrailer(md)
+	return
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+				} else {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+	}()
+	out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
+	defer func() {
+		if ss.cbuf != nil {
+			ss.cbuf.Reset()
+		}
+	}()
+	if err != nil {
+		err = Errorf(codes.Internal, "grpc: %v", err)
+		return err
+	}
+	if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+				} else if err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+	}()
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize); err != nil {
+		if err == io.EOF {
+			return err
+		}
+		if err == io.ErrUnexpectedEOF {
+			err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		}
+		return toRPCErr(err)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 0000000..f6747e1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"time"
+
+	"golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing = true
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "Service".
+func methodFamily(m string) string {
+	m = strings.TrimPrefix(m, "/") // remove leading slash
+	if i := strings.Index(m, "/"); i >= 0 {
+		m = m[:i] // remove everything from second slash
+	}
+	if i := strings.LastIndex(m, "."); i >= 0 {
+		m = m[i+1:] // cut down to last dotted component
+	}
+	return m
+}
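+
+// Worked example (informational comment only): methodFamily("/helloworld.Greeter/SayHello")
+// trims the leading slash, drops "/SayHello" after the second slash, then keeps
+// the last dotted component, yielding the trace family "Greeter".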
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+	tr        trace.Trace
+	firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+type firstLine struct {
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) String() string {
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool        // whether this is an outgoing payload
+	msg  interface{} // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return fmt.Sprintf("sent: %v", p.msg)
+	}
+	return fmt.Sprintf("recv: %v", p.msg)
+}
+
+type fmtStringer struct {
+	format string
+	a      []interface{}
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go
new file mode 100644
index 0000000..4ef0830
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/control.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"sync"
+
+	"golang.org/x/net/http2"
+)
+
+const (
+	// The default flow control window size in the HTTP/2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize     = defaultWindowSize      // for an RPC
+	initialConnWindowSize = defaultWindowSize * 16 // for a connection
+)
+
+// The following define various control items that can flow through
+// the transport's control buffer. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+type windowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*windowUpdate) item() {}
+
+type settings struct {
+	ack bool
+	ss  []http2.Setting
+}
+
+func (*settings) item() {}
+
+type resetStream struct {
+	streamID uint32
+	code     http2.ErrCode
+}
+
+func (*resetStream) item() {}
+
+type goAway struct {
+}
+
+func (*goAway) item() {}
+
+type flushIO struct {
+}
+
+func (*flushIO) item() {}
+
+type ping struct {
+	ack  bool
+	data [8]byte
+}
+
+func (*ping) item() {}
+
+// quotaPool is a pool that accumulates quota and makes it available on the
+// channel returned by acquire().
+type quotaPool struct {
+	c chan int
+
+	mu    sync.Mutex
+	quota int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+	qb := &quotaPool{
+		c: make(chan int, 1),
+	}
+	if q > 0 {
+		qb.c <- q
+	} else {
+		qb.quota = q
+	}
+	return qb
+}
+
+// add adds n to the available quota and tries to send it on acquire.
+func (qb *quotaPool) add(n int) {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	qb.quota += n
+	if qb.quota <= 0 {
+		return
+	}
+	select {
+	case qb.c <- qb.quota:
+		qb.quota = 0
+	default:
+	}
+}
+
+// cancel cancels the pending quota sent on acquire, if any.
+func (qb *quotaPool) cancel() {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	select {
+	case n := <-qb.c:
+		qb.quota += n
+	default:
+	}
+}
+
+// reset cancels the pending quota sent on acquire, adds v to it, and sends
+// it back on acquire.
+func (qb *quotaPool) reset(v int) {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	select {
+	case n := <-qb.c:
+		qb.quota += n
+	default:
+	}
+	qb.quota += v
+	if qb.quota <= 0 {
+		return
+	}
+	select {
+	case qb.c <- qb.quota:
+		qb.quota = 0
+	default:
+	}
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+	return qb.c
+}
+
+// inFlow deals with inbound flow control
+type inFlow struct {
+	// The inbound flow control limit for pending data.
+	limit uint32
+
+	mu sync.Mutex
+	// pendingData is the overall data which has been received but not yet
+	// consumed by the application.
+	pendingData uint32
+	// pendingUpdate is the amount of data the application has consumed but for
+	// which grpc has not yet sent a window update; used to reduce update frequency.
+	pendingUpdate uint32
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit {
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
+	}
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	if f.pendingData == 0 {
+		return 0
+	}
+	f.pendingData -= n
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		return wu
+	}
+	return 0
+}
+
+func (f *inFlow) resetPendingData() uint32 {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	n := f.pendingData
+	f.pendingData = 0
+	return n
+}
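
The quotaPool type above implements flow-control credit with a capacity-1 channel plus a mutex-guarded counter: acquire() hands out whatever quota has accumulated, and add() publishes newly credited quota to a waiting acquirer. A minimal, self-contained sketch of the same pattern follows; it is not part of the vendored code and all names are illustrative.

	package main

	import (
		"fmt"
		"sync"
	)

	// tokenPool mirrors the quotaPool idea: a capacity-1 channel carries the
	// currently available quota, and a mutex-guarded counter accumulates
	// quota that has not been published yet.
	type tokenPool struct {
		c chan int

		mu    sync.Mutex
		quota int
	}

	func newTokenPool(q int) *tokenPool {
		p := &tokenPool{c: make(chan int, 1)}
		if q > 0 {
			p.c <- q
		} else {
			p.quota = q
		}
		return p
	}

	// add credits n units and, if the accumulated quota is positive,
	// publishes it on the channel for a waiting acquirer.
	func (p *tokenPool) add(n int) {
		p.mu.Lock()
		defer p.mu.Unlock()
		p.quota += n
		if p.quota <= 0 {
			return
		}
		select {
		case p.c <- p.quota:
			p.quota = 0
		default:
		}
	}

	func main() {
		p := newTokenPool(5)
		got := <-p.c // acquire everything currently available: 5
		fmt.Println("acquired", got)
		p.add(2) // return unused quota plus fresh credit
		fmt.Println("acquired", <-p.c)
	}

Because the channel has capacity 1, at most one pending quota value waits for an acquirer, so add() never blocks.
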
diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go
new file mode 100644
index 0000000..ee1c46b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/go16.go
@@ -0,0 +1,46 @@
+// +build go1.6,!go1.7
+
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
+}
diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go
new file mode 100644
index 0000000..356f13f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/go17.go
@@ -0,0 +1,46 @@
+// +build go1.7
+
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{}).DialContext(ctx, network, address)
+}
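
go16.go and go17.go above provide the same dialContext helper under mutually exclusive build tags: on Go 1.6 cancellation is wired through Dialer.Cancel, while Go 1.7 added net.Dialer.DialContext. A small, hypothetical sketch of calling such a context-aware dial with a deadline; the address is a placeholder, not taken from the vendored code.

	package main

	import (
		"context"
		"fmt"
		"net"
		"time"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()

		// On Go 1.7+ this is what go17.go does; the go16.go variant instead
		// sets Dialer.Cancel to ctx.Done() because DialContext does not exist yet.
		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", "example.com:80")
		if err != nil {
			fmt.Println("dial failed:", err)
			return
		}
		defer conn.Close()
		fmt.Println("connected to", conn.RemoteAddr())
	}
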
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go
new file mode 100644
index 0000000..114e349
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// This file is the implementation of a gRPC server using HTTP/2 which
+// uses the standard Go http2 Server implementation (via the
+// http.Handler interface), rather than speaking low-level HTTP/2
+// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
+
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+// NewServerHandlerTransport returns a ServerTransport handling gRPC
+// from inside an http.Handler. It requires that the http Server
+// supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
+	if r.ProtoMajor != 2 {
+		return nil, errors.New("gRPC requires HTTP/2")
+	}
+	if r.Method != "POST" {
+		return nil, errors.New("invalid gRPC request method")
+	}
+	if !validContentType(r.Header.Get("Content-Type")) {
+		return nil, errors.New("invalid gRPC request content-type")
+	}
+	if _, ok := w.(http.Flusher); !ok {
+		return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
+	}
+	if _, ok := w.(http.CloseNotifier); !ok {
+		return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
+	}
+
+	st := &serverHandlerTransport{
+		rw:       w,
+		req:      r,
+		closedCh: make(chan struct{}),
+		writes:   make(chan func()),
+	}
+
+	if v := r.Header.Get("grpc-timeout"); v != "" {
+		to, err := decodeTimeout(v)
+		if err != nil {
+			return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
+		}
+		st.timeoutSet = true
+		st.timeout = to
+	}
+
+	var metakv []string
+	if r.Host != "" {
+		metakv = append(metakv, ":authority", r.Host)
+	}
+	for k, vv := range r.Header {
+		k = strings.ToLower(k)
+		if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			if k == "user-agent" {
+				// user-agent is special. Copying logic of http_util.go.
+				if i := strings.LastIndex(v, " "); i == -1 {
+					// There is no application user agent string being set
+					continue
+				} else {
+					v = v[:i]
+				}
+			}
+			metakv = append(metakv, k, v)
+		}
+	}
+	st.headerMD = metadata.Pairs(metakv...)
+
+	return st, nil
+}
+
+// serverHandlerTransport is an implementation of ServerTransport
+// which replies to exactly one gRPC request (exactly one HTTP request),
+// using the net/http.Handler interface. This http.Handler is guaranteed
+// at this point to be speaking over HTTP/2, so it's able to speak valid
+// gRPC.
+type serverHandlerTransport struct {
+	rw               http.ResponseWriter
+	req              *http.Request
+	timeoutSet       bool
+	timeout          time.Duration
+	didCommonHeaders bool
+
+	headerMD metadata.MD
+
+	closeOnce sync.Once
+	closedCh  chan struct{} // closed on Close
+
+	// writes is a channel of code to run serialized in the
+	// ServeHTTP (HandleStreams) goroutine. The channel is closed
+	// when WriteStatus is called.
+	writes chan func()
+}
+
+func (ht *serverHandlerTransport) Close() error {
+	ht.closeOnce.Do(ht.closeCloseChanOnce)
+	return nil
+}
+
+func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
+
+func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+
+// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
+// the empty string if unknown.
+type strAddr string
+
+func (a strAddr) Network() string {
+	if a != "" {
+		// Per the documentation on net/http.Request.RemoteAddr, if this is
+		// set, it's set to the IP:port of the peer (hence, TCP):
+		// https://golang.org/pkg/net/http/#Request
+		//
+		// If we want to support Unix sockets later, we can
+		// add our own grpc-specific convention within the
+		// grpc codebase to set RemoteAddr to a different
+		// format, or probably better: we can attach it to the
+		// context and use that from serverHandlerTransport.RemoteAddr.
+		return "tcp"
+	}
+	return ""
+}
+
+func (a strAddr) String() string { return string(a) }
+
+// do runs fn in the ServeHTTP goroutine.
+func (ht *serverHandlerTransport) do(fn func()) error {
+	select {
+	case ht.writes <- fn:
+		return nil
+	case <-ht.closedCh:
+		return ErrConnClosing
+	}
+}
+
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+	err := ht.do(func() {
+		ht.writeCommonHeaders(s)
+
+		// And flush, in case no header or body has been sent yet.
+		// This forces a separation of headers and trailers if this is the
+		// first call (for example, in the end2end tests' TestNoService).
+		ht.rw.(http.Flusher).Flush()
+
+		h := ht.rw.Header()
+		h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
+		if statusDesc != "" {
+			h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
+		}
+		if md := s.Trailer(); len(md) > 0 {
+			for k, vv := range md {
+				// Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+				if isReservedHeader(k) {
+					continue
+				}
+				for _, v := range vv {
+					// http2 ResponseWriter mechanism to
+					// send undeclared Trailers after the
+					// headers have possibly been written.
+					h.Add(http2.TrailerPrefix+k, v)
+				}
+			}
+		}
+	})
+	close(ht.writes)
+	return err
+}
+
+// writeCommonHeaders sets common headers on the first write
+// call (Write, WriteHeader, or WriteStatus).
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+	if ht.didCommonHeaders {
+		return
+	}
+	ht.didCommonHeaders = true
+
+	h := ht.rw.Header()
+	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
+	h.Set("Content-Type", "application/grpc")
+
+	// Predeclare trailers we'll set later in WriteStatus (after the body).
+	// This is a SHOULD in the HTTP RFC, and the way you add (known)
+	// Trailers per the net/http.ResponseWriter contract.
+	// See https://golang.org/pkg/net/http/#ResponseWriter
+	// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+	h.Add("Trailer", "Grpc-Status")
+	h.Add("Trailer", "Grpc-Message")
+
+	if s.sendCompress != "" {
+		h.Set("Grpc-Encoding", s.sendCompress)
+	}
+}
+
+func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
+	return ht.do(func() {
+		ht.writeCommonHeaders(s)
+		ht.rw.Write(data)
+		if !opts.Delay {
+			ht.rw.(http.Flusher).Flush()
+		}
+	})
+}
+
+func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+	return ht.do(func() {
+		ht.writeCommonHeaders(s)
+		h := ht.rw.Header()
+		for k, vv := range md {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, v := range vv {
+				h.Add(k, v)
+			}
+		}
+		ht.rw.WriteHeader(200)
+		ht.rw.(http.Flusher).Flush()
+	})
+}
+
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
+	// With this transport type there will be exactly 1 stream: this HTTP request.
+
+	var ctx context.Context
+	var cancel context.CancelFunc
+	if ht.timeoutSet {
+		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
+	} else {
+		ctx, cancel = context.WithCancel(context.Background())
+	}
+
+	// requestOver is closed when either the request's context is done
+	// or the status has been written via WriteStatus.
+	requestOver := make(chan struct{})
+
+	// clientGone receives a single value if the peer is gone, either
+	// because the underlying connection is dead or because the
+	// peer sent an http2 RST_STREAM.
+	clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
+	go func() {
+		select {
+		case <-requestOver:
+			return
+		case <-ht.closedCh:
+		case <-clientGone:
+		}
+		cancel()
+	}()
+
+	req := ht.req
+
+	s := &Stream{
+		id:            0,            // irrelevant
+		windowHandler: func(int) {}, // nothing
+		cancel:        cancel,
+		buf:           newRecvBuffer(),
+		st:            ht,
+		method:        req.URL.Path,
+		recvCompress:  req.Header.Get("grpc-encoding"),
+	}
+	pr := &peer.Peer{
+		Addr: ht.RemoteAddr(),
+	}
+	if req.TLS != nil {
+		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
+	}
+	ctx = metadata.NewContext(ctx, ht.headerMD)
+	ctx = peer.NewContext(ctx, pr)
+	s.ctx = newContextWithStream(ctx, s)
+	s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
+
+	// readerDone is closed when the Body.Read-ing goroutine exits.
+	readerDone := make(chan struct{})
+	go func() {
+		defer close(readerDone)
+
+		// TODO: minimize garbage, optimize recvBuffer code/ownership
+		const readSize = 8196
+		for buf := make([]byte, readSize); ; {
+			n, err := req.Body.Read(buf)
+			if n > 0 {
+				s.buf.put(&recvMsg{data: buf[:n:n]})
+				buf = buf[n:]
+			}
+			if err != nil {
+				s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
+				return
+			}
+			if len(buf) == 0 {
+				buf = make([]byte, readSize)
+			}
+		}
+	}()
+
+	// startStream is provided by the *grpc.Server's serveStreams.
+	// It starts a goroutine serving s and exits immediately.
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for the reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn, ok := <-ht.writes:
+			if !ok {
+				return
+			}
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+func (ht *serverHandlerTransport) Drain() {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+//   * io.EOF
+//   * io.ErrUnexpectedEOF
+//   * of type transport.ConnectionError
+//   * of type transport.StreamError
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return StreamError{
+				Code: code,
+				Desc: se.Error(),
+			}
+		}
+	}
+	return connectionErrorf(true, err, err.Error())
+}
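
serverHandlerTransport.do and runStream above serialize all writes onto the single ServeHTTP goroutine by passing closures over a channel. The following is a self-contained sketch of that pattern with illustrative names that are not taken from the vendored code.

	package main

	import (
		"errors"
		"fmt"
	)

	type runner struct {
		writes chan func()
		closed chan struct{}
	}

	var errClosed = errors.New("runner closed")

	// do schedules fn on the owner goroutine, or fails if it has stopped.
	func (r *runner) do(fn func()) error {
		select {
		case r.writes <- fn:
			return nil
		case <-r.closed:
			return errClosed
		}
	}

	// run executes submitted funcs in order until writes is closed or
	// the closed channel fires.
	func (r *runner) run() {
		for {
			select {
			case fn, ok := <-r.writes:
				if !ok {
					return
				}
				fn()
			case <-r.closed:
				return
			}
		}
	}

	func main() {
		r := &runner{writes: make(chan func()), closed: make(chan struct{})}
		done := make(chan struct{})
		go func() { r.run(); close(done) }()

		r.do(func() { fmt.Println("first write") })
		r.do(func() { fmt.Println("second write") })
		close(r.writes) // like WriteStatus closing ht.writes
		<-done
	}
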
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
new file mode 100644
index 0000000..3c18554
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -0,0 +1,1057 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	target    string // server name/addr
+	userAgent string
+	conn      net.Conn             // underlying communication channel
+	authInfo  credentials.AuthInfo // auth info about the connection
+	nextID    uint32               // the next stream ID to be used
+
+	// writableChan synchronizes write access to the transport.
+	// A writer acquires the write lock by receiving a value from writableChan
+	// and releases it by sending a value on writableChan.
+	writableChan chan int
+	// shutdownChan is closed when Close is called.
+	// Blocking operations should select on shutdownChan to avoid
+	// blocking forever after Close.
+	// TODO(zhaoq): Maybe have a channel context?
+	shutdownChan chan struct{}
+	// errorChan is closed to notify the caller of an I/O error.
+	errorChan chan struct{}
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	hBuf   *bytes.Buffer  // the buffer for HPACK encoding
+	hEnc   *hpack.Encoder // HPACK encoder
+
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *recvBuffer
+	fc         *inFlow
+	// sendQuotaPool provides flow control for outbound messages.
+	sendQuotaPool *quotaPool
+	// streamsQuota limits the max number of concurrent streams.
+	streamsQuota *quotaPool
+
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	creds []credentials.PerRPCCredentials
+
+	mu            sync.Mutex     // guard the following variables
+	state         transportState // the state of underlying connection
+	activeStreams map[uint32]*Stream
+	// The max number of concurrent streams
+	maxStreams int
+	// the per-stream outbound flow control window size set by the peer.
+	streamSendQuota uint32
+	// goAwayID records the Last-Stream-ID in the GoAway frame from the server.
+	goAwayID uint32
+	// prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
+	prevGoAwayID uint32
+}
+
+func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) {
+	if fn != nil {
+		return fn(ctx, addr)
+	}
+	return dialContext(ctx, "tcp", addr)
+}
+
+func isTemporary(err error) bool {
+	switch err {
+	case io.EOF:
+		// Connection closures may be resolved upon retry, and are thus
+		// treated as temporary.
+		return true
+	case context.DeadlineExceeded:
+		// In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
+		// special case is not needed. Until then, we need to keep this
+		// clause.
+		return true
+	}
+
+	switch err := err.(type) {
+	case interface {
+		Temporary() bool
+	}:
+		return err.Temporary()
+	case interface {
+		Timeout() bool
+	}:
+		// Timeouts may be resolved upon retry, and are thus treated as
+		// temporary.
+		return err.Timeout()
+	}
+	return false
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
+func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) {
+	scheme := "http"
+	conn, err := dial(opts.Dialer, ctx, addr)
+	if err != nil {
+		return nil, connectionErrorf(true, err, "transport: %v", err)
+	}
+	// Any further errors will close the underlying connection
+	defer func(conn net.Conn) {
+		if err != nil {
+			conn.Close()
+		}
+	}(conn)
+	var authInfo credentials.AuthInfo
+	if creds := opts.TransportCredentials; creds != nil {
+		scheme = "https"
+		conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn)
+		if err != nil {
+			// Credentials handshake errors are typically considered permanent
+			// to avoid retrying on e.g. bad certificates.
+			temp := isTemporary(err)
+			return nil, connectionErrorf(temp, err, "transport: %v", err)
+		}
+	}
+	ua := primaryUA
+	if opts.UserAgent != "" {
+		ua = opts.UserAgent + " " + ua
+	}
+	var buf bytes.Buffer
+	t := &http2Client{
+		target:    addr,
+		userAgent: ua,
+		conn:      conn,
+		authInfo:  authInfo,
+		// Client-initiated stream IDs are odd, starting from 1.
+		nextID:          1,
+		writableChan:    make(chan int, 1),
+		shutdownChan:    make(chan struct{}),
+		errorChan:       make(chan struct{}),
+		goAway:          make(chan struct{}),
+		framer:          newFramer(conn),
+		hBuf:            &buf,
+		hEnc:            hpack.NewEncoder(&buf),
+		controlBuf:      newRecvBuffer(),
+		fc:              &inFlow{limit: initialConnWindowSize},
+		sendQuotaPool:   newQuotaPool(defaultWindowSize),
+		scheme:          scheme,
+		state:           reachable,
+		activeStreams:   make(map[uint32]*Stream),
+		creds:           opts.PerRPCCredentials,
+		maxStreams:      math.MaxInt32,
+		streamSendQuota: defaultWindowSize,
+	}
+	// Start the reader goroutine for incoming messages. Each transport has
+	// a dedicated goroutine which reads HTTP2 frames from the network. It then
+	// dispatches each frame to the corresponding stream entity.
+	go t.reader()
+	// Send connection preface to server.
+	n, err := t.conn.Write(clientPreface)
+	if err != nil {
+		t.Close()
+		return nil, connectionErrorf(true, err, "transport: %v", err)
+	}
+	if n != len(clientPreface) {
+		t.Close()
+		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+	}
+	if initialWindowSize != defaultWindowSize {
+		err = t.framer.writeSettings(true, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(initialWindowSize),
+		})
+	} else {
+		err = t.framer.writeSettings(true)
+	}
+	if err != nil {
+		t.Close()
+		return nil, connectionErrorf(true, err, "transport: %v", err)
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+		if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
+			t.Close()
+			return nil, connectionErrorf(true, err, "transport: %v", err)
+		}
+	}
+	go t.controller()
+	t.writableChan <- 0
+	return t, nil
+}
+
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
+	s := &Stream{
+		id:            t.nextID,
+		done:          make(chan struct{}),
+		goAway:        make(chan struct{}),
+		method:        callHdr.Method,
+		sendCompress:  callHdr.SendCompress,
+		buf:           newRecvBuffer(),
+		fc:            &inFlow{limit: initialWindowSize},
+		sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
+		headerChan:    make(chan struct{}),
+	}
+	t.nextID += 2
+	s.windowHandler = func(n int) {
+		t.updateWindow(s, uint32(n))
+	}
+	// The client-side stream context should have exactly the same life cycle as the user-provided context.
+	// That means s.ctx should be read-only, and s.ctx is done iff ctx is done.
+	// So we use the original context here instead of creating a copy.
+	s.ctx = ctx
+	s.dec = &recvBufferReader{
+		ctx:    s.ctx,
+		goAway: s.goAway,
+		recv:   s.buf,
+	}
+	return s
+}
+
+// NewStream creates a stream and registers it with the transport as an
+// "active" stream.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+	pr := &peer.Peer{
+		Addr: t.conn.RemoteAddr(),
+	}
+	// Attach Auth info if there is any.
+	if t.authInfo != nil {
+		pr.AuthInfo = t.authInfo
+	}
+	ctx = peer.NewContext(ctx, pr)
+	authData := make(map[string]string)
+	for _, c := range t.creds {
+		// Construct URI required to get auth request metadata.
+		var port string
+		if pos := strings.LastIndex(t.target, ":"); pos != -1 {
+			// Omit port if it is the default one.
+			if t.target[pos+1:] != "443" {
+				port = ":" + t.target[pos+1:]
+			}
+		}
+		pos := strings.LastIndex(callHdr.Method, "/")
+		if pos == -1 {
+			return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
+		}
+		audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
+		data, err := c.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err)
+		}
+		for k, v := range data {
+			authData[k] = v
+		}
+	}
+	t.mu.Lock()
+	if t.activeStreams == nil {
+		t.mu.Unlock()
+		return nil, ErrConnClosing
+	}
+	if t.state == draining {
+		t.mu.Unlock()
+		return nil, ErrStreamDrain
+	}
+	if t.state != reachable {
+		t.mu.Unlock()
+		return nil, ErrConnClosing
+	}
+	checkStreamsQuota := t.streamsQuota != nil
+	t.mu.Unlock()
+	if checkStreamsQuota {
+		sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire())
+		if err != nil {
+			return nil, err
+		}
+		// Returns the quota balance back.
+		if sq > 1 {
+			t.streamsQuota.add(sq - 1)
+		}
+	}
+	if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
+		// Return the quota back now because there is no stream returned to the caller.
+		if _, ok := err.(StreamError); ok && checkStreamsQuota {
+			t.streamsQuota.add(1)
+		}
+		return nil, err
+	}
+	t.mu.Lock()
+	if t.state == draining {
+		t.mu.Unlock()
+		if checkStreamsQuota {
+			t.streamsQuota.add(1)
+		}
+		// Need to make t writable again so that the rpc in flight can still proceed.
+		t.writableChan <- 0
+		return nil, ErrStreamDrain
+	}
+	if t.state != reachable {
+		t.mu.Unlock()
+		return nil, ErrConnClosing
+	}
+	s := t.newStream(ctx, callHdr)
+	t.activeStreams[s.id] = s
+
+	// This stream is not counted when applySettings(...) initializes t.streamsQuota.
+	// Reset t.streamsQuota to the right value.
+	var reset bool
+	if !checkStreamsQuota && t.streamsQuota != nil {
+		reset = true
+	}
+	t.mu.Unlock()
+	if reset {
+		t.streamsQuota.reset(-1)
+	}
+
+	// HPACK encodes various headers. Note that once WriteField(...) is
+	// called, the corresponding headers/continuation frame has to be sent
+	// because hpack.Encoder is stateful.
+	t.hBuf.Reset()
+	t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
+	t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+	t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+	t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+	t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+
+	if callHdr.SendCompress != "" {
+		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+	}
+	if dl, ok := ctx.Deadline(); ok {
+		// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
+		timeout := dl.Sub(time.Now())
+		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+	}
+
+	for k, v := range authData {
+		// Capital header names are illegal in HTTP/2.
+		k = strings.ToLower(k)
+		t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+	}
+	var (
+		hasMD      bool
+		endHeaders bool
+	)
+	if md, ok := metadata.FromContext(ctx); ok {
+		hasMD = true
+		for k, v := range md {
+			// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, entry := range v {
+				t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+			}
+		}
+	}
+	first := true
+	// Sends the headers in a single batch even when they span multiple frames.
+	for !endHeaders {
+		size := t.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		var flush bool
+		if endHeaders && (hasMD || callHdr.Flush) {
+			flush = true
+		}
+		if first {
+			// Sends a HeadersFrame to server to start a new stream.
+			p := http2.HeadersFrameParam{
+				StreamID:      s.id,
+				BlockFragment: t.hBuf.Next(size),
+				EndStream:     false,
+				EndHeaders:    endHeaders,
+			}
+			// Do a force flush of the buffered frames iff it is the last headers frame
+			// and there is header metadata to be sent. Otherwise, flushing is deferred
+			// until the corresponding data frame is written.
+			err = t.framer.writeHeaders(flush, p)
+			first = false
+		} else {
+			// Sends Continuation frames for the leftover headers.
+			err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
+		}
+		if err != nil {
+			t.notifyError(err)
+			return nil, connectionErrorf(true, err, "transport: %v", err)
+		}
+	}
+	t.writableChan <- 0
+	return s, nil
+}
+
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in the reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+	var updateStreams bool
+	t.mu.Lock()
+	if t.activeStreams == nil {
+		t.mu.Unlock()
+		return
+	}
+	if t.streamsQuota != nil {
+		updateStreams = true
+	}
+	delete(t.activeStreams, s.id)
+	if t.state == draining && len(t.activeStreams) == 0 {
+		// The transport is draining and s is the last live stream on t.
+		t.mu.Unlock()
+		t.Close()
+		return
+	}
+	t.mu.Unlock()
+	if updateStreams {
+		t.streamsQuota.add(1)
+	}
+	s.mu.Lock()
+	if q := s.fc.resetPendingData(); q > 0 {
+		if n := t.fc.onRead(q); n > 0 {
+			t.controlBuf.put(&windowUpdate{0, n})
+		}
+	}
+	if s.state == streamDone {
+		s.mu.Unlock()
+		return
+	}
+	if !s.headerDone {
+		close(s.headerChan)
+		s.headerDone = true
+	}
+	s.state = streamDone
+	s.mu.Unlock()
+	if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded {
+		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
+	}
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close() (err error) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if t.state == reachable || t.state == draining {
+		close(t.errorChan)
+	}
+	t.state = closing
+	t.mu.Unlock()
+	close(t.shutdownChan)
+	err = t.conn.Close()
+	t.mu.Lock()
+	streams := t.activeStreams
+	t.activeStreams = nil
+	t.mu.Unlock()
+	// Notify all active streams.
+	for _, s := range streams {
+		s.mu.Lock()
+		if !s.headerDone {
+			close(s.headerChan)
+			s.headerDone = true
+		}
+		s.mu.Unlock()
+		s.write(recvMsg{err: ErrConnClosing})
+	}
+	return
+}
+
+func (t *http2Client) GracefulClose() error {
+	t.mu.Lock()
+	switch t.state {
+	case unreachable:
+		// The server may close the connection concurrently. t is not available for
+		// any streams. Close it now.
+		t.mu.Unlock()
+		t.Close()
+		return nil
+	case closing:
+		t.mu.Unlock()
+		return nil
+	}
+	// Notify the streams which were initiated after the server sent GOAWAY.
+	select {
+	case <-t.goAway:
+		n := t.prevGoAwayID
+		if n == 0 && t.nextID > 1 {
+			n = t.nextID - 2
+		}
+		m := t.goAwayID + 2
+		if m == 2 {
+			m = 1
+		}
+		for i := m; i <= n; i += 2 {
+			if s, ok := t.activeStreams[i]; ok {
+				close(s.goAway)
+			}
+		}
+	default:
+	}
+	if t.state == draining {
+		t.mu.Unlock()
+		return nil
+	}
+	t.state = draining
+	active := len(t.activeStreams)
+	t.mu.Unlock()
+	if active == 0 {
+		return t.Close()
+	}
+	return nil
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
+// if it improves the performance.
+func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
+	r := bytes.NewBuffer(data)
+	for {
+		var p []byte
+		if r.Len() > 0 {
+			size := http2MaxFrameLen
+			s.sendQuotaPool.add(0)
+			// Wait until the stream has some quota to send the data.
+			sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
+			if err != nil {
+				return err
+			}
+			t.sendQuotaPool.add(0)
+			// Wait until the transport has some quota to send the data.
+			tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
+			if err != nil {
+				if _, ok := err.(StreamError); ok || err == io.EOF {
+					t.sendQuotaPool.cancel()
+				}
+				return err
+			}
+			if sq < size {
+				size = sq
+			}
+			if tq < size {
+				size = tq
+			}
+			p = r.Next(size)
+			ps := len(p)
+			if ps < sq {
+				// Overbooked stream quota. Return it back.
+				s.sendQuotaPool.add(sq - ps)
+			}
+			if ps < tq {
+				// Overbooked transport quota. Return it back.
+				t.sendQuotaPool.add(tq - ps)
+			}
+		}
+		var (
+			endStream  bool
+			forceFlush bool
+		)
+		if opts.Last && r.Len() == 0 {
+			endStream = true
+		}
+		// Indicate there is a writer who is about to write a data frame.
+		t.framer.adjustNumWriters(1)
+		// Got some quota. Try to acquire writing privilege on the transport.
+		if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil {
+			if _, ok := err.(StreamError); ok || err == io.EOF {
+				// Return the connection quota back.
+				t.sendQuotaPool.add(len(p))
+			}
+			if t.framer.adjustNumWriters(-1) == 0 {
+				// This writer is the last one in this batch and has the
+				// responsibility to flush the buffered frames. It queues
+				// a flush request to controlBuf instead of flushing directly
+				// in order to avoid the race with other writing or flushing.
+				t.controlBuf.put(&flushIO{})
+			}
+			return err
+		}
+		select {
+		case <-s.ctx.Done():
+			t.sendQuotaPool.add(len(p))
+			if t.framer.adjustNumWriters(-1) == 0 {
+				t.controlBuf.put(&flushIO{})
+			}
+			t.writableChan <- 0
+			return ContextErr(s.ctx.Err())
+		default:
+		}
+		if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
+			// Do a force flush iff this is the last frame for the entire gRPC message
+			// and the caller is the only writer at this moment.
+			forceFlush = true
+		}
+		// If WriteData fails, all the pending streams will be handled
+		// by http2Client.Close(). No explicit CloseStream() needs to be
+		// invoked.
+		if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
+			t.notifyError(err)
+			return connectionErrorf(true, err, "transport: %v", err)
+		}
+		if t.framer.adjustNumWriters(-1) == 0 {
+			t.framer.flushWrite()
+		}
+		t.writableChan <- 0
+		if r.Len() == 0 {
+			break
+		}
+	}
+	if !opts.Last {
+		return nil
+	}
+	s.mu.Lock()
+	if s.state != streamDone {
+		s.state = streamWriteDone
+	}
+	s.mu.Unlock()
+	return nil
+}
+
+func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	s, ok := t.activeStreams[f.Header().StreamID]
+	return s, ok
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates will be delivered to the controller for sending when
+// the cumulative quota exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.state == streamDone {
+		return
+	}
+	if w := t.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&windowUpdate{0, w})
+	}
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&windowUpdate{s.id, w})
+	}
+}
+
+func (t *http2Client) handleData(f *http2.DataFrame) {
+	size := len(f.Data())
+	if err := t.fc.onData(uint32(size)); err != nil {
+		t.notifyError(connectionErrorf(true, err, "%v", err))
+		return
+	}
+	// Select the right stream to dispatch.
+	s, ok := t.getStream(f)
+	if !ok {
+		if w := t.fc.onRead(uint32(size)); w > 0 {
+			t.controlBuf.put(&windowUpdate{0, w})
+		}
+		return
+	}
+	if size > 0 {
+		s.mu.Lock()
+		if s.state == streamDone {
+			s.mu.Unlock()
+			// The stream has been closed. Release the corresponding quota.
+			if w := t.fc.onRead(uint32(size)); w > 0 {
+				t.controlBuf.put(&windowUpdate{0, w})
+			}
+			return
+		}
+		if err := s.fc.onData(uint32(size)); err != nil {
+			s.state = streamDone
+			s.statusCode = codes.Internal
+			s.statusDesc = err.Error()
+			close(s.done)
+			s.mu.Unlock()
+			s.write(recvMsg{err: io.EOF})
+			t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+			return
+		}
+		s.mu.Unlock()
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		data := make([]byte, size)
+		copy(data, f.Data())
+		s.write(recvMsg{data: data})
+	}
+	// The server has closed the stream without sending trailers.  Record that
+	// the read direction is closed, and set the status appropriately.
+	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+		s.mu.Lock()
+		if s.state == streamDone {
+			s.mu.Unlock()
+			return
+		}
+		s.state = streamDone
+		s.statusCode = codes.Internal
+		s.statusDesc = "server closed the stream without sending trailers"
+		close(s.done)
+		s.mu.Unlock()
+		s.write(recvMsg{err: io.EOF})
+	}
+}
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+	s, ok := t.getStream(f)
+	if !ok {
+		return
+	}
+	s.mu.Lock()
+	if s.state == streamDone {
+		s.mu.Unlock()
+		return
+	}
+	s.state = streamDone
+	if !s.headerDone {
+		close(s.headerChan)
+		s.headerDone = true
+	}
+	s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
+	if !ok {
+		grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
+		s.statusCode = codes.Unknown
+	}
+	s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode)
+	close(s.done)
+	s.mu.Unlock()
+	s.write(recvMsg{err: io.EOF})
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+	if f.IsAck() {
+		return
+	}
+	var ss []http2.Setting
+	f.ForeachSetting(func(s http2.Setting) error {
+		ss = append(ss, s)
+		return nil
+	})
+	// The settings will be applied once the ack is sent.
+	t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+	t.mu.Lock()
+	if t.state == reachable || t.state == draining {
+		if f.LastStreamID > 0 && f.LastStreamID%2 != 1 {
+			t.mu.Unlock()
+			t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID))
+			return
+		}
+		select {
+		case <-t.goAway:
+			id := t.goAwayID
+			// t.goAway has been closed (i.e., multiple GoAways).
+			if id < f.LastStreamID {
+				t.mu.Unlock()
+				t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStreamID %d, currently recv %d", id, f.LastStreamID))
+				return
+			}
+			t.prevGoAwayID = id
+			t.goAwayID = f.LastStreamID
+			t.mu.Unlock()
+			return
+		default:
+		}
+		t.goAwayID = f.LastStreamID
+		close(t.goAway)
+	}
+	t.mu.Unlock()
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	id := f.Header().StreamID
+	incr := f.Increment
+	if id == 0 {
+		t.sendQuotaPool.add(int(incr))
+		return
+	}
+	if s, ok := t.getStream(f); ok {
+		s.sendQuotaPool.add(int(incr))
+	}
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+	s, ok := t.getStream(frame)
+	if !ok {
+		return
+	}
+	var state decodeState
+	for _, hf := range frame.Fields {
+		state.processHeaderField(hf)
+	}
+	if state.err != nil {
+		s.mu.Lock()
+		if !s.headerDone {
+			close(s.headerChan)
+			s.headerDone = true
+		}
+		s.mu.Unlock()
+		s.write(recvMsg{err: state.err})
+		// Something went wrong. Stop reading even when there is data remaining.
+		return
+	}
+
+	endStream := frame.StreamEnded()
+
+	s.mu.Lock()
+	if !endStream {
+		s.recvCompress = state.encoding
+	}
+	if !s.headerDone {
+		if !endStream && len(state.mdata) > 0 {
+			s.header = state.mdata
+		}
+		close(s.headerChan)
+		s.headerDone = true
+	}
+	if !endStream || s.state == streamDone {
+		s.mu.Unlock()
+		return
+	}
+
+	if len(state.mdata) > 0 {
+		s.trailer = state.mdata
+	}
+	s.statusCode = state.statusCode
+	s.statusDesc = state.statusDesc
+	close(s.done)
+	s.state = streamDone
+	s.mu.Unlock()
+	s.write(recvMsg{err: io.EOF})
+}
+
+func handleMalformedHTTP2(s *Stream, err error) {
+	s.mu.Lock()
+	if !s.headerDone {
+		close(s.headerChan)
+		s.headerDone = true
+	}
+	s.mu.Unlock()
+	s.write(recvMsg{err: err})
+}
+
+// reader runs as a separate goroutine in charge of reading data from the
+// network connection.
+//
+// TODO(zhaoq): currently one reader per transport. Investigate whether this is
+// optimal.
+// TODO(zhaoq): Check the validity of the incoming frame sequence.
+func (t *http2Client) reader() {
+	// Check the validity of server preface.
+	frame, err := t.framer.readFrame()
+	if err != nil {
+		t.notifyError(err)
+		return
+	}
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		t.notifyError(err)
+		return
+	}
+	t.handleSettings(sf)
+
+	// loop to keep reading incoming messages on this transport.
+	for {
+		frame, err := t.framer.readFrame()
+		if err != nil {
+			// Abort an active stream if the http2.Framer returns an
+			// http2.StreamError. This can happen only if the server's response
+			// is malformed http2.
+			if se, ok := err.(http2.StreamError); ok {
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					// Use the error detail to provide a better error message.
+					handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
+				}
+				continue
+			} else {
+				// Transport error.
+				t.notifyError(err)
+				return
+			}
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			t.operateHeaders(frame)
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.GoAwayFrame:
+			t.handleGoAway(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		default:
+			grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
+		}
+	}
+}
+
+func (t *http2Client) applySettings(ss []http2.Setting) {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
+			// TODO(zhaoq): This is a hack to avoid significant refactoring of the
+			// code to deal with the unrealistic int32 overflow. Probably will try
+			// to find a better way to handle this later.
+			if s.Val > math.MaxInt32 {
+				s.Val = math.MaxInt32
+			}
+			t.mu.Lock()
+			reset := t.streamsQuota != nil
+			if !reset {
+				t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
+			}
+			ms := t.maxStreams
+			t.maxStreams = int(s.Val)
+			t.mu.Unlock()
+			if reset {
+				t.streamsQuota.reset(int(s.Val) - ms)
+			}
+		case http2.SettingInitialWindowSize:
+			t.mu.Lock()
+			for _, stream := range t.activeStreams {
+				// Adjust the sending quota for each stream.
+				stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+			}
+			t.streamSendQuota = s.Val
+			t.mu.Unlock()
+		}
+	}
+}
+
+// controller runs in a separate goroutine and takes charge of sending control
+// frames (e.g., window update, reset stream, setting, etc.) to the server.
+func (t *http2Client) controller() {
+	for {
+		select {
+		case i := <-t.controlBuf.get():
+			t.controlBuf.load()
+			select {
+			case <-t.writableChan:
+				switch i := i.(type) {
+				case *windowUpdate:
+					t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+				case *settings:
+					if i.ack {
+						t.framer.writeSettingsAck(true)
+						t.applySettings(i.ss)
+					} else {
+						t.framer.writeSettings(true, i.ss...)
+					}
+				case *resetStream:
+					t.framer.writeRSTStream(true, i.streamID, i.code)
+				case *flushIO:
+					t.framer.flushWrite()
+				case *ping:
+					t.framer.writePing(true, i.ack, i.data)
+				default:
+					grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
+				}
+				t.writableChan <- 0
+				continue
+			case <-t.shutdownChan:
+				return
+			}
+		case <-t.shutdownChan:
+			return
+		}
+	}
+}
+
+func (t *http2Client) Error() <-chan struct{} {
+	return t.errorChan
+}
+
+func (t *http2Client) GoAway() <-chan struct{} {
+	return t.goAway
+}
+
+func (t *http2Client) notifyError(err error) {
+	t.mu.Lock()
+	// make sure t.errorChan is closed only once.
+	if t.state == draining {
+		t.mu.Unlock()
+		t.Close()
+		return
+	}
+	if t.state == reachable {
+		t.state = unreachable
+		close(t.errorChan)
+		grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
+	}
+	t.mu.Unlock()
+}
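
Both the client transport above and the server transport below guard frame writes with writableChan, a buffered channel of capacity 1 used as a write token: receiving acquires exclusive write access and sending the token back releases it. A minimal standalone sketch of the idiom, with illustrative names that are not part of the vendored code:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		writable := make(chan int, 1)
		writable <- 0 // the transport starts out writable

		var wg sync.WaitGroup
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				<-writable // acquire the write token
				fmt.Println("goroutine", id, "holds the write token")
				writable <- 0 // release it for the next writer
			}(i)
		}
		wg.Wait()
	}
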
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
new file mode 100644
index 0000000..f753c4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -0,0 +1,774 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"math"
+	"net"
+	"strconv"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+// ErrIllegalHeaderWrite indicates that setting a header is illegal because of
+// the stream's state.
+var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+	conn        net.Conn
+	maxStreamID uint32               // max stream ID ever seen
+	authInfo    credentials.AuthInfo // auth info about the connection
+	// writableChan synchronizes write access to the transport.
+	// A writer acquires the write lock by receiving a value on writableChan
+	// and releases it by sending on writableChan.
+	writableChan chan int
+	// shutdownChan is closed when Close is called.
+	// Blocking operations should select on shutdownChan to avoid
+	// blocking forever after Close.
+	shutdownChan chan struct{}
+	framer       *framer
+	hBuf         *bytes.Buffer  // the buffer for HPACK encoding
+	hEnc         *hpack.Encoder // HPACK encoder
+
+	// The max number of concurrent streams.
+	maxStreams uint32
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *recvBuffer
+	fc         *inFlow
+	// sendQuotaPool provides flow control for outbound messages.
+	sendQuotaPool *quotaPool
+
+	mu            sync.Mutex // guard the following
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// the per-stream outbound flow control window size set by the peer.
+	streamSendQuota uint32
+}
+
+// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
+// returned if something goes wrong.
+func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
+	framer := newFramer(conn)
+	// Send initial settings as connection preface to client.
+	var settings []http2.Setting
+	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+	// permitted in the HTTP2 spec.
+	if maxStreams == 0 {
+		maxStreams = math.MaxUint32
+	} else {
+		settings = append(settings, http2.Setting{
+			ID:  http2.SettingMaxConcurrentStreams,
+			Val: maxStreams,
+		})
+	}
+	if initialWindowSize != defaultWindowSize {
+		settings = append(settings, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(initialWindowSize)})
+	}
+	if err := framer.writeSettings(true, settings...); err != nil {
+		return nil, connectionErrorf(true, err, "transport: %v", err)
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+		if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
+			return nil, connectionErrorf(true, err, "transport: %v", err)
+		}
+	}
+	var buf bytes.Buffer
+	t := &http2Server{
+		conn:            conn,
+		authInfo:        authInfo,
+		framer:          framer,
+		hBuf:            &buf,
+		hEnc:            hpack.NewEncoder(&buf),
+		maxStreams:      maxStreams,
+		controlBuf:      newRecvBuffer(),
+		fc:              &inFlow{limit: initialConnWindowSize},
+		sendQuotaPool:   newQuotaPool(defaultWindowSize),
+		state:           reachable,
+		writableChan:    make(chan int, 1),
+		shutdownChan:    make(chan struct{}),
+		activeStreams:   make(map[uint32]*Stream),
+		streamSendQuota: defaultWindowSize,
+	}
+	go t.controller()
+	t.writableChan <- 0
+	return t, nil
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) {
+	buf := newRecvBuffer()
+	s := &Stream{
+		id:  frame.Header().StreamID,
+		st:  t,
+		buf: buf,
+		fc:  &inFlow{limit: initialWindowSize},
+	}
+
+	var state decodeState
+	for _, hf := range frame.Fields {
+		state.processHeaderField(hf)
+	}
+	if err := state.err; err != nil {
+		if se, ok := err.(StreamError); ok {
+			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
+		}
+		return
+	}
+
+	if frame.StreamEnded() {
+		// s is just created by the caller. No lock needed.
+		s.state = streamReadDone
+	}
+	s.recvCompress = state.encoding
+	if state.timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
+	} else {
+		s.ctx, s.cancel = context.WithCancel(context.TODO())
+	}
+	pr := &peer.Peer{
+		Addr: t.conn.RemoteAddr(),
+	}
+	// Attach Auth info if there is any.
+	if t.authInfo != nil {
+		pr.AuthInfo = t.authInfo
+	}
+	s.ctx = peer.NewContext(s.ctx, pr)
+	// Cache the current stream in the context so that the server application
+	// can retrieve it. Required when the server wants to send some metadata
+	// back to the client (unary call only).
+	s.ctx = newContextWithStream(s.ctx, s)
+	// Attach the received metadata to the context.
+	if len(state.mdata) > 0 {
+		s.ctx = metadata.NewContext(s.ctx, state.mdata)
+	}
+
+	s.dec = &recvBufferReader{
+		ctx:  s.ctx,
+		recv: s.buf,
+	}
+	s.recvCompress = state.encoding
+	s.method = state.method
+	t.mu.Lock()
+	if t.state != reachable {
+		t.mu.Unlock()
+		return
+	}
+	if uint32(len(t.activeStreams)) >= t.maxStreams {
+		t.mu.Unlock()
+		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+		return
+	}
+	if s.id%2 != 1 || s.id <= t.maxStreamID {
+		t.mu.Unlock()
+		// illegal gRPC stream id.
+		grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id)
+		return true
+	}
+	t.maxStreamID = s.id
+	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
+	t.activeStreams[s.id] = s
+	t.mu.Unlock()
+	s.windowHandler = func(n int) {
+		t.updateWindow(s, uint32(n))
+	}
+	handle(s)
+	return
+}
+
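+// Note: client-initiated HTTP/2 stream IDs must be odd and strictly
+// increasing, which is why operateHeaders treats an even or non-increasing ID
+// as a connection-level error and returns true so that HandleStreams closes
+// the transport.
+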
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
+	// Check the validity of client preface.
+	preface := make([]byte, len(clientPreface))
+	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+		t.Close()
+		return
+	}
+	if !bytes.Equal(preface, clientPreface) {
+		grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+		t.Close()
+		return
+	}
+
+	frame, err := t.framer.readFrame()
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		t.Close()
+		return
+	}
+	if err != nil {
+		grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+		t.Close()
+		return
+	}
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+		t.Close()
+		return
+	}
+	t.handleSettings(sf)
+
+	for {
+		frame, err := t.framer.readFrame()
+		if err != nil {
+			if se, ok := err.(http2.StreamError); ok {
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					t.closeStream(s)
+				}
+				t.controlBuf.put(&resetStream{se.StreamID, se.Code})
+				continue
+			}
+			if err == io.EOF || err == io.ErrUnexpectedEOF {
+				t.Close()
+				return
+			}
+			grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+			t.Close()
+			return
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			if t.operateHeaders(frame, handle) {
+				t.Close()
+				break
+			}
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		case *http2.GoAwayFrame:
+			// TODO: Handle GoAway from the client appropriately.
+		default:
+			grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+		}
+	}
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.activeStreams == nil {
+		// The transport is closing.
+		return nil, false
+	}
+	s, ok := t.activeStreams[f.Header().StreamID]
+	if !ok {
+		// The stream is already done.
+		return nil, false
+	}
+	return s, true
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates will be delivered to the controller for sending when
+// the cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.state == streamDone {
+		return
+	}
+	if w := t.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&windowUpdate{0, w})
+	}
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&windowUpdate{s.id, w})
+	}
+}
+
+func (t *http2Server) handleData(f *http2.DataFrame) {
+	size := len(f.Data())
+	if err := t.fc.onData(uint32(size)); err != nil {
+		grpclog.Printf("transport: http2Server %v", err)
+		t.Close()
+		return
+	}
+	// Select the right stream to dispatch.
+	s, ok := t.getStream(f)
+	if !ok {
+		if w := t.fc.onRead(uint32(size)); w > 0 {
+			t.controlBuf.put(&windowUpdate{0, w})
+		}
+		return
+	}
+	if size > 0 {
+		s.mu.Lock()
+		if s.state == streamDone {
+			s.mu.Unlock()
+			// The stream has been closed. Release the corresponding quota.
+			if w := t.fc.onRead(uint32(size)); w > 0 {
+				t.controlBuf.put(&windowUpdate{0, w})
+			}
+			return
+		}
+		if err := s.fc.onData(uint32(size)); err != nil {
+			s.mu.Unlock()
+			t.closeStream(s)
+			t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+			return
+		}
+		s.mu.Unlock()
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of the next frame.
+		// Can this copy be eliminated?
+		data := make([]byte, size)
+		copy(data, f.Data())
+		s.write(recvMsg{data: data})
+	}
+	if f.Header().Flags.Has(http2.FlagDataEndStream) {
+		// Received the end of stream from the client.
+		s.mu.Lock()
+		if s.state != streamDone {
+			s.state = streamReadDone
+		}
+		s.mu.Unlock()
+		s.write(recvMsg{err: io.EOF})
+	}
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+	s, ok := t.getStream(f)
+	if !ok {
+		return
+	}
+	t.closeStream(s)
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+	if f.IsAck() {
+		return
+	}
+	var ss []http2.Setting
+	f.ForeachSetting(func(s http2.Setting) error {
+		ss = append(ss, s)
+		return nil
+	})
+	// The settings will be applied once the ack is sent.
+	t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	id := f.Header().StreamID
+	incr := f.Increment
+	if id == 0 {
+		t.sendQuotaPool.add(int(incr))
+		return
+	}
+	if s, ok := t.getStream(f); ok {
+		s.sendQuotaPool.add(int(incr))
+	}
+}
+
+func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
+	first := true
+	endHeaders := false
+	var err error
+	// Sends the headers in a single batch.
+	for !endHeaders {
+		size := t.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			p := http2.HeadersFrameParam{
+				StreamID:      s.id,
+				BlockFragment: b.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			}
+			err = t.framer.writeHeaders(endHeaders, p)
+			first = false
+		} else {
+			err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
+		}
+		if err != nil {
+			t.Close()
+			return connectionErrorf(true, err, "transport: %v", err)
+		}
+	}
+	return nil
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	s.mu.Lock()
+	if s.headerOk || s.state == streamDone {
+		s.mu.Unlock()
+		return ErrIllegalHeaderWrite
+	}
+	s.headerOk = true
+	s.mu.Unlock()
+	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
+		return err
+	}
+	t.hBuf.Reset()
+	t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	if s.sendCompress != "" {
+		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	for k, v := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+			continue
+		}
+		for _, entry := range v {
+			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+		}
+	}
+	if err := t.writeHeaders(s, t.hBuf, false); err != nil {
+		return err
+	}
+	t.writableChan <- 0
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+	var headersSent bool
+	s.mu.Lock()
+	if s.state == streamDone {
+		s.mu.Unlock()
+		return nil
+	}
+	if s.headerOk {
+		headersSent = true
+	}
+	s.mu.Unlock()
+	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
+		return err
+	}
+	t.hBuf.Reset()
+	if !headersSent {
+		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	}
+	t.hEnc.WriteField(
+		hpack.HeaderField{
+			Name:  "grpc-status",
+			Value: strconv.Itoa(int(statusCode)),
+		})
+	t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
+	// Attach the trailer metadata.
+	for k, v := range s.trailer {
+		// Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, entry := range v {
+			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+		}
+	}
+	if err := t.writeHeaders(s, t.hBuf, true); err != nil {
+		t.Close()
+		return err
+	}
+	t.closeStream(s)
+	t.writableChan <- 0
+	return nil
+}
+
+// Write converts the data into HTTP2 data frames and sends them out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
+	// TODO(zhaoq): Support multi-writers for a single stream.
+	var writeHeaderFrame bool
+	s.mu.Lock()
+	if s.state == streamDone {
+		s.mu.Unlock()
+		return streamErrorf(codes.Unknown, "the stream has been done")
+	}
+	if !s.headerOk {
+		writeHeaderFrame = true
+		s.headerOk = true
+	}
+	s.mu.Unlock()
+	if writeHeaderFrame {
+		if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
+			return err
+		}
+		t.hBuf.Reset()
+		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+		if s.sendCompress != "" {
+			t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+		}
+		p := http2.HeadersFrameParam{
+			StreamID:      s.id,
+			BlockFragment: t.hBuf.Bytes(),
+			EndHeaders:    true,
+		}
+		if err := t.framer.writeHeaders(false, p); err != nil {
+			t.Close()
+			return connectionErrorf(true, err, "transport: %v", err)
+		}
+		t.writableChan <- 0
+	}
+	r := bytes.NewBuffer(data)
+	for {
+		if r.Len() == 0 {
+			return nil
+		}
+		size := http2MaxFrameLen
+		s.sendQuotaPool.add(0)
+		// Wait until the stream has some quota to send the data.
+		sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
+		if err != nil {
+			return err
+		}
+		t.sendQuotaPool.add(0)
+		// Wait until the transport has some quota to send the data.
+		tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
+		if err != nil {
+			if _, ok := err.(StreamError); ok {
+				t.sendQuotaPool.cancel()
+			}
+			return err
+		}
+		if sq < size {
+			size = sq
+		}
+		if tq < size {
+			size = tq
+		}
+		p := r.Next(size)
+		ps := len(p)
+		if ps < sq {
+			// Overbooked stream quota. Return it back.
+			s.sendQuotaPool.add(sq - ps)
+		}
+		if ps < tq {
+			// Overbooked transport quota. Return it back.
+			t.sendQuotaPool.add(tq - ps)
+		}
+		t.framer.adjustNumWriters(1)
+		// Got some quota. Try to acquire writing privilege on the
+		// transport.
+		if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
+			if _, ok := err.(StreamError); ok {
+				// Return the connection quota back.
+				t.sendQuotaPool.add(ps)
+			}
+			if t.framer.adjustNumWriters(-1) == 0 {
+				// This writer is the last one in this batch and has the
+				// responsibility to flush the buffered frames. It queues
+				// a flush request to controlBuf instead of flushing directly
+				// in order to avoid the race with other writing or flushing.
+				t.controlBuf.put(&flushIO{})
+			}
+			return err
+		}
+		select {
+		case <-s.ctx.Done():
+			t.sendQuotaPool.add(ps)
+			if t.framer.adjustNumWriters(-1) == 0 {
+				t.controlBuf.put(&flushIO{})
+			}
+			t.writableChan <- 0
+			return ContextErr(s.ctx.Err())
+		default:
+		}
+		var forceFlush bool
+		if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
+			forceFlush = true
+		}
+		if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
+			t.Close()
+			return connectionErrorf(true, err, "transport: %v", err)
+		}
+		if t.framer.adjustNumWriters(-1) == 0 {
+			t.framer.flushWrite()
+		}
+		t.writableChan <- 0
+	}
+
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+	for _, s := range ss {
+		if s.ID == http2.SettingInitialWindowSize {
+			t.mu.Lock()
+			defer t.mu.Unlock()
+			for _, stream := range t.activeStreams {
+				stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+			}
+			t.streamSendQuota = s.Val
+		}
+
+	}
+}
+
+// controller, running in a separate goroutine, takes charge of sending control
+// frames (e.g., window update, reset stream, setting, etc.) to the client.
+func (t *http2Server) controller() {
+	for {
+		select {
+		case i := <-t.controlBuf.get():
+			t.controlBuf.load()
+			select {
+			case <-t.writableChan:
+				switch i := i.(type) {
+				case *windowUpdate:
+					t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+				case *settings:
+					if i.ack {
+						t.framer.writeSettingsAck(true)
+						t.applySettings(i.ss)
+					} else {
+						t.framer.writeSettings(true, i.ss...)
+					}
+				case *resetStream:
+					t.framer.writeRSTStream(true, i.streamID, i.code)
+				case *goAway:
+					t.mu.Lock()
+					if t.state == closing {
+						t.mu.Unlock()
+						// The transport is closing.
+						return
+					}
+					sid := t.maxStreamID
+					t.state = draining
+					t.mu.Unlock()
+					t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
+				case *flushIO:
+					t.framer.flushWrite()
+				case *ping:
+					t.framer.writePing(true, i.ack, i.data)
+				default:
+					grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
+				}
+				t.writableChan <- 0
+				continue
+			case <-t.shutdownChan:
+				return
+			}
+		case <-t.shutdownChan:
+			return
+		}
+	}
+}
+
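+// Note on write serialization: writableChan has capacity one and is seeded
+// with a single token in newHTTP2Server. Every path that writes frames
+// (WriteHeader, Write, WriteStatus and the controller above) first receives
+// the token, performs its writes, and then puts the token back, so all frame
+// writes on the shared framer are serialized.
+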
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close() (err error) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return errors.New("transport: Close() was already called")
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	t.mu.Unlock()
+	close(t.shutdownChan)
+	err = t.conn.Close()
+	// Cancel all active streams.
+	for _, s := range streams {
+		s.cancel()
+	}
+	return
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed
+// any more.
+func (t *http2Server) closeStream(s *Stream) {
+	t.mu.Lock()
+	delete(t.activeStreams, s.id)
+	if t.state == draining && len(t.activeStreams) == 0 {
+		defer t.Close()
+	}
+	t.mu.Unlock()
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+	s.mu.Lock()
+	if q := s.fc.resetPendingData(); q > 0 {
+		if w := t.fc.onRead(q); w > 0 {
+			t.controlBuf.put(&windowUpdate{0, w})
+		}
+	}
+	if s.state == streamDone {
+		s.mu.Unlock()
+		return
+	}
+	s.state = streamDone
+	s.mu.Unlock()
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+	return t.conn.RemoteAddr()
+}
+
+func (t *http2Server) Drain() {
+	t.controlBuf.put(&goAway{})
+}
diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go
new file mode 100644
index 0000000..a3c68d4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http_util.go
@@ -0,0 +1,513 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	// The primary user agent
+	primaryUA = "grpc-go/1.0"
+	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// http://http2.github.io/http2-spec/#SettingValues
+	http2InitHeaderTableSize = 4096
+	// http2IOBufSize specifies the buffer size for sending frames.
+	http2IOBufSize = 32 * 1024
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.FailedPrecondition,
+	}
+	statusCodeConvTab = map[codes.Code]http2.ErrCode{
+		codes.Internal:          http2.ErrCodeInternal,
+		codes.Canceled:          http2.ErrCodeCancel,
+		codes.Unavailable:       http2.ErrCodeRefusedStream,
+		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
+	}
+)
+
+// Records the state during HPACK decoding. Must be reset once the
+// decoding of the entire header block is finished.
+type decodeState struct {
+	err error // first error encountered decoding
+
+	encoding string
+	// statusCode caches the stream status received from the trailer
+	// the server sent. Client side only.
+	statusCode codes.Code
+	statusDesc string
+	// Server side only fields.
+	timeoutSet bool
+	timeout    time.Duration
+	method     string
+	// key-value metadata map from the peer.
+	mdata map[string][]string
+}
+
+// isReservedHeader checks whether hdr belongs to HTTP2 headers
+// reserved by gRPC protocol. Any other headers are classified as the
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
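+// For illustration: isReservedHeader(":path"), isReservedHeader("grpc-timeout")
+// and isReservedHeader("te") all return true, so those fields are never
+// surfaced as user metadata, while a field such as "custom-key" returns false
+// and is forwarded to the application through the metadata package.
+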
+// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
+// that should be propagated into metadata visible to users.
+func isWhitelistedPseudoHeader(hdr string) bool {
+	switch hdr {
+	case ":authority":
+		return true
+	default:
+		return false
+	}
+}
+
+func (d *decodeState) setErr(err error) {
+	if d.err == nil {
+		d.err = err
+	}
+}
+
+func validContentType(t string) bool {
+	e := "application/grpc"
+	if !strings.HasPrefix(t, e) {
+		return false
+	}
+	// Support variations on the content-type
+	// (e.g. "application/grpc+blah", "application/grpc;blah").
+	if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
+		return false
+	}
+	return true
+}
+
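+// For illustration: validContentType accepts "application/grpc",
+// "application/grpc+proto" and "application/grpc;charset=utf-8", but rejects
+// "application/json" and "application/grpcfoo" (no '+' or ';' after the
+// expected prefix).
+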
+func (d *decodeState) processHeaderField(f hpack.HeaderField) {
+	switch f.Name {
+	case "content-type":
+		if !validContentType(f.Value) {
+			d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
+			return
+		}
+	case "grpc-encoding":
+		d.encoding = f.Value
+	case "grpc-status":
+		code, err := strconv.Atoi(f.Value)
+		if err != nil {
+			d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
+			return
+		}
+		d.statusCode = codes.Code(code)
+	case "grpc-message":
+		d.statusDesc = decodeGrpcMessage(f.Value)
+	case "grpc-timeout":
+		d.timeoutSet = true
+		var err error
+		d.timeout, err = decodeTimeout(f.Value)
+		if err != nil {
+			d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
+			return
+		}
+	case ":path":
+		d.method = f.Value
+	default:
+		if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
+			if f.Name == "user-agent" {
+				i := strings.LastIndex(f.Value, " ")
+				if i == -1 {
+					// There is no application user agent string being set.
+					return
+				}
+				// Extract the application user agent string.
+				f.Value = f.Value[:i]
+			}
+			if d.mdata == nil {
+				d.mdata = make(map[string][]string)
+			}
+			k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
+			if err != nil {
+				grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
+				return
+			}
+			d.mdata[k] = append(d.mdata[k], v)
+		}
+	}
+}
+
+type timeoutUnit uint8
+
+const (
+	hour        timeoutUnit = 'H'
+	minute      timeoutUnit = 'M'
+	second      timeoutUnit = 'S'
+	millisecond timeoutUnit = 'm'
+	microsecond timeoutUnit = 'u'
+	nanosecond  timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+	switch u {
+	case hour:
+		return time.Hour, true
+	case minute:
+		return time.Minute, true
+	case second:
+		return time.Second, true
+	case millisecond:
+		return time.Millisecond, true
+	case microsecond:
+		return time.Microsecond, true
+	case nanosecond:
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but is less likely to overflow.
+func div(d, r time.Duration) int64 {
+	if m := d % r; m > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
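+// For example, div(1500*time.Microsecond, time.Millisecond) returns 2: the
+// 500µs remainder forces the result to round up instead of truncating.
+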
+// TODO(zhaoq): This is simplistic and not bandwidth efficient. Improve it.
+func encodeTimeout(t time.Duration) string {
+	if t <= 0 {
+		return "0n"
+	}
+	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "n"
+	}
+	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "u"
+	}
+	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "m"
+	}
+	if d := div(t, time.Second); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "S"
+	}
+	if d := div(t, time.Minute); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "M"
+	}
+	// Note that maxTimeoutValue * time.Hour > MaxInt64.
+	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
+
+func decodeTimeout(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+	}
+	unit := timeoutUnit(s[size-1])
+	d, ok := timeoutUnitToDuration(unit)
+	if !ok {
+		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
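+// For illustration: encodeTimeout(100*time.Millisecond) yields "100000u",
+// since 100ms expressed in nanoseconds exceeds maxTimeoutValue and the next
+// coarser unit is chosen, while decodeTimeout("100m") returns
+// 100*time.Millisecond.
+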
+const (
+	spaceByte   = ' '
+	tildaByte   = '~'
+	percentByte = '%'
+)
+
+// encodeGrpcMessage is used to encode the status description in the header
+// field "grpc-message".
+// It checks whether each individual byte in msg is an allowable byte, and
+// either percent-encodes it or passes it through.
+// When percent-encoding, the byte is converted into hexadecimal notation
+// with a '%' prepended.
+func encodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if !(c >= spaceByte && c < tildaByte && c != percentByte) {
+			return encodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func encodeGrpcMessageUnchecked(msg string) string {
+	var buf bytes.Buffer
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if c >= spaceByte && c < tildaByte && c != percentByte {
+			buf.WriteByte(c)
+		} else {
+			buf.WriteString(fmt.Sprintf("%%%02X", c))
+		}
+	}
+	return buf.String()
+}
+
+// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
+func decodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		if msg[i] == percentByte && i+2 < lenMsg {
+			return decodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func decodeGrpcMessageUnchecked(msg string) string {
+	var buf bytes.Buffer
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if c == percentByte && i+2 < lenMsg {
+			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
+			if err != nil {
+				buf.WriteByte(c)
+			} else {
+				buf.WriteByte(byte(parsed))
+				i += 2
+			}
+		} else {
+			buf.WriteByte(c)
+		}
+	}
+	return buf.String()
+}
+
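+// For illustration: encodeGrpcMessage("100% done") returns "100%25 done"
+// because '%' must be escaped, and decodeGrpcMessage("100%25 done") restores
+// the original string; bytes in the range [' ', '~') other than '%' pass
+// through both functions unchanged.
+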
+type framer struct {
+	numWriters int32
+	reader     io.Reader
+	writer     *bufio.Writer
+	fr         *http2.Framer
+}
+
+func newFramer(conn net.Conn) *framer {
+	f := &framer{
+		reader: bufio.NewReaderSize(conn, http2IOBufSize),
+		writer: bufio.NewWriterSize(conn, http2IOBufSize),
+	}
+	f.fr = http2.NewFramer(f.writer, f.reader)
+	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
+	return f
+}
+
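+// The framer wraps the connection in reader and writer buffers of
+// http2IOBufSize bytes, and setting ReadMetaHeaders makes readFrame return
+// already-decoded *http2.MetaHeadersFrame values for header blocks, which is
+// what operateHeaders consumes on the server side.
+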
+func (f *framer) adjustNumWriters(i int32) int32 {
+	return atomic.AddInt32(&f.numWriters, i)
+}
+
+// The following writeXXX functions can only be called when the caller is
+// unblocked from the writableChan channel (i.e., owns the privilege to write).
+
+func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+	if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
+	if err := f.fr.WriteData(streamID, endStream, data); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
+	if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
+	if err := f.fr.WriteHeaders(p); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
+	if err := f.fr.WritePing(ack, data); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
+	if err := f.fr.WritePriority(streamID, p); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
+	if err := f.fr.WritePushPromise(p); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
+	if err := f.fr.WriteRSTStream(streamID, code); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
+	if err := f.fr.WriteSettings(settings...); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeSettingsAck(forceFlush bool) error {
+	if err := f.fr.WriteSettingsAck(); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
+	if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
+		return err
+	}
+	if forceFlush {
+		return f.writer.Flush()
+	}
+	return nil
+}
+
+func (f *framer) flushWrite() error {
+	return f.writer.Flush()
+}
+
+func (f *framer) readFrame() (http2.Frame, error) {
+	return f.fr.ReadFrame()
+}
+
+func (f *framer) errorDetail() error {
+	return f.fr.ErrorDetail()
+}
diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/transport/pre_go16.go
new file mode 100644
index 0000000..33d91c1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/pre_go16.go
@@ -0,0 +1,51 @@
+// +build !go1.6
+
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+	"net"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	var dialer net.Dialer
+	if deadline, ok := ctx.Deadline(); ok {
+		dialer.Timeout = deadline.Sub(time.Now())
+	}
+	return dialer.Dial(network, address)
+}
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
new file mode 100644
index 0000000..dd4c12d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -0,0 +1,571 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package transport defines and implements a message-oriented communication
+channel to complete various transactions (e.g., an RPC).
+*/
+package transport
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+)
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+	data []byte
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+func (*recvMsg) item() {}
+
+// All items in and out of a recvBuffer should be of the same type.
+type item interface {
+	item()
+}
+
+// recvBuffer is an unbounded channel of item.
+type recvBuffer struct {
+	c       chan item
+	mu      sync.Mutex
+	backlog []item
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan item, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r item) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *recvBuffer) get() <-chan item {
+	return b.c
+}
+
+// recvBufferReader implements io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	ctx    context.Context
+	goAway chan struct{}
+	recv   *recvBuffer
+	last   *bytes.Reader // Stores the remaining data in the previous calls.
+	err    error
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	defer func() { r.err = err }()
+	if r.last != nil && r.last.Len() > 0 {
+		// Read remaining data left in last call.
+		return r.last.Read(p)
+	}
+	select {
+	case <-r.ctx.Done():
+		return 0, ContextErr(r.ctx.Err())
+	case <-r.goAway:
+		return 0, ErrStreamDrain
+	case i := <-r.recv.get():
+		r.recv.load()
+		m := i.(*recvMsg)
+		if m.err != nil {
+			return 0, m.err
+		}
+		r.last = bytes.NewReader(m.data)
+		return r.last.Read(p)
+	}
+}
+
+type streamState uint8
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone             // EndStream sent
+	streamReadDone              // EndStream received
+	streamDone                  // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id uint32
+	// nil for client side Stream.
+	st ServerTransport
+	// ctx is the associated context of the stream.
+	ctx context.Context
+	// cancel is always nil for client side Stream.
+	cancel context.CancelFunc
+	// done is closed when the final status arrives.
+	done chan struct{}
+	// goAway is closed when the server sent a GoAway signal before this stream was initiated.
+	goAway chan struct{}
+	// method records the associated RPC method of the stream.
+	method       string
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	dec          io.Reader
+	fc           *inFlow
+	recvQuota    uint32
+	// The accumulated inbound quota pending for window update.
+	updateQuota uint32
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+
+	sendQuotaPool *quotaPool
+	// Close headerChan to indicate the end of reception of header metadata.
+	headerChan chan struct{}
+	// header caches the received header metadata.
+	header metadata.MD
+	// The key-value map of trailer metadata.
+	trailer metadata.MD
+
+	mu sync.RWMutex // guard the following
+	// headerOk becomes true when the first header is about to be sent.
+	headerOk bool
+	state    streamState
+	// true iff headerChan is closed. Used to avoid closing headerChan
+	// multiple times.
+	headerDone bool
+	// the status received from the server.
+	statusCode codes.Code
+	statusDesc string
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// GoAway returns a channel which is closed when the server sent a GoAway signal
+// before this stream was initiated.
+func (s *Stream) GoAway() <-chan struct{} {
+	return s.goAway
+}
+
+// Header acquires the key-value pairs of header metadata once it
+// is available. It blocks until i) the metadata is ready or ii) there is no
+// header metadata or iii) the stream is cancelled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+	select {
+	case <-s.ctx.Done():
+		return nil, ContextErr(s.ctx.Err())
+	case <-s.goAway:
+		return nil, ErrStreamDrain
+	case <-s.headerChan:
+		return s.header.Copy(), nil
+	}
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+func (s *Stream) Trailer() metadata.MD {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.trailer.Copy()
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+	return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// TraceContext recreates the context of s with a trace.Trace.
+func (s *Stream) TraceContext(tr trace.Trace) {
+	s.ctx = trace.NewContext(s.ctx, tr)
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// StatusCode returns statusCode received from the server.
+func (s *Stream) StatusCode() codes.Code {
+	return s.statusCode
+}
+
+// StatusDesc returns statusDesc received from the server.
+func (s *Stream) StatusDesc() string {
+	return s.statusDesc
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.trailer = metadata.Join(s.trailer, md)
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(&m)
+}
+
+// Read reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	n, err = s.dec.Read(p)
+	if err != nil {
+		return
+	}
+	s.windowHandler(n)
+	return
+}
+
+// The key to save transport.Stream in the context.
+type streamKey struct{}
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+	return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+	s, ok = ctx.Value(streamKey{}).(*Stream)
+	return
+}
+
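+// A minimal usage sketch for server-side code built on this package (the
+// metadata values here are only illustrative):
+//
+//	if s, ok := StreamFromContext(ctx); ok {
+//		s.SetTrailer(metadata.Pairs("request-id", "42"))
+//	}
+//
+// This works because operateHeaders stores the stream in the context via
+// newContextWithStream before invoking the stream handler.
+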
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	unreachable
+	closing
+	draining
+)
+
+// NewServerTransport creates a ServerTransport with conn or non-nil error
+// if it fails.
+func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
+	return newHTTP2Server(conn, maxStreams, authInfo)
+}
+
+// ConnectOptions covers all relevant options for dialing a server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to set up a client connection.
+	TransportCredentials credentials.TransportCredentials
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) {
+	return newHTTP2Client(ctx, target, opts)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this write is the last piece for
+	// this stream.
+	Last bool
+
+	// Delay is a hint to the transport implementation for whether
+	// the data could be buffered for a batching write. The
+	// Transport implementation may ignore the hint.
+	Delay bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer's host.
+	Host string
+
+	// Method specifies the operation to perform.
+	Method string
+
+	// RecvCompress specifies the compression algorithm applied on
+	// inbound messages.
+	RecvCompress string
+
+	// SendCompress specifies the compression algorithm applied on
+	// outbound message.
+	SendCompress string
+
+	// Flush indicates whether a new stream command should be sent
+	// to the peer without waiting for the first data. This is
+	// only a hint. The transport may modify the flush decision
+	// for performance purposes.
+	Flush bool
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close() error
+
+	// GracefulClose starts to tear down the transport. It stops accepting
+	// new RPCs and waits for the completion of the pending RPCs.
+	GracefulClose() error
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream))
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, data []byte, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client.
+	// WriteStatus is the final call made on a stream and always
+	// occurs.
+	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close() error
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// Drain notifies the client this ServerTransport stops accepting new RPCs.
+	Drain()
+}
+
+// streamErrorf creates a StreamError with the specified error code and description.
+func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+	return StreamError{
+		Code: c,
+		Desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// ErrStreamDrain indicates that the stream is rejected by the server because
+	// the server stops accepting new RPCs.
+	ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
+)
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+	Code codes.Code
+	Desc string
+}
+
+func (e StreamError) Error() string {
+	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded:
+		return streamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled:
+		return streamErrorf(codes.Canceled, "%v", err)
+	}
+	panic(fmt.Sprintf("Unexpected error from context package: %v", err))
+}
+
+// wait blocks until it can receive from ctx.Done, closing, or proceed.
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
+// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
+// it returns the StreamError for ctx.Err.
+// If it receives from goAway, it returns 0, ErrStreamDrain.
+// If it receives from closing, it returns 0, ErrConnClosing.
+// If it receives from proceed, it returns the received integer, nil.
+func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ContextErr(ctx.Err())
+	case <-done:
+		// User cancellation has precedence.
+		select {
+		case <-ctx.Done():
+			return 0, ContextErr(ctx.Err())
+		default:
+		}
+		return 0, io.EOF
+	case <-goAway:
+		return 0, ErrStreamDrain
+	case <-closing:
+		return 0, ErrConnClosing
+	case i := <-proceed:
+		return i, nil
+	}
+}