vendor: delete

The vendor directory in this repository is quite old and got out of sync
with the module versions selected via the go.mod file. The versions selected
in go.mod are good for use, so this CL resolves the conflict by removing
the vendor directory.

Fixes golang/go#37738.

Change-Id: Icf8b022db4c3d03d5300c796e86906683f9525e4
Reviewed-on: https://go-review.googlesource.com/c/perf/+/222540
Run-TryBot: Dmitri Shuralyov <dmitshur@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Dmitri Shuralyov <dmitshur@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
diff --git a/README.md b/README.md
index 4820db2..f354c3a 100644
--- a/README.md
+++ b/README.md
@@ -32,8 +32,8 @@
 
 ## Download/Install
 
-The easiest way to install is to run `go get -u golang.org/x/perf/cmd/...`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/perf`.
+The easiest way to install is to run `go get golang.org/x/perf/cmd/...`.
+You can also manually git clone the repository and run `go install ./cmd/...`.
 
 ## Report Issues / Send Patches
 
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
deleted file mode 100644
index a4c5efd..0000000
--- a/vendor/cloud.google.com/go/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright 2014 Google Inc.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
deleted file mode 100644
index f9d2bef..0000000
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metadata provides access to Google Compute Engine (GCE)
-// metadata and API service accounts.
-//
-// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
-package metadata // import "cloud.google.com/go/compute/metadata"
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"
-
-	"cloud.google.com/go/internal"
-)
-
-const (
-	// metadataIP is the documented metadata server IP address.
-	metadataIP = "169.254.169.254"
-
-	// metadataHostEnv is the environment variable specifying the
-	// GCE metadata hostname.  If empty, the default value of
-	// metadataIP ("169.254.169.254") is used instead.
-	// This is variable name is not defined by any spec, as far as
-	// I know; it was made up for the Go package.
-	metadataHostEnv = "GCE_METADATA_HOST"
-)
-
-type cachedValue struct {
-	k    string
-	trim bool
-	mu   sync.Mutex
-	v    string
-}
-
-var (
-	projID  = &cachedValue{k: "project/project-id", trim: true}
-	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
-	instID  = &cachedValue{k: "instance/id", trim: true}
-)
-
-var (
-	metaClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-				ResponseHeaderTimeout: 2 * time.Second,
-			},
-		},
-	}
-	subscribeClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-			},
-		},
-	}
-)
-
-// NotDefinedError is returned when requested metadata is not defined.
-//
-// The underlying string is the suffix after "/computeMetadata/v1/".
-//
-// This error is not returned if the value is defined to be the empty
-// string.
-type NotDefinedError string
-
-func (suffix NotDefinedError) Error() string {
-	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
-}
-
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func Get(suffix string) (string, error) {
-	val, _, err := getETag(metaClient, suffix)
-	return val, err
-}
-
-// getETag returns a value from the metadata service as well as the associated
-// ETag using the provided client. This func is otherwise equivalent to Get.
-func getETag(client *http.Client, suffix string) (value, etag string, err error) {
-	// Using a fixed IP makes it very difficult to spoof the metadata service in
-	// a container, which is an important use-case for local testing of cloud
-	// deployments. To enable spoofing of the metadata service, the environment
-	// variable GCE_METADATA_HOST is first inspected to decide where metadata
-	// requests shall go.
-	host := os.Getenv(metadataHostEnv)
-	if host == "" {
-		// Using 169.254.169.254 instead of "metadata" here because Go
-		// binaries built with the "netgo" tag and without cgo won't
-		// know the search suffix for "metadata" is
-		// ".google.internal", and this IP address is documented as
-		// being stable anyway.
-		host = metadataIP
-	}
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", url, nil)
-	req.Header.Set("Metadata-Flavor", "Google")
-	res, err := client.Do(req)
-	if err != nil {
-		return "", "", err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == http.StatusNotFound {
-		return "", "", NotDefinedError(suffix)
-	}
-	if res.StatusCode != 200 {
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
-	}
-	all, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return "", "", err
-	}
-	return string(all), res.Header.Get("Etag"), nil
-}
-
-func getTrimmed(suffix string) (s string, err error) {
-	s, err = Get(suffix)
-	s = strings.TrimSpace(s)
-	return
-}
-
-func (c *cachedValue) get() (v string, err error) {
-	defer c.mu.Unlock()
-	c.mu.Lock()
-	if c.v != "" {
-		return c.v, nil
-	}
-	if c.trim {
-		v, err = getTrimmed(c.k)
-	} else {
-		v, err = Get(c.k)
-	}
-	if err == nil {
-		c.v = v
-	}
-	return
-}
-
-var (
-	onGCEOnce sync.Once
-	onGCE     bool
-)
-
-// OnGCE reports whether this process is running on Google Compute Engine.
-func OnGCE() bool {
-	onGCEOnce.Do(initOnGCE)
-	return onGCE
-}
-
-func initOnGCE() {
-	onGCE = testOnGCE()
-}
-
-func testOnGCE() bool {
-	// The user explicitly said they're on GCE, so trust them.
-	if os.Getenv(metadataHostEnv) != "" {
-		return true
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	resc := make(chan bool, 2)
-
-	// Try two strategies in parallel.
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
-	go func() {
-		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
-		if err != nil {
-			resc <- false
-			return
-		}
-		defer res.Body.Close()
-		resc <- res.Header.Get("Metadata-Flavor") == "Google"
-	}()
-
-	go func() {
-		addrs, err := net.LookupHost("metadata.google.internal")
-		if err != nil || len(addrs) == 0 {
-			resc <- false
-			return
-		}
-		resc <- strsContains(addrs, metadataIP)
-	}()
-
-	tryHarder := systemInfoSuggestsGCE()
-	if tryHarder {
-		res := <-resc
-		if res {
-			// The first strategy succeeded, so let's use it.
-			return true
-		}
-		// Wait for either the DNS or metadata server probe to
-		// contradict the other one and say we are running on
-		// GCE. Give it a lot of time to do so, since the system
-		// info already suggests we're running on a GCE BIOS.
-		timer := time.NewTimer(5 * time.Second)
-		defer timer.Stop()
-		select {
-		case res = <-resc:
-			return res
-		case <-timer.C:
-			// Too slow. Who knows what this system is.
-			return false
-		}
-	}
-
-	// There's no hint from the system info that we're running on
-	// GCE, so use the first probe's result as truth, whether it's
-	// true or false. The goal here is to optimize for speed for
-	// users who are NOT running on GCE. We can't assume that
-	// either a DNS lookup or an HTTP request to a blackholed IP
-	// address is fast. Worst case this should return when the
-	// metaClient's Transport.ResponseHeaderTimeout or
-	// Transport.Dial.Timeout fires (in two seconds).
-	return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
-	if runtime.GOOS != "linux" {
-		// We don't have any non-Linux clues available, at least yet.
-		return false
-	}
-	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
-	name := strings.TrimSpace(string(slurp))
-	return name == "Google" || name == "Google Compute Engine"
-}
-
-// Subscribe subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-// The suffix may contain query parameters.
-//
-// Subscribe calls fn with the latest metadata value indicated by the provided
-// suffix. If the metadata value is deleted, fn is called with the empty string
-// and ok false. Subscribe blocks until fn returns a non-nil error or the value
-// is deleted. Subscribe returns the error value returned from the last call to
-// fn, which may be nil when ok == false.
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
-	const failedSubscribeSleep = time.Second * 5
-
-	// First check to see if the metadata value exists at all.
-	val, lastETag, err := getETag(subscribeClient, suffix)
-	if err != nil {
-		return err
-	}
-
-	if err := fn(val, true); err != nil {
-		return err
-	}
-
-	ok := true
-	if strings.ContainsRune(suffix, '?') {
-		suffix += "&wait_for_change=true&last_etag="
-	} else {
-		suffix += "?wait_for_change=true&last_etag="
-	}
-	for {
-		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
-		if err != nil {
-			if _, deleted := err.(NotDefinedError); !deleted {
-				time.Sleep(failedSubscribeSleep)
-				continue // Retry on other errors.
-			}
-			ok = false
-		}
-		lastETag = etag
-
-		if err := fn(val, ok); err != nil || !ok {
-			return err
-		}
-	}
-}
-
-// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return projID.get() }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return projNum.get() }
-
-// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) {
-	return getTrimmed("instance/network-interfaces/0/ip")
-}
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) {
-	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
-}
-
-// Hostname returns the instance's hostname. This will be of the form
-// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) {
-	return getTrimmed("instance/hostname")
-}
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) {
-	var s []string
-	j, err := Get("instance/tags")
-	if err != nil {
-		return nil, err
-	}
-	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
-		return nil, err
-	}
-	return s, nil
-}
-
-// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) {
-	return instID.get()
-}
-
-// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) {
-	host, err := Hostname()
-	if err != nil {
-		return "", err
-	}
-	return strings.Split(host, ".")[0], nil
-}
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) {
-	zone, err := getTrimmed("instance/zone")
-	// zone is of the form "projects/<projNum>/zones/<zoneName>".
-	if err != nil {
-		return "", err
-	}
-	return zone[strings.LastIndex(zone, "/")+1:], nil
-}
-
-// InstanceAttributes returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
-
-// ProjectAttributes returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM.  The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
-
-func lines(suffix string) ([]string, error) {
-	j, err := Get(suffix)
-	if err != nil {
-		return nil, err
-	}
-	s := strings.Split(strings.TrimSpace(j), "\n")
-	for i := range s {
-		s[i] = strings.TrimSpace(s[i])
-	}
-	return s, nil
-}
-
-// InstanceAttributeValue returns the value of the provided VM
-// instance attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// InstanceAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func InstanceAttributeValue(attr string) (string, error) {
-	return Get("instance/attributes/" + attr)
-}
-
-// ProjectAttributeValue returns the value of the provided
-// project attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// ProjectAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func ProjectAttributeValue(attr string) (string, error) {
-	return Get("project/attributes/" + attr)
-}
-
-// Scopes returns the service account scopes for the given account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func Scopes(serviceAccount string) ([]string, error) {
-	if serviceAccount == "" {
-		serviceAccount = "default"
-	}
-	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
-}
-
-func strsContains(ss []string, s string) bool {
-	for _, v := range ss {
-		if v == s {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go
deleted file mode 100644
index 8e0c8f8..0000000
--- a/vendor/cloud.google.com/go/internal/cloud.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides support for the cloud packages.
-//
-// Users should not import this package directly.
-package internal
-
-import (
-	"fmt"
-	"net/http"
-)
-
-const userAgent = "gcloud-golang/0.1"
-
-// Transport is an http.RoundTripper that appends Google Cloud client's
-// user-agent to the original request's user-agent header.
-type Transport struct {
-	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
-	// Do User-Agent some other way.
-
-	// Base is the actual http.RoundTripper
-	// requests will use. It must not be nil.
-	Base http.RoundTripper
-}
-
-// RoundTrip appends a user-agent to the existing user-agent
-// header and delegates the request to the base http.RoundTripper.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	req = cloneRequest(req)
-	ua := req.Header.Get("User-Agent")
-	if ua == "" {
-		ua = userAgent
-	} else {
-		ua = fmt.Sprintf("%s %s", ua, userAgent)
-	}
-	req.Header.Set("User-Agent", ua)
-	return t.Base.RoundTrip(req)
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
-	// shallow copy of the struct
-	r2 := new(http.Request)
-	*r2 = *r
-	// deep copy of the Header
-	r2.Header = make(http.Header)
-	for k, s := range r.Header {
-		r2.Header[k] = s
-	}
-	return r2
-}
diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go
deleted file mode 100644
index f9102f3..0000000
--- a/vendor/cloud.google.com/go/internal/optional/optional.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package optional provides versions of primitive types that can
-// be nil. These are useful in methods that update some of an API object's
-// fields.
-package optional
-
-import (
-	"fmt"
-	"strings"
-)
-
-type (
-	// Bool is either a bool or nil.
-	Bool interface{}
-
-	// String is either a string or nil.
-	String interface{}
-
-	// Int is either an int or nil.
-	Int interface{}
-
-	// Uint is either a uint or nil.
-	Uint interface{}
-
-	// Float64 is either a float64 or nil.
-	Float64 interface{}
-)
-
-// ToBool returns its argument as a bool.
-// It panics if its argument is nil or not a bool.
-func ToBool(v Bool) bool {
-	x, ok := v.(bool)
-	if !ok {
-		doPanic("Bool", v)
-	}
-	return x
-}
-
-// ToString returns its argument as a string.
-// It panics if its argument is nil or not a string.
-func ToString(v String) string {
-	x, ok := v.(string)
-	if !ok {
-		doPanic("String", v)
-	}
-	return x
-}
-
-// ToInt returns its argument as an int.
-// It panics if its argument is nil or not an int.
-func ToInt(v Int) int {
-	x, ok := v.(int)
-	if !ok {
-		doPanic("Int", v)
-	}
-	return x
-}
-
-// ToUint returns its argument as a uint.
-// It panics if its argument is nil or not a uint.
-func ToUint(v Uint) uint {
-	x, ok := v.(uint)
-	if !ok {
-		doPanic("Uint", v)
-	}
-	return x
-}
-
-// ToFloat64 returns its argument as a float64.
-// It panics if its argument is nil or not a float64.
-func ToFloat64(v Float64) float64 {
-	x, ok := v.(float64)
-	if !ok {
-		doPanic("Float64", v)
-	}
-	return x
-}
-
-func doPanic(capType string, v interface{}) {
-	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
-}
diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go
deleted file mode 100644
index 79995be..0000000
--- a/vendor/cloud.google.com/go/internal/retry.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"fmt"
-	"time"
-
-	gax "github.com/googleapis/gax-go"
-
-	"golang.org/x/net/context"
-)
-
-// Retry calls the supplied function f repeatedly according to the provided
-// backoff parameters. It returns when one of the following occurs:
-// When f's first return value is true, Retry immediately returns with f's second
-// return value.
-// When the provided context is done, Retry returns with ctx.Err().
-func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
-	return retry(ctx, bo, f, gax.Sleep)
-}
-
-func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
-	sleep func(context.Context, time.Duration) error) error {
-	var lastErr error
-	for {
-		stop, err := f()
-		if stop {
-			return err
-		}
-		// Remember the last "real" error from f.
-		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
-			lastErr = err
-		}
-		p := bo.Pause()
-		if cerr := sleep(ctx, p); cerr != nil {
-			if lastErr != nil {
-				return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
-			}
-			return cerr
-		}
-	}
-}
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
deleted file mode 100644
index 714d280..0000000
--- a/vendor/cloud.google.com/go/storage/acl.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"fmt"
-
-	"golang.org/x/net/context"
-	raw "google.golang.org/api/storage/v1"
-)
-
-// ACLRole is the level of access to grant.
-type ACLRole string
-
-const (
-	RoleOwner  ACLRole = "OWNER"
-	RoleReader ACLRole = "READER"
-)
-
-// ACLEntity refers to a user or group.
-// They are sometimes referred to as grantees.
-//
-// It could be in the form of:
-// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
-// "domain-<domain>" and "project-team-<projectId>".
-//
-// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
-type ACLEntity string
-
-const (
-	AllUsers              ACLEntity = "allUsers"
-	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
-)
-
-// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
-type ACLRule struct {
-	Entity ACLEntity
-	Role   ACLRole
-}
-
-// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
-type ACLHandle struct {
-	c         *Client
-	bucket    string
-	object    string
-	isDefault bool
-}
-
-// Delete permanently deletes the ACL entry for the given entity.
-func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
-	if a.object != "" {
-		return a.objectDelete(ctx, entity)
-	}
-	if a.isDefault {
-		return a.bucketDefaultDelete(ctx, entity)
-	}
-	return a.bucketDelete(ctx, entity)
-}
-
-// Set sets the permission level for the given entity.
-func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
-	if a.object != "" {
-		return a.objectSet(ctx, entity, role)
-	}
-	if a.isDefault {
-		return a.bucketDefaultSet(ctx, entity, role)
-	}
-	return a.bucketSet(ctx, entity, role)
-}
-
-// List retrieves ACL entries.
-func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
-	if a.object != "" {
-		return a.objectList(ctx)
-	}
-	if a.isDefault {
-		return a.bucketDefaultList(ctx)
-	}
-	return a.bucketList(ctx)
-}
-
-func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
-	var acls *raw.ObjectAccessControls
-	var err error
-	err = runWithRetry(ctx, func() error {
-		acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
-	}
-	return toACLRules(acls.Items), nil
-}
-
-func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
-	acl := &raw.ObjectAccessControl{
-		Bucket: a.bucket,
-		Entity: string(entity),
-		Role:   string(role),
-	}
-	err := runWithRetry(ctx, func() error {
-		_, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
-	}
-	return nil
-}
-
-func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
-	err := runWithRetry(ctx, func() error {
-		return a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
-	}
-	return nil
-}
-
-func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
-	var acls *raw.BucketAccessControls
-	var err error
-	err = runWithRetry(ctx, func() error {
-		acls, err = a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
-	}
-	r := make([]ACLRule, len(acls.Items))
-	for i, v := range acls.Items {
-		r[i].Entity = ACLEntity(v.Entity)
-		r[i].Role = ACLRole(v.Role)
-	}
-	return r, nil
-}
-
-func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
-	acl := &raw.BucketAccessControl{
-		Bucket: a.bucket,
-		Entity: string(entity),
-		Role:   string(role),
-	}
-	err := runWithRetry(ctx, func() error {
-		_, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
-	}
-	return nil
-}
-
-func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
-	err := runWithRetry(ctx, func() error {
-		return a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
-	}
-	return nil
-}
-
-func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
-	var acls *raw.ObjectAccessControls
-	var err error
-	err = runWithRetry(ctx, func() error {
-		acls, err = a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
-	}
-	return toACLRules(acls.Items), nil
-}
-
-func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
-	acl := &raw.ObjectAccessControl{
-		Bucket: a.bucket,
-		Entity: string(entity),
-		Role:   string(role),
-	}
-	err := runWithRetry(ctx, func() error {
-		_, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
-	}
-	return nil
-}
-
-func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
-	err := runWithRetry(ctx, func() error {
-		return a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
-	})
-	if err != nil {
-		return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
-	}
-	return nil
-}
-
-func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
-	r := make([]ACLRule, 0, len(items))
-	for _, item := range items {
-		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
-	}
-	return r
-}
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
deleted file mode 100644
index f87fe33..0000000
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"net/http"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/api/googleapi"
-	"google.golang.org/api/iterator"
-	raw "google.golang.org/api/storage/v1"
-)
-
-// Create creates the Bucket in the project.
-// If attrs is nil the API defaults will be used.
-func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
-	var bkt *raw.Bucket
-	if attrs != nil {
-		bkt = attrs.toRawBucket()
-	} else {
-		bkt = &raw.Bucket{}
-	}
-	bkt.Name = b.name
-	req := b.c.raw.Buckets.Insert(projectID, bkt)
-	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
-}
-
-// Delete deletes the Bucket.
-func (b *BucketHandle) Delete(ctx context.Context) error {
-	req := b.c.raw.Buckets.Delete(b.name)
-	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
-}
-
-// ACL returns an ACLHandle, which provides access to the bucket's access control list.
-// This controls who can list, create or overwrite the objects in a bucket.
-// This call does not perform any network operations.
-func (b *BucketHandle) ACL() *ACLHandle {
-	return &b.acl
-}
-
-// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
-// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
-// This call does not perform any network operations.
-func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
-	return &b.defaultObjectACL
-}
-
-// Object returns an ObjectHandle, which provides operations on the named object.
-// This call does not perform any network operations.
-//
-// name must consist entirely of valid UTF-8-encoded runes. The full specification
-// for valid object names can be found at:
-//   https://cloud.google.com/storage/docs/bucket-naming
-func (b *BucketHandle) Object(name string) *ObjectHandle {
-	return &ObjectHandle{
-		c:      b.c,
-		bucket: b.name,
-		object: name,
-		acl: ACLHandle{
-			c:      b.c,
-			bucket: b.name,
-			object: name,
-		},
-		gen: -1,
-	}
-}
-
-// Attrs returns the metadata for the bucket.
-func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
-	var resp *raw.Bucket
-	var err error
-	err = runWithRetry(ctx, func() error {
-		resp, err = b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
-		return err
-	})
-	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-		return nil, ErrBucketNotExist
-	}
-	if err != nil {
-		return nil, err
-	}
-	return newBucket(resp), nil
-}
-
-// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
-type BucketAttrs struct {
-	// Name is the name of the bucket.
-	Name string
-
-	// ACL is the list of access control rules on the bucket.
-	ACL []ACLRule
-
-	// DefaultObjectACL is the list of access controls to
-	// apply to new objects when no object ACL is provided.
-	DefaultObjectACL []ACLRule
-
-	// Location is the location of the bucket. It defaults to "US".
-	Location string
-
-	// MetaGeneration is the metadata generation of the bucket.
-	MetaGeneration int64
-
-	// StorageClass is the storage class of the bucket. This defines
-	// how objects in the bucket are stored and determines the SLA
-	// and the cost of storage. Typical values are "MULTI_REGIONAL",
-	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
-	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
-	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
-	// the bucket's location settings.
-	StorageClass string
-
-	// Created is the creation time of the bucket.
-	Created time.Time
-
-	// VersioningEnabled reports whether this bucket has versioning enabled.
-	// This field is read-only.
-	VersioningEnabled bool
-}
-
-func newBucket(b *raw.Bucket) *BucketAttrs {
-	if b == nil {
-		return nil
-	}
-	bucket := &BucketAttrs{
-		Name:              b.Name,
-		Location:          b.Location,
-		MetaGeneration:    b.Metageneration,
-		StorageClass:      b.StorageClass,
-		Created:           convertTime(b.TimeCreated),
-		VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
-	}
-	acl := make([]ACLRule, len(b.Acl))
-	for i, rule := range b.Acl {
-		acl[i] = ACLRule{
-			Entity: ACLEntity(rule.Entity),
-			Role:   ACLRole(rule.Role),
-		}
-	}
-	bucket.ACL = acl
-	objACL := make([]ACLRule, len(b.DefaultObjectAcl))
-	for i, rule := range b.DefaultObjectAcl {
-		objACL[i] = ACLRule{
-			Entity: ACLEntity(rule.Entity),
-			Role:   ACLRole(rule.Role),
-		}
-	}
-	bucket.DefaultObjectACL = objACL
-	return bucket
-}
-
-// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
-func (b *BucketAttrs) toRawBucket() *raw.Bucket {
-	var acl []*raw.BucketAccessControl
-	if len(b.ACL) > 0 {
-		acl = make([]*raw.BucketAccessControl, len(b.ACL))
-		for i, rule := range b.ACL {
-			acl[i] = &raw.BucketAccessControl{
-				Entity: string(rule.Entity),
-				Role:   string(rule.Role),
-			}
-		}
-	}
-	dACL := toRawObjectACL(b.DefaultObjectACL)
-	return &raw.Bucket{
-		Name:             b.Name,
-		DefaultObjectAcl: dACL,
-		Location:         b.Location,
-		StorageClass:     b.StorageClass,
-		Acl:              acl,
-	}
-}
-
-// Objects returns an iterator over the objects in the bucket that match the Query q.
-// If q is nil, no filtering is done.
-func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
-	it := &ObjectIterator{
-		ctx:    ctx,
-		bucket: b,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.items) },
-		func() interface{} { b := it.items; it.items = nil; return b })
-	if q != nil {
-		it.query = *q
-	}
-	return it
-}
-
-// An ObjectIterator is an iterator over ObjectAttrs.
-type ObjectIterator struct {
-	ctx      context.Context
-	bucket   *BucketHandle
-	query    Query
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-	items    []*ObjectAttrs
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-// Next returns the next result. Its second return value is iterator.Done if
-// there are no more results. Once Next returns iterator.Done, all subsequent
-// calls will return iterator.Done.
-//
-// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
-// have a non-empty Prefix field, and a zero value for all other fields. These
-// represent prefixes.
-func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	item := it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
-	req := it.bucket.c.raw.Objects.List(it.bucket.name)
-	req.Projection("full")
-	req.Delimiter(it.query.Delimiter)
-	req.Prefix(it.query.Prefix)
-	req.Versions(it.query.Versions)
-	req.PageToken(pageToken)
-	if pageSize > 0 {
-		req.MaxResults(int64(pageSize))
-	}
-	var resp *raw.Objects
-	var err error
-	err = runWithRetry(it.ctx, func() error {
-		resp, err = req.Context(it.ctx).Do()
-		return err
-	})
-	if err != nil {
-		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-			err = ErrBucketNotExist
-		}
-		return "", err
-	}
-	for _, item := range resp.Items {
-		it.items = append(it.items, newObject(item))
-	}
-	for _, prefix := range resp.Prefixes {
-		it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
-	}
-	return resp.NextPageToken, nil
-}
-
-// TODO(jbd): Add storage.buckets.update.
-
-// Buckets returns an iterator over the buckets in the project. You may
-// optionally set the iterator's Prefix field to restrict the list to buckets
-// whose names begin with the prefix. By default, all buckets in the project
-// are returned.
-func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
-	it := &BucketIterator{
-		ctx:       ctx,
-		client:    c,
-		projectID: projectID,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.buckets) },
-		func() interface{} { b := it.buckets; it.buckets = nil; return b })
-	return it
-}
-
-// A BucketIterator is an iterator over BucketAttrs.
-type BucketIterator struct {
-	// Prefix restricts the iterator to buckets whose names begin with it.
-	Prefix string
-
-	ctx       context.Context
-	client    *Client
-	projectID string
-	buckets   []*BucketAttrs
-	pageInfo  *iterator.PageInfo
-	nextFunc  func() error
-}
-
-// Next returns the next result. Its second return value is iterator.Done if
-// there are no more results. Once Next returns iterator.Done, all subsequent
-// calls will return iterator.Done.
-func (it *BucketIterator) Next() (*BucketAttrs, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	b := it.buckets[0]
-	it.buckets = it.buckets[1:]
-	return b, nil
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
-	req := it.client.raw.Buckets.List(it.projectID)
-	req.Projection("full")
-	req.Prefix(it.Prefix)
-	req.PageToken(pageToken)
-	if pageSize > 0 {
-		req.MaxResults(int64(pageSize))
-	}
-	var resp *raw.Buckets
-	var err error
-	err = runWithRetry(it.ctx, func() error {
-		resp, err = req.Context(it.ctx).Do()
-		return err
-	})
-	if err != nil {
-		return "", err
-	}
-	for _, item := range resp.Items {
-		it.buckets = append(it.buckets, newBucket(item))
-	}
-	return resp.NextPageToken, nil
-}
diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go
deleted file mode 100644
index 1f28455..0000000
--- a/vendor/cloud.google.com/go/storage/copy.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package storage contains a Google Cloud Storage client.
-//
-// This package is experimental and may make backwards-incompatible changes.
-package storage
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-
-	"golang.org/x/net/context"
-	raw "google.golang.org/api/storage/v1"
-)
-
-// CopierFrom creates a Copier that can copy src to dst.
-// You can immediately call Run on the returned Copier, or
-// you can configure it first.
-func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
-	return &Copier{dst: dst, src: src}
-}
-
-// A Copier copies a source object to a destination.
-type Copier struct {
-	// ObjectAttrs are optional attributes to set on the destination object.
-	// Any attributes must be initialized before any calls on the Copier. Nil
-	// or zero-valued attributes are ignored.
-	ObjectAttrs
-
-	// RewriteToken can be set before calling Run to resume a copy
-	// operation. After Run returns a non-nil error, RewriteToken will
-	// have been updated to contain the value needed to resume the copy.
-	RewriteToken string
-
-	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
-	// operation. If ProgressFunc is not nil and CopyFrom requires multiple
-	// calls to the underlying service (see
-	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
-	// ProgressFunc will be invoked after each call with the number of bytes of
-	// content copied so far and the total size in bytes of the source object.
-	//
-	// ProgressFunc is intended to make upload progress available to the
-	// application. For example, the implementation of ProgressFunc may update
-	// a progress bar in the application's UI, or log the result of
-	// float64(copiedBytes)/float64(totalBytes).
-	//
-	// ProgressFunc should return quickly without blocking.
-	ProgressFunc func(copiedBytes, totalBytes uint64)
-
-	dst, src *ObjectHandle
-}
-
-// Run performs the copy.
-func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
-	if err := c.src.validate(); err != nil {
-		return nil, err
-	}
-	if err := c.dst.validate(); err != nil {
-		return nil, err
-	}
-	var rawObject *raw.Object
-	// If any attribute was set, then we make sure the name matches the destination
-	// name, and we check that ContentType is non-empty so we can provide a better
-	// error message than the service.
-	if !reflect.DeepEqual(c.ObjectAttrs, ObjectAttrs{}) {
-		c.ObjectAttrs.Name = c.dst.object
-		if c.ObjectAttrs.ContentType == "" {
-			return nil, errors.New("storage: Copier.ContentType must be non-empty")
-		}
-		rawObject = c.ObjectAttrs.toRawObject(c.dst.bucket)
-	}
-	for {
-		res, err := c.callRewrite(ctx, c.src, rawObject)
-		if err != nil {
-			return nil, err
-		}
-		if c.ProgressFunc != nil {
-			c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize)
-		}
-		if res.Done { // Finished successfully.
-			return newObject(res.Resource), nil
-		}
-	}
-	return nil, nil
-}
-
-func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) {
-	call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj)
-
-	call.Context(ctx).Projection("full")
-	if c.RewriteToken != "" {
-		call.RewriteToken(c.RewriteToken)
-	}
-	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
-		return nil, err
-	}
-	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
-		return nil, err
-	}
-	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
-		return nil, err
-	}
-	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
-		return nil, err
-	}
-	var res *raw.RewriteResponse
-	var err error
-	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
-	if err != nil {
-		return nil, err
-	}
-	c.RewriteToken = res.RewriteToken
-	return res, nil
-}
-
-// ComposerFrom creates a Composer that can compose srcs into dst.
-// You can immediately call Run on the returned Composer, or you can
-// configure it first.
-//
-// The encryption key for the destination object will be used to decrypt all
-// source objects and encrypt the destination object. It is an error
-// to specify an encryption key for any of the source objects.
-func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
-	return &Composer{dst: dst, srcs: srcs}
-}
-
-// A Composer composes source objects into a destination object.
-type Composer struct {
-	// ObjectAttrs are optional attributes to set on the destination object.
-	// Any attributes must be initialized before any calls on the Composer. Nil
-	// or zero-valued attributes are ignored.
-	ObjectAttrs
-
-	dst  *ObjectHandle
-	srcs []*ObjectHandle
-}
-
-// Run performs the compose operation.
-func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
-	if err := c.dst.validate(); err != nil {
-		return nil, err
-	}
-	if len(c.srcs) == 0 {
-		return nil, errors.New("storage: at least one source object must be specified")
-	}
-
-	req := &raw.ComposeRequest{}
-	// Compose requires a non-empty Destination, so we always set it,
-	// even if the caller-provided ObjectAttrs is the zero value.
-	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
-	for _, src := range c.srcs {
-		if err := src.validate(); err != nil {
-			return nil, err
-		}
-		if src.bucket != c.dst.bucket {
-			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
-		}
-		if src.encryptionKey != nil {
-			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
-		}
-		srcObj := &raw.ComposeRequestSourceObjects{
-			Name: src.object,
-		}
-		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
-			return nil, err
-		}
-		req.SourceObjects = append(req.SourceObjects, srcObj)
-	}
-
-	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
-	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
-		return nil, err
-	}
-	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
-		return nil, err
-	}
-	var obj *raw.Object
-	var err error
-	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
-	if err != nil {
-		return nil, err
-	}
-	return newObject(obj), nil
-}
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
deleted file mode 100644
index cf6496b..0000000
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package storage provides an easy way to work with Google Cloud Storage.
-Google Cloud Storage stores data in named objects, which are grouped into buckets.
-
-More information about Google Cloud Storage is available at
-https://cloud.google.com/storage/docs.
-
-All of the methods of this package use exponential backoff to retry calls
-that fail with certain errors, as described in
-https://cloud.google.com/storage/docs/exponential-backoff.
-
-Note: This package is in beta.  Some backwards-incompatible changes may occur.
-
-
-Creating a Client
-
-To start working with this package, create a client:
-
-    ctx := context.Background()
-    client, err := storage.NewClient(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Buckets
-
-A Google Cloud Storage bucket is a collection of objects. To work with a
-bucket, make a bucket handle:
-
-    bkt := client.Bucket(bucketName)
-
-A handle is a reference to a bucket. You can have a handle even if the
-bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
-call Create on the handle:
-
-    if err := bkt.Create(ctx, projectID, nil); err != nil {
-        // TODO: Handle error.
-    }
-
-Note that although buckets are associated with projects, bucket names are
-global across all projects.
-
-Each bucket has associated metadata, represented in this package by
-BucketAttrs. The third argument to BucketHandle.Create allows you to set
-the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
-Attrs:
-
-    attrs, err := bkt.Attrs(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
-        attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
-
-Objects
-
-An object holds arbitrary data as a sequence of bytes, like a file. You
-refer to objects using a handle, just as with buckets. You can use the
-standard Go io.Reader and io.Writer interfaces to read and write
-object data:
-
-    obj := bkt.Object("data")
-    // Write something to obj.
-    // w implements io.Writer.
-    w := obj.NewWriter(ctx)
-    // Write some text to obj. This will overwrite whatever is there.
-    if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
-        // TODO: Handle error.
-    }
-    // Close, just like writing a file.
-    if err := w.Close(); err != nil {
-        // TODO: Handle error.
-    }
-
-    // Read it back.
-    r, err := obj.NewReader(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    defer r.Close()
-    if _, err := io.Copy(os.Stdout, r); err != nil {
-        // TODO: Handle error.
-    }
-    // Prints "This object contains text."
-
-Objects also have attributes, which you can fetch with Attrs:
-
-    objAttrs, err := obj.Attrs(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    fmt.Printf("object %s has size %d and can be read using %s\n",
-        objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
-
-ACLs
-
-Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
-ACLRules, each of which specifies the role of a user, group or project. ACLs
-are suitable for fine-grained control, but you may prefer using IAM to control
-access at the project level (see
-https://cloud.google.com/storage/docs/access-control/iam).
-
-To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
-
-    acls, err := obj.ACL().List(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    for _, rule := range acls {
-        fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
-    }
-
-You can also set and delete ACLs.
-
-Conditions
-
-Every object has a generation and a metageneration. The generation changes
-whenever the content changes, and the metageneration changes whenever the
-metadata changes. Conditions let you check these values before an operation;
-the operation only executes if the conditions match. You can use conditions to
-prevent race conditions in read-modify-write operations.
-
-For example, say you've read an object's metadata into objAttrs. Now
-you want to write to that object, but only if its contents haven't changed
-since you read it. Here is how to express that:
-
-    w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
-    // Proceed with writing as above.
-
-Signed URLs
-
-You can obtain a URL that lets anyone read or write an object for a limited time.
-You don't need to create a client to do this. See the documentation of
-SignedURL for details.
-
-    url, err := storage.SignedURL(bucketName, "shared-object", opts)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    fmt.Println(url)
-*/
-package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
deleted file mode 100644
index e8fc924..0000000
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"cloud.google.com/go/internal"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/googleapi"
-)
-
-// runWithRetry calls the function until it returns nil or a non-retryable error, or
-// the context is done.
-func runWithRetry(ctx context.Context, call func() error) error {
-	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
-		err = call()
-		if err == nil {
-			return true, nil
-		}
-		e, ok := err.(*googleapi.Error)
-		if !ok {
-			return true, err
-		}
-		// Retry on 429 and 5xx, according to
-		// https://cloud.google.com/storage/docs/exponential-backoff.
-		if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
-			return false, nil
-		}
-		return true, err
-	})
-}
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
deleted file mode 100644
index 329a5f3..0000000
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"io"
-)
-
-// Reader reads a Cloud Storage object.
-// It implements io.Reader.
-type Reader struct {
-	body         io.ReadCloser
-	remain, size int64
-	contentType  string
-}
-
-// Close closes the Reader. It must be called when done reading.
-func (r *Reader) Close() error {
-	return r.body.Close()
-}
-
-func (r *Reader) Read(p []byte) (int, error) {
-	n, err := r.body.Read(p)
-	if r.remain != -1 {
-		r.remain -= int64(n)
-	}
-	return n, err
-}
-
-// Size returns the size of the object in bytes.
-// The returned value is always the same and is not affected by
-// calls to Read or Close.
-func (r *Reader) Size() int64 {
-	return r.size
-}
-
-// Remain returns the number of bytes left to read, or -1 if unknown.
-func (r *Reader) Remain() int64 {
-	return r.remain
-}
-
-// ContentType returns the content type of the object.
-func (r *Reader) ContentType() string {
-	return r.contentType
-}
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
deleted file mode 100644
index bafb136..0000000
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ /dev/null
@@ -1,1074 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/pem"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-	"unicode/utf8"
-
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-
-	"cloud.google.com/go/internal/optional"
-	"golang.org/x/net/context"
-	"google.golang.org/api/googleapi"
-	raw "google.golang.org/api/storage/v1"
-)
-
-var (
-	ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
-	ErrObjectNotExist = errors.New("storage: object doesn't exist")
-)
-
-const userAgent = "gcloud-golang-storage/20151204"
-
-const (
-	// ScopeFullControl grants permissions to manage your
-	// data and permissions in Google Cloud Storage.
-	ScopeFullControl = raw.DevstorageFullControlScope
-
-	// ScopeReadOnly grants permissions to
-	// view your data in Google Cloud Storage.
-	ScopeReadOnly = raw.DevstorageReadOnlyScope
-
-	// ScopeReadWrite grants permissions to manage your
-	// data in Google Cloud Storage.
-	ScopeReadWrite = raw.DevstorageReadWriteScope
-)
-
-// Client is a client for interacting with Google Cloud Storage.
-//
-// Clients should be reused instead of created as needed.
-// The methods of Client are safe for concurrent use by multiple goroutines.
-type Client struct {
-	hc  *http.Client
-	raw *raw.Service
-}
-
-// NewClient creates a new Google Cloud Storage client.
-// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	o := []option.ClientOption{
-		option.WithScopes(ScopeFullControl),
-		option.WithUserAgent(userAgent),
-	}
-	opts = append(o, opts...)
-	hc, _, err := transport.NewHTTPClient(ctx, opts...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-	rawService, err := raw.New(hc)
-	if err != nil {
-		return nil, fmt.Errorf("storage client: %v", err)
-	}
-	return &Client{
-		hc:  hc,
-		raw: rawService,
-	}, nil
-}
-
-// Close closes the Client.
-//
-// Close need not be called at program exit.
-func (c *Client) Close() error {
-	c.hc = nil
-	return nil
-}
-
-// BucketHandle provides operations on a Google Cloud Storage bucket.
-// Use Client.Bucket to get a handle.
-type BucketHandle struct {
-	acl              ACLHandle
-	defaultObjectACL ACLHandle
-
-	c    *Client
-	name string
-}
-
-// Bucket returns a BucketHandle, which provides operations on the named bucket.
-// This call does not perform any network operations.
-//
-// The supplied name must contain only lowercase letters, numbers, dashes,
-// underscores, and dots. The full specification for valid bucket names can be
-// found at:
-//   https://cloud.google.com/storage/docs/bucket-naming
-func (c *Client) Bucket(name string) *BucketHandle {
-	return &BucketHandle{
-		c:    c,
-		name: name,
-		acl: ACLHandle{
-			c:      c,
-			bucket: name,
-		},
-		defaultObjectACL: ACLHandle{
-			c:         c,
-			bucket:    name,
-			isDefault: true,
-		},
-	}
-}
-
-// SignedURLOptions allows you to restrict the access to the signed URL.
-type SignedURLOptions struct {
-	// GoogleAccessID represents the authorizer of the signed URL generation.
-	// It is typically the Google service account client email address from
-	// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
-	// Required.
-	GoogleAccessID string
-
-	// PrivateKey is the Google service account private key. It is obtainable
-	// from the Google Developers Console.
-	// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
-	// create a service account client ID or reuse one of your existing service account
-	// credentials. Click on the "Generate new P12 key" to generate and download
-	// a new private key. Once you download the P12 file, use the following command
-	// to convert it into a PEM file.
-	//
-	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
-	//
-	// Provide the contents of the PEM file as a byte slice.
-	// Exactly one of PrivateKey or SignBytes must be non-nil.
-	PrivateKey []byte
-
-	// SignBytes is a function for implementing custom signing.
-	// If your application is running on Google App Engine, you can use appengine's internal signing function:
-	//     ctx := appengine.NewContext(request)
-	//     acc, _ := appengine.ServiceAccount(ctx)
-	//     url, err := SignedURL("bucket", "object", &SignedURLOptions{
-	//     	GoogleAccessID: acc,
-	//     	SignBytes: func(b []byte) ([]byte, error) {
-	//     		_, signedBytes, err := appengine.SignBytes(ctx, b)
-	//     		return signedBytes, err
-	//     	},
-	//     	// etc.
-	//     })
-	//
-	// Exactly one of PrivateKey or SignBytes must be non-nil.
-	SignBytes func([]byte) ([]byte, error)
-
-	// Method is the HTTP method to be used with the signed URL.
-	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
-	// Required.
-	Method string
-
-	// Expires is the expiration time on the signed URL. It must be
-	// a datetime in the future.
-	// Required.
-	Expires time.Time
-
-	// ContentType is the content type header the client must provide
-	// to use the generated signed URL.
-	// Optional.
-	ContentType string
-
-	// Headers is a list of extention headers the client must provide
-	// in order to use the generated signed URL.
-	// Optional.
-	Headers []string
-
-	// MD5 is the base64 encoded MD5 checksum of the file.
-	// If provided, the client should provide the exact value on the request
-	// header in order to use the signed URL.
-	// Optional.
-	MD5 []byte
-}
-
-// SignedURL returns a URL for the specified object. Signed URLs allow
-// the users access to a restricted resource for a limited time without having a
-// Google account or signing in. For more information about the signed
-// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
-func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
-	if opts == nil {
-		return "", errors.New("storage: missing required SignedURLOptions")
-	}
-	if opts.GoogleAccessID == "" {
-		return "", errors.New("storage: missing required GoogleAccessID")
-	}
-	if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
-		return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
-	}
-	if opts.Method == "" {
-		return "", errors.New("storage: missing required method option")
-	}
-	if opts.Expires.IsZero() {
-		return "", errors.New("storage: missing required expires option")
-	}
-
-	signBytes := opts.SignBytes
-	if opts.PrivateKey != nil {
-		key, err := parseKey(opts.PrivateKey)
-		if err != nil {
-			return "", err
-		}
-		signBytes = func(b []byte) ([]byte, error) {
-			sum := sha256.Sum256(b)
-			return rsa.SignPKCS1v15(
-				rand.Reader,
-				key,
-				crypto.SHA256,
-				sum[:],
-			)
-		}
-	} else {
-		signBytes = opts.SignBytes
-	}
-
-	u := &url.URL{
-		Path: fmt.Sprintf("/%s/%s", bucket, name),
-	}
-
-	buf := &bytes.Buffer{}
-	fmt.Fprintf(buf, "%s\n", opts.Method)
-	fmt.Fprintf(buf, "%s\n", opts.MD5)
-	fmt.Fprintf(buf, "%s\n", opts.ContentType)
-	fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
-	fmt.Fprintf(buf, "%s", strings.Join(opts.Headers, "\n"))
-	fmt.Fprintf(buf, "%s", u.String())
-
-	b, err := signBytes(buf.Bytes())
-	if err != nil {
-		return "", err
-	}
-	encoded := base64.StdEncoding.EncodeToString(b)
-	u.Scheme = "https"
-	u.Host = "storage.googleapis.com"
-	q := u.Query()
-	q.Set("GoogleAccessId", opts.GoogleAccessID)
-	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
-	q.Set("Signature", string(encoded))
-	u.RawQuery = q.Encode()
-	return u.String(), nil
-}
-
-// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
-// Use BucketHandle.Object to get a handle.
-type ObjectHandle struct {
-	c             *Client
-	bucket        string
-	object        string
-	acl           ACLHandle
-	gen           int64 // a negative value indicates latest
-	conds         *Conditions
-	encryptionKey []byte // AES-256 key
-}
-
-// ACL provides access to the object's access control list.
-// This controls who can read and write this object.
-// This call does not perform any network operations.
-func (o *ObjectHandle) ACL() *ACLHandle {
-	return &o.acl
-}
-
-// Generation returns a new ObjectHandle that operates on a specific generation
-// of the object.
-// By default, the handle operates on the latest generation. Not
-// all operations work when given a specific generation; check the API
-// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
-func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
-	o2 := *o
-	o2.gen = gen
-	return &o2
-}
-
-// If returns a new ObjectHandle that applies a set of preconditions.
-// Preconditions already set on the ObjectHandle are ignored.
-// Operations on the new handle will only occur if the preconditions are
-// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
-// for more details.
-func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
-	o2 := *o
-	o2.conds = &conds
-	return &o2
-}
-
-// Key returns a new ObjectHandle that uses the supplied encryption
-// key to encrypt and decrypt the object's contents.
-//
-// Encryption key must be a 32-byte AES-256 key.
-// See https://cloud.google.com/storage/docs/encryption for details.
-func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
-	o2 := *o
-	o2.encryptionKey = encryptionKey
-	return &o2
-}
-
-// Attrs returns meta information about the object.
-// ErrObjectNotExist will be returned if the object is not found.
-func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
-	if err := o.validate(); err != nil {
-		return nil, err
-	}
-	call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
-	if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
-		return nil, err
-	}
-	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
-		return nil, err
-	}
-	var obj *raw.Object
-	var err error
-	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
-	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-		return nil, ErrObjectNotExist
-	}
-	if err != nil {
-		return nil, err
-	}
-	return newObject(obj), nil
-}
-
-// Update updates an object with the provided attributes.
-// All zero-value attributes are ignored.
-// ErrObjectNotExist will be returned if the object is not found.
-func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
-	if err := o.validate(); err != nil {
-		return nil, err
-	}
-	var attrs ObjectAttrs
-	// Lists of fields to send, and set to null, in the JSON.
-	var forceSendFields, nullFields []string
-	if uattrs.ContentType != nil {
-		attrs.ContentType = optional.ToString(uattrs.ContentType)
-		forceSendFields = append(forceSendFields, "ContentType")
-	}
-	if uattrs.ContentLanguage != nil {
-		attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
-		// For ContentLanguage It's an error to send the empty string.
-		// Instead we send a null.
-		if attrs.ContentLanguage == "" {
-			nullFields = append(nullFields, "ContentLanguage")
-		} else {
-			forceSendFields = append(forceSendFields, "ContentLanguage")
-		}
-	}
-	if uattrs.ContentEncoding != nil {
-		attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
-		forceSendFields = append(forceSendFields, "ContentType")
-	}
-	if uattrs.ContentDisposition != nil {
-		attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
-		forceSendFields = append(forceSendFields, "ContentDisposition")
-	}
-	if uattrs.CacheControl != nil {
-		attrs.CacheControl = optional.ToString(uattrs.CacheControl)
-		forceSendFields = append(forceSendFields, "CacheControl")
-	}
-	if uattrs.Metadata != nil {
-		attrs.Metadata = uattrs.Metadata
-		if len(attrs.Metadata) == 0 {
-			// Sending the empty map is a no-op. We send null instead.
-			nullFields = append(nullFields, "Metadata")
-		} else {
-			forceSendFields = append(forceSendFields, "Metadata")
-		}
-	}
-	if uattrs.ACL != nil {
-		attrs.ACL = uattrs.ACL
-		// It's an error to attempt to delete the ACL, so
-		// we don't append to nullFields here.
-		forceSendFields = append(forceSendFields, "Acl")
-	}
-	rawObj := attrs.toRawObject(o.bucket)
-	rawObj.ForceSendFields = forceSendFields
-	rawObj.NullFields = nullFields
-	call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
-	if err := applyConds("Update", o.gen, o.conds, call); err != nil {
-		return nil, err
-	}
-	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
-		return nil, err
-	}
-	var obj *raw.Object
-	var err error
-	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
-	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-		return nil, ErrObjectNotExist
-	}
-	if err != nil {
-		return nil, err
-	}
-	return newObject(obj), nil
-}
-
-// ObjectAttrsToUpdate is used to update the attributes of an object.
-// Only fields set to non-nil values will be updated.
-// Set a field to its zero value to delete it.
-//
-// For example, to change ContentType and delete ContentEncoding and
-// Metadata, use
-//    ObjectAttrsToUpdate{
-//        ContentType: "text/html",
-//        ContentEncoding: "",
-//        Metadata: map[string]string{},
-//    }
-type ObjectAttrsToUpdate struct {
-	ContentType        optional.String
-	ContentLanguage    optional.String
-	ContentEncoding    optional.String
-	ContentDisposition optional.String
-	CacheControl       optional.String
-	Metadata           map[string]string // set to map[string]string{} to delete
-	ACL                []ACLRule
-}
-
-// Delete deletes the single specified object.
-func (o *ObjectHandle) Delete(ctx context.Context) error {
-	if err := o.validate(); err != nil {
-		return err
-	}
-	call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
-	if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
-		return err
-	}
-	err := runWithRetry(ctx, func() error { return call.Do() })
-	switch e := err.(type) {
-	case nil:
-		return nil
-	case *googleapi.Error:
-		if e.Code == http.StatusNotFound {
-			return ErrObjectNotExist
-		}
-	}
-	return err
-}
-
-// NewReader creates a new Reader to read the contents of the
-// object.
-// ErrObjectNotExist will be returned if the object is not found.
-//
-// The caller must call Close on the returned Reader when done reading.
-func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
-	return o.NewRangeReader(ctx, 0, -1)
-}
-
-// NewRangeReader reads part of an object, reading at most length bytes
-// starting at the given offset. If length is negative, the object is read
-// until the end.
-func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
-	if err := o.validate(); err != nil {
-		return nil, err
-	}
-	if offset < 0 {
-		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
-	}
-	if o.conds != nil {
-		if err := o.conds.validate("NewRangeReader"); err != nil {
-			return nil, err
-		}
-	}
-	u := &url.URL{
-		Scheme:   "https",
-		Host:     "storage.googleapis.com",
-		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
-		RawQuery: conditionsQuery(o.gen, o.conds),
-	}
-	verb := "GET"
-	if length == 0 {
-		verb = "HEAD"
-	}
-	req, err := http.NewRequest(verb, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	if length < 0 && offset > 0 {
-		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
-	} else if length > 0 {
-		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
-	}
-	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
-		return nil, err
-	}
-	var res *http.Response
-	err = runWithRetry(ctx, func() error { res, err = o.c.hc.Do(req); return err })
-	if err != nil {
-		return nil, err
-	}
-	if res.StatusCode == http.StatusNotFound {
-		res.Body.Close()
-		return nil, ErrObjectNotExist
-	}
-	if res.StatusCode < 200 || res.StatusCode > 299 {
-		body, _ := ioutil.ReadAll(res.Body)
-		res.Body.Close()
-		return nil, &googleapi.Error{
-			Code:   res.StatusCode,
-			Header: res.Header,
-			Body:   string(body),
-		}
-	}
-	if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
-		res.Body.Close()
-		return nil, errors.New("storage: partial request not satisfied")
-	}
-
-	var size int64 // total size of object, even if a range was requested.
-	if res.StatusCode == http.StatusPartialContent {
-		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
-		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
-			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
-		}
-		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
-		if err != nil {
-			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
-		}
-	} else {
-		size = res.ContentLength
-	}
-
-	remain := res.ContentLength
-	body := res.Body
-	if length == 0 {
-		remain = 0
-		body.Close()
-		body = emptyBody
-	}
-
-	return &Reader{
-		body:        body,
-		size:        size,
-		remain:      remain,
-		contentType: res.Header.Get("Content-Type"),
-	}, nil
-}
-
-var emptyBody = ioutil.NopCloser(strings.NewReader(""))
-
-// NewWriter returns a storage Writer that writes to the GCS object
-// associated with this ObjectHandle.
-//
-// A new object will be created unless an object with this name already exists.
-// Otherwise any previous object with the same name will be replaced.
-// The object will not be available (and any previous object will remain)
-// until Close has been called.
-//
-// Attributes can be set on the object by modifying the returned Writer's
-// ObjectAttrs field before the first call to Write. If no ContentType
-// attribute is specified, the content type will be automatically sniffed
-// using net/http.DetectContentType.
-//
-// It is the caller's responsibility to call Close when writing is done.
-func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
-	return &Writer{
-		ctx:         ctx,
-		o:           o,
-		donec:       make(chan struct{}),
-		ObjectAttrs: ObjectAttrs{Name: o.object},
-		ChunkSize:   googleapi.DefaultUploadChunkSize,
-	}
-}
-
-func (o *ObjectHandle) validate() error {
-	if o.bucket == "" {
-		return errors.New("storage: bucket name is empty")
-	}
-	if o.object == "" {
-		return errors.New("storage: object name is empty")
-	}
-	if !utf8.ValidString(o.object) {
-		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
-	}
-	return nil
-}
-
-// parseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func parseKey(key []byte) (*rsa.PrivateKey, error) {
-	if block, _ := pem.Decode(key); block != nil {
-		key = block.Bytes
-	}
-	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
-	if err != nil {
-		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
-		if err != nil {
-			return nil, err
-		}
-	}
-	parsed, ok := parsedKey.(*rsa.PrivateKey)
-	if !ok {
-		return nil, errors.New("oauth2: private key is invalid")
-	}
-	return parsed, nil
-}
-
-func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
-	var acl []*raw.ObjectAccessControl
-	if len(oldACL) > 0 {
-		acl = make([]*raw.ObjectAccessControl, len(oldACL))
-		for i, rule := range oldACL {
-			acl[i] = &raw.ObjectAccessControl{
-				Entity: string(rule.Entity),
-				Role:   string(rule.Role),
-			}
-		}
-	}
-	return acl
-}
-
-// toRawObject copies the editable attributes from o to the raw library's Object type.
-func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
-	acl := toRawObjectACL(o.ACL)
-	return &raw.Object{
-		Bucket:             bucket,
-		Name:               o.Name,
-		ContentType:        o.ContentType,
-		ContentEncoding:    o.ContentEncoding,
-		ContentLanguage:    o.ContentLanguage,
-		CacheControl:       o.CacheControl,
-		ContentDisposition: o.ContentDisposition,
-		Acl:                acl,
-		Metadata:           o.Metadata,
-	}
-}
-
-// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
-type ObjectAttrs struct {
-	// Bucket is the name of the bucket containing this GCS object.
-	// This field is read-only.
-	Bucket string
-
-	// Name is the name of the object within the bucket.
-	// This field is read-only.
-	Name string
-
-	// ContentType is the MIME type of the object's content.
-	ContentType string
-
-	// ContentLanguage is the content language of the object's content.
-	ContentLanguage string
-
-	// CacheControl is the Cache-Control header to be sent in the response
-	// headers when serving the object data.
-	CacheControl string
-
-	// ACL is the list of access control rules for the object.
-	ACL []ACLRule
-
-	// Owner is the owner of the object. This field is read-only.
-	//
-	// If non-zero, it is in the form of "user-<userId>".
-	Owner string
-
-	// Size is the length of the object's content. This field is read-only.
-	Size int64
-
-	// ContentEncoding is the encoding of the object's content.
-	ContentEncoding string
-
-	// ContentDisposition is the optional Content-Disposition header of the object
-	// sent in the response headers.
-	ContentDisposition string
-
-	// MD5 is the MD5 hash of the object's content. This field is read-only.
-	MD5 []byte
-
-	// CRC32C is the CRC32 checksum of the object's content using
-	// the Castagnoli93 polynomial. This field is read-only.
-	CRC32C uint32
-
-	// MediaLink is an URL to the object's content. This field is read-only.
-	MediaLink string
-
-	// Metadata represents user-provided metadata, in key/value pairs.
-	// It can be nil if no metadata is provided.
-	Metadata map[string]string
-
-	// Generation is the generation number of the object's content.
-	// This field is read-only.
-	Generation int64
-
-	// MetaGeneration is the version of the metadata for this
-	// object at this generation. This field is used for preconditions
-	// and for detecting changes in metadata. A metageneration number
-	// is only meaningful in the context of a particular generation
-	// of a particular object. This field is read-only.
-	MetaGeneration int64
-
-	// StorageClass is the storage class of the bucket.
-	// This value defines how objects in the bucket are stored and
-	// determines the SLA and the cost of storage. Typical values are
-	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
-	// and "DURABLE_REDUCED_AVAILABILITY".
-	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
-	// or "REGIONAL" depending on the bucket's location settings. This
-	// field is read-only.
-	StorageClass string
-
-	// Created is the time the object was created. This field is read-only.
-	Created time.Time
-
-	// Deleted is the time the object was deleted.
-	// If not deleted, it is the zero value. This field is read-only.
-	Deleted time.Time
-
-	// Updated is the creation or modification time of the object.
-	// For buckets with versioning enabled, changing an object's
-	// metadata does not change this property. This field is read-only.
-	Updated time.Time
-
-	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
-	// customer-supplied encryption key for the object. It is empty if there is
-	// no customer-supplied encryption key.
-	// See // https://cloud.google.com/storage/docs/encryption for more about
-	// encryption in Google Cloud Storage.
-	CustomerKeySHA256 string
-
-	// Prefix is set only for ObjectAttrs which represent synthetic "directory
-	// entries" when iterating over buckets using Query.Delimiter. See
-	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
-	// populated.
-	Prefix string
-}
-
-// convertTime converts a time in RFC3339 format to time.Time.
-// If any error occurs in parsing, the zero-value time.Time is silently returned.
-func convertTime(t string) time.Time {
-	var r time.Time
-	if t != "" {
-		r, _ = time.Parse(time.RFC3339, t)
-	}
-	return r
-}
-
-func newObject(o *raw.Object) *ObjectAttrs {
-	if o == nil {
-		return nil
-	}
-	acl := make([]ACLRule, len(o.Acl))
-	for i, rule := range o.Acl {
-		acl[i] = ACLRule{
-			Entity: ACLEntity(rule.Entity),
-			Role:   ACLRole(rule.Role),
-		}
-	}
-	owner := ""
-	if o.Owner != nil {
-		owner = o.Owner.Entity
-	}
-	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
-	var crc32c uint32
-	d, err := base64.StdEncoding.DecodeString(o.Crc32c)
-	if err == nil && len(d) == 4 {
-		crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
-	}
-	var sha256 string
-	if o.CustomerEncryption != nil {
-		sha256 = o.CustomerEncryption.KeySha256
-	}
-	return &ObjectAttrs{
-		Bucket:            o.Bucket,
-		Name:              o.Name,
-		ContentType:       o.ContentType,
-		ContentLanguage:   o.ContentLanguage,
-		CacheControl:      o.CacheControl,
-		ACL:               acl,
-		Owner:             owner,
-		ContentEncoding:   o.ContentEncoding,
-		Size:              int64(o.Size),
-		MD5:               md5,
-		CRC32C:            crc32c,
-		MediaLink:         o.MediaLink,
-		Metadata:          o.Metadata,
-		Generation:        o.Generation,
-		MetaGeneration:    o.Metageneration,
-		StorageClass:      o.StorageClass,
-		CustomerKeySHA256: sha256,
-		Created:           convertTime(o.TimeCreated),
-		Deleted:           convertTime(o.TimeDeleted),
-		Updated:           convertTime(o.Updated),
-	}
-}
-
-// Query represents a query to filter objects from a bucket.
-type Query struct {
-	// Delimiter returns results in a directory-like fashion.
-	// Results will contain only objects whose names, aside from the
-	// prefix, do not contain delimiter. Objects whose names,
-	// aside from the prefix, contain delimiter will have their name,
-	// truncated after the delimiter, returned in prefixes.
-	// Duplicate prefixes are omitted.
-	// Optional.
-	Delimiter string
-
-	// Prefix is the prefix filter to query objects
-	// whose names begin with this prefix.
-	// Optional.
-	Prefix string
-
-	// Versions indicates whether multiple versions of the same
-	// object will be included in the results.
-	Versions bool
-}
-
-// contentTyper implements ContentTyper to enable an
-// io.ReadCloser to specify its MIME type.
-type contentTyper struct {
-	io.Reader
-	t string
-}
-
-func (c *contentTyper) ContentType() string {
-	return c.t
-}
-
-// Conditions constrain methods to act on specific generations of
-// resources.
-//
-// The zero value is an empty set of constraints. Not all conditions or
-// combinations of conditions are applicable to all methods.
-// See https://cloud.google.com/storage/docs/generations-preconditions
-// for details on how these operate.
-type Conditions struct {
-	// Generation constraints.
-	// At most one of the following can be set to a non-zero value.
-
-	// GenerationMatch specifies that the object must have the given generation
-	// for the operation to occur.
-	// If GenerationMatch is zero, it has no effect.
-	// Use DoesNotExist to specify that the object does not exist in the bucket.
-	GenerationMatch int64
-
-	// GenerationNotMatch specifies that the object must not have the given
-	// generation for the operation to occur.
-	// If GenerationNotMatch is zero, it has no effect.
-	GenerationNotMatch int64
-
-	// DoesNotExist specifies that the object must not exist in the bucket for
-	// the operation to occur.
-	// If DoesNotExist is false, it has no effect.
-	DoesNotExist bool
-
-	// Metadata generation constraints.
-	// At most one of the following can be set to a non-zero value.
-
-	// MetagenerationMatch specifies that the object must have the given
-	// metageneration for the operation to occur.
-	// If MetagenerationMatch is zero, it has no effect.
-	MetagenerationMatch int64
-
-	// MetagenerationNotMatch specifies that the object must not have the given
-	// metageneration for the operation to occur.
-	// If MetagenerationNotMatch is zero, it has no effect.
-	MetagenerationNotMatch int64
-}
-
-func (c *Conditions) validate(method string) error {
-	if *c == (Conditions{}) {
-		return fmt.Errorf("storage: %s: empty conditions", method)
-	}
-	if !c.isGenerationValid() {
-		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
-	}
-	if !c.isMetagenerationValid() {
-		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
-	}
-	return nil
-}
-
-func (c *Conditions) isGenerationValid() bool {
-	n := 0
-	if c.GenerationMatch != 0 {
-		n++
-	}
-	if c.GenerationNotMatch != 0 {
-		n++
-	}
-	if c.DoesNotExist {
-		n++
-	}
-	return n <= 1
-}
-
-func (c *Conditions) isMetagenerationValid() bool {
-	return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
-}
-
-// applyConds modifies the provided call using the conditions in conds.
-// call is something that quacks like a *raw.WhateverCall.
-func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
-	cval := reflect.ValueOf(call)
-	if gen >= 0 {
-		if !setConditionField(cval, "Generation", gen) {
-			return fmt.Errorf("storage: %s: generation not supported", method)
-		}
-	}
-	if conds == nil {
-		return nil
-	}
-	if err := conds.validate(method); err != nil {
-		return err
-	}
-	switch {
-	case conds.GenerationMatch != 0:
-		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
-			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
-		}
-	case conds.GenerationNotMatch != 0:
-		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
-			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
-		}
-	case conds.DoesNotExist:
-		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
-			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
-		}
-	}
-	switch {
-	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
-			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
-		}
-	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
-			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
-		}
-	}
-	return nil
-}
-
-func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
-	if gen >= 0 {
-		call.SourceGeneration(gen)
-	}
-	if conds == nil {
-		return nil
-	}
-	if err := conds.validate("CopyTo source"); err != nil {
-		return err
-	}
-	switch {
-	case conds.GenerationMatch != 0:
-		call.IfSourceGenerationMatch(conds.GenerationMatch)
-	case conds.GenerationNotMatch != 0:
-		call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
-	case conds.DoesNotExist:
-		call.IfSourceGenerationMatch(0)
-	}
-	switch {
-	case conds.MetagenerationMatch != 0:
-		call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
-	case conds.MetagenerationNotMatch != 0:
-		call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
-	}
-	return nil
-}
-
-// setConditionField sets a field on a *raw.WhateverCall.
-// We can't use anonymous interfaces because the return type is
-// different, since the field setters are builders.
-func setConditionField(call reflect.Value, name string, value interface{}) bool {
-	m := call.MethodByName(name)
-	if !m.IsValid() {
-		return false
-	}
-	m.Call([]reflect.Value{reflect.ValueOf(value)})
-	return true
-}
-
-// conditionsQuery returns the generation and conditions as a URL query
-// string suitable for URL.RawQuery.  It assumes that the conditions
-// have been validated.
-func conditionsQuery(gen int64, conds *Conditions) string {
-	// URL escapes are elided because integer strings are URL-safe.
-	var buf []byte
-
-	appendParam := func(s string, n int64) {
-		if len(buf) > 0 {
-			buf = append(buf, '&')
-		}
-		buf = append(buf, s...)
-		buf = strconv.AppendInt(buf, n, 10)
-	}
-
-	if gen >= 0 {
-		appendParam("generation=", gen)
-	}
-	if conds == nil {
-		return string(buf)
-	}
-	switch {
-	case conds.GenerationMatch != 0:
-		appendParam("ifGenerationMatch=", conds.GenerationMatch)
-	case conds.GenerationNotMatch != 0:
-		appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
-	case conds.DoesNotExist:
-		appendParam("ifGenerationMatch=", 0)
-	}
-	switch {
-	case conds.MetagenerationMatch != 0:
-		appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
-	case conds.MetagenerationNotMatch != 0:
-		appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
-	}
-	return string(buf)
-}
-
-// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
-// that modifyCall searches for by name.
-type composeSourceObj struct {
-	src *raw.ComposeRequestSourceObjects
-}
-
-func (c composeSourceObj) Generation(gen int64) {
-	c.src.Generation = gen
-}
-
-func (c composeSourceObj) IfGenerationMatch(gen int64) {
-	// It's safe to overwrite ObjectPreconditions, since its only field is
-	// IfGenerationMatch.
-	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
-		IfGenerationMatch: gen,
-	}
-}
-
-func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
-	if key == nil {
-		return nil
-	}
-	// TODO(jbd): Ask the API team to return a more user-friendly error
-	// and avoid doing this check at the client level.
-	if len(key) != 32 {
-		return errors.New("storage: not a 32-byte AES-256 key")
-	}
-	var cs string
-	if copySource {
-		cs = "copy-source-"
-	}
-	headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
-	headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
-	keyHash := sha256.Sum256(key)
-	headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
-	return nil
-}
-
-// TODO(jbd): Add storage.objects.watch.
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
deleted file mode 100644
index a7a9329..0000000
--- a/vendor/cloud.google.com/go/storage/writer.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"unicode/utf8"
-
-	"golang.org/x/net/context"
-	"google.golang.org/api/googleapi"
-	raw "google.golang.org/api/storage/v1"
-)
-
-// A Writer writes a Cloud Storage object.
-type Writer struct {
-	// ObjectAttrs are optional attributes to set on the object. Any attributes
-	// must be initialized before the first Write call. Nil or zero-valued
-	// attributes are ignored.
-	ObjectAttrs
-
-	// ChunkSize controls the maximum number of bytes of the object that the
-	// Writer will attempt to send to the server in a single request. Objects
-	// smaller than the size will be sent in a single request, while larger
-	// objects will be split over multiple requests. The size will be rounded up
-	// to the nearest multiple of 256K. If zero, chunking will be disabled and
-	// the object will be uploaded in a single request.
-	//
-	// ChunkSize will default to a reasonable value. Any custom configuration
-	// must be done before the first Write call.
-	ChunkSize int
-
-	ctx context.Context
-	o   *ObjectHandle
-
-	opened bool
-	pw     *io.PipeWriter
-
-	donec chan struct{} // closed after err and obj are set.
-	err   error
-	obj   *ObjectAttrs
-}
-
-func (w *Writer) open() error {
-	attrs := w.ObjectAttrs
-	// Check the developer didn't change the object Name (this is unfortunate, but
-	// we don't want to store an object under the wrong name).
-	if attrs.Name != w.o.object {
-		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
-	}
-	if !utf8.ValidString(attrs.Name) {
-		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
-	}
-	pr, pw := io.Pipe()
-	w.pw = pw
-	w.opened = true
-
-	if w.ChunkSize < 0 {
-		return errors.New("storage: Writer.ChunkSize must non-negative")
-	}
-	mediaOpts := []googleapi.MediaOption{
-		googleapi.ChunkSize(w.ChunkSize),
-	}
-	if c := attrs.ContentType; c != "" {
-		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
-	}
-
-	go func() {
-		defer close(w.donec)
-
-		call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
-			Media(pr, mediaOpts...).
-			Projection("full").
-			Context(w.ctx)
-		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
-			w.err = err
-			pr.CloseWithError(w.err)
-			return
-		}
-		var resp *raw.Object
-		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
-		if err == nil {
-			resp, err = call.Do()
-		}
-		if err != nil {
-			w.err = err
-			pr.CloseWithError(w.err)
-			return
-		}
-		w.obj = newObject(resp)
-	}()
-	return nil
-}
-
-// Write appends to w. It implements the io.Writer interface.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if !w.opened {
-		if err := w.open(); err != nil {
-			return 0, err
-		}
-	}
-	return w.pw.Write(p)
-}
-
-// Close completes the write operation and flushes any buffered data.
-// If Close doesn't return an error, metadata about the written object
-// can be retrieved by calling Object.
-func (w *Writer) Close() error {
-	if !w.opened {
-		if err := w.open(); err != nil {
-			return err
-		}
-	}
-	if err := w.pw.Close(); err != nil {
-		return err
-	}
-	<-w.donec
-	return w.err
-}
-
-// CloseWithError aborts the write operation with the provided error.
-// CloseWithError always returns nil.
-func (w *Writer) CloseWithError(err error) error {
-	if !w.opened {
-		return nil
-	}
-	return w.pw.CloseWithError(err)
-}
-
-// Attrs returns metadata about a successfully-written object.
-// It's only valid to call it after Close returns nil.
-func (w *Writer) Attrs() *ObjectAttrs {
-	return w.obj
-}
diff --git a/vendor/github.com/aclements/go-gg/LICENSE b/vendor/github.com/aclements/go-gg/LICENSE
deleted file mode 100644
index a5389da..0000000
--- a/vendor/github.com/aclements/go-gg/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2016 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/aclements/go-gg/generic/doc.go b/vendor/github.com/aclements/go-gg/generic/doc.go
deleted file mode 100644
index 0df3e8a..0000000
--- a/vendor/github.com/aclements/go-gg/generic/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package generic provides type-generic functions.
-package generic
diff --git a/vendor/github.com/aclements/go-gg/generic/error.go b/vendor/github.com/aclements/go-gg/generic/error.go
deleted file mode 100644
index a04e5e5..0000000
--- a/vendor/github.com/aclements/go-gg/generic/error.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package generic
-
-import "reflect"
-
-type TypeError struct {
-	Type1, Type2 reflect.Type
-	Extra        string
-}
-
-func (e TypeError) Error() string {
-	msg := e.Type1.String()
-	if e.Type2 != nil {
-		msg += " and " + e.Type2.String()
-	}
-	msg += " " + e.Extra
-	return msg
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/order.go b/vendor/github.com/aclements/go-gg/generic/order.go
deleted file mode 100644
index 551373d..0000000
--- a/vendor/github.com/aclements/go-gg/generic/order.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package generic
-
-import "reflect"
-
-// CanOrder returns whether the values a and b are orderable according
-// to the Go language specification.
-func CanOrder(a, b interface{}) bool {
-	ak, bk := reflect.ValueOf(a).Kind(), reflect.ValueOf(b).Kind()
-	if ak != bk {
-		return false
-	}
-	return CanOrderR(ak)
-}
-
-var orderable = map[reflect.Kind]bool{
-	reflect.Int:     true,
-	reflect.Int8:    true,
-	reflect.Int16:   true,
-	reflect.Int32:   true,
-	reflect.Int64:   true,
-	reflect.Uint:    true,
-	reflect.Uintptr: true,
-	reflect.Uint8:   true,
-	reflect.Uint16:  true,
-	reflect.Uint32:  true,
-	reflect.Uint64:  true,
-	reflect.Float32: true,
-	reflect.Float64: true,
-	reflect.String:  true,
-}
-
-// CanOrderR returns whether two values of kind k are orderable
-// according to the Go language specification.
-func CanOrderR(k reflect.Kind) bool {
-	return orderable[k]
-}
-
-// Order returns the order of values a and b: -1 if a < b, 0 if a ==
-// b, 1 if a > b. The results are undefined if either a or b is NaN.
-//
-// Order panics if a and b are not orderable according to the Go
-// language specification.
-func Order(a, b interface{}) int {
-	return OrderR(reflect.ValueOf(a), reflect.ValueOf(b))
-}
-
-// OrderR is equivalent to Order, but operates on reflect.Values.
-func OrderR(a, b reflect.Value) int {
-	if a.Kind() != b.Kind() {
-		panic(&TypeError{a.Type(), b.Type(), "are not orderable because they are different kinds"})
-	}
-
-	switch a.Kind() {
-	case reflect.Float32, reflect.Float64:
-		a, b := a.Float(), b.Float()
-		if a < b {
-			return -1
-		} else if a > b {
-			return 1
-		}
-		return 0
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		a, b := a.Int(), b.Int()
-		if a < b {
-			return -1
-		} else if a > b {
-			return 1
-		}
-		return 0
-
-	case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		a, b := a.Uint(), b.Uint()
-		if a < b {
-			return -1
-		} else if a > b {
-			return 1
-		}
-		return 0
-
-	case reflect.String:
-		a, b := a.String(), b.String()
-		if a < b {
-			return -1
-		} else if a > b {
-			return 1
-		}
-		return 0
-	}
-
-	panic(&TypeError{a.Type(), nil, "is not orderable"})
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/concat.go b/vendor/github.com/aclements/go-gg/generic/slice/concat.go
deleted file mode 100644
index e538790..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/concat.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Concat returns the concatenation of all of ss. The types of all of
-// the arguments must be identical or Concat will panic with a
-// *generic.TypeError. The returned slice will have the same type as the
-// inputs. If there are 0 arguments, Concat returns nil. Concat does
-// not modify any of the input slices.
-func Concat(ss ...T) T {
-	if len(ss) == 0 {
-		return nil
-	}
-
-	rvs := make([]reflect.Value, len(ss))
-	total := 0
-	var typ reflect.Type
-	for i, s := range ss {
-		rvs[i] = reflectSlice(s)
-		total += rvs[i].Len()
-		if i == 0 {
-			typ = rvs[i].Type()
-		} else if rvs[i].Type() != typ {
-			panic(&generic.TypeError{typ, rvs[i].Type(), "have different types"})
-		}
-	}
-
-	out := reflect.MakeSlice(typ, 0, total)
-	for _, rv := range rvs {
-		out = reflect.AppendSlice(out, rv)
-	}
-	return out.Interface()
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/convert.go b/vendor/github.com/aclements/go-gg/generic/slice/convert.go
deleted file mode 100644
index c3d46a3..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/convert.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Convert converts each element in from and assigns it to *to. to
-// must be a pointer to a slice. Convert slices or extends *to to
-// len(from) and then assigns to[i] = T(from[i]) where T is the type
-// of *to's elements. If from and *to have the same element type, it
-// simply assigns *to = from.
-func Convert(to interface{}, from T) {
-	fv := reflectSlice(from)
-	tv := reflect.ValueOf(to)
-	if tv.Kind() != reflect.Ptr {
-		panic(&generic.TypeError{tv.Type(), nil, "is not a *[]T"})
-	}
-	tst := tv.Type().Elem()
-	if tst.Kind() != reflect.Slice {
-		panic(&generic.TypeError{tv.Type(), nil, "is not a *[]T"})
-	}
-
-	if fv.Type().AssignableTo(tst) {
-		tv.Elem().Set(fv)
-		return
-	}
-
-	eltt := tst.Elem()
-	if !fv.Type().Elem().ConvertibleTo(eltt) {
-		panic(&generic.TypeError{fv.Type(), tst, "cannot be converted"})
-	}
-
-	switch to := to.(type) {
-	case *[]float64:
-		// This is extremely common.
-		*to = (*to)[:0]
-		for i, len := 0, fv.Len(); i < len; i++ {
-			*to = append(*to, fv.Index(i).Convert(eltt).Float())
-		}
-
-	default:
-		tsv := tv.Elem()
-		tsv.SetLen(0)
-		for i, len := 0, fv.Len(); i < len; i++ {
-			tsv = reflect.Append(tsv, fv.Index(i).Convert(eltt))
-		}
-		tv.Elem().Set(tsv)
-	}
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/cycle.go b/vendor/github.com/aclements/go-gg/generic/slice/cycle.go
deleted file mode 100644
index a94e5e2..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/cycle.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import "reflect"
-
-// Cycle constructs a slice of length length by repeatedly
-// concatenating s to itself. If len(s) >= length, it returns
-// s[:length]. Otherwise, it allocates a new slice. If len(s) == 0 and
-// length != 0, Cycle panics.
-func Cycle(s T, length int) T {
-	rv := reflectSlice(s)
-	if rv.Len() >= length {
-		return rv.Slice(0, length).Interface()
-	}
-
-	if rv.Len() == 0 {
-		panic("empty slice")
-	}
-
-	// Allocate a new slice of the appropriate length.
-	out := reflect.MakeSlice(rv.Type(), length, length)
-
-	// Copy elements to out.
-	for pos := 0; pos < length; {
-		pos += reflect.Copy(out.Slice(pos, length), rv)
-	}
-
-	return out.Interface()
-}
-
-// Repeat returns a slice consisting of length copies of v.
-func Repeat(v interface{}, length int) T {
-	if length < 0 {
-		length = 0
-	}
-	rv := reflect.ValueOf(v)
-	out := reflect.MakeSlice(reflect.SliceOf(rv.Type()), length, length)
-	for i := 0; i < length; i++ {
-		out.Index(i).Set(rv)
-	}
-	return out.Interface()
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/doc.go b/vendor/github.com/aclements/go-gg/generic/slice/doc.go
deleted file mode 100644
index 2ebb862..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package slice provides generic slice functions.
-package slice
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/find.go b/vendor/github.com/aclements/go-gg/generic/slice/find.go
deleted file mode 100644
index 09c7d53..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/find.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Index returns the index of the first instance of val in s, or -1 if
-// val is not present in s. val's type must be s's element type.
-func Index(s T, val interface{}) int {
-	rs := reflectSlice(s)
-	if vt := reflect.TypeOf(val); rs.Type().Elem() != vt {
-		// TODO: Better "<seq> is not a sequence of <val>".
-		panic(&generic.TypeError{rs.Type(), vt, "cannot find"})
-	}
-
-	for i, l := 0, rs.Len(); i < l; i++ {
-		if rs.Index(i).Interface() == val {
-			return i
-		}
-	}
-	return -1
-}
-
-// LastIndex returns the index of the last instance of val in s, or -1
-// if val is not present in s. val's type must be s's element type.
-func LastIndex(s T, val interface{}) int {
-	rs := reflectSlice(s)
-	if vt := reflect.TypeOf(val); rs.Type().Elem() != vt {
-		// TODO: Better "<seq> is not a sequence of <val>".
-		panic(&generic.TypeError{rs.Type(), vt, "cannot find"})
-	}
-
-	for i := rs.Len() - 1; i >= 0; i-- {
-		if rs.Index(i).Interface() == val {
-			return i
-		}
-	}
-	return -1
-}
-
-// Contains reports whether val is within s. val's type must be s's
-// element type.
-func Contains(s T, val interface{}) bool {
-	return Index(s, val) >= 0
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/index.go b/vendor/github.com/aclements/go-gg/generic/slice/index.go
deleted file mode 100644
index 780b4ac..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/index.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Select returns a slice w such that w[i] = v[indexes[i]].
-func Select(v T, indexes []int) T {
-	switch v := v.(type) {
-	case []int:
-		res := make([]int, len(indexes))
-		for i, x := range indexes {
-			res[i] = v[x]
-		}
-		return res
-
-	case []float64:
-		res := make([]float64, len(indexes))
-		for i, x := range indexes {
-			res[i] = v[x]
-		}
-		return res
-
-	case []string:
-		res := make([]string, len(indexes))
-		for i, x := range indexes {
-			res[i] = v[x]
-		}
-		return res
-	}
-
-	rv := reflectSlice(v)
-	res := reflect.MakeSlice(rv.Type(), len(indexes), len(indexes))
-	for i, x := range indexes {
-		res.Index(i).Set(rv.Index(x))
-	}
-	return res.Interface()
-}
-
-// SelectInto assigns out[i] = in[indexes[i]]. in and out must have
-// the same types and len(out) must be >= len(indexes). If in and out
-// overlap, the results are undefined.
-func SelectInto(out, in T, indexes []int) {
-	// TODO: Maybe they should only have to be assignable?
-	if it, ot := reflect.TypeOf(in), reflect.TypeOf(out); it != ot {
-		panic(&generic.TypeError{it, ot, "must be the same type"})
-	}
-
-	switch in := in.(type) {
-	case []int:
-		out := out.([]int)
-		for i, x := range indexes {
-			out[i] = in[x]
-		}
-		return
-
-	case []float64:
-		out := out.([]float64)
-		for i, x := range indexes {
-			out[i] = in[x]
-		}
-		return
-
-	case []string:
-		out := out.([]string)
-		for i, x := range indexes {
-			out[i] = in[x]
-		}
-		return
-	}
-
-	inv, outv := reflectSlice(in), reflectSlice(out)
-	for i, x := range indexes {
-		outv.Index(i).Set(inv.Index(x))
-	}
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/min.go b/vendor/github.com/aclements/go-gg/generic/slice/min.go
deleted file mode 100644
index 7cda3fc..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/min.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-	"sort"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Min returns the minimum value in v. v must either implement
-// sort.Interface or its elements must be orderable. Min panics if v
-// is empty.
-func Min(v T) interface{} {
-	x, _ := minmax(v, -1, true)
-	return x.Interface()
-}
-
-// ArgMin returns the index of the minimum value in v. If there are
-// multiple indexes equal to the minimum value, ArgMin returns the
-// lowest of them. v must be a slice whose elements are orderable, or
-// must implement sort.Interface. ArgMin panics if v is empty.
-func ArgMin(v interface{}) int {
-	_, i := minmax(v, -1, false)
-	return i
-}
-
-// Max returns the maximum value in v. v must either implement
-// sort.Interface or its elements must be orderable. Max panics if v
-// is empty.
-func Max(v T) interface{} {
-	x, _ := minmax(v, 1, true)
-	return x.Interface()
-}
-
-// ArgMax returns the index of the maximum value in v. If there are
-// multiple indexes equal to the maximum value, ArgMax returns the
-// lowest of them. v must be a slice whose elements are orderable, or
-// must implement sort.Interface. ArgMax panics if v is empty.
-func ArgMax(v interface{}) int {
-	_, i := minmax(v, 1, false)
-	return i
-}
-
-func minmax(v interface{}, keep int, val bool) (reflect.Value, int) {
-	switch v := v.(type) {
-	case sort.Interface:
-		if v.Len() == 0 {
-			if keep < 0 {
-				panic("zero-length sequence has no minimum")
-			} else {
-				panic("zero-length sequence has no maximum")
-			}
-		}
-		maxi := 0
-		if keep < 0 {
-			for i, len := 0, v.Len(); i < len; i++ {
-				if v.Less(i, maxi) {
-					maxi = i
-				}
-			}
-		} else {
-			for i, len := 0, v.Len(); i < len; i++ {
-				if v.Less(maxi, i) {
-					maxi = i
-				}
-			}
-		}
-
-		if !val {
-			return reflect.Value{}, maxi
-		}
-
-		rv := reflectSlice(v)
-		return rv.Index(maxi), maxi
-	}
-
-	rv := reflectSlice(v)
-	if !generic.CanOrderR(rv.Type().Elem().Kind()) {
-		panic(&generic.TypeError{rv.Type().Elem(), nil, "is not orderable"})
-	}
-	if rv.Len() == 0 {
-		if keep < 0 {
-			panic("zero-length slice has no minimum")
-		} else {
-			panic("zero-length slice has no maximum")
-		}
-	}
-	max, maxi := rv.Index(0), 0
-	for i, len := 1, rv.Len(); i < len; i++ {
-		if elt := rv.Index(i); generic.OrderR(elt, max) == keep {
-			max, maxi = elt, i
-		}
-	}
-	return max, maxi
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/nub.go b/vendor/github.com/aclements/go-gg/generic/slice/nub.go
deleted file mode 100644
index 0c5f69c..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/nub.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import "reflect"
-
-var trueVal = reflect.ValueOf(true)
-
-// Nub returns v with duplicates removed. It keeps the first instance
-// of each distinct value and preserves their order.
-func Nub(v T) T {
-	rv := reflectSlice(v)
-	indexes := make([]int, 0)
-	set := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))
-	for i, l := 0, rv.Len(); i < l; i++ {
-		x := rv.Index(i)
-		if set.MapIndex(x).IsValid() {
-			continue
-		}
-		set.SetMapIndex(x, trueVal)
-		indexes = append(indexes, i)
-	}
-	return Select(v, indexes)
-}
-
-// NubAppend is equivalent to appending all of the slices in vs and
-// then calling Nub on the result, but more efficient.
-func NubAppend(vs ...T) T {
-	if len(vs) == 0 {
-		return nil
-	}
-
-	rv := reflectSlice(vs[0])
-	set := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))
-	out := reflect.MakeSlice(rv.Type(), 0, 0)
-
-	for _, v := range vs {
-		rv := reflectSlice(v)
-		for i, l := 0, rv.Len(); i < l; i++ {
-			x := rv.Index(i)
-			if set.MapIndex(x).IsValid() {
-				continue
-			}
-			set.SetMapIndex(x, trueVal)
-			out = reflect.Append(out, x)
-		}
-	}
-
-	return out.Interface()
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/seq.go b/vendor/github.com/aclements/go-gg/generic/slice/seq.go
deleted file mode 100644
index 47812e4..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/seq.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// T is a Go slice value of type []U.
-//
-// This is primarily for documentation. There is no way to statically
-// enforce this in Go; however, functions that expect a slice will
-// panic with a *generic.TypeError if passed a non-slice value.
-type T interface{}
-
-// reflectSlice checks that s is a slice and returns its
-// reflect.Value. It panics with a *generic.TypeError if s is not a slice.
-func reflectSlice(s T) reflect.Value {
-	rv := reflect.ValueOf(s)
-	if rv.Kind() != reflect.Slice {
-		panic(&generic.TypeError{rv.Type(), nil, "is not a slice"})
-	}
-	return rv
-}
diff --git a/vendor/github.com/aclements/go-gg/generic/slice/sort.go b/vendor/github.com/aclements/go-gg/generic/slice/sort.go
deleted file mode 100644
index e5ef8b6..0000000
--- a/vendor/github.com/aclements/go-gg/generic/slice/sort.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slice
-
-import (
-	"reflect"
-	"sort"
-	"time"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// CanSort returns whether the value v can be sorted.
-func CanSort(v interface{}) bool {
-	switch v.(type) {
-	case sort.Interface, []time.Time:
-		return true
-	}
-	return generic.CanOrderR(reflect.TypeOf(v).Elem().Kind())
-}
-
-// Sort sorts v in increasing order. v must implement sort.Interface,
-// be a slice whose elements are orderable, or be a []time.Time.
-func Sort(v interface{}) {
-	sort.Sort(Sorter(v))
-}
-
-// Sorter returns a sort.Interface for sorting v. v must implement
-// sort.Interface, be a slice whose elements are orderable, or be a
-// []time.Time.
-func Sorter(v interface{}) sort.Interface {
-	switch v := v.(type) {
-	case []int:
-		return sort.IntSlice(v)
-	case []float64:
-		return sort.Float64Slice(v)
-	case []string:
-		return sort.StringSlice(v)
-	case []time.Time:
-		return sortTimeSlice(v)
-	case sort.Interface:
-		return v
-	}
-
-	rv := reflectSlice(v)
-	switch rv.Type().Elem().Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return sortIntSlice{rv}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return sortUintSlice{rv}
-	case reflect.Float32, reflect.Float64:
-		return sortFloatSlice{rv}
-	case reflect.String:
-		return sortStringSlice{rv}
-	}
-	panic(&generic.TypeError{rv.Type().Elem(), nil, "is not orderable"})
-}
-
-type sortIntSlice struct {
-	reflect.Value
-}
-
-func (s sortIntSlice) Len() int {
-	return s.Value.Len()
-}
-
-func (s sortIntSlice) Less(i, j int) bool {
-	return s.Index(i).Int() < s.Index(j).Int()
-}
-
-func (s sortIntSlice) Swap(i, j int) {
-	a, b := s.Index(i).Int(), s.Index(j).Int()
-	s.Index(i).SetInt(b)
-	s.Index(j).SetInt(a)
-}
-
-type sortUintSlice struct {
-	reflect.Value
-}
-
-func (s sortUintSlice) Len() int {
-	return s.Value.Len()
-}
-
-func (s sortUintSlice) Less(i, j int) bool {
-	return s.Index(i).Uint() < s.Index(j).Uint()
-}
-
-func (s sortUintSlice) Swap(i, j int) {
-	a, b := s.Index(i).Uint(), s.Index(j).Uint()
-	s.Index(i).SetUint(b)
-	s.Index(j).SetUint(a)
-}
-
-type sortFloatSlice struct {
-	reflect.Value
-}
-
-func (s sortFloatSlice) Len() int {
-	return s.Value.Len()
-}
-
-func (s sortFloatSlice) Less(i, j int) bool {
-	return s.Index(i).Float() < s.Index(j).Float()
-}
-
-func (s sortFloatSlice) Swap(i, j int) {
-	a, b := s.Index(i).Float(), s.Index(j).Float()
-	s.Index(i).SetFloat(b)
-	s.Index(j).SetFloat(a)
-}
-
-type sortStringSlice struct {
-	reflect.Value
-}
-
-func (s sortStringSlice) Len() int {
-	return s.Value.Len()
-}
-
-func (s sortStringSlice) Less(i, j int) bool {
-	return s.Index(i).String() < s.Index(j).String()
-}
-
-func (s sortStringSlice) Swap(i, j int) {
-	a, b := s.Index(i).String(), s.Index(j).String()
-	s.Index(i).SetString(b)
-	s.Index(j).SetString(a)
-}
-
-type sortTimeSlice []time.Time
-
-func (s sortTimeSlice) Len() int           { return len(s) }
-func (s sortTimeSlice) Less(i, j int) bool { return s[i].Before(s[j]) }
-func (s sortTimeSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/vendor/github.com/aclements/go-gg/ggstat/agg.go b/vendor/github.com/aclements/go-gg/ggstat/agg.go
deleted file mode 100644
index 072d621..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/agg.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/stats"
-	"github.com/aclements/go-moremath/vec"
-)
-
-// TODO: AggFirst, AggTukey. StdDev?
-
-// Agg constructs an Aggregate transform from a grouping column and a
-// set of Aggregators.
-//
-// TODO: Does this belong in ggstat? The specific aggregator functions
-// probably do, but the concept could go in package table.
-func Agg(xs ...string) func(aggs ...Aggregator) Aggregate {
-	return func(aggs ...Aggregator) Aggregate {
-		return Aggregate{xs, aggs}
-	}
-}
-
-// Aggregate computes aggregate functions of a table grouped by
-// distinct values of a column or set of columns.
-//
-// Aggregate first groups the table by the Xs columns. Each of these
-// groups produces a single row in the output table, where the unique
-// value of each of the Xs columns appears in the output row, along
-// with constant columns from the input, as well as any columns that
-// have a unique value within every group (they're "effectively"
-// constant). Additional columns in the output row are produced by
-// applying the Aggregator functions to the group.
-type Aggregate struct {
-	// Xs is the list column names to group values by before
-	// computing aggregate functions.
-	Xs []string
-
-	// Aggregators is the set of Aggregator functions to apply to
-	// each group of values.
-	Aggregators []Aggregator
-}
-
-// An Aggregator is a function that aggregates each group of input
-// into one row and adds it to output. It may be based on multiple
-// columns from input and may add multiple columns to output.
-type Aggregator func(input table.Grouping, output *table.Builder)
-
-func (s Aggregate) F(g table.Grouping) table.Grouping {
-	isConst := make([]bool, len(g.Columns()))
-	for i := range isConst {
-		isConst[i] = true
-	}
-
-	subgroups := map[table.GroupID]table.Grouping{}
-	for _, gid := range g.Tables() {
-		g := table.GroupBy(g.Table(gid), s.Xs...)
-		subgroups[gid] = g
-
-		for i, col := range g.Columns() {
-			if !isConst[i] {
-				continue
-			}
-			// Can this column be promoted to constant?
-			for _, gid2 := range g.Tables() {
-				t := g.Table(gid2)
-				isConst[i] = isConst[i] && checkConst(t, col)
-			}
-		}
-	}
-
-	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
-		g := table.GroupBy(t, s.Xs...)
-		var nt table.Builder
-
-		// Construct X columns.
-		rows := len(g.Tables())
-		for colidx, xcol := range s.Xs {
-			xs := reflect.MakeSlice(table.ColType(t, xcol), rows, rows)
-			for i, gid := range g.Tables() {
-				for j := 0; j < len(s.Xs)-colidx-1; j++ {
-					gid = gid.Parent()
-				}
-				xs.Index(i).Set(reflect.ValueOf(gid.Label()))
-			}
-
-			nt.Add(xcol, xs.Interface())
-		}
-
-		// Apply Aggregators.
-		for _, agg := range s.Aggregators {
-			agg(g, &nt)
-		}
-
-		// Keep constant and effectively constant columns.
-		for i := range isConst {
-			col := t.Columns()[i]
-			if !isConst[i] || nt.Has(col) {
-				continue
-			}
-			if cv, ok := t.Const(col); ok {
-				nt.AddConst(col, cv)
-				continue
-			}
-
-			ncol := reflect.MakeSlice(table.ColType(t, col), len(g.Tables()), len(g.Tables()))
-			for i, gid := range g.Tables() {
-				v := reflect.ValueOf(g.Table(gid).Column(col))
-				ncol.Index(i).Set(v.Index(0))
-			}
-			nt.Add(col, ncol.Interface())
-		}
-		return nt.Done()
-	})
-}
-
-func checkConst(t *table.Table, col string) bool {
-	if _, ok := t.Const(col); ok {
-		return true
-	}
-	v := reflect.ValueOf(t.Column(col))
-	if v.Len() <= 1 {
-		return true
-	}
-	if !v.Type().Elem().Comparable() {
-		return false
-	}
-	elem := v.Index(0).Interface()
-	for i, l := 1, v.Len(); i < l; i++ {
-		if elem != v.Index(i).Interface() {
-			return false
-		}
-	}
-	return true
-}
-
-// AggCount returns an aggregate function that computes the number of
-// rows in each group. The resulting column will be named label, or
-// "count" if label is "".
-func AggCount(label string) Aggregator {
-	if label == "" {
-		label = "count"
-	}
-
-	return func(input table.Grouping, b *table.Builder) {
-		counts := make([]int, 0, len(input.Tables()))
-		for _, gid := range input.Tables() {
-			counts = append(counts, input.Table(gid).Len())
-		}
-		b.Add(label, counts)
-	}
-}
-
-// AggMean returns an aggregate function that computes the mean of
-// each of cols. The resulting columns will be named "mean <col>" and
-// will have the same type as <col>.
-func AggMean(cols ...string) Aggregator {
-	return aggFn(stats.Mean, "mean ", cols...)
-}
-
-// AggGeoMean returns an aggregate function that computes the
-// geometric mean of each of cols. The resulting columns will be named
-// "geomean <col>" and will have the same type as <col>.
-func AggGeoMean(cols ...string) Aggregator {
-	return aggFn(stats.GeoMean, "geomean ", cols...)
-}
-
-// AggMin returns an aggregate function that computes the minimum of
-// each of cols. The resulting columns will be named "min <col>" and
-// will have the same type as <col>.
-func AggMin(cols ...string) Aggregator {
-	min := func(xs []float64) float64 {
-		x, _ := stats.Bounds(xs)
-		return x
-	}
-	return aggFn(min, "min ", cols...)
-}
-
-// AggMax returns an aggregate function that computes the maximum of
-// each of cols. The resulting columns will be named "max <col>" and
-// will have the same type as <col>.
-func AggMax(cols ...string) Aggregator {
-	max := func(xs []float64) float64 {
-		_, x := stats.Bounds(xs)
-		return x
-	}
-	return aggFn(max, "max ", cols...)
-}
-
-// AggSum returns an aggregate function that computes the sum of each
-// of cols. The resulting columns will be named "sum <col>" and will
-// have the same type as <col>.
-func AggSum(cols ...string) Aggregator {
-	return aggFn(vec.Sum, "sum ", cols...)
-}
-
-// AggQuantile returns an aggregate function that computes a quantile
-// of each of cols. quantile has a range of [0,1]. The resulting
-// columns will be named "<prefix> <col>" and will have the same type
-// as <col>.
-func AggQuantile(prefix string, quantile float64, cols ...string) Aggregator {
-	// "prefix" could be autogenerated (e.g. fmt.Sprintf("p%g ",
-	// quantile * 100)), but then the caller would need to do the
-	// same fmt.Sprintf to compute the column name they had just
-	// created. Perhaps Aggregator should provide a way to find
-	// the generated column names.
-	return aggFn(func(data []float64) float64 {
-		return stats.Sample{Xs: data}.Quantile(quantile)
-	}, prefix+" ", cols...)
-}
-
-func aggFn(f func([]float64) float64, prefix string, cols ...string) Aggregator {
-	ocols := make([]string, len(cols))
-	for i, col := range cols {
-		ocols[i] = prefix + col
-	}
-
-	return func(input table.Grouping, b *table.Builder) {
-		for coli, col := range cols {
-			means := make([]float64, 0, len(input.Tables()))
-
-			var xs []float64
-			var ct reflect.Type
-			for i, gid := range input.Tables() {
-				v := input.Table(gid).MustColumn(col)
-				if i == 0 {
-					ct = reflect.TypeOf(v)
-				}
-				slice.Convert(&xs, v)
-				means = append(means, f(xs))
-			}
-
-			if ct == float64SliceType {
-				b.Add(ocols[coli], means)
-			} else {
-				// Convert means back to the type of col.
-				outptr := reflect.New(ct)
-				slice.Convert(outptr.Interface(), means)
-				b.Add(ocols[coli], outptr.Elem().Interface())
-			}
-		}
-	}
-}
-
-// AggUnique returns an aggregate function retains the unique value of
-// each of cols within each aggregate group, or panics if some group
-// contains more than one value for one of these columns.
-//
-// Note that Aggregate will automatically retain columns that happen
-// to be unique. AggUnique can be used to enforce at aggregation time
-// that certain columns *must* be unique (and get a nice error if they
-// are not).
-func AggUnique(cols ...string) Aggregator {
-	return func(input table.Grouping, b *table.Builder) {
-		if len(cols) == 0 {
-			return
-		}
-		if len(input.Tables()) == 0 {
-			panic(fmt.Sprintf("unknown column: %q", cols[0]))
-		}
-
-		for _, col := range cols {
-			ctype := table.ColType(input, col)
-			rows := len(input.Tables())
-			vs := reflect.MakeSlice(ctype, rows, rows)
-			for i, gid := range input.Tables() {
-				// Get values in this column.
-				xs := reflect.ValueOf(input.Table(gid).MustColumn(col))
-
-				// Check for uniqueness.
-				if xs.Len() == 0 {
-					panic(fmt.Sprintf("cannot AggUnique empty column %q", col))
-				}
-				uniquev := xs.Index(0)
-				unique := uniquev.Interface()
-				for i, len := 1, xs.Len(); i < len; i++ {
-					other := xs.Index(i).Interface()
-					if unique != other {
-						panic(fmt.Sprintf("column %q is not unique; contains at least %v and %v", col, unique, other))
-					}
-				}
-
-				// Store unique value.
-				vs.Index(i).Set(uniquev)
-			}
-
-			// Add unique values slice to output table.
-			b.Add(col, vs.Interface())
-		}
-	}
-}
diff --git a/vendor/github.com/aclements/go-gg/ggstat/bin.go b/vendor/github.com/aclements/go-gg/ggstat/bin.go
deleted file mode 100644
index 2602004..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/bin.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"math"
-	"reflect"
-	"sort"
-
-	"github.com/aclements/go-gg/generic"
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/vec"
-)
-
-// XXX If this is just based on the number of bins, it can come up
-// with really ugly boundary numbers. If the bin width is specified,
-// then you could also specify the left edge and bins will be placed
-// at [align+width*N, align+width*(N+1)]. ggplot2 also lets you
-// specify the center alignment.
-//
-// XXX In Matlab and NumPy, bins are open on the right *except* for
-// the last bin, which is closed on both.
-//
-// XXX Number of bins/bin width/specify boundaries, same bins across
-// all groups/separate for each group/based on shared scales (don't
-// have that information here), relative or absolute histogram (Matlab
-// has lots more).
-//
-// XXX Scale transform.
-//
-// The result of Bin has two columns in addition to constant columns from the input:
-//
-// - Column X is the left edge of the bin.
-//
-// - Column W is the sum of the rows' weights, or column "count" is
-//   the number of rows in the bin.
-type Bin struct {
-	// X is the name of the column to use for samples.
-	X string
-
-	// W is the optional name of the column to use for sample
-	// weights. It may be "" to weight each sample as 1.
-	W string
-
-	// Width controls how wide each bin should be. If not provided
-	// or 0, a width will be chosen to produce 30 bins. If X is an
-	// integer column, this width will be treated as an integer as
-	// well.
-	Width float64
-
-	// Center controls the center point of each bin. To center on
-	// integers, for example, you could use {Width: 1, Center:
-	// 0}.
-	// XXX What does center mean for integers? Should an unspecified center yield an autochosen one, or 0?
-	//Center float64
-
-	// Breaks is the set of break points to use as boundaries
-	// between bins. The interval of each bin is [Breaks[i],
-	// Breaks[i+1]). Data points before the first break are
-	// dropped. If provided, Width and Center are ignored.
-	Breaks table.Slice
-
-	// SplitGroups indicates that each group in the table should
-	// have separate bounds based on the data in that group alone.
-	// The default, false, indicates that the binning function
-	// should use the bounds of all of the data combined. This
-	// makes it easier to compare bins across groups.
-	SplitGroups bool
-}
-
-func (b Bin) F(g table.Grouping) table.Grouping {
-	breaks := reflect.ValueOf(b.Breaks)
-	agg := AggCount("count")
-	if b.W != "" {
-		agg = aggFn(vec.Sum, "", b.W)
-	}
-	if !breaks.IsValid() && !b.SplitGroups {
-		breaks = b.computeBreaks(g)
-	}
-	// Change b.X to the start of the bin.
-	g = table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
-		breaks := breaks
-		if !breaks.IsValid() {
-			breaks = b.computeBreaks(t)
-		}
-		nbreaks := breaks.Len()
-
-		in := reflect.ValueOf(t.MustColumn(b.X))
-		nin := in.Len()
-
-		out := reflect.MakeSlice(breaks.Type(), nin, nin)
-		var found []int
-		for i := 0; i < nin; i++ {
-			elt := in.Index(i)
-			bin := sort.Search(nbreaks, func(j int) bool {
-				return generic.OrderR(elt, breaks.Index(j)) < 0
-			})
-			// 0 means the row doesn't fit on the front
-			// XXX Allow configuring the first and last bin as infinite or not.
-			bin = bin - 1
-			if bin >= 0 {
-				found = append(found, i)
-				out.Index(i).Set(breaks.Index(bin))
-			}
-		}
-		var nt table.Builder
-		for _, col := range t.Columns() {
-			if col == b.X {
-				nt.Add(col, slice.Select(out.Interface(), found))
-			} else if c, ok := t.Const(col); ok {
-				nt.AddConst(col, c)
-			} else {
-				nt.Add(col, slice.Select(t.Column(col), found))
-			}
-		}
-		return nt.Done()
-	})
-	// Group by the found bin
-	return Agg(b.X)(agg).F(g)
-}
-
-func (b Bin) computeBreaks(g table.Grouping) reflect.Value {
-	var cols []slice.T
-	for _, gid := range g.Tables() {
-		cols = append(cols, g.Table(gid).MustColumn(b.X))
-	}
-	data := slice.Concat(cols...)
-
-	min := slice.Min(data)
-	max := slice.Max(data)
-
-	rv := reflect.ValueOf(min)
-	switch rv.Type().Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		min, max := rv.Int(), reflect.ValueOf(max).Int()
-		width := int64(b.Width)
-		if width == 0 {
-			width = (max - min) / 30
-			if width < 1 {
-				width = 1
-			}
-		}
-		// XXX: This assumes boundaries should be aligned with
-		// 0. We should support explicit Center or Boundary
-		// requests.
-		min -= (min % width)
-		var breaks []int64
-		for i := min; i < max; i += width {
-			breaks = append(breaks, i)
-		}
-		outs := reflect.New(reflect.ValueOf(cols[0]).Type())
-		slice.Convert(outs.Interface(), breaks)
-		return outs.Elem()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		min, max := rv.Uint(), reflect.ValueOf(max).Uint()
-		width := uint64(b.Width)
-		if width == 0 {
-			width = (max - min) / 30
-			if width < 1 {
-				width = 1
-			}
-		}
-		min -= (min % width)
-		var breaks []uint64
-		for i := min; i < max; i += width {
-			breaks = append(breaks, i)
-		}
-		outs := reflect.New(reflect.ValueOf(cols[0]).Type())
-		slice.Convert(outs.Interface(), breaks)
-		return outs.Elem()
-	case reflect.Float32, reflect.Float64:
-		min, max := rv.Float(), reflect.ValueOf(max).Float()
-		width := b.Width
-		if width == 0 {
-			width = (max - min) / 30
-			if width == 0 {
-				width = 1
-			}
-		}
-		min -= math.Mod(min, width)
-		var breaks []float64
-		for i := min; i < max; i += width {
-			breaks = append(breaks, i)
-		}
-		outs := reflect.New(reflect.ValueOf(cols[0]).Type())
-		slice.Convert(outs.Interface(), breaks)
-		return outs.Elem()
-	default:
-		panic("can't compute breaks for unknown type")
-	}
-}
-
-// TODO: Count for categorical data.
diff --git a/vendor/github.com/aclements/go-gg/ggstat/common.go b/vendor/github.com/aclements/go-gg/ggstat/common.go
deleted file mode 100644
index db0d7a5..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/common.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import "reflect"
-
-var float64Type = reflect.TypeOf(float64(0))
-var float64SliceType = reflect.TypeOf([]float64(nil))
diff --git a/vendor/github.com/aclements/go-gg/ggstat/density.go b/vendor/github.com/aclements/go-gg/ggstat/density.go
deleted file mode 100644
index 73b8a60..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/density.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/stats"
-	"github.com/aclements/go-moremath/vec"
-)
-
-// TODO: Default to first (and second) column for X (and Y)?
-
-// Density constructs a probability density estimate from a set of
-// samples using kernel density estimation.
-//
-// X is the only required field. All other fields have reasonable
-// default zero values.
-//
-// The result of Density has three columns in addition to constant
-// columns from the input:
-//
-// - Column X is the points at which the density estimate is sampled.
-//
-// - Column "probability density" is the density estimate.
-//
-// - Column "cumulative density" is the cumulative density estimate.
-type Density struct {
-	// X is the name of the column to use for samples.
-	X string
-
-	// W is the optional name of the column to use for sample
-	// weights. It may be "" to uniformly weight samples.
-	W string
-
-	// N is the number of points to sample the KDE at. If N is 0,
-	// a reasonable default is used.
-	//
-	// TODO: This is particularly sensitive to the scale
-	// transform.
-	//
-	// TODO: Base the default on the bandwidth. If the bandwidth
-	// is really narrow, we may need a lot of samples to exceed
-	// the Nyquist rate.
-	N int
-
-	// Domain specifies the domain at which to sample this function.
-	// If Domain is nil, it defaults to DomainData{}.
-	Domain FunctionDomainer
-
-	// Kernel is the kernel to use for the KDE.
-	Kernel stats.KDEKernel
-
-	// Bandwidth is the bandwidth to use for the KDE.
-	//
-	// If this is zero, the bandwidth is computed from the data
-	// using a default bandwidth estimator (currently
-	// stats.BandwidthScott).
-	Bandwidth float64
-
-	// BoundaryMethod is the boundary correction method to use for
-	// the KDE. The default value is BoundaryReflect; however, the
-	// default bounds are effectively +/-inf, which is equivalent
-	// to performing no boundary correction.
-	BoundaryMethod stats.KDEBoundaryMethod
-
-	// [BoundaryMin, BoundaryMax) specify a bounded support for
-	// the KDE. If both are 0 (their default values), they are
-	// treated as +/-inf.
-	//
-	// To specify a half-bounded support, set Min to math.Inf(-1)
-	// or Max to math.Inf(1).
-	BoundaryMin float64
-	BoundaryMax float64
-}
-
-func (d Density) F(g table.Grouping) table.Grouping {
-	kde := stats.KDE{
-		Kernel:         d.Kernel,
-		Bandwidth:      d.Bandwidth,
-		BoundaryMethod: d.BoundaryMethod,
-		BoundaryMin:    d.BoundaryMin,
-		BoundaryMax:    d.BoundaryMax,
-	}
-	dname, cname := "probability density", "cumulative density"
-
-	addEmpty := func(out *table.Builder) {
-		out.Add(dname, []float64{})
-		out.Add(cname, []float64{})
-	}
-
-	return Function{
-		X: d.X, N: d.N, Domain: d.Domain,
-		Fn: func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder) {
-			if len(sampleAt) == 0 {
-				addEmpty(out)
-				return
-			}
-
-			// Get input sample.
-			var sample stats.Sample
-			slice.Convert(&sample.Xs, in.MustColumn(d.X))
-			if d.W != "" {
-				slice.Convert(&sample.Weights, in.MustColumn(d.W))
-				if sample.Weight() == 0 {
-					addEmpty(out)
-					return
-				}
-			}
-
-			// Compute KDE.
-			kde.Sample = sample
-			if d.Bandwidth == 0 {
-				kde.Bandwidth = stats.BandwidthScott(sample)
-			}
-
-			out.Add(dname, vec.Map(kde.PDF, sampleAt))
-			out.Add(cname, vec.Map(kde.CDF, sampleAt))
-		},
-	}.F(g)
-}
diff --git a/vendor/github.com/aclements/go-gg/ggstat/domain.go b/vendor/github.com/aclements/go-gg/ggstat/domain.go
deleted file mode 100644
index 77bb78e..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/domain.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"math"
-
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/stats"
-)
-
-// A FunctionDomainer computes the domain over which to evaluate a
-// statistical function.
-type FunctionDomainer interface {
-	// FunctionDomain computes the domain of a particular column
-	// within a table. It takes a Grouping and a column in that
-	// Grouping to compute the domain of and returns a function
-	// that computes the domain for a specific group in the
-	// Grouping. This makes it possible for FunctionDomain to
-	// easily compute either Grouping-wide domains, or per-Table
-	// domains.
-	//
-	// The returned domain may be (NaN, NaN) to indicate that
-	// there is no data and the domain is vacuous.
-	FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64)
-}
-
-// DomainFixed is a FunctionDomainer that returns a fixed domain.
-type DomainFixed struct {
-	Min, Max float64
-}
-
-var _ FunctionDomainer = DomainFixed{}
-
-func (r DomainFixed) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {
-	return func(table.GroupID) (min, max float64) {
-		return r.Min, r.Max
-	}
-}
-
-// DomainData is a FunctionDomainer that computes domains based on the
-// bounds of the data.
-type DomainData struct {
-	// Widen expands the domain by Widen times the span of the
-	// data.
-	//
-	// A value of 1.0 means to use exactly the bounds of the data.
-	// If Widen is 0, it is treated as 1.1 (that is, widen the
-	// domain by 10%, or 5% on the left and 5% on the right).
-	Widen float64
-
-	// SplitGroups indicates that each group in the table should
-	// have a separate domain based on the data in that group
-	// alone. The default, false, indicates that the domain should
-	// be based on all of the data in the table combined. This
-	// makes it possible to stack functions and easier to compare
-	// them across groups.
-	SplitGroups bool
-}
-
-var _ FunctionDomainer = DomainData{}
-
-const defaultWiden = 1.1
-
-func (r DomainData) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {
-	widen := r.Widen
-	if widen <= 0 {
-		widen = defaultWiden
-	}
-
-	var xs []float64
-	if !r.SplitGroups {
-		// Compute combined bounds.
-		gmin, gmax := math.NaN(), math.NaN()
-		for _, gid := range g.Tables() {
-			t := g.Table(gid)
-			slice.Convert(&xs, t.MustColumn(col))
-			xmin, xmax := stats.Bounds(xs)
-			if xmin < gmin || math.IsNaN(gmin) {
-				gmin = xmin
-			}
-			if xmax > gmax || math.IsNaN(gmax) {
-				gmax = xmax
-			}
-		}
-
-		// Widen bounds.
-		span := gmax - gmin
-		gmin, gmax = gmin-span*(widen-1)/2, gmax+span*(widen-1)/2
-
-		return func(table.GroupID) (min, max float64) {
-			return gmin, gmax
-		}
-	}
-
-	return func(gid table.GroupID) (min, max float64) {
-		// Compute bounds.
-		slice.Convert(&xs, g.Table(gid).MustColumn(col))
-		min, max = stats.Bounds(xs)
-
-		// Widen bounds.
-		span := max - min
-		min, max = min-span*(widen-1)/2, max+span*(widen-1)/2
-		return
-	}
-}
diff --git a/vendor/github.com/aclements/go-gg/ggstat/ecdf.go b/vendor/github.com/aclements/go-gg/ggstat/ecdf.go
deleted file mode 100644
index 46364a8..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/ecdf.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/vec"
-)
-
-// ECDF constructs an empirical CDF from a set of samples.
-//
-// X is the only required field. All other fields have reasonable
-// default zero values.
-//
-// The result of ECDF has three columns in addition to constant
-// columns from the input. The names of the columns depend on whether
-// Label is "".
-//
-// - Column X is the points at which the CDF changes (a subset of the
-// samples).
-//
-// - Column "cumulative density" or "cumulative density of <label>" is
-// the cumulative density estimate.
-//
-// - Column "cumulative count" (if W and Label are ""), "cumulative
-// weight" (if W is not "", but Label is "") or "cumulative <label>"
-// (if Label is not "") is the cumulative count or weight of samples.
-// That is, cumulative density times the total weight of the samples.
-type ECDF struct {
-	// X is the name of the column to use for samples.
-	X string
-
-	// W is the optional name of the column to use for sample
-	// weights. It may be "" to uniformly weight samples.
-	W string
-
-	// Label, if not "", gives a label for the samples. It is used
-	// to construct more specific names for the output columns. It
-	// should be a plural noun.
-	Label string
-
-	// Domain specifies the domain of the returned ECDF. If the
-	// domain is wider than the bounds of the data in a group,
-	// ECDF will add a point below the smallest sample and above
-	// the largest sample to make the 0 and 1 levels clear. If
-	// Domain is nil, it defaults to DomainData{}.
-	Domain FunctionDomainer
-}
-
-func (s ECDF) F(g table.Grouping) table.Grouping {
-	// Set defaults.
-	if s.Domain == nil {
-		s.Domain = DomainData{}
-	}
-
-	// Construct output column names.
-	dname, cname := "cumulative density", "cumulative count"
-	if s.Label != "" {
-		dname += " of " + s.Label
-		cname = "cumulative " + s.Label
-	} else if s.W != "" {
-		cname = "cumulative weight"
-	}
-
-	g = table.SortBy(g, s.X)
-	domain := s.Domain.FunctionDomain(g, s.X)
-
-	return table.MapTables(g, func(gid table.GroupID, t *table.Table) *table.Table {
-		// Get input columns.
-		var xs, ws []float64
-		slice.Convert(&xs, t.MustColumn(s.X))
-		if s.W != "" {
-			slice.Convert(&ws, t.MustColumn(s.W))
-		}
-
-		// Ignore empty tables.
-		if len(xs) == 0 {
-			nt := new(table.Builder).Add(s.X, []float64{}).Add(cname, []float64{}).Add(dname, []float64{})
-			preserveConsts(nt, t)
-			return nt.Done()
-		}
-
-		// Get domain.
-		min, max := domain(gid)
-
-		// Create output columns.
-		xo, do, co := make([]float64, 0), make([]float64, 0), make([]float64, 0)
-		if min < xs[0] {
-			// Extend to the left.
-			xo = append(xo, min)
-			do = append(do, 0)
-			co = append(co, 0)
-		}
-
-		// Compute total weight.
-		var total float64
-		if ws == nil {
-			total = float64(t.Len())
-		} else {
-			total = vec.Sum(ws)
-		}
-
-		// Create ECDF.
-		cum := 0.0
-		for i := 0; i < len(xs); {
-			j := i
-			for j < len(xs) && xs[i] == xs[j] {
-				if ws == nil {
-					cum += 1
-				} else {
-					cum += ws[j]
-				}
-				j++
-			}
-
-			xo = append(xo, xs[i])
-			do = append(do, cum/total)
-			co = append(co, cum)
-
-			i = j
-		}
-
-		if xs[len(xs)-1] < max {
-			// Extend to the right.
-			xo = append(xo, max)
-			do = append(do, 1)
-			co = append(co, cum)
-		}
-
-		// Construct results table.
-		nt := new(table.Builder).Add(s.X, xo).Add(dname, do).Add(cname, co)
-		preserveConsts(nt, t)
-		return nt.Done()
-	})
-}
diff --git a/vendor/github.com/aclements/go-gg/ggstat/fn.go b/vendor/github.com/aclements/go-gg/ggstat/fn.go
deleted file mode 100644
index 0a4de33..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/fn.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"math"
-	"reflect"
-
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-	"github.com/aclements/go-moremath/vec"
-)
-
-// Function samples a continuous univariate function at N points in
-// the domain computed by Domain.
-//
-// The result of Function binds column X to the X values at which the
-// function is sampled and retains constant columns from the input.
-// The computed function can add arbitrary columns for its output.
-type Function struct {
-	// X is the name of the column to use for input domain of this
-	// function.
-	X string
-
-	// N is the number of points to sample the function at. If N
-	// is 0, a reasonable default is used.
-	N int
-
-	// Domain specifies the domain of which to sample this function.
-	// If Domain is nil, it defaults to DomainData{}.
-	Domain FunctionDomainer
-
-	// Fn is the continuous univariate function to sample. Fn will
-	// be called with each table in the grouping and the X values
-	// at which it should be sampled. Fn must add its output
-	// columns to out. The output table will already contain the
-	// sample points bound to the X column.
-	Fn func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder)
-}
-
-const defaultFunctionSamples = 200
-
-func (f Function) F(g table.Grouping) table.Grouping {
-	// Set defaults.
-	if f.N <= 0 {
-		f.N = defaultFunctionSamples
-	}
-	if f.Domain == nil {
-		f.Domain = DomainData{}
-	}
-
-	domain := f.Domain.FunctionDomain(g, f.X)
-	return table.MapTables(g, func(gid table.GroupID, t *table.Table) *table.Table {
-		min, max := domain(gid)
-
-		// Compute sample points. If there's no data, there
-		// are no sample points, but we still have to run the
-		// function to get the right output columns.
-		var ss []float64
-		if math.IsNaN(min) {
-			ss = []float64{}
-		} else {
-			ss = vec.Linspace(min, max, f.N)
-		}
-
-		var nt table.Builder
-		ctype := table.ColType(t, f.X)
-		if ctype == float64Type {
-			// Bind output X column.
-			nt.Add(f.X, ss)
-		} else {
-			// Convert to the column type.
-			vsp := reflect.New(ctype)
-			slice.Convert(vsp.Interface(), ss)
-			vs := vsp.Elem()
-			// This may have produced duplicate values.
-			// Eliminate those.
-			if vs.Len() > 0 {
-				prev, i := vs.Index(0).Interface(), 1
-				for j := 1; j < vs.Len(); j++ {
-					next := vs.Index(j).Interface()
-					if prev == next {
-						// Skip duplicate.
-						continue
-					}
-
-					if i != j {
-						vs.Index(i).Set(vs.Index(j))
-					}
-					i++
-					prev = next
-				}
-				vs.SetLen(i)
-			}
-			// Bind column-typed values to output X.
-			nt.Add(f.X, vs.Interface())
-			// And convert back to []float64 so we can
-			// apply the function.
-			slice.Convert(&ss, vs.Interface())
-		}
-
-		// Apply the function to the sample points.
-		f.Fn(gid, t, ss, &nt)
-
-		preserveConsts(&nt, t)
-		return nt.Done()
-	})
-}
-
-// preserveConsts copies the constant columns from t into nt.
-func preserveConsts(nt *table.Builder, t *table.Table) {
-	for _, col := range t.Columns() {
-		if nt.Has(col) {
-			// Don't overwrite existing columns in nt.
-			continue
-		}
-		if cv, ok := t.Const(col); ok {
-			nt.AddConst(col, cv)
-		}
-	}
-}
diff --git a/vendor/github.com/aclements/go-gg/ggstat/normalize.go b/vendor/github.com/aclements/go-gg/ggstat/normalize.go
deleted file mode 100644
index 4644246..0000000
--- a/vendor/github.com/aclements/go-gg/ggstat/normalize.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ggstat
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic/slice"
-	"github.com/aclements/go-gg/table"
-)
-
-// Normalize normalizes each group such that some data point is 1.
-//
-// Either X or Index is required (though 0 is a reasonable value of
-// Index).
-//
-// The result of Normalize is the same as the input table, plus
-// additional columns for each normalized column. These columns will
-// be named "normalized <col>" where <col> is the name of the original
-// column and will have type []float64.
-type Normalize struct {
-	// X is the name of the column to use to find the denominator
-	// row. If X is "", Index is used instead.
-	X string
-
-	// Index is the row index of the denominator row if X is ""
-	// (otherwise it is ignored). Index may be negative, in which
-	// case it is added to the number of rows (e.g., -1 is the
-	// last row).
-	Index int
-
-	// By is a function func([]T) int that returns the index of
-	// the denominator row given column X. By may be nil, in which
-	// case it defaults to generic.ArgMin.
-	By interface{}
-
-	// Cols is a slice of the names of columns to normalize
-	// relative to the corresponding DenomCols value in the
-	// denominator row. Cols may be nil, in which case it defaults
-	// to all integral and floating point columns.
-	Cols []string
-
-	// DenomCols is a slice of the names of columns used as the
-	// demoninator. DenomCols may be nil, in which case it
-	// defaults to Cols (i.e. each column will be normalized to
-	// the value from that column in the denominator row.)
-	// Otherwise, DenomCols must be the same length as Cols.
-	DenomCols []string
-}
-
-func (s Normalize) F(g table.Grouping) table.Grouping {
-	// Find the columns to normalize.
-	if s.Cols == nil {
-		cols := []string{}
-		for i, ct := range colTypes(g) {
-			if canNormalize(ct.Elem().Kind()) {
-				cols = append(cols, g.Columns()[i])
-			}
-		}
-		s.Cols = cols
-	}
-	if len(s.Cols) == 0 {
-		return g
-	}
-
-	// Construct new column names.
-	newcols := make([]string, len(s.Cols))
-	for i, col := range s.Cols {
-		newcols[i] = "normalized " + col
-	}
-
-	// Get "by" function.
-	var byv reflect.Value
-	byargs := make([]reflect.Value, 1)
-	if s.By != nil {
-		byv = reflect.ValueOf(s.By)
-		// TODO: Type check byv better.
-	}
-
-	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
-		if t.Len() == 0 {
-			return t
-		}
-
-		// Find the denominator row.
-		var drow int
-		if s.X == "" {
-			drow = s.Index
-			if drow < 0 {
-				drow += t.Len()
-			}
-		} else {
-			xs := t.MustColumn(s.X)
-			if s.By == nil {
-				drow = slice.ArgMin(xs)
-			} else {
-				byargs[0] = reflect.ValueOf(xs)
-				byout := byv.Call(byargs)
-				drow = int(byout[0].Int())
-			}
-		}
-
-		// Normalize columns.
-		newt := table.NewBuilder(t)
-		denomCols := s.DenomCols
-		if denomCols == nil {
-			denomCols = s.Cols
-		}
-		for coli, col := range s.Cols {
-			denom := denomValue(t.MustColumn(denomCols[coli]), drow)
-			out := normalizeTo(t.MustColumn(col), denom)
-			newt.Add(newcols[coli], out)
-		}
-
-		return newt.Done()
-	})
-}
-
-func colTypes(g table.Grouping) []reflect.Type {
-	cts := make([]reflect.Type, len(g.Columns()))
-	for i, col := range g.Columns() {
-		cts[i] = table.ColType(g, col)
-	}
-	return cts
-}
-
-var canNormalizeKinds = map[reflect.Kind]bool{
-	reflect.Float32: true,
-	reflect.Float64: true,
-	reflect.Int:     true,
-	reflect.Int8:    true,
-	reflect.Int16:   true,
-	reflect.Int32:   true,
-	reflect.Int64:   true,
-	reflect.Uint:    true,
-	reflect.Uintptr: true,
-	reflect.Uint8:   true,
-	reflect.Uint16:  true,
-	reflect.Uint32:  true,
-	reflect.Uint64:  true,
-}
-
-func canNormalize(k reflect.Kind) bool {
-	return canNormalizeKinds[k]
-}
-
-func denomValue(s interface{}, index int) float64 {
-	switch s := s.(type) {
-	case []float64:
-		return s[index]
-	}
-	return reflect.ValueOf(s).Index(index).Convert(float64Type).Float()
-}
-
-func normalizeTo(s interface{}, denom float64) interface{} {
-	switch s := s.(type) {
-	case []float64:
-		out := make([]float64, len(s))
-		for i, numer := range s {
-			out[i] = numer / denom
-		}
-		return out
-	}
-
-	sv := reflect.ValueOf(s)
-
-	out := reflect.MakeSlice(float64SliceType, sv.Len(), sv.Len())
-	for i, len := 0, sv.Len(); i < len; i++ {
-		numer := sv.Index(i).Convert(float64Type).Float()
-		out.Index(i).SetFloat(numer / denom)
-	}
-	return out.Interface()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/concat.go b/vendor/github.com/aclements/go-gg/table/concat.go
deleted file mode 100644
index 14a8f4f..0000000
--- a/vendor/github.com/aclements/go-gg/table/concat.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"fmt"
-
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-// Concat returns the concatenation of the rows in each matching group
-// across gs. All Groupings in gs must have the same set of columns
-// (though they need not be in the same order; the column order from
-// gs[0] will be used). The GroupIDs in the returned Grouping will be
-// the union of the GroupIDs in gs.
-func Concat(gs ...Grouping) Grouping {
-	if len(gs) == 0 {
-		return new(Table)
-	}
-
-	// Check that all Groupings have the same set of columns. They
-	// can be in different orders.
-	colSet := map[string]bool{}
-	for _, col := range gs[0].Columns() {
-		colSet[col] = true
-	}
-	for i, g2 := range gs[1:] {
-		diff := len(g2.Columns()) != len(colSet)
-		if !diff {
-			for _, col := range g2.Columns() {
-				if !colSet[col] {
-					diff = true
-					break
-				}
-			}
-		}
-		if diff {
-			panic(fmt.Sprintf("columns in Groupings 0 and %d differ: %q vs %q", i+1, gs[0].Columns(), g2.Columns()))
-		}
-	}
-
-	// Collect group IDs.
-	haveGID := map[GroupID]bool{}
-	gids := []GroupID{}
-	for _, g := range gs {
-		for _, gid := range g.Tables() {
-			if haveGID[gid] {
-				continue
-			}
-			haveGID[gid] = true
-			gids = append(gids, gid)
-		}
-	}
-
-	// Build output groups.
-	var ng GroupingBuilder
-	for _, gid := range gids {
-		// Build output table.
-		var nt Builder
-		var cols []slice.T
-		for _, col := range gs[0].Columns() {
-			// Is it constant?
-			isConst := false
-			var cv interface{}
-			for _, g := range gs {
-				t := g.Table(gid)
-				if t == nil {
-					continue
-				}
-				if cv1, ok := t.Const(col); ok {
-					if !isConst {
-						isConst = true
-						cv = cv1
-					} else if cv != cv1 {
-						isConst = false
-						break
-					}
-				} else {
-					isConst = false
-					break
-				}
-			}
-			if isConst {
-				nt.AddConst(col, cv)
-				continue
-			}
-
-			// Not a constant. Collect slices.
-			for _, g := range gs {
-				t := g.Table(gid)
-				if t == nil {
-					continue
-				}
-				cols = append(cols, t.Column(col))
-			}
-			nt.Add(col, slice.Concat(cols...))
-			cols = cols[:0]
-		}
-		ng.Add(gid, nt.Done())
-	}
-	return ng.Done()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/filter.go b/vendor/github.com/aclements/go-gg/table/filter.go
deleted file mode 100644
index 5008138..0000000
--- a/vendor/github.com/aclements/go-gg/table/filter.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-var boolType = reflect.TypeOf(false)
-
-// Filter filters g to only rows where pred returns true. pred must be
-// a function that returns bool and takes len(cols) arguments where
-// the type of col[i] is assignable to argument i.
-//
-// TODO: Create a faster batch variant where pred takes slices.
-func Filter(g Grouping, pred interface{}, cols ...string) Grouping {
-	// TODO: Use generic.TypeError.
-	predv := reflect.ValueOf(pred)
-	predt := predv.Type()
-	if predt.Kind() != reflect.Func || predt.NumIn() != len(cols) || predt.NumOut() != 1 || predt.Out(0) != boolType {
-		panic("predicate function must be func(col[0], col[1], ...) bool")
-	}
-	if len(cols) == 0 {
-		return g
-	}
-	if len(g.Tables()) == 0 {
-		panic(fmt.Sprintf("unknown column %q", cols[0]))
-	}
-	// Type check arguments.
-	for i, col := range cols {
-		colt := ColType(g, col)
-		if !colt.Elem().AssignableTo(predt.In(i)) {
-			panic(fmt.Sprintf("column %d (type %s) is not assignable to predicate argument %d (type %s)", i, colt.Elem(), i, predt.In(i)))
-		}
-	}
-
-	args := make([]reflect.Value, len(cols))
-	colvs := make([]reflect.Value, len(cols))
-	match := make([]int, 0)
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		// Get columns.
-		for i, col := range cols {
-			colvs[i] = reflect.ValueOf(t.MustColumn(col))
-		}
-
-		// Find the set of row indexes that satisfy pred.
-		match = match[:0]
-		for r, len := 0, t.Len(); r < len; r++ {
-			for c, colv := range colvs {
-				args[c] = colv.Index(r)
-			}
-			if predv.Call(args)[0].Bool() {
-				match = append(match, r)
-			}
-		}
-
-		// Create the new table.
-		if len(match) == t.Len() {
-			return t
-		}
-		var nt Builder
-		for _, col := range t.Columns() {
-			nt.Add(col, slice.Select(t.Column(col), match))
-		}
-		return nt.Done()
-	})
-}
-
-// FilterEq filters g to only rows where the value in col equals val.
-func FilterEq(g Grouping, col string, val interface{}) Grouping {
-	match := make([]int, 0)
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		// Find the set of row indexes that match val.
-		seq := t.MustColumn(col)
-		match = match[:0]
-		rv := reflect.ValueOf(seq)
-		for i, len := 0, rv.Len(); i < len; i++ {
-			if rv.Index(i).Interface() == val {
-				match = append(match, i)
-			}
-		}
-
-		var nt Builder
-		for _, col := range t.Columns() {
-			nt.Add(col, slice.Select(t.Column(col), match))
-		}
-		return nt.Done()
-	})
-}
diff --git a/vendor/github.com/aclements/go-gg/table/group.go b/vendor/github.com/aclements/go-gg/table/group.go
deleted file mode 100644
index afabba2..0000000
--- a/vendor/github.com/aclements/go-gg/table/group.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-// GroupID identifies a group. GroupIDs form a tree, rooted at
-// RootGroupID (which is also the zero GroupID).
-type GroupID struct {
-	*groupNode
-}
-
-// RootGroupID is the root of the GroupID tree.
-var RootGroupID = GroupID{}
-
-type groupNode struct {
-	parent GroupID
-	label  interface{}
-}
-
-// String returns the path to GroupID g in the form "/l1/l2/l3". If g
-// is RootGroupID, it returns "/". Each level in the group is formed
-// by formatting the label using fmt's "%v" verb. Note that this is
-// purely diagnostic; this string may not uniquely identify g.
-func (g GroupID) String() string {
-	if g == RootGroupID {
-		return "/"
-	}
-	parts := []string{}
-	for p := g; p != RootGroupID; p = p.parent {
-		part := fmt.Sprintf("/%v", p.label)
-		parts = append(parts, part)
-	}
-	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
-		parts[i], parts[j] = parts[j], parts[i]
-	}
-	return strings.Join(parts, "")
-}
-
-// Extend returns a new GroupID that is a child of GroupID g. The
-// returned GroupID will not be equal to any existing GroupID (even if
-// label is not unique among g's children). The label is primarily
-// diagnostic; the table package uses it only when printing tables,
-// but callers may store semantic information in group labels.
-func (g GroupID) Extend(label interface{}) GroupID {
-	return GroupID{&groupNode{g, label}}
-}
-
-// Parent returns the parent of g. The parent of RootGroupID is
-// RootGroupID.
-func (g GroupID) Parent() GroupID {
-	if g == RootGroupID {
-		return RootGroupID
-	}
-	return g.parent
-}
-
-// Label returns the label of g.
-func (g GroupID) Label() interface{} {
-	return g.label
-}
-
-// GroupBy sub-divides all groups such that all of the rows in each
-// group have equal values for all of the named columns. The relative
-// order of rows with equal values for the named columns is
-// maintained. Grouped-by columns become constant columns within each
-// group.
-func GroupBy(g Grouping, cols ...string) Grouping {
-	// TODO: This would generate much less garbage if we grouped
-	// all of cols in one pass.
-	//
-	// TODO: This constructs one slice per column per input group,
-	// but it would be even better if it constructed just one
-	// slice per column.
-
-	if len(cols) == 0 {
-		return g
-	}
-
-	var out GroupingBuilder
-	for _, gid := range g.Tables() {
-		t := g.Table(gid)
-
-		if cv, ok := t.Const(cols[0]); ok {
-			// Grouping by a constant is trivial.
-			subgid := gid.Extend(cv)
-			out.Add(subgid, t)
-			continue
-		}
-
-		c := t.MustColumn(cols[0])
-
-		// Create an index on c.
-		type subgroupInfo struct {
-			key  interface{}
-			rows []int
-		}
-		subgroups := []subgroupInfo{}
-		keys := make(map[interface{}]int)
-		seq := reflect.ValueOf(c)
-		for i := 0; i < seq.Len(); i++ {
-			x := seq.Index(i).Interface()
-			sg, ok := keys[x]
-			if !ok {
-				sg = len(subgroups)
-				subgroups = append(subgroups, subgroupInfo{x, []int{}})
-				keys[x] = sg
-			}
-			subgroup := &subgroups[sg]
-			subgroup.rows = append(subgroup.rows, i)
-		}
-
-		// Count rows in each subgroup.
-		offsets := make([]int, 1+len(subgroups))
-		for i := range subgroups {
-			offsets[i+1] = offsets[i] + len(subgroups[i].rows)
-		}
-
-		// Split each column.
-		builders := make([]Builder, len(subgroups))
-		for _, name := range t.Columns() {
-			if name == cols[0] {
-				// Promote the group-by column to a
-				// constant.
-				for i := range subgroups {
-					builders[i].AddConst(name, subgroups[i].key)
-				}
-				continue
-			}
-
-			if cv, ok := t.Const(name); ok {
-				// Keep constants constant.
-				for i := range builders {
-					builders[i].AddConst(name, cv)
-				}
-				continue
-			}
-
-			// Create a slice for all of the values.
-			col := t.Column(name)
-			ncol := reflect.MakeSlice(reflect.TypeOf(col), t.Len(), t.Len())
-
-			// Shuffle each subgroup into ncol.
-			for i := range subgroups {
-				subcol := ncol.Slice(offsets[i], offsets[i+1]).Interface()
-				slice.SelectInto(subcol, col, subgroups[i].rows)
-				builders[i].Add(name, subcol)
-			}
-		}
-
-		// Add tables to output Grouping.
-		for i := range builders {
-			subgid := gid.Extend(subgroups[i].key)
-			out.Add(subgid, builders[i].Done())
-		}
-	}
-
-	return GroupBy(out.Done(), cols[1:]...)
-}
-
-// Ungroup concatenates adjacent Tables in g that share a group parent
-// into a Table identified by the parent, undoing the effects of the
-// most recent GroupBy operation.
-func Ungroup(g Grouping) Grouping {
-	groups := g.Tables()
-	if len(groups) == 0 || len(groups) == 1 && groups[0] == RootGroupID {
-		return g
-	}
-
-	var out GroupingBuilder
-	runGid := groups[0].Parent()
-	runTabs := []*Table{}
-	for _, gid := range groups {
-		if gid.Parent() != runGid {
-			// Flush the run.
-			out.Add(runGid, concatRows(runTabs...))
-
-			runGid = gid.Parent()
-			runTabs = runTabs[:0]
-		}
-		runTabs = append(runTabs, g.Table(gid))
-	}
-	// Flush the last run.
-	out.Add(runGid, concatRows(runTabs...))
-
-	return out.Done()
-}
-
-// Flatten concatenates all of the groups in g into a single Table.
-// This is equivalent to repeatedly Ungrouping g.
-func Flatten(g Grouping) *Table {
-	groups := g.Tables()
-	switch len(groups) {
-	case 0:
-		return new(Table)
-
-	case 1:
-		return g.Table(groups[0])
-	}
-
-	tabs := make([]*Table, len(groups))
-	for i, gid := range groups {
-		tabs[i] = g.Table(gid)
-	}
-
-	return concatRows(tabs...)
-}
-
-// concatRows concatenates the rows of tabs into a single Table. All
-// Tables in tabs must all have the same column set.
-func concatRows(tabs ...*Table) *Table {
-	// TODO: Consider making this public. It would have to check
-	// the columns, and we would probably also want a concatCols.
-
-	switch len(tabs) {
-	case 0:
-		return new(Table)
-
-	case 1:
-		return tabs[0]
-	}
-
-	// Construct each column.
-	var out Builder
-	seqs := make([]slice.T, len(tabs))
-	for _, col := range tabs[0].Columns() {
-		seqs = seqs[:0]
-		for _, tab := range tabs {
-			seqs = append(seqs, tab.Column(col))
-		}
-		out.Add(col, slice.Concat(seqs...))
-	}
-
-	return out.Done()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/head.go b/vendor/github.com/aclements/go-gg/table/head.go
deleted file mode 100644
index 3c1b2ec..0000000
--- a/vendor/github.com/aclements/go-gg/table/head.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import "reflect"
-
-// Head returns the first n rows in each Table of g.
-func Head(g Grouping, n int) Grouping {
-	return headTail(g, n, false)
-}
-
-// Tail returns the last n rows in each Table of g.
-func Tail(g Grouping, n int) Grouping {
-	return headTail(g, n, true)
-}
-
-func headTail(g Grouping, n int, tail bool) Grouping {
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		if t.Len() <= n {
-			return t
-		}
-
-		var nt Builder
-		for _, col := range t.Columns() {
-			if cv, ok := t.Const(col); ok {
-				nt.AddConst(col, cv)
-				continue
-			}
-
-			cv := reflect.ValueOf(t.Column(col))
-			if tail {
-				cv = cv.Slice(t.Len()-n, t.Len())
-			} else {
-				cv = cv.Slice(0, n)
-			}
-			nt.Add(col, cv.Interface())
-		}
-		return nt.Done()
-	})
-}
-
-// HeadTables returns the first n tables in g.
-func HeadTables(g Grouping, n int) Grouping {
-	return headTailTables(g, n, false)
-}
-
-// TailTables returns the first n tables in g.
-func TailTables(g Grouping, n int) Grouping {
-	return headTailTables(g, n, true)
-}
-
-func headTailTables(g Grouping, n int, tail bool) Grouping {
-	tables := g.Tables()
-	if len(tables) <= n {
-		return g
-	} else if tail {
-		tables = tables[len(tables)-n:]
-	} else {
-		tables = tables[:n]
-	}
-
-	var ng GroupingBuilder
-	for _, gid := range tables {
-		ng.Add(gid, g.Table(gid))
-	}
-	return ng.Done()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/join.go b/vendor/github.com/aclements/go-gg/table/join.go
deleted file mode 100644
index 03ce51b..0000000
--- a/vendor/github.com/aclements/go-gg/table/join.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-// Join joins g1 and g2 on tables with identical group IDs where col1
-// in g1 equals col2 in g2. It maintains the group order of g1, except
-// that groups that aren't in g2 are removed, and maintains the row
-// order of g1, followed by the row order of g2.
-//
-// TODO: Support join on more than one column.
-func Join(g1 Grouping, col1 string, g2 Grouping, col2 string) Grouping {
-	var ng GroupingBuilder
-	for _, gid := range g1.Tables() {
-		t1, t2 := g1.Table(gid), g2.Table(gid)
-		if t2 == nil {
-			continue
-		}
-
-		// TODO: Optimize for cases where col1 and/or col2 are
-		// constant.
-
-		// Index col2 in t2.
-		ridx := make(map[interface{}][]int)
-		rv := reflect.ValueOf(t2.MustColumn(col2))
-		for i, l := 0, rv.Len(); i < l; i++ {
-			v := rv.Index(i).Interface()
-			ridx[v] = append(ridx[v], i)
-		}
-
-		// For each row in t1, find the matching rows in col2
-		// and build up the row indexes for t1 and t2.
-		idx1, idx2 := []int{}, []int{}
-		lv := reflect.ValueOf(t1.MustColumn(col1))
-		for i, l := 0, lv.Len(); i < l; i++ {
-			r := ridx[lv.Index(i).Interface()]
-			for range r {
-				idx1 = append(idx1, i)
-			}
-			idx2 = append(idx2, r...)
-		}
-
-		// Build the joined table.
-		var nt Builder
-		for _, col := range t1.Columns() {
-			if cv, ok := t1.Const(col); ok {
-				nt.Add(col, cv)
-				continue
-			}
-			nt.Add(col, slice.Select(t1.Column(col), idx1))
-		}
-		for _, col := range t2.Columns() {
-			// Often the join column is the same in both
-			// and we can skip it because we added it from
-			// the first table.
-			if col == col1 && col == col2 {
-				continue
-			}
-
-			if cv, ok := t2.Const(col); ok {
-				nt.Add(col, cv)
-				continue
-			}
-			nt.Add(col, slice.Select(t2.Column(col), idx2))
-		}
-
-		ng.Add(gid, nt.Done())
-	}
-	return ng.Done()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/map.go b/vendor/github.com/aclements/go-gg/table/map.go
deleted file mode 100644
index 87641a5..0000000
--- a/vendor/github.com/aclements/go-gg/table/map.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// MapTables applies f to each Table in g and returns a new Grouping
-// with the same group structure as g, but with the Tables returned by
-// f.
-func MapTables(g Grouping, f func(gid GroupID, table *Table) *Table) Grouping {
-	var out GroupingBuilder
-	for _, gid := range g.Tables() {
-		out.Add(gid, f(gid, g.Table(gid)))
-	}
-	return out.Done()
-}
-
-// MapCols applies f to a set of input columns to construct a set of
-// new output columns.
-//
-// For each Table in g, MapCols calls f(in[0], in[1], ..., out[0],
-// out[1], ...) where in[i] is column incols[i]. f should process the
-// values in the input column slices and fill output columns slices
-// out[j] accordingly. MapCols returns a new Grouping that adds each
-// outcols[j] bound to out[j].
-//
-// If all of the input columns are constant for a given table, MapCols
-// will call f with all slices of length 1. The input column slices
-// will contain the constant column values and MapCols will bind each
-// output column value out[i][0] as a constant.
-func MapCols(g Grouping, f interface{}, incols ...string) func(outcols ...string) Grouping {
-	return func(outcols ...string) Grouping {
-		fv := reflect.ValueOf(f)
-		if fv.Kind() != reflect.Func {
-			panic(&generic.TypeError{fv.Type(), nil, "must be a function"})
-		}
-		ft := fv.Type()
-		if ft.NumIn() != len(incols)+len(outcols) {
-			panic(&generic.TypeError{ft, nil, fmt.Sprintf("has the wrong number of arguments; expected %d", len(incols)+len(outcols))})
-		}
-		if ft.NumOut() != 0 {
-			panic(&generic.TypeError{ft, nil, "has the wrong number of results; expected 0"})
-		}
-
-		// Create output column slices.
-		totalRows := 0
-		for _, gid := range g.Tables() {
-			t := g.Table(gid)
-		colloop:
-			for _, incol := range incols {
-				if _, ok := t.Const(incol); !ok {
-					totalRows += g.Table(gid).Len()
-					break colloop
-				}
-			}
-		}
-		ocols := make([]reflect.Value, len(outcols))
-		for i := range ocols {
-			ocols[i] = reflect.MakeSlice(ft.In(i+len(incols)), totalRows, totalRows)
-		}
-
-		// Apply f to each group.
-		var out GroupingBuilder
-		args := make([]reflect.Value, len(incols)+len(outcols))
-		opos := 0
-		for _, gid := range g.Tables() {
-			t := g.Table(gid)
-
-			// Are all inputs are constants?
-			allConst := true
-			for _, incol := range incols {
-				if _, ok := t.Const(incol); !ok {
-					allConst = false
-					break
-				}
-			}
-			if allConst {
-				for i, incol := range incols {
-					cv, _ := t.Const(incol)
-					args[i] = reflect.MakeSlice(ColType(t, incol), 1, 1)
-					args[i].Index(0).Set(reflect.ValueOf(cv))
-				}
-				for i, ocol := range ocols {
-					args[i+len(incols)] = reflect.MakeSlice(ocol.Type(), 1, 1)
-				}
-
-				fv.Call(args)
-
-				tb := NewBuilder(t)
-				for i, outcol := range outcols {
-					tb.AddConst(outcol, args[i+len(incols)].Index(0).Interface())
-				}
-				out.Add(gid, tb.Done())
-				continue
-			}
-
-			// Prepare arguments.
-			for i, incol := range incols {
-				args[i] = reflect.ValueOf(t.MustColumn(incol))
-			}
-			for i, ocol := range ocols {
-				args[i+len(incols)] = ocol.Slice(opos, opos+t.Len())
-			}
-			opos += t.Len()
-
-			// Call f.
-			fv.Call(args)
-
-			// Add output columns.
-			tb := NewBuilder(t)
-			for i, outcol := range outcols {
-				tb.Add(outcol, args[i+len(incols)].Interface())
-			}
-			out.Add(gid, tb.Done())
-		}
-		return out.Done()
-	}
-}
-
-// Rename returns g with column 'from' renamed to 'to'. The column
-// retains its position.
-func Rename(g Grouping, from, to string) Grouping {
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		t.MustColumn(from)
-		var nt Builder
-		for _, col := range t.Columns() {
-			if col == to {
-				continue
-			}
-
-			ncol := col
-			if col == from {
-				ncol = to
-			}
-
-			if cv, ok := t.Const(col); ok {
-				nt.AddConst(ncol, cv)
-			} else {
-				nt.Add(ncol, t.Column(col))
-			}
-		}
-		return nt.Done()
-	})
-}
-
-// Remove returns g with column 'col' removed.
-func Remove(g Grouping, col string) Grouping {
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		t.MustColumn(col)
-		var nt Builder
-		for _, col2 := range t.Columns() {
-			if col == col2 {
-				continue
-			}
-			if cv, ok := t.Const(col2); ok {
-				nt.AddConst(col2, cv)
-			} else {
-				nt.Add(col2, t.Column(col2))
-			}
-		}
-		return nt.Done()
-	})
-}
diff --git a/vendor/github.com/aclements/go-gg/table/new.go b/vendor/github.com/aclements/go-gg/table/new.go
deleted file mode 100644
index 8094593..0000000
--- a/vendor/github.com/aclements/go-gg/table/new.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"reflect"
-	"strconv"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// TableFromStructs converts a []T where T is a struct to a Table
-// where the columns of the table correspond to T's exported fields.
-func TableFromStructs(structs Slice) *Table {
-	s := reflectSlice(structs)
-	st := s.Type()
-	if st.Elem().Kind() != reflect.Struct {
-		panic(&generic.TypeError{st, nil, "is not a slice of struct"})
-	}
-
-	var t Builder
-	rows := s.Len()
-	var rec func(reflect.Type, []int)
-	rec = func(typ reflect.Type, index []int) {
-		for fn := 0; fn < typ.NumField(); fn++ {
-			field := typ.Field(fn)
-			if field.PkgPath != "" {
-				continue
-			}
-			oldIndexLen := len(index)
-			index = append(index, field.Index...)
-			if field.Anonymous {
-				rec(field.Type, index)
-			} else {
-				col := reflect.MakeSlice(reflect.SliceOf(field.Type), rows, rows)
-				for i := 0; i < rows; i++ {
-					col.Index(i).Set(s.Index(i).FieldByIndex(index))
-				}
-				t.Add(field.Name, col.Interface())
-			}
-			index = index[:oldIndexLen]
-		}
-	}
-	rec(st.Elem(), []int{})
-	return t.Done()
-}
-
-// TableFromStrings converts a [][]string to a Table. This is intended
-// for processing external data, such as from CSV files. If coerce is
-// true, TableFromStrings will convert columns to []int or []float
-// when every string in that column is accepted by strconv.ParseInt or
-// strconv.ParseFloat, respectively.
-func TableFromStrings(cols []string, rows [][]string, coerce bool) *Table {
-	var t Builder
-	for i, col := range cols {
-		slice := make([]string, len(rows))
-		for j, row := range rows {
-			slice[j] = row[i]
-		}
-
-		var colData interface{} = slice
-		switch {
-		case coerce && len(slice) > 0:
-			// Try []int.
-			var err error
-			for _, str := range slice {
-				_, err = strconv.ParseInt(str, 10, 0)
-				if err != nil {
-					break
-				}
-			}
-			if err == nil {
-				nslice := make([]int, len(rows))
-				for i, str := range slice {
-					v, _ := strconv.ParseInt(str, 10, 0)
-					nslice[i] = int(v)
-				}
-				colData = nslice
-				break
-			}
-
-			// Try []float64. This must be done after
-			// []int. It's also more expensive.
-			for _, str := range slice {
-				_, err = strconv.ParseFloat(str, 64)
-				if err != nil {
-					break
-				}
-			}
-			if err == nil {
-				nslice := make([]float64, len(rows))
-				for i, str := range slice {
-					nslice[i], _ = strconv.ParseFloat(str, 64)
-				}
-				colData = nslice
-				break
-			}
-		}
-
-		t.Add(col, colData)
-	}
-	return t.Done()
-}
diff --git a/vendor/github.com/aclements/go-gg/table/pivot.go b/vendor/github.com/aclements/go-gg/table/pivot.go
deleted file mode 100644
index 6f488f9..0000000
--- a/vendor/github.com/aclements/go-gg/table/pivot.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-)
-
-// Pivot converts rows of g into columns. label and value must name
-// columns in g, and the label column must have type []string. Pivot
-// returns a Grouping with a new column named after each distinct
-// value in the label column, where the values in that column
-// correspond to the values from the value column. All other columns
-// (besides label and value) are copied to the output. If, for a given
-// column in an output row, no input row has that column in the label
-// column, the output cell will have the zero value for its type.
-func Pivot(g Grouping, label, value string) Grouping {
-	// Find all unique values of label. These are the new columns.
-	labels := []string{}
-	lset := map[string]int{}
-	for _, gid := range g.Tables() {
-		for _, l := range g.Table(gid).MustColumn(label).([]string) {
-			if _, ok := lset[l]; !ok {
-				lset[l] = len(lset)
-				labels = append(labels, l)
-			}
-		}
-	}
-
-	// Get all columns that are not label or value.
-	groupCols := []string{}
-	for _, col := range g.Columns() {
-		if col != label && col != value {
-			groupCols = append(groupCols, col)
-		}
-	}
-
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		var nt Builder
-
-		// Group by all other columns. Each group in gg
-		// becomes an output row.
-		gg := GroupBy(t, groupCols...)
-
-		// Copy grouped-by values.
-		for _, groupCol := range groupCols {
-			cv := reflect.MakeSlice(reflect.TypeOf(t.Column(groupCol)), len(gg.Tables()), len(gg.Tables()))
-			for i, gid := range gg.Tables() {
-				sub := gg.Table(gid)
-				cv.Index(i).Set(reflect.ValueOf(sub.Column(groupCol)).Index(0))
-			}
-			nt.Add(groupCol, cv.Interface())
-		}
-
-		// Initialize new columns.
-		newCols := make([]reflect.Value, len(lset))
-		vt := reflect.TypeOf(t.MustColumn(value))
-		for i := range newCols {
-			newCols[i] = reflect.MakeSlice(vt, len(gg.Tables()), len(gg.Tables()))
-		}
-
-		// Fill in new columns.
-		for i, gid := range gg.Tables() {
-			sub := gg.Table(gid)
-
-			vcol := reflect.ValueOf(sub.MustColumn(value))
-			for j, l := range sub.MustColumn(label).([]string) {
-				val := vcol.Index(j)
-				newCols[lset[l]].Index(i).Set(val)
-			}
-		}
-
-		// Add new columns to output table.
-		for i, newCol := range newCols {
-			nt.Add(labels[i], newCol.Interface())
-		}
-
-		return nt.Done()
-	})
-}
-
-// Unpivot converts columns of g into rows. The returned Grouping
-// consists of the columns of g *not* listed in cols, plus two columns
-// named by the label and value arguments. For each input row in g,
-// the returned Grouping will have len(cols) output rows. The i'th
-// such output row corresponds to column cols[i] in the input row. The
-// label column will contain the name of the unpivoted column,
-// cols[i], and the value column will contain that column's value from
-// the input row. The values of all other columns in the input row
-// will be repeated across the output rows. All columns in cols must
-// have the same type.
-func Unpivot(g Grouping, label, value string, cols ...string) Grouping {
-	if len(cols) == 0 {
-		panic("Unpivot requires at least 1 column")
-	}
-
-	colSet := map[string]bool{}
-	for _, col := range cols {
-		colSet[col] = true
-	}
-
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		var nt Builder
-
-		// Repeat all other columns len(cols) times.
-		ntlen := t.Len() * len(cols)
-		for _, name := range t.Columns() {
-			if colSet[name] || name == label || name == value {
-				continue
-			}
-
-			col := reflect.ValueOf(t.Column(name))
-			ncol := reflect.MakeSlice(col.Type(), ntlen, ntlen)
-			for i, l := 0, col.Len(); i < l; i++ {
-				v := col.Index(i)
-				for j := range cols {
-					ncol.Index(i*len(cols) + j).Set(v)
-				}
-			}
-
-			nt.Add(name, ncol.Interface())
-		}
-
-		// Get input columns.
-		var vt reflect.Type
-		colvs := make([]reflect.Value, len(cols))
-		for i, col := range cols {
-			colvs[i] = reflect.ValueOf(t.MustColumn(col))
-			if i == 0 {
-				vt = colvs[i].Type()
-			} else if vt != colvs[i].Type() {
-				panic(&generic.TypeError{vt, colvs[i].Type(), "; cannot Unpivot columns with different types"})
-			}
-		}
-
-		// Create label and value columns.
-		lcol := make([]string, 0, ntlen)
-		vcol := reflect.MakeSlice(vt, ntlen, ntlen)
-		for i := 0; i < t.Len(); i++ {
-			lcol = append(lcol, cols...)
-			for j, colv := range colvs {
-				vcol.Index(i*len(cols) + j).Set(colv.Index(i))
-			}
-		}
-		nt.Add(label, lcol).Add(value, vcol.Interface())
-
-		return nt.Done()
-	})
-}
diff --git a/vendor/github.com/aclements/go-gg/table/print.go b/vendor/github.com/aclements/go-gg/table/print.go
deleted file mode 100644
index 1795865..0000000
--- a/vendor/github.com/aclements/go-gg/table/print.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"strings"
-)
-
-// TODO: Have a format struct with options for things like column
-// separator, and header separator. Provide some defaults ones for,
-// e.g., Markdown, CSV, TSV, and such. Make top-level Print and Fprint
-// call methods in some default format.
-
-// Print(...) is shorthand for Fprint(os.Stderr, ...).
-func Print(g Grouping, formats ...string) error {
-	return Fprint(os.Stdout, g, formats...)
-}
-
-// Fprint prints Grouping g to w. formats[i] specifies a fmt-style
-// format string for column i. If there are more columns than formats,
-// remaining columns are formatted with %v (in particular, formats may
-// be omitted entirely to use %v for all columns). Numeric columns are
-// right aligned; all other column types are left aligned.
-func Fprint(w io.Writer, g Grouping, formats ...string) error {
-	if g.Columns() == nil {
-		return nil
-	}
-
-	// Convert each column to strings.
-	ss := make([][]string, len(g.Columns()))
-	rowFmts := make([]string, len(g.Columns()))
-	for i, col := range g.Columns() {
-		format := "%v"
-		if i < len(formats) {
-			format = formats[i]
-		}
-
-		// Format column.
-		var valKind reflect.Kind
-		ss[i] = []string{col}
-		for _, gid := range g.Tables() {
-			seq := reflect.ValueOf(g.Table(gid).Column(col))
-			for row := 0; row < seq.Len(); row++ {
-				str := fmt.Sprintf(format, seq.Index(row).Interface())
-				ss[i] = append(ss[i], str)
-			}
-
-			if valKind == reflect.Invalid {
-				valKind = seq.Type().Elem().Kind()
-			}
-		}
-
-		// Find column width.
-		width := 0
-		for _, s := range ss[i] {
-			if len(s) > width {
-				width = len(s)
-			}
-		}
-
-		// If it's a numeric column, right align.
-		//
-		// TODO: Even better would be to decimal align, though
-		// that may require some understanding of the format;
-		// or we could only do it for the default format.
-		switch valKind {
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
-			width = -width
-		}
-
-		if i == len(g.Columns())-1 && width > 0 {
-			// Don't pad the last column.
-			rowFmts[i] = "%s"
-		} else {
-			rowFmts[i] = fmt.Sprintf("%%%ds", -width)
-		}
-	}
-
-	// Compute group headers.
-	groups := []GroupID{}
-	groupPos := []int{}
-	lastPos := 1
-	for _, gid := range g.Tables() {
-		groups = append(groups, gid)
-		groupPos = append(groupPos, lastPos)
-		lastPos += g.Table(gid).Len()
-	}
-	if len(groups) == 1 && groups[0] == RootGroupID {
-		groups, groupPos = nil, nil
-	}
-
-	// Print rows.
-	rowFmt := strings.Join(rowFmts, "  ") + "\n"
-	rowBuf := make([]interface{}, len(rowFmts))
-	for row := 0; row < len(ss[0]); row++ {
-		if len(groupPos) > 0 && row == groupPos[0] {
-			_, err := fmt.Fprintf(w, "-- %s\n", groups[0])
-			if err != nil {
-				return err
-			}
-			groups, groupPos = groups[1:], groupPos[1:]
-		}
-
-		for col := range rowBuf {
-			rowBuf[col] = ss[col][row]
-		}
-		_, err := fmt.Fprintf(w, rowFmt, rowBuf...)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/aclements/go-gg/table/sort.go b/vendor/github.com/aclements/go-gg/table/sort.go
deleted file mode 100644
index ec64e00..0000000
--- a/vendor/github.com/aclements/go-gg/table/sort.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package table
-
-import (
-	"sort"
-
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-// SortBy sorts each group of g by the named columns. If a column's
-// type implements sort.Interface, rows will be sorted according to
-// that order. Otherwise, the values in the column must be naturally
-// ordered (their types must be orderable by the Go specification). If
-// neither is true, SortBy panics with a *generic.TypeError. If more
-// than one column is given, SortBy sorts by the tuple of the columns;
-// that is, if two values in the first column are equal, they are
-// sorted by the second column, and so on.
-func SortBy(g Grouping, cols ...string) Grouping {
-	// Sort each group.
-	sorters := make([]sort.Interface, len(cols))
-	return MapTables(g, func(_ GroupID, t *Table) *Table {
-		// Create sorters for each column.
-		sorters = sorters[:0]
-		for _, col := range cols {
-			if _, ok := t.Const(col); ok {
-				continue
-			}
-			seq := t.MustColumn(col)
-			sorter := slice.Sorter(seq)
-			if sort.IsSorted(sorter) {
-				continue
-			}
-			sorters = append(sorters, sorter)
-		}
-
-		if len(sorters) == 0 {
-			// Avoid shuffling everything by the identity
-			// permutation.
-			return t
-		}
-
-		// Generate an initial permutation sequence.
-		perm := make([]int, t.Len())
-		for i := range perm {
-			perm[i] = i
-		}
-
-		// Sort the permutation sequence.
-		sort.Stable(&permSort{perm, sorters})
-
-		// Permute all columns.
-		var nt Builder
-		for _, name := range t.Columns() {
-			if cv, ok := t.Const(name); ok {
-				nt.AddConst(name, cv)
-				continue
-			}
-			seq := t.Column(name)
-			seq = slice.Select(seq, perm)
-			nt.Add(name, seq)
-		}
-		return nt.Done()
-	})
-}
-
-type permSort struct {
-	perm []int
-	keys []sort.Interface
-}
-
-func (s *permSort) Len() int {
-	return len(s.perm)
-}
-
-func (s *permSort) Less(i, j int) bool {
-	// Since there's no way to ask about equality, we have to do
-	// extra work for all of the keys except the last.
-	for _, key := range s.keys[:len(s.keys)-1] {
-		if key.Less(s.perm[i], s.perm[j]) {
-			return true
-		} else if key.Less(s.perm[j], s.perm[i]) {
-			return false
-		}
-	}
-	return s.keys[len(s.keys)-1].Less(s.perm[i], s.perm[j])
-}
-
-func (s *permSort) Swap(i, j int) {
-	s.perm[i], s.perm[j] = s.perm[j], s.perm[i]
-}
diff --git a/vendor/github.com/aclements/go-gg/table/table.go b/vendor/github.com/aclements/go-gg/table/table.go
deleted file mode 100644
index 5821752..0000000
--- a/vendor/github.com/aclements/go-gg/table/table.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package table implements ordered, grouped two dimensional relations.
-//
-// There are two related abstractions: Table and Grouping.
-//
-// A Table is an ordered relation of rows and columns. Each column is
-// a Go slice and hence must be homogeneously typed, but different
-// columns may have different types. All columns in a Table have the
-// same number of rows.
-//
-// A Grouping generalizes a Table by grouping the Table's rows into
-// zero or more groups. A Table is itself a Grouping with zero or one
-// groups. Most operations take a Grouping and operate on each group
-// independently, though some operations sub-divide or combine groups.
-//
-// The structures of both Tables and Groupings are immutable. They are
-// constructed using a Builder or a GroupingBuilder, respectively, and
-// then "frozen" into their respective immutable data structures.
-package table
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/aclements/go-gg/generic"
-	"github.com/aclements/go-gg/generic/slice"
-)
-
-// TODO
-//
-// Rename Table to T?
-//
-// Make Table an interface? Then columns could be constructed lazily.
-//
-// Do all transformation functions as func(g Grouping) Grouping? That
-// could be a "Transform" type that has easy methods for chaining. In
-// a lot of cases, transformation functions could just return the
-// Transform returned by another function (like MapTables).
-//
-// Make an error type for "unknown column".
-
-// A Table is an immutable, ordered two dimensional relation. It
-// consists of a set of named columns. Each column is a sequence of
-// values of a consistent type or a constant value. All (non-constant)
-// columns have the same length.
-//
-// The zero value of Table is the "empty table": it has no rows and no
-// columns. Note that a Table may have one or more columns, but no
-// rows; such a Table is *not* considered empty.
-//
-// A Table is also a trivial Grouping. If a Table is empty, it has no
-// groups and hence the zero value of Table is also the "empty group".
-// Otherwise, it consists only of the root group, RootGroupID.
-type Table struct {
-	cols     map[string]Slice
-	consts   map[string]interface{}
-	colNames []string
-	len      int
-}
-
-// A Builder constructs a Table one column at a time.
-//
-// The zero value of a Builder represents an empty Table.
-type Builder struct {
-	t Table
-}
-
-// A Grouping is an immutable set of tables with identical sets of
-// columns, each identified by a distinct GroupID.
-//
-// Visually, a Grouping can be thought of as follows:
-//
-//	   Col A  Col B  Col C
-//	------ group /a ------
-//	0   5.4    "x"     90
-//	1   -.2    "y"     30
-//	------ group /b ------
-//	0   9.3    "a"     10
-//
-// Like a Table, a Grouping's structure is immutable. To construct a
-// Grouping, use a GroupingBuilder.
-//
-// Despite the fact that GroupIDs form a hierarchy, a Grouping ignores
-// this hierarchy and simply operates on a flat map of distinct
-// GroupIDs to Tables.
-type Grouping interface {
-	// Columns returns the names of the columns in this Grouping,
-	// or nil if there are no Tables or the group consists solely
-	// of empty Tables. All Tables in this Grouping have the same
-	// set of columns.
-	Columns() []string
-
-	// Tables returns the group IDs of the tables in this
-	// Grouping.
-	Tables() []GroupID
-
-	// Table returns the Table in group gid, or nil if there is no
-	// such Table.
-	Table(gid GroupID) *Table
-}
-
-// A GroupingBuilder constructs a Grouping one table a time.
-//
-// The zero value of a GroupingBuilder represents an empty Grouping
-// with no tables and no columns.
-type GroupingBuilder struct {
-	g        groupedTable
-	colTypes []reflect.Type
-}
-
-type groupedTable struct {
-	tables   map[GroupID]*Table
-	groups   []GroupID
-	colNames []string
-}
-
-// A Slice is a Go slice value.
-//
-// This is primarily for documentation. There is no way to statically
-// enforce this in Go; however, functions that expect a Slice will
-// panic with a *generic.TypeError if passed a non-slice value.
-type Slice interface{}
-
-func reflectSlice(s Slice) reflect.Value {
-	rv := reflect.ValueOf(s)
-	if rv.Kind() != reflect.Slice {
-		panic(&generic.TypeError{rv.Type(), nil, "is not a slice"})
-	}
-	return rv
-}
-
-// NewBuilder returns a new Builder. If t is non-nil, it populates the
-// new Builder with the columns from t.
-func NewBuilder(t *Table) *Builder {
-	if t == nil {
-		return new(Builder)
-	}
-	b := Builder{Table{
-		cols:     make(map[string]Slice),
-		consts:   make(map[string]interface{}),
-		colNames: append([]string(nil), t.Columns()...),
-		len:      t.len,
-	}}
-	for k, v := range t.cols {
-		b.t.cols[k] = v
-	}
-	for k, v := range t.consts {
-		b.t.consts[k] = v
-	}
-	return &b
-}
-
-// Add adds a column to b, or removes the named column if data is nil.
-// If b already has a column with the given name, Add replaces it. If
-// data is non-nil, it must have the same length as any existing
-// columns or Add will panic.
-func (b *Builder) Add(name string, data Slice) *Builder {
-	if data == nil {
-		// Remove the column.
-		if _, ok := b.t.cols[name]; !ok {
-			if _, ok := b.t.consts[name]; !ok {
-				// Nothing to remove.
-				return b
-			}
-		}
-		delete(b.t.cols, name)
-		delete(b.t.consts, name)
-		for i, n := range b.t.colNames {
-			if n == name {
-				copy(b.t.colNames[i:], b.t.colNames[i+1:])
-				b.t.colNames = b.t.colNames[:len(b.t.colNames)-1]
-				break
-			}
-		}
-		return b
-	}
-
-	// Are we replacing an existing column?
-	_, replace := b.t.cols[name]
-	if !replace {
-		_, replace = b.t.consts[name]
-	}
-
-	// Check the column and add it.
-	rv := reflectSlice(data)
-	dataLen := rv.Len()
-	if len(b.t.cols) == 0 || (replace && len(b.t.cols) == 1) {
-		if b.t.cols == nil {
-			b.t.cols = make(map[string]Slice)
-		}
-		// First non-constant column (possibly replacing the
-		// only non-constant column).
-		b.t.cols[name] = data
-		b.t.len = dataLen
-	} else if b.t.len != dataLen {
-		panic(fmt.Sprintf("cannot add column %q with %d elements to table with %d rows", name, dataLen, b.t.len))
-	} else {
-		b.t.cols[name] = data
-	}
-
-	if replace {
-		// Make sure it's not in constants.
-		delete(b.t.consts, name)
-	} else {
-		b.t.colNames = append(b.t.colNames, name)
-	}
-
-	return b
-}
-
-// AddConst adds a constant column to b whose value is val. If b
-// already has a column with this name, AddConst replaces it.
-//
-// A constant column has the same value in every row of the Table. It
-// does not itself have an inherent length.
-func (b *Builder) AddConst(name string, val interface{}) *Builder {
-	// Are we replacing an existing column?
-	_, replace := b.t.cols[name]
-	if !replace {
-		_, replace = b.t.consts[name]
-	}
-
-	if b.t.consts == nil {
-		b.t.consts = make(map[string]interface{})
-	}
-	b.t.consts[name] = val
-
-	if replace {
-		// Make sure it's not in cols.
-		delete(b.t.cols, name)
-	} else {
-		b.t.colNames = append(b.t.colNames, name)
-	}
-
-	return b
-}
-
-// Has returns true if b has a column named "name".
-func (b *Builder) Has(name string) bool {
-	if _, ok := b.t.cols[name]; ok {
-		return true
-	}
-	if _, ok := b.t.consts[name]; ok {
-		return true
-	}
-	return false
-}
-
-// Done returns the constructed Table and resets b.
-func (b *Builder) Done() *Table {
-	if len(b.t.colNames) == 0 {
-		return new(Table)
-	}
-	t := b.t
-	b.t = Table{}
-	return &t
-}
-
-// Len returns the number of rows in Table t.
-func (t *Table) Len() int {
-	return t.len
-}
-
-// Columns returns the names of the columns in Table t, or nil if this
-// Table is empty.
-func (t *Table) Columns() []string {
-	return t.colNames
-}
-
-// Column returns the slice of data in column name of Table t, or nil
-// if there is no such column. If name is a constant column, this
-// returns a slice with the constant value repeated to the length of
-// the Table.
-func (t *Table) Column(name string) Slice {
-	if c, ok := t.cols[name]; ok {
-		// It's a regular column or a constant column with a
-		// cached expansion.
-		return c
-	}
-
-	if cv, ok := t.consts[name]; ok {
-		// Expand the constant column and cache the result.
-		expanded := slice.Repeat(cv, t.len)
-		t.cols[name] = expanded
-		return expanded
-	}
-
-	return nil
-}
-
-// MustColumn is like Column, but panics if there is no such column.
-func (t *Table) MustColumn(name string) Slice {
-	if c := t.Column(name); c != nil {
-		return c
-	}
-	panic(fmt.Sprintf("unknown column %q", name))
-}
-
-// Const returns the value of constant column name. If this column
-// does not exist or is not a constant column, Const returns nil,
-// false.
-func (t *Table) Const(name string) (val interface{}, ok bool) {
-	cv, ok := t.consts[name]
-	return cv, ok
-}
-
-// isEmpty returns true if t is an empty Table, meaning it has no rows
-// or columns.
-func (t *Table) isEmpty() bool {
-	return t.colNames == nil
-}
-
-// Tables returns the groups IDs in this Table. If t is empty, there
-// are no group IDs. Otherwise, there is only RootGroupID.
-func (t *Table) Tables() []GroupID {
-	if t.isEmpty() {
-		return []GroupID{}
-	}
-	return []GroupID{RootGroupID}
-}
-
-// Table returns t if gid is RootGroupID and t is not empty; otherwise
-// it returns nil.
-func (t *Table) Table(gid GroupID) *Table {
-	if gid == RootGroupID && !t.isEmpty() {
-		return t
-	}
-	return nil
-}
-
-// NewGroupingBuilder returns a new GroupingBuilder. If g is non-nil,
-// it populates the new GroupingBuilder with the tables from g.
-func NewGroupingBuilder(g Grouping) *GroupingBuilder {
-	if g == nil {
-		return new(GroupingBuilder)
-	}
-	b := GroupingBuilder{groupedTable{
-		tables:   make(map[GroupID]*Table),
-		groups:   append([]GroupID(nil), g.Tables()...),
-		colNames: append([]string(nil), g.Columns()...),
-	}, nil}
-	for _, gid := range g.Tables() {
-		t := g.Table(gid)
-		b.g.tables[gid] = t
-		if b.colTypes == nil {
-			b.colTypes = colTypes(t)
-		}
-	}
-	return &b
-}
-
-func colTypes(t *Table) []reflect.Type {
-	colTypes := make([]reflect.Type, len(t.colNames))
-	for i, col := range t.colNames {
-		if c, ok := t.cols[col]; ok {
-			colTypes[i] = reflect.TypeOf(c).Elem()
-		} else {
-			colTypes[i] = reflect.TypeOf(t.consts[col])
-		}
-	}
-	return colTypes
-}
-
-// Add adds a Table to b, or removes a table if t is nil. If t is the
-// empty Table, this is a no-op because the empty Table contains no
-// groups. If gid already exists, Add replaces it. Table t must have
-// the same columns as any existing Tables in this Grouping and they
-// must have identical types; otherwise, Add will panic.
-//
-// TODO This doesn't make it easy to combine two Groupings. It could
-// instead take a Grouping and reparent it.
-func (b *GroupingBuilder) Add(gid GroupID, t *Table) *GroupingBuilder {
-	if t == nil {
-		if _, ok := b.g.tables[gid]; !ok {
-			// Nothing to remove.
-			return b
-		}
-		delete(b.g.tables, gid)
-		for i, g2 := range b.g.groups {
-			if g2 == gid {
-				copy(b.g.groups[i:], b.g.groups[i+1:])
-				b.g.groups = b.g.groups[:len(b.g.groups)-1]
-				break
-			}
-		}
-		return b
-	}
-
-	if t != nil && t.isEmpty() {
-		// Adding an empty table has no effect.
-		return b
-	}
-
-	if len(b.g.groups) == 1 && b.g.groups[0] == gid {
-		// We're replacing the only group. This is allowed to
-		// change the shape of the Grouping.
-		b.g.tables[gid] = t
-		b.g.colNames = t.Columns()
-		b.colTypes = colTypes(t)
-		return b
-	} else if len(b.g.groups) == 0 {
-		b.g.tables = map[GroupID]*Table{gid: t}
-		b.g.groups = []GroupID{gid}
-		b.g.colNames = t.Columns()
-		b.colTypes = colTypes(t)
-		return b
-	}
-
-	// Check that t's column names match.
-	matches := true
-	if len(t.colNames) != len(b.g.colNames) {
-		matches = false
-	} else {
-		for i, n := range t.colNames {
-			if b.g.colNames[i] != n {
-				matches = false
-				break
-			}
-		}
-	}
-	if !matches {
-		panic(fmt.Sprintf("table columns %q do not match group columns %q", t.colNames, b.g.colNames))
-	}
-
-	// Check that t's column types match.
-	for i, col := range b.g.colNames {
-		t0 := b.colTypes[i]
-		var t1 reflect.Type
-		if c, ok := t.cols[col]; ok {
-			t1 = reflect.TypeOf(c).Elem()
-		} else if cv, ok := t.consts[col]; ok {
-			t1 = reflect.TypeOf(cv)
-		}
-		if t0 != t1 {
-			panic(&generic.TypeError{t0, t1, fmt.Sprintf("for column %q are not the same", col)})
-		}
-	}
-
-	// Add t.
-	if _, ok := b.g.tables[gid]; !ok {
-		b.g.groups = append(b.g.groups, gid)
-	}
-	b.g.tables[gid] = t
-
-	return b
-}
-
-// Done returns the constructed Grouping and resets b.
-func (b *GroupingBuilder) Done() Grouping {
-	if len(b.g.groups) == 0 {
-		return new(groupedTable)
-	}
-	g := b.g
-	b.g = groupedTable{}
-	return &g
-}
-
-func (g *groupedTable) Columns() []string {
-	return g.colNames
-}
-
-func (g *groupedTable) Tables() []GroupID {
-	return g.groups
-}
-
-func (g *groupedTable) Table(gid GroupID) *Table {
-	return g.tables[gid]
-}
-
-// ColType returns the type of column col in g. This will always be a
-// slice type, even if col is a constant column. ColType panics if col
-// is unknown.
-//
-// TODO: If I introduce a first-class representation for a grouped
-// column, this should probably be in that.
-func ColType(g Grouping, col string) reflect.Type {
-	tables := g.Tables()
-	if len(tables) == 0 {
-		panic(fmt.Sprintf("unknown column %q", col))
-	}
-	t0 := g.Table(tables[0])
-	if cv, ok := t0.Const(col); ok {
-		return reflect.SliceOf(reflect.TypeOf(cv))
-	}
-	return reflect.TypeOf(t0.MustColumn(col))
-}
diff --git a/vendor/github.com/aclements/go-moremath/LICENSE b/vendor/github.com/aclements/go-moremath/LICENSE
deleted file mode 100644
index d29b372..0000000
--- a/vendor/github.com/aclements/go-moremath/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2015 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aclements/go-moremath/mathx/beta.go b/vendor/github.com/aclements/go-moremath/mathx/beta.go
deleted file mode 100644
index 49f8722..0000000
--- a/vendor/github.com/aclements/go-moremath/mathx/beta.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mathx
-
-import "math"
-
-func lgamma(x float64) float64 {
-	y, _ := math.Lgamma(x)
-	return y
-}
-
-// Beta returns the value of the complete beta function B(a, b).
-func Beta(a, b float64) float64 {
-	// B(x,y) = Γ(x)Γ(y) / Γ(x+y)
-	return math.Exp(lgamma(a) + lgamma(b) - lgamma(a+b))
-}
-
-// BetaInc returns the value of the regularized incomplete beta
-// function Iₓ(a, b).
-//
-// This is not to be confused with the "incomplete beta function",
-// which can be computed as BetaInc(x, a, b)*Beta(a, b).
-//
-// If x < 0 or x > 1, returns NaN.
-func BetaInc(x, a, b float64) float64 {
-	// Based on Numerical Recipes in C, section 6.4. This uses the
-	// continued fraction definition of I:
-	//
-	//  (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...))))))
-	//
-	// where B(a,b) is the beta function and
-	//
-	//  d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1))
-	//  d_{2m}   = m(b-m)x/((a+2m-1)(a+2m))
-	if x < 0 || x > 1 {
-		return math.NaN()
-	}
-	bt := 0.0
-	if 0 < x && x < 1 {
-		// Compute the coefficient before the continued
-		// fraction.
-		bt = math.Exp(lgamma(a+b) - lgamma(a) - lgamma(b) +
-			a*math.Log(x) + b*math.Log(1-x))
-	}
-	if x < (a+1)/(a+b+2) {
-		// Compute continued fraction directly.
-		return bt * betacf(x, a, b) / a
-	} else {
-		// Compute continued fraction after symmetry transform.
-		return 1 - bt*betacf(1-x, b, a)/b
-	}
-}
-
-// betacf is the continued fraction component of the regularized
-// incomplete beta function Iₓ(a, b).
-func betacf(x, a, b float64) float64 {
-	const maxIterations = 200
-	const epsilon = 3e-14
-
-	raiseZero := func(z float64) float64 {
-		if math.Abs(z) < math.SmallestNonzeroFloat64 {
-			return math.SmallestNonzeroFloat64
-		}
-		return z
-	}
-
-	c := 1.0
-	d := 1 / raiseZero(1-(a+b)*x/(a+1))
-	h := d
-	for m := 1; m <= maxIterations; m++ {
-		mf := float64(m)
-
-		// Even step of the recurrence.
-		numer := mf * (b - mf) * x / ((a + 2*mf - 1) * (a + 2*mf))
-		d = 1 / raiseZero(1+numer*d)
-		c = raiseZero(1 + numer/c)
-		h *= d * c
-
-		// Odd step of the recurrence.
-		numer = -(a + mf) * (a + b + mf) * x / ((a + 2*mf) * (a + 2*mf + 1))
-		d = 1 / raiseZero(1+numer*d)
-		c = raiseZero(1 + numer/c)
-		hfac := d * c
-		h *= hfac
-
-		if math.Abs(hfac-1) < epsilon {
-			return h
-		}
-	}
-	panic("betainc: a or b too big; failed to converge")
-}
diff --git a/vendor/github.com/aclements/go-moremath/mathx/choose.go b/vendor/github.com/aclements/go-moremath/mathx/choose.go
deleted file mode 100644
index 54dc27c..0000000
--- a/vendor/github.com/aclements/go-moremath/mathx/choose.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mathx
-
-import "math"
-
-const smallFactLimit = 20 // 20! => 62 bits
-var smallFact [smallFactLimit + 1]int64
-
-func init() {
-	smallFact[0] = 1
-	fact := int64(1)
-	for n := int64(1); n <= smallFactLimit; n++ {
-		fact *= n
-		smallFact[n] = fact
-	}
-}
-
-// Choose returns the binomial coefficient of n and k.
-func Choose(n, k int) float64 {
-	if k == 0 || k == n {
-		return 1
-	}
-	if k < 0 || n < k {
-		return 0
-	}
-	if n <= smallFactLimit { // Implies k <= smallFactLimit
-		// It's faster to do several integer multiplications
-		// than it is to do an extra integer division.
-		// Remarkably, this is also faster than pre-computing
-		// Pascal's triangle (presumably because this is very
-		// cache efficient).
-		numer := int64(1)
-		for n1 := int64(n - (k - 1)); n1 <= int64(n); n1++ {
-			numer *= n1
-		}
-		denom := smallFact[k]
-		return float64(numer / denom)
-	}
-
-	return math.Exp(lchoose(n, k))
-}
-
-// Lchoose returns math.Log(Choose(n, k)).
-func Lchoose(n, k int) float64 {
-	if k == 0 || k == n {
-		return 0
-	}
-	if k < 0 || n < k {
-		return math.NaN()
-	}
-	return lchoose(n, k)
-}
-
-func lchoose(n, k int) float64 {
-	a, _ := math.Lgamma(float64(n + 1))
-	b, _ := math.Lgamma(float64(k + 1))
-	c, _ := math.Lgamma(float64(n - k + 1))
-	return a - b - c
-}
diff --git a/vendor/github.com/aclements/go-moremath/mathx/gamma.go b/vendor/github.com/aclements/go-moremath/mathx/gamma.go
deleted file mode 100644
index d11096e..0000000
--- a/vendor/github.com/aclements/go-moremath/mathx/gamma.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mathx
-
-import "math"
-
-// GammaInc returns the value of the incomplete gamma function (also
-// known as the regularized gamma function):
-//
-//   P(a, x) = 1 / Γ(a) * ∫₀ˣ exp(-t) t**(a-1) dt
-func GammaInc(a, x float64) float64 {
-	// Based on Numerical Recipes in C, section 6.2.
-
-	if a <= 0 || x < 0 || math.IsNaN(a) || math.IsNaN(x) {
-		return math.NaN()
-	}
-
-	if x < a+1 {
-		// Use the series representation, which converges more
-		// rapidly in this range.
-		return gammaIncSeries(a, x)
-	} else {
-		// Use the continued fraction representation.
-		return 1 - gammaIncCF(a, x)
-	}
-}
-
-// GammaIncComp returns the complement of the incomplete gamma
-// function 1 - GammaInc(a, x). This is more numerically stable for
-// values near 0.
-func GammaIncComp(a, x float64) float64 {
-	if a <= 0 || x < 0 || math.IsNaN(a) || math.IsNaN(x) {
-		return math.NaN()
-	}
-
-	if x < a+1 {
-		return 1 - gammaIncSeries(a, x)
-	} else {
-		return gammaIncCF(a, x)
-	}
-}
-
-func gammaIncSeries(a, x float64) float64 {
-	const maxIterations = 200
-	const epsilon = 3e-14
-
-	if x == 0 {
-		return 0
-	}
-
-	ap := a
-	del := 1 / a
-	sum := del
-	for n := 0; n < maxIterations; n++ {
-		ap++
-		del *= x / ap
-		sum += del
-		if math.Abs(del) < math.Abs(sum)*epsilon {
-			return sum * math.Exp(-x+a*math.Log(x)-lgamma(a))
-		}
-	}
-	panic("a too large; failed to converge")
-}
-
-func gammaIncCF(a, x float64) float64 {
-	const maxIterations = 200
-	const epsilon = 3e-14
-
-	raiseZero := func(z float64) float64 {
-		if math.Abs(z) < math.SmallestNonzeroFloat64 {
-			return math.SmallestNonzeroFloat64
-		}
-		return z
-	}
-
-	b := x + 1 - a
-	c := math.MaxFloat64
-	d := 1 / b
-	h := d
-
-	for i := 1; i <= maxIterations; i++ {
-		an := -float64(i) * (float64(i) - a)
-		b += 2
-		d = raiseZero(an*d + b)
-		c = raiseZero(b + an/c)
-		d = 1 / d
-		del := d * c
-		h *= del
-		if math.Abs(del-1) < epsilon {
-			return math.Exp(-x+a*math.Log(x)-lgamma(a)) * h
-		}
-	}
-	panic("a too large; failed to converge")
-}
diff --git a/vendor/github.com/aclements/go-moremath/mathx/package.go b/vendor/github.com/aclements/go-moremath/mathx/package.go
deleted file mode 100644
index 9d5de0d..0000000
--- a/vendor/github.com/aclements/go-moremath/mathx/package.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mathx implements special functions not provided by the
-// standard math package.
-package mathx // import "github.com/aclements/go-moremath/mathx"
-
-import "math"
-
-var nan = math.NaN()
diff --git a/vendor/github.com/aclements/go-moremath/mathx/sign.go b/vendor/github.com/aclements/go-moremath/mathx/sign.go
deleted file mode 100644
index 372e92f..0000000
--- a/vendor/github.com/aclements/go-moremath/mathx/sign.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mathx
-
-// Sign returns the sign of x: -1 if x < 0, 0 if x == 0, 1 if x > 0.
-// If x is NaN, it returns NaN.
-func Sign(x float64) float64 {
-	if x == 0 {
-		return 0
-	} else if x < 0 {
-		return -1
-	} else if x > 0 {
-		return 1
-	}
-	return nan
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/alg.go b/vendor/github.com/aclements/go-moremath/stats/alg.go
deleted file mode 100644
index f704cef..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/alg.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-// Miscellaneous helper algorithms
-
-import (
-	"fmt"
-
-	"github.com/aclements/go-moremath/mathx"
-)
-
-func maxint(a, b int) int {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-func minint(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func sumint(xs []int) int {
-	sum := 0
-	for _, x := range xs {
-		sum += x
-	}
-	return sum
-}
-
-// bisect returns an x in [low, high] such that |f(x)| <= tolerance
-// using the bisection method.
-//
-// f(low) and f(high) must have opposite signs.
-//
-// If f does not have a root in this interval (e.g., it is
-// discontiguous), this returns the X of the apparent discontinuity
-// and false.
-func bisect(f func(float64) float64, low, high, tolerance float64) (float64, bool) {
-	flow, fhigh := f(low), f(high)
-	if -tolerance <= flow && flow <= tolerance {
-		return low, true
-	}
-	if -tolerance <= fhigh && fhigh <= tolerance {
-		return high, true
-	}
-	if mathx.Sign(flow) == mathx.Sign(fhigh) {
-		panic(fmt.Sprintf("root of f is not bracketed by [low, high]; f(%g)=%g f(%g)=%g", low, flow, high, fhigh))
-	}
-	for {
-		mid := (high + low) / 2
-		fmid := f(mid)
-		if -tolerance <= fmid && fmid <= tolerance {
-			return mid, true
-		}
-		if mid == high || mid == low {
-			return mid, false
-		}
-		if mathx.Sign(fmid) == mathx.Sign(flow) {
-			low = mid
-			flow = fmid
-		} else {
-			high = mid
-			fhigh = fmid
-		}
-	}
-}
-
-// bisectBool implements the bisection method on a boolean function.
-// It returns x1, x2 ∈ [low, high], x1 < x2 such that f(x1) != f(x2)
-// and x2 - x1 <= xtol.
-//
-// If f(low) == f(high), it panics.
-func bisectBool(f func(float64) bool, low, high, xtol float64) (x1, x2 float64) {
-	flow, fhigh := f(low), f(high)
-	if flow == fhigh {
-		panic(fmt.Sprintf("root of f is not bracketed by [low, high]; f(%g)=%v f(%g)=%v", low, flow, high, fhigh))
-	}
-	for {
-		if high-low <= xtol {
-			return low, high
-		}
-		mid := (high + low) / 2
-		if mid == high || mid == low {
-			return low, high
-		}
-		fmid := f(mid)
-		if fmid == flow {
-			low = mid
-			flow = fmid
-		} else {
-			high = mid
-			fhigh = fmid
-		}
-	}
-}
-
-// series returns the sum of the series f(0), f(1), ...
-//
-// This implementation is fast, but subject to round-off error.
-func series(f func(float64) float64) float64 {
-	y, yp := 0.0, 1.0
-	for n := 0.0; y != yp; n++ {
-		yp = y
-		y += f(n)
-	}
-	return y
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/deltadist.go b/vendor/github.com/aclements/go-moremath/stats/deltadist.go
deleted file mode 100644
index bb3ba3f..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/deltadist.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-// DeltaDist is the Dirac delta function, centered at T, with total
-// area 1.
-//
-// The CDF of the Dirac delta function is the Heaviside step function,
-// centered at T. Specifically, f(T) == 1.
-type DeltaDist struct {
-	T float64
-}
-
-func (d DeltaDist) PDF(x float64) float64 {
-	if x == d.T {
-		return inf
-	}
-	return 0
-}
-
-func (d DeltaDist) pdfEach(xs []float64) []float64 {
-	res := make([]float64, len(xs))
-	for i, x := range xs {
-		if x == d.T {
-			res[i] = inf
-		}
-	}
-	return res
-}
-
-func (d DeltaDist) CDF(x float64) float64 {
-	if x >= d.T {
-		return 1
-	}
-	return 0
-}
-
-func (d DeltaDist) cdfEach(xs []float64) []float64 {
-	res := make([]float64, len(xs))
-	for i, x := range xs {
-		res[i] = d.CDF(x)
-	}
-	return res
-}
-
-func (d DeltaDist) InvCDF(y float64) float64 {
-	if y < 0 || y > 1 {
-		return nan
-	}
-	return d.T
-}
-
-func (d DeltaDist) Bounds() (float64, float64) {
-	return d.T - 1, d.T + 1
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/dist.go b/vendor/github.com/aclements/go-moremath/stats/dist.go
deleted file mode 100644
index 048477d..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/dist.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import "math/rand"
-
-// A DistCommon is a statistical distribution. DistCommon is a base
-// interface provided by both continuous and discrete distributions.
-type DistCommon interface {
-	// CDF returns the cumulative probability Pr[X <= x].
-	//
-	// For continuous distributions, the CDF is the integral of
-	// the PDF from -inf to x.
-	//
-	// For discrete distributions, the CDF is the sum of the PMF
-	// at all defined points from -inf to x, inclusive. Note that
-	// the CDF of a discrete distribution is defined for the whole
-	// real line (unlike the PMF) but has discontinuities where
-	// the PMF is non-zero.
-	//
-	// The CDF is a monotonically increasing function and has a
-	// domain of all real numbers. If the distribution has bounded
-	// support, it has a range of [0, 1]; otherwise it has a range
-	// of (0, 1). Finally, CDF(-inf)==0 and CDF(inf)==1.
-	CDF(x float64) float64
-
-	// Bounds returns reasonable bounds for this distribution's
-	// PDF/PMF and CDF. The total weight outside of these bounds
-	// should be approximately 0.
-	//
-	// For a discrete distribution, both bounds are integer
-	// multiples of Step().
-	//
-	// If this distribution has finite support, it returns exact
-	// bounds l, h such that CDF(l')=0 for all l' < l and
-	// CDF(h')=1 for all h' >= h.
-	Bounds() (float64, float64)
-}
-
-// A Dist is a continuous statistical distribution.
-type Dist interface {
-	DistCommon
-
-	// PDF returns the value of the probability density function
-	// of this distribution at x.
-	PDF(x float64) float64
-}
-
-// A DiscreteDist is a discrete statistical distribution.
-//
-// Most discrete distributions are defined only at integral values of
-// the random variable. However, some are defined at other intervals,
-// so this interface takes a float64 value for the random variable.
-// The probability mass function rounds down to the nearest defined
-// point. Note that float64 values can exactly represent integer
-// values between ±2**53, so this generally shouldn't be an issue for
-// integer-valued distributions (likewise, for half-integer-valued
-// distributions, float64 can exactly represent all values between
-// ±2**52).
-type DiscreteDist interface {
-	DistCommon
-
-	// PMF returns the value of the probability mass function
-	// Pr[X = x'], where x' is x rounded down to the nearest
-	// defined point on the distribution.
-	//
-	// Note for implementers: for integer-valued distributions,
-	// round x using int(math.Floor(x)). Do not use int(x), since
-	// that truncates toward zero (unless all x <= 0 are handled
-	// the same).
-	PMF(x float64) float64
-
-	// Step returns s, where the distribution is defined for sℕ.
-	Step() float64
-}
-
-// TODO: Add a Support method for finite support distributions? Or
-// maybe just another return value from Bounds indicating that the
-// bounds are exact?
-
-// TODO: Plot method to return a pre-configured Plot object with
-// reasonable bounds and an integral function? Have to distinguish
-// PDF/CDF/InvCDF. Three methods? Argument?
-//
-// Doesn't have to be a method of Dist. Could be just a function that
-// takes a Dist and uses Bounds.
-
-// InvCDF returns the inverse CDF function of the given distribution
-// (also known as the quantile function or the percent point
-// function). This is a function f such that f(dist.CDF(x)) == x. If
-// dist.CDF is only weakly monotonic (that it, there are intervals
-// over which it is constant) and y > 0, f returns the smallest x that
-// satisfies this condition. In general, the inverse CDF is not
-// well-defined for y==0, but for convenience if y==0, f returns the
-// largest x that satisfies this condition. For distributions with
-// infinite support both the largest and smallest x are -Inf; however,
-// for distributions with finite support, this is the lower bound of
-// the support.
-//
-// If y < 0 or y > 1, f returns NaN.
-//
-// If dist implements InvCDF(float64) float64, this returns that
-// method. Otherwise, it returns a function that uses a generic
-// numerical method to construct the inverse CDF at y by finding x
-// such that dist.CDF(x) == y. This may have poor precision around
-// points of discontinuity, including f(0) and f(1).
-func InvCDF(dist DistCommon) func(y float64) (x float64) {
-	type invCDF interface {
-		InvCDF(float64) float64
-	}
-	if dist, ok := dist.(invCDF); ok {
-		return dist.InvCDF
-	}
-
-	// Otherwise, use a numerical algorithm.
-	//
-	// TODO: For discrete distributions, use the step size to
-	// inform this computation.
-	return func(y float64) (x float64) {
-		const almostInf = 1e100
-		const xtol = 1e-16
-
-		if y < 0 || y > 1 {
-			return nan
-		} else if y == 0 {
-			l, _ := dist.Bounds()
-			if dist.CDF(l) == 0 {
-				// Finite support
-				return l
-			} else {
-				// Infinite support
-				return -inf
-			}
-		} else if y == 1 {
-			_, h := dist.Bounds()
-			if dist.CDF(h) == 1 {
-				// Finite support
-				return h
-			} else {
-				// Infinite support
-				return inf
-			}
-		}
-
-		// Find loX, hiX for which cdf(loX) < y <= cdf(hiX).
-		var loX, loY, hiX, hiY float64
-		x1, y1 := 0.0, dist.CDF(0)
-		xdelta := 1.0
-		if y1 < y {
-			hiX, hiY = x1, y1
-			for hiY < y && hiX != inf {
-				loX, loY, hiX = hiX, hiY, hiX+xdelta
-				hiY = dist.CDF(hiX)
-				xdelta *= 2
-			}
-		} else {
-			loX, loY = x1, y1
-			for y <= loY && loX != -inf {
-				hiX, hiY, loX = loX, loY, loX-xdelta
-				loY = dist.CDF(loX)
-				xdelta *= 2
-			}
-		}
-		if loX == -inf {
-			return loX
-		} else if hiX == inf {
-			return hiX
-		}
-
-		// Use bisection on the interval to find the smallest
-		// x at which cdf(x) <= y.
-		_, x = bisectBool(func(x float64) bool {
-			return dist.CDF(x) < y
-		}, loX, hiX, xtol)
-		return
-	}
-}
-
-// Rand returns a random number generator that draws from the given
-// distribution. The returned generator takes an optional source of
-// randomness; if this is nil, it uses the default global source.
-//
-// If dist implements Rand(*rand.Rand) float64, Rand returns that
-// method. Otherwise, it returns a generic generator based on dist's
-// inverse CDF (which may in turn use an efficient implementation or a
-// generic numerical implementation; see InvCDF).
-func Rand(dist DistCommon) func(*rand.Rand) float64 {
-	type distRand interface {
-		Rand(*rand.Rand) float64
-	}
-	if dist, ok := dist.(distRand); ok {
-		return dist.Rand
-	}
-
-	// Otherwise, use a generic algorithm.
-	inv := InvCDF(dist)
-	return func(r *rand.Rand) float64 {
-		var y float64
-		for y == 0 {
-			if r == nil {
-				y = rand.Float64()
-			} else {
-				y = r.Float64()
-			}
-		}
-		return inv(y)
-	}
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/hist.go b/vendor/github.com/aclements/go-moremath/stats/hist.go
deleted file mode 100644
index 8578c07..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/hist.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import "math"
-
-// TODO: Implement histograms on top of scales.
-
-type Histogram interface {
-	// Add adds a sample with value x to histogram h.
-	Add(x float64)
-
-	// Counts returns the number of samples less than the lowest
-	// bin, a slice of the number of samples in each bin,
-	// and the number of samples greater than the highest bin.
-	Counts() (under uint, counts []uint, over uint)
-
-	// BinToValue returns the value that would appear at the given
-	// bin index.
-	//
-	// For integral values of bin, BinToValue returns the lower
-	// bound of bin.  That is, a sample value x will be in bin if
-	// bin is integral and
-	//
-	//    BinToValue(bin) <= x < BinToValue(bin + 1)
-	//
-	// For non-integral values of bin, BinToValue interpolates
-	// between the lower and upper bounds of math.Floor(bin).
-	//
-	// BinToValue is undefined if bin > 1 + the number of bins.
-	BinToValue(bin float64) float64
-}
-
-// HistogramQuantile returns the x such that n*q samples in hist are
-// <= x, assuming values are distibuted within each bin according to
-// hist's distribution.
-//
-// If the q'th sample falls below the lowest bin or above the highest
-// bin, returns NaN.
-func HistogramQuantile(hist Histogram, q float64) float64 {
-	under, counts, over := hist.Counts()
-	total := under + over
-	for _, count := range counts {
-		total += count
-	}
-
-	goal := uint(float64(total) * q)
-	if goal <= under || goal > total-over {
-		return math.NaN()
-	}
-	for bin, count := range counts {
-		if count > goal {
-			return hist.BinToValue(float64(bin) + float64(goal)/float64(count))
-		}
-		goal -= count
-	}
-	panic("goal count not reached")
-}
-
-// HistogramIQR returns the interquartile range of the samples in
-// hist.
-func HistogramIQR(hist Histogram) float64 {
-	return HistogramQuantile(hist, 0.75) - HistogramQuantile(hist, 0.25)
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/hypergdist.go b/vendor/github.com/aclements/go-moremath/stats/hypergdist.go
deleted file mode 100644
index 1ea05e2..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/hypergdist.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-
-	"github.com/aclements/go-moremath/mathx"
-)
-
-// HypergeometicDist is a hypergeometric distribution.
-type HypergeometicDist struct {
-	// N is the size of the population. N >= 0.
-	N int
-
-	// K is the number of successes in the population. 0 <= K <= N.
-	K int
-
-	// Draws is the number of draws from the population. This is
-	// usually written "n", but is called Draws here because of
-	// limitations on Go identifier naming. 0 <= Draws <= N.
-	Draws int
-}
-
-// PMF is the probability of getting exactly int(k) successes in
-// d.Draws draws with replacement from a population of size d.N that
-// contains exactly d.K successes.
-func (d HypergeometicDist) PMF(k float64) float64 {
-	ki := int(math.Floor(k))
-	l, h := d.bounds()
-	if ki < l || ki > h {
-		return 0
-	}
-	return d.pmf(ki)
-}
-
-func (d HypergeometicDist) pmf(k int) float64 {
-	return math.Exp(mathx.Lchoose(d.K, k) + mathx.Lchoose(d.N-d.K, d.Draws-k) - mathx.Lchoose(d.N, d.Draws))
-}
-
-// CDF is the probability of getting int(k) or fewer successes in
-// d.Draws draws with replacement from a population of size d.N that
-// contains exactly d.K successes.
-func (d HypergeometicDist) CDF(k float64) float64 {
-	// Based on Klotz, A Computational Approach to Statistics.
-	ki := int(math.Floor(k))
-	l, h := d.bounds()
-	if ki < l {
-		return 0
-	} else if ki >= h {
-		return 1
-	}
-	// Use symmetry to compute the smaller sum.
-	flip := false
-	if ki > (d.Draws+1)/(d.N+1)*(d.K+1) {
-		flip = true
-		ki = d.K - ki - 1
-		d.Draws = d.N - d.Draws
-	}
-	p := d.pmf(ki) * d.sum(ki)
-	if flip {
-		p = 1 - p
-	}
-	return p
-}
-
-func (d HypergeometicDist) sum(k int) float64 {
-	const epsilon = 1e-14
-	sum, ak := 1.0, 1.0
-	L := maxint(0, d.Draws+d.K-d.N)
-	for dk := 1; dk <= k-L && ak/sum > epsilon; dk++ {
-		ak *= float64(1+k-dk) / float64(d.Draws-k+dk)
-		ak *= float64(d.N-d.K-d.Draws+k+1-dk) / float64(d.K-k+dk)
-		sum += ak
-	}
-	return sum
-}
-
-func (d HypergeometicDist) bounds() (int, int) {
-	return maxint(0, d.Draws+d.K-d.N), minint(d.Draws, d.K)
-}
-
-func (d HypergeometicDist) Bounds() (float64, float64) {
-	l, h := d.bounds()
-	return float64(l), float64(h)
-}
-
-func (d HypergeometicDist) Step() float64 {
-	return 1
-}
-
-func (d HypergeometicDist) Mean() float64 {
-	return float64(d.Draws*d.K) / float64(d.N)
-}
-
-func (d HypergeometicDist) Variance() float64 {
-	return float64(d.Draws*d.K*(d.N-d.K)*(d.N-d.Draws)) /
-		float64(d.N*d.N*(d.N-1))
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/kde.go b/vendor/github.com/aclements/go-moremath/stats/kde.go
deleted file mode 100644
index 6bcbd49..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/kde.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"fmt"
-	"math"
-)
-
-// A KDE is a distribution that estimates the underlying distribution
-// of a Sample using kernel density estimation.
-//
-// Kernel density estimation is a method for constructing an estimate
-// ƒ̂(x) of a unknown distribution ƒ(x) given a sample from that
-// distribution. Unlike many techniques, kernel density estimation is
-// non-parametric: in general, it doesn't assume any particular true
-// distribution (note, however, that the resulting distribution
-// depends deeply on the selected bandwidth, and many bandwidth
-// estimation techniques assume normal reference rules).
-//
-// A kernel density estimate is similar to a histogram, except that it
-// is a smooth probability estimate and does not require choosing a
-// bin size and discretizing the data.
-//
-// Sample is the only required field. All others have reasonable
-// defaults.
-type KDE struct {
-	// Sample is the data sample underlying this KDE.
-	Sample Sample
-
-	// Kernel is the kernel to use for the KDE.
-	Kernel KDEKernel
-
-	// Bandwidth is the bandwidth to use for the KDE.
-	//
-	// If this is zero, the bandwidth is computed from the
-	// provided data using a default bandwidth estimator
-	// (currently BandwidthScott).
-	Bandwidth float64
-
-	// BoundaryMethod is the boundary correction method to use for
-	// the KDE. The default value is BoundaryReflect; however, the
-	// default bounds are effectively +/-inf, which is equivalent
-	// to performing no boundary correction.
-	BoundaryMethod KDEBoundaryMethod
-
-	// [BoundaryMin, BoundaryMax) specify a bounded support for
-	// the KDE. If both are 0 (their default values), they are
-	// treated as +/-inf.
-	//
-	// To specify a half-bounded support, set Min to math.Inf(-1)
-	// or Max to math.Inf(1).
-	BoundaryMin float64
-	BoundaryMax float64
-}
-
-// BandwidthSilverman is a bandwidth estimator implementing
-// Silverman's Rule of Thumb. It's fast, but not very robust to
-// outliers as it assumes data is approximately normal.
-//
-// Silverman, B. W. (1986) Density Estimation.
-func BandwidthSilverman(data interface {
-	StdDev() float64
-	Weight() float64
-}) float64 {
-	return 1.06 * data.StdDev() * math.Pow(data.Weight(), -1.0/5)
-}
-
-// BandwidthScott is a bandwidth estimator implementing Scott's Rule.
-// This is generally robust to outliers: it chooses the minimum
-// between the sample's standard deviation and an robust estimator of
-// a Gaussian distribution's standard deviation.
-//
-// Scott, D. W. (1992) Multivariate Density Estimation: Theory,
-// Practice, and Visualization.
-func BandwidthScott(data interface {
-	StdDev() float64
-	Weight() float64
-	Quantile(float64) float64
-}) float64 {
-	iqr := data.Quantile(0.75) - data.Quantile(0.25)
-	hScale := 1.06 * math.Pow(data.Weight(), -1.0/5)
-	stdDev := data.StdDev()
-	if stdDev < iqr/1.349 {
-		// Use Silverman's Rule of Thumb
-		return hScale * stdDev
-	} else {
-		// Use IQR/1.349 as a robust estimator of the standard
-		// deviation of a Gaussian distribution.
-		return hScale * (iqr / 1.349)
-	}
-}
-
-// TODO(austin) Implement bandwidth estimator from Botev, Grotowski,
-// Kroese. (2010) Kernel Density Estimation via Diffusion.
-
-// KDEKernel represents a kernel to use for a KDE.
-type KDEKernel int
-
-//go:generate stringer -type=KDEKernel
-
-const (
-	// An EpanechnikovKernel is a smooth kernel with bounded
-	// support. As a result, the KDE will also have bounded
-	// support. It is "optimal" in the sense that it minimizes the
-	// asymptotic mean integrated squared error (AMISE).
-	EpanechnikovKernel KDEKernel = iota
-
-	// A GaussianKernel is a Gaussian (normal) kernel.
-	GaussianKernel
-
-	// A DeltaKernel is a Dirac delta function. The PDF of such a
-	// KDE is not well-defined, but the CDF will represent each
-	// sample as an instantaneous increase. This kernel ignores
-	// bandwidth and never requires boundary correction.
-	DeltaKernel
-)
-
-// KDEBoundaryMethod represents a boundary correction method for
-// constructing a KDE with bounded support.
-type KDEBoundaryMethod int
-
-//go:generate stringer -type=KDEBoundaryMethod
-
-const (
-	// BoundaryReflect reflects the density estimate at the
-	// boundaries.  For example, for a KDE with support [0, inf),
-	// this is equivalent to ƒ̂ᵣ(x)=ƒ̂(x)+ƒ̂(-x) for x>=0.  This is a
-	// simple and fast technique, but enforces that ƒ̂ᵣ'(0)=0, so
-	// it may not be applicable to all distributions.
-	BoundaryReflect KDEBoundaryMethod = iota
-)
-
-type kdeKernel interface {
-	pdfEach(xs []float64) []float64
-	cdfEach(xs []float64) []float64
-}
-
-func (k *KDE) prepare() (kdeKernel, bool) {
-	// Compute bandwidth.
-	if k.Bandwidth == 0 {
-		k.Bandwidth = BandwidthScott(k.Sample)
-	}
-
-	// Construct kernel.
-	kernel := kdeKernel(nil)
-	switch k.Kernel {
-	default:
-		panic(fmt.Sprint("unknown kernel", k))
-	case EpanechnikovKernel:
-		kernel = epanechnikovKernel{k.Bandwidth}
-	case GaussianKernel:
-		kernel = NormalDist{0, k.Bandwidth}
-	case DeltaKernel:
-		kernel = DeltaDist{0}
-	}
-
-	// Use boundary correction?
-	bc := k.BoundaryMin != 0 || k.BoundaryMax != 0
-
-	return kernel, bc
-}
-
-// TODO: For KDEs of histograms, make histograms able to create a
-// weighted Sample and simply require the caller to provide a
-// good bandwidth from a StreamStats.
-
-// normalizedXs returns x - kde.Sample.Xs. Evaluating kernels shifted
-// by kde.Sample.Xs all at x is equivalent to evaluating one unshifted
-// kernel at x - kde.Sample.Xs.
-func (kde *KDE) normalizedXs(x float64) []float64 {
-	txs := make([]float64, len(kde.Sample.Xs))
-	for i, xi := range kde.Sample.Xs {
-		txs[i] = x - xi
-	}
-	return txs
-}
-
-func (kde *KDE) PDF(x float64) float64 {
-	kernel, bc := kde.prepare()
-
-	// Apply boundary
-	if bc && (x < kde.BoundaryMin || x >= kde.BoundaryMax) {
-		return 0
-	}
-
-	y := func(x float64) float64 {
-		// Shift kernel to each of kde.xs and evaluate at x
-		ys := kernel.pdfEach(kde.normalizedXs(x))
-
-		// Kernel samples are weighted according to the weights of xs
-		wys := Sample{Xs: ys, Weights: kde.Sample.Weights}
-
-		return wys.Sum() / wys.Weight()
-	}
-	if !bc {
-		return y(x)
-	}
-	switch kde.BoundaryMethod {
-	default:
-		panic("unknown boundary correction method")
-	case BoundaryReflect:
-		if math.IsInf(kde.BoundaryMax, 1) {
-			return y(x) + y(2*kde.BoundaryMin-x)
-		} else if math.IsInf(kde.BoundaryMin, -1) {
-			return y(x) + y(2*kde.BoundaryMax-x)
-		} else {
-			d := 2 * (kde.BoundaryMax - kde.BoundaryMin)
-			w := 2 * (x - kde.BoundaryMin)
-			return series(func(n float64) float64 {
-				// Points >= x
-				return y(x+n*d) + y(x+n*d-w)
-			}) + series(func(n float64) float64 {
-				// Points < x
-				return y(x-(n+1)*d+w) + y(x-(n+1)*d)
-			})
-		}
-	}
-}
-
-func (kde *KDE) CDF(x float64) float64 {
-	kernel, bc := kde.prepare()
-
-	// Apply boundary
-	if bc {
-		if x < kde.BoundaryMin {
-			return 0
-		} else if x >= kde.BoundaryMax {
-			return 1
-		}
-	}
-
-	y := func(x float64) float64 {
-		// Shift kernel integral to each of cdf.xs and evaluate at x
-		ys := kernel.cdfEach(kde.normalizedXs(x))
-
-		// Kernel samples are weighted according to the weights of xs
-		wys := Sample{Xs: ys, Weights: kde.Sample.Weights}
-
-		return wys.Sum() / wys.Weight()
-	}
-	if !bc {
-		return y(x)
-	}
-	switch kde.BoundaryMethod {
-	default:
-		panic("unknown boundary correction method")
-	case BoundaryReflect:
-		if math.IsInf(kde.BoundaryMax, 1) {
-			return y(x) - y(2*kde.BoundaryMin-x)
-		} else if math.IsInf(kde.BoundaryMin, -1) {
-			return y(x) + (1 - y(2*kde.BoundaryMax-x))
-		} else {
-			d := 2 * (kde.BoundaryMax - kde.BoundaryMin)
-			w := 2 * (x - kde.BoundaryMin)
-			return series(func(n float64) float64 {
-				// Windows >= x-w
-				return y(x+n*d) - y(x+n*d-w)
-			}) + series(func(n float64) float64 {
-				// Windows < x-w
-				return y(x-(n+1)*d) - y(x-(n+1)*d-w)
-			})
-		}
-	}
-}
-
-func (kde *KDE) Bounds() (low float64, high float64) {
-	_, bc := kde.prepare()
-
-	// TODO(austin) If this KDE came from a histogram, we'd better
-	// not sample at a significantly higher rate than the
-	// histogram.  Maybe we want to just return the bounds of the
-	// histogram?
-
-	// TODO(austin) It would be nice if this could be instructed
-	// to include all original data points, even if they are in
-	// the tail.  Probably that should just be up to the caller to
-	// pass an axis derived from the bounds of the original data.
-
-	// Use the lowest and highest samples as starting points
-	lowX, highX := kde.Sample.Bounds()
-	if lowX == highX {
-		lowX -= 1
-		highX += 1
-	}
-
-	// Find the end points that contain 99% of the CDF's weight.
-	// Since bisect requires that the root be bracketed, start by
-	// expanding our range if necessary.  TODO(austin) This can
-	// definitely be done faster.
-	const (
-		lowY      = 0.005
-		highY     = 0.995
-		tolerance = 0.001
-	)
-	for kde.CDF(lowX) > lowY {
-		lowX -= highX - lowX
-	}
-	for kde.CDF(highX) < highY {
-		highX += highX - lowX
-	}
-	// Explicitly accept discontinuities, since we may be using a
-	// discontiguous kernel.
-	low, _ = bisect(func(x float64) float64 { return kde.CDF(x) - lowY }, lowX, highX, tolerance)
-	high, _ = bisect(func(x float64) float64 { return kde.CDF(x) - highY }, lowX, highX, tolerance)
-
-	// Expand width by 20% to give some margins
-	width := high - low
-	low, high = low-0.1*width, high+0.1*width
-
-	// Limit to bounds
-	if bc {
-		low = math.Max(low, kde.BoundaryMin)
-		high = math.Min(high, kde.BoundaryMax)
-	}
-
-	return
-}
-
-type epanechnikovKernel struct {
-	h float64
-}
-
-func (d epanechnikovKernel) pdfEach(xs []float64) []float64 {
-	ys := make([]float64, len(xs))
-	a := 0.75 / d.h
-	invhh := 1 / (d.h * d.h)
-	for i, x := range xs {
-		if -d.h < x && x < d.h {
-			ys[i] = a * (1 - x*x*invhh)
-		}
-	}
-	return ys
-}
-
-func (d epanechnikovKernel) cdfEach(xs []float64) []float64 {
-	ys := make([]float64, len(xs))
-	invh := 1 / d.h
-	for i, x := range xs {
-		if x > d.h {
-			ys[i] = 1
-		} else if x > -d.h {
-			u := x * invh
-			ys[i] = 0.25 * (2 + 3*u - u*u*u)
-		}
-	}
-	return ys
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/kdeboundarymethod_string.go b/vendor/github.com/aclements/go-moremath/stats/kdeboundarymethod_string.go
deleted file mode 100644
index e01d0e7..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/kdeboundarymethod_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// generated by stringer -type=KDEBoundaryMethod; DO NOT EDIT
-
-package stats
-
-import "fmt"
-
-const _KDEBoundaryMethod_name = "BoundaryReflect"
-
-var _KDEBoundaryMethod_index = [...]uint8{0, 15}
-
-func (i KDEBoundaryMethod) String() string {
-	if i < 0 || i+1 >= KDEBoundaryMethod(len(_KDEBoundaryMethod_index)) {
-		return fmt.Sprintf("KDEBoundaryMethod(%d)", i)
-	}
-	return _KDEBoundaryMethod_name[_KDEBoundaryMethod_index[i]:_KDEBoundaryMethod_index[i+1]]
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/kdekernel_string.go b/vendor/github.com/aclements/go-moremath/stats/kdekernel_string.go
deleted file mode 100644
index 20a23fd..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/kdekernel_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// generated by stringer -type=KDEKernel; DO NOT EDIT
-
-package stats
-
-import "fmt"
-
-const _KDEKernel_name = "GaussianKernelDeltaKernel"
-
-var _KDEKernel_index = [...]uint8{0, 14, 25}
-
-func (i KDEKernel) String() string {
-	if i < 0 || i+1 >= KDEKernel(len(_KDEKernel_index)) {
-		return fmt.Sprintf("KDEKernel(%d)", i)
-	}
-	return _KDEKernel_name[_KDEKernel_index[i]:_KDEKernel_index[i+1]]
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/linearhist.go b/vendor/github.com/aclements/go-moremath/stats/linearhist.go
deleted file mode 100644
index c36335a..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/linearhist.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-// LinearHist is a Histogram with uniformly-sized bins.
-type LinearHist struct {
-	min, max, delta float64
-	low, high       uint
-	bins            []uint
-}
-
-// NewLinearHist returns an empty histogram with nbins uniformly-sized
-// bins spanning [min, max].
-func NewLinearHist(min, max float64, nbins int) *LinearHist {
-	delta := float64(nbins) / (max - min)
-	return &LinearHist{min, max, delta, 0, 0, make([]uint, nbins)}
-}
-
-func (h *LinearHist) bin(x float64) int {
-	return int(h.delta * (x - h.min))
-}
-
-func (h *LinearHist) Add(x float64) {
-	bin := h.bin(x)
-	if bin < 0 {
-		h.low++
-	} else if bin >= len(h.bins) {
-		h.high++
-	} else {
-		h.bins[bin]++
-	}
-}
-
-func (h *LinearHist) Counts() (uint, []uint, uint) {
-	return h.low, h.bins, h.high
-}
-
-func (h *LinearHist) BinToValue(bin float64) float64 {
-	return h.min + bin*h.delta
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/locationhypothesis_string.go b/vendor/github.com/aclements/go-moremath/stats/locationhypothesis_string.go
deleted file mode 100644
index ab0f26c..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/locationhypothesis_string.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by stringer -type LocationHypothesis; DO NOT EDIT
-
-package stats
-
-import "fmt"
-
-const _LocationHypothesis_name = "LocationLessLocationDiffersLocationGreater"
-
-var _LocationHypothesis_index = [...]uint8{0, 12, 27, 42}
-
-func (i LocationHypothesis) String() string {
-	i -= -1
-	if i < 0 || i+1 >= LocationHypothesis(len(_LocationHypothesis_index)) {
-		return fmt.Sprintf("LocationHypothesis(%d)", i+-1)
-	}
-	return _LocationHypothesis_name[_LocationHypothesis_index[i]:_LocationHypothesis_index[i+1]]
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/loghist.go b/vendor/github.com/aclements/go-moremath/stats/loghist.go
deleted file mode 100644
index 937f62a..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/loghist.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import "math"
-
-// LogHist is a Histogram with logarithmically-spaced bins.
-type LogHist struct {
-	b         int
-	m         float64
-	mOverLogb float64
-	low, high uint
-	bins      []uint
-}
-
-// NewLogHist returns an empty logarithmic histogram with bins for
-// integral values of m * log_b(x) up to x = max.
-func NewLogHist(b int, m float64, max float64) *LogHist {
-	// TODO(austin) Minimum value as well?  If the samples are
-	// actually integral, having fractional bin boundaries can
-	// mess up smoothing.
-	mOverLogb := m / math.Log(float64(b))
-	nbins := int(math.Ceil(mOverLogb * math.Log(max)))
-	return &LogHist{b: b, m: m, mOverLogb: mOverLogb, low: 0, high: 0, bins: make([]uint, nbins)}
-}
-
-func (h *LogHist) bin(x float64) int {
-	return int(h.mOverLogb * math.Log(x))
-}
-
-func (h *LogHist) Add(x float64) {
-	bin := h.bin(x)
-	if bin < 0 {
-		h.low++
-	} else if bin >= len(h.bins) {
-		h.high++
-	} else {
-		h.bins[bin]++
-	}
-}
-
-func (h *LogHist) Counts() (uint, []uint, uint) {
-	return h.low, h.bins, h.high
-}
-
-func (h *LogHist) BinToValue(bin float64) float64 {
-	return math.Pow(float64(h.b), bin/h.m)
-}
-
-func (h *LogHist) At(x float64) float64 {
-	bin := h.bin(x)
-	if bin < 0 || bin >= len(h.bins) {
-		return 0
-	}
-	return float64(h.bins[bin])
-}
-
-func (h *LogHist) Bounds() (float64, float64) {
-	// XXX Plot will plot this on a linear axis.  Maybe this
-	// should be able to return the natural axis?
-	// Maybe then we could also give it the bins for the tics.
-	lowbin := 0
-	if h.low == 0 {
-		for bin, count := range h.bins {
-			if count > 0 {
-				lowbin = bin
-				break
-			}
-		}
-	}
-	highbin := len(h.bins)
-	if h.high == 0 {
-		for bin := range h.bins {
-			if h.bins[len(h.bins)-bin-1] > 0 {
-				highbin = len(h.bins) - bin
-				break
-			}
-		}
-	}
-	return h.BinToValue(float64(lowbin)), h.BinToValue(float64(highbin))
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/normaldist.go b/vendor/github.com/aclements/go-moremath/stats/normaldist.go
deleted file mode 100644
index d00f96a..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/normaldist.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-	"math/rand"
-)
-
-// NormalDist is a normal (Gaussian) distribution with mean Mu and
-// standard deviation Sigma.
-type NormalDist struct {
-	Mu, Sigma float64
-}
-
-// StdNormal is the standard normal distribution (Mu = 0, Sigma = 1)
-var StdNormal = NormalDist{0, 1}
-
-// 1/sqrt(2 * pi)
-const invSqrt2Pi = 0.39894228040143267793994605993438186847585863116493465766592583
-
-func (n NormalDist) PDF(x float64) float64 {
-	z := x - n.Mu
-	return math.Exp(-z*z/(2*n.Sigma*n.Sigma)) * invSqrt2Pi / n.Sigma
-}
-
-func (n NormalDist) pdfEach(xs []float64) []float64 {
-	res := make([]float64, len(xs))
-	if n.Mu == 0 && n.Sigma == 1 {
-		// Standard normal fast path
-		for i, x := range xs {
-			res[i] = math.Exp(-x*x/2) * invSqrt2Pi
-		}
-	} else {
-		a := -1 / (2 * n.Sigma * n.Sigma)
-		b := invSqrt2Pi / n.Sigma
-		for i, x := range xs {
-			z := x - n.Mu
-			res[i] = math.Exp(z*z*a) * b
-		}
-	}
-	return res
-}
-
-func (n NormalDist) CDF(x float64) float64 {
-	return math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2)) / 2
-}
-
-func (n NormalDist) cdfEach(xs []float64) []float64 {
-	res := make([]float64, len(xs))
-	a := 1 / (n.Sigma * math.Sqrt2)
-	for i, x := range xs {
-		res[i] = math.Erfc(-(x-n.Mu)*a) / 2
-	}
-	return res
-}
-
-func (n NormalDist) InvCDF(p float64) (x float64) {
-	// This is based on Peter John Acklam's inverse normal CDF
-	// algorithm: http://home.online.no/~pjacklam/notes/invnorm/
-	const (
-		a1 = -3.969683028665376e+01
-		a2 = 2.209460984245205e+02
-		a3 = -2.759285104469687e+02
-		a4 = 1.383577518672690e+02
-		a5 = -3.066479806614716e+01
-		a6 = 2.506628277459239e+00
-
-		b1 = -5.447609879822406e+01
-		b2 = 1.615858368580409e+02
-		b3 = -1.556989798598866e+02
-		b4 = 6.680131188771972e+01
-		b5 = -1.328068155288572e+01
-
-		c1 = -7.784894002430293e-03
-		c2 = -3.223964580411365e-01
-		c3 = -2.400758277161838e+00
-		c4 = -2.549732539343734e+00
-		c5 = 4.374664141464968e+00
-		c6 = 2.938163982698783e+00
-
-		d1 = 7.784695709041462e-03
-		d2 = 3.224671290700398e-01
-		d3 = 2.445134137142996e+00
-		d4 = 3.754408661907416e+00
-
-		plow  = 0.02425
-		phigh = 1 - plow
-	)
-
-	if p < 0 || p > 1 {
-		return nan
-	} else if p == 0 {
-		return -inf
-	} else if p == 1 {
-		return inf
-	}
-
-	if p < plow {
-		// Rational approximation for lower region.
-		q := math.Sqrt(-2 * math.Log(p))
-		x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
-			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
-	} else if phigh < p {
-		// Rational approximation for upper region.
-		q := math.Sqrt(-2 * math.Log(1-p))
-		x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
-			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
-	} else {
-		// Rational approximation for central region.
-		q := p - 0.5
-		r := q * q
-		x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
-			(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
-	}
-
-	// Refine approximation.
-	e := 0.5*math.Erfc(-x/math.Sqrt2) - p
-	u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
-	x = x - u/(1+x*u/2)
-
-	// Adjust from standard normal.
-	return x*n.Sigma + n.Mu
-}
-
-func (n NormalDist) Rand(r *rand.Rand) float64 {
-	var x float64
-	if r == nil {
-		x = rand.NormFloat64()
-	} else {
-		x = r.NormFloat64()
-	}
-	return x*n.Sigma + n.Mu
-}
-
-func (n NormalDist) Bounds() (float64, float64) {
-	const stddevs = 3
-	return n.Mu - stddevs*n.Sigma, n.Mu + stddevs*n.Sigma
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/package.go b/vendor/github.com/aclements/go-moremath/stats/package.go
deleted file mode 100644
index 644b399..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/package.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package stats implements several statistical distributions,
-// hypothesis tests, and functions for descriptive statistics.
-//
-// Currently stats is fairly small, but for what it does implement, it
-// focuses on high quality, fast implementations with good, idiomatic
-// Go APIs.
-package stats // import "github.com/aclements/go-moremath/stats"
-
-import (
-	"errors"
-	"math"
-)
-
-var inf = math.Inf(1)
-var nan = math.NaN()
-
-// TODO: Put all errors in the same place and maybe unify them.
-
-var (
-	ErrSamplesEqual = errors.New("all samples are equal")
-)
diff --git a/vendor/github.com/aclements/go-moremath/stats/sample.go b/vendor/github.com/aclements/go-moremath/stats/sample.go
deleted file mode 100644
index 0b5d23e..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/sample.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-	"sort"
-
-	"github.com/aclements/go-moremath/vec"
-)
-
-// Sample is a collection of possibly weighted data points.
-type Sample struct {
-	// Xs is the slice of sample values.
-	Xs []float64
-
-	// Weights[i] is the weight of sample Xs[i].  If Weights is
-	// nil, all Xs have weight 1.  Weights must have the same
-	// length of Xs and all values must be non-negative.
-	Weights []float64
-
-	// Sorted indicates that Xs is sorted in ascending order.
-	Sorted bool
-}
-
-// Bounds returns the minimum and maximum values of xs.
-func Bounds(xs []float64) (min float64, max float64) {
-	if len(xs) == 0 {
-		return math.NaN(), math.NaN()
-	}
-	min, max = xs[0], xs[0]
-	for _, x := range xs {
-		if x < min {
-			min = x
-		}
-		if x > max {
-			max = x
-		}
-	}
-	return
-}
-
-// Bounds returns the minimum and maximum values of the Sample.
-//
-// If the Sample is weighted, this ignores samples with zero weight.
-//
-// This is constant time if s.Sorted and there are no zero-weighted
-// values.
-func (s Sample) Bounds() (min float64, max float64) {
-	if len(s.Xs) == 0 || (!s.Sorted && s.Weights == nil) {
-		return Bounds(s.Xs)
-	}
-
-	if s.Sorted {
-		if s.Weights == nil {
-			return s.Xs[0], s.Xs[len(s.Xs)-1]
-		}
-		min, max = math.NaN(), math.NaN()
-		for i, w := range s.Weights {
-			if w != 0 {
-				min = s.Xs[i]
-				break
-			}
-		}
-		if math.IsNaN(min) {
-			return
-		}
-		for i := range s.Weights {
-			if s.Weights[len(s.Weights)-i-1] != 0 {
-				max = s.Xs[len(s.Weights)-i-1]
-				break
-			}
-		}
-	} else {
-		min, max = math.Inf(1), math.Inf(-1)
-		for i, x := range s.Xs {
-			w := s.Weights[i]
-			if x < min && w != 0 {
-				min = x
-			}
-			if x > max && w != 0 {
-				max = x
-			}
-		}
-		if math.IsInf(min, 0) {
-			min, max = math.NaN(), math.NaN()
-		}
-	}
-	return
-}
-
-// Sum returns the (possibly weighted) sum of the Sample.
-func (s Sample) Sum() float64 {
-	if s.Weights == nil {
-		return vec.Sum(s.Xs)
-	}
-	sum := 0.0
-	for i, x := range s.Xs {
-		sum += x * s.Weights[i]
-	}
-	return sum
-}
-
-// Weight returns the total weight of the Sasmple.
-func (s Sample) Weight() float64 {
-	if s.Weights == nil {
-		return float64(len(s.Xs))
-	}
-	return vec.Sum(s.Weights)
-}
-
-// Mean returns the arithmetic mean of xs.
-func Mean(xs []float64) float64 {
-	if len(xs) == 0 {
-		return math.NaN()
-	}
-	m := 0.0
-	for i, x := range xs {
-		m += (x - m) / float64(i+1)
-	}
-	return m
-}
-
-// Mean returns the arithmetic mean of the Sample.
-func (s Sample) Mean() float64 {
-	if len(s.Xs) == 0 || s.Weights == nil {
-		return Mean(s.Xs)
-	}
-
-	m, wsum := 0.0, 0.0
-	for i, x := range s.Xs {
-		// Use weighted incremental mean:
-		//   m_i = (1 - w_i/wsum_i) * m_(i-1) + (w_i/wsum_i) * x_i
-		//       = m_(i-1) + (x_i - m_(i-1)) * (w_i/wsum_i)
-		w := s.Weights[i]
-		wsum += w
-		m += (x - m) * w / wsum
-	}
-	return m
-}
-
-// GeoMean returns the geometric mean of xs. xs must be positive.
-func GeoMean(xs []float64) float64 {
-	if len(xs) == 0 {
-		return math.NaN()
-	}
-	m := 0.0
-	for i, x := range xs {
-		if x <= 0 {
-			return math.NaN()
-		}
-		lx := math.Log(x)
-		m += (lx - m) / float64(i+1)
-	}
-	return math.Exp(m)
-}
-
-// GeoMean returns the geometric mean of the Sample. All samples
-// values must be positive.
-func (s Sample) GeoMean() float64 {
-	if len(s.Xs) == 0 || s.Weights == nil {
-		return GeoMean(s.Xs)
-	}
-
-	m, wsum := 0.0, 0.0
-	for i, x := range s.Xs {
-		w := s.Weights[i]
-		wsum += w
-		lx := math.Log(x)
-		m += (lx - m) * w / wsum
-	}
-	return math.Exp(m)
-}
-
-// Variance returns the sample variance of xs.
-func Variance(xs []float64) float64 {
-	if len(xs) == 0 {
-		return math.NaN()
-	} else if len(xs) <= 1 {
-		return 0
-	}
-
-	// Based on Wikipedia's presentation of Welford 1962
-	// (http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm).
-	// This is more numerically stable than the standard two-pass
-	// formula and not prone to massive cancellation.
-	mean, M2 := 0.0, 0.0
-	for n, x := range xs {
-		delta := x - mean
-		mean += delta / float64(n+1)
-		M2 += delta * (x - mean)
-	}
-	return M2 / float64(len(xs)-1)
-}
-
-func (s Sample) Variance() float64 {
-	if len(s.Xs) == 0 || s.Weights == nil {
-		return Variance(s.Xs)
-	}
-	// TODO(austin)
-	panic("Weighted Variance not implemented")
-}
-
-// StdDev returns the sample standard deviation of xs.
-func StdDev(xs []float64) float64 {
-	return math.Sqrt(Variance(xs))
-}
-
-// StdDev returns the sample standard deviation of the Sample.
-func (s Sample) StdDev() float64 {
-	if len(s.Xs) == 0 || s.Weights == nil {
-		return StdDev(s.Xs)
-	}
-	// TODO(austin)
-	panic("Weighted StdDev not implemented")
-}
-
-// Quantile returns the sample value X at which q*weight of the sample
-// is <= X. This uses interpolation method R8 from Hyndman and Fan
-// (1996).
-//
-// q will be capped to the range [0, 1]. If len(xs) == 0 or all
-// weights are 0, returns NaN.
-//
-// Quantile(0.5) is the median. Quantile(0.25) and Quantile(0.75) are
-// the first and third quartiles, respectively. Quantile(P/100) is the
-// P'th percentile.
-//
-// This is constant time if s.Sorted and s.Weights == nil.
-func (s Sample) Quantile(q float64) float64 {
-	if len(s.Xs) == 0 {
-		return math.NaN()
-	} else if q <= 0 {
-		min, _ := s.Bounds()
-		return min
-	} else if q >= 1 {
-		_, max := s.Bounds()
-		return max
-	}
-
-	if !s.Sorted {
-		// TODO(austin) Use select algorithm instead
-		s = *s.Copy().Sort()
-	}
-
-	if s.Weights == nil {
-		N := float64(len(s.Xs))
-		//n := q * (N + 1) // R6
-		n := 1/3.0 + q*(N+1/3.0) // R8
-		kf, frac := math.Modf(n)
-		k := int(kf)
-		if k <= 0 {
-			return s.Xs[0]
-		} else if k >= len(s.Xs) {
-			return s.Xs[len(s.Xs)-1]
-		}
-		return s.Xs[k-1] + frac*(s.Xs[k]-s.Xs[k-1])
-	} else {
-		// TODO(austin): Implement interpolation
-
-		target := s.Weight() * q
-
-		// TODO(austin) If we had cumulative weights, we could
-		// do this in log time.
-		for i, weight := range s.Weights {
-			target -= weight
-			if target < 0 {
-				return s.Xs[i]
-			}
-		}
-		return s.Xs[len(s.Xs)-1]
-	}
-}
-
-// IQR returns the interquartile range of the Sample.
-//
-// This is constant time if s.Sorted and s.Weights == nil.
-func (s Sample) IQR() float64 {
-	if !s.Sorted {
-		s = *s.Copy().Sort()
-	}
-	return s.Quantile(0.75) - s.Quantile(0.25)
-}
-
-type sampleSorter struct {
-	xs      []float64
-	weights []float64
-}
-
-func (p *sampleSorter) Len() int {
-	return len(p.xs)
-}
-
-func (p *sampleSorter) Less(i, j int) bool {
-	return p.xs[i] < p.xs[j]
-}
-
-func (p *sampleSorter) Swap(i, j int) {
-	p.xs[i], p.xs[j] = p.xs[j], p.xs[i]
-	p.weights[i], p.weights[j] = p.weights[j], p.weights[i]
-}
-
-// Sort sorts the samples in place in s and returns s.
-//
-// A sorted sample improves the performance of some algorithms.
-func (s *Sample) Sort() *Sample {
-	if s.Sorted || sort.Float64sAreSorted(s.Xs) {
-		// All set
-	} else if s.Weights == nil {
-		sort.Float64s(s.Xs)
-	} else {
-		sort.Sort(&sampleSorter{s.Xs, s.Weights})
-	}
-	s.Sorted = true
-	return s
-}
-
-// Copy returns a copy of the Sample.
-//
-// The returned Sample shares no data with the original, so they can
-// be modified (for example, sorted) independently.
-func (s Sample) Copy() *Sample {
-	xs := make([]float64, len(s.Xs))
-	copy(xs, s.Xs)
-
-	weights := []float64(nil)
-	if s.Weights != nil {
-		weights = make([]float64, len(s.Weights))
-		copy(weights, s.Weights)
-	}
-
-	return &Sample{xs, weights, s.Sorted}
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/stream.go b/vendor/github.com/aclements/go-moremath/stats/stream.go
deleted file mode 100644
index 0deb904..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/stream.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"fmt"
-	"math"
-)
-
-// TODO(austin) Unify more with Sample interface
-
-// StreamStats tracks basic statistics for a stream of data in O(1)
-// space.
-//
-// StreamStats should be initialized to its zero value.
-type StreamStats struct {
-	Count           uint
-	Total, Min, Max float64
-
-	// Numerically stable online mean
-	mean          float64
-	meanOfSquares float64
-
-	// Online variance
-	vM2 float64
-}
-
-// Add updates s's statistics with sample value x.
-func (s *StreamStats) Add(x float64) {
-	s.Total += x
-	if s.Count == 0 {
-		s.Min, s.Max = x, x
-	} else {
-		if x < s.Min {
-			s.Min = x
-		}
-		if x > s.Max {
-			s.Max = x
-		}
-	}
-	s.Count++
-
-	// Update online mean, mean of squares, and variance.  Online
-	// variance based on Wikipedia's presentation ("Algorithms for
-	// calculating variance") of Knuth's formulation of Welford
-	// 1962.
-	delta := x - s.mean
-	s.mean += delta / float64(s.Count)
-	s.meanOfSquares += (x*x - s.meanOfSquares) / float64(s.Count)
-	s.vM2 += delta * (x - s.mean)
-}
-
-func (s *StreamStats) Weight() float64 {
-	return float64(s.Count)
-}
-
-func (s *StreamStats) Mean() float64 {
-	return s.mean
-}
-
-func (s *StreamStats) Variance() float64 {
-	return s.vM2 / float64(s.Count-1)
-}
-
-func (s *StreamStats) StdDev() float64 {
-	return math.Sqrt(s.Variance())
-}
-
-func (s *StreamStats) RMS() float64 {
-	return math.Sqrt(s.meanOfSquares)
-}
-
-// Combine updates s's statistics as if all samples added to o were
-// added to s.
-func (s *StreamStats) Combine(o *StreamStats) {
-	count := s.Count + o.Count
-
-	// Compute combined online variance statistics
-	delta := o.mean - s.mean
-	mean := s.mean + delta*float64(o.Count)/float64(count)
-	vM2 := s.vM2 + o.vM2 + delta*delta*float64(s.Count)*float64(o.Count)/float64(count)
-
-	s.Count = count
-	s.Total += o.Total
-	if o.Min < s.Min {
-		s.Min = o.Min
-	}
-	if o.Max > s.Max {
-		s.Max = o.Max
-	}
-	s.mean = mean
-	s.meanOfSquares += (o.meanOfSquares - s.meanOfSquares) * float64(o.Count) / float64(count)
-	s.vM2 = vM2
-}
-
-func (s *StreamStats) String() string {
-	return fmt.Sprintf("Count=%d Total=%g Min=%g Mean=%g RMS=%g Max=%g StdDev=%g", s.Count, s.Total, s.Min, s.Mean(), s.RMS(), s.Max, s.StdDev())
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/tdist.go b/vendor/github.com/aclements/go-moremath/stats/tdist.go
deleted file mode 100644
index 29bbb1a..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/tdist.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-
-	"github.com/aclements/go-moremath/mathx"
-)
-
-// A TDist is a Student's t-distribution with V degrees of freedom.
-type TDist struct {
-	V float64
-}
-
-func lgamma(x float64) float64 {
-	y, _ := math.Lgamma(x)
-	return y
-}
-
-func (t TDist) PDF(x float64) float64 {
-	return math.Exp(lgamma((t.V+1)/2)-lgamma(t.V/2)) /
-		math.Sqrt(t.V*math.Pi) * math.Pow(1+(x*x)/t.V, -(t.V+1)/2)
-}
-
-func (t TDist) CDF(x float64) float64 {
-	if x == 0 {
-		return 0.5
-	} else if x > 0 {
-		return 1 - 0.5*mathx.BetaInc(t.V/(t.V+x*x), t.V/2, 0.5)
-	} else if x < 0 {
-		return 1 - t.CDF(-x)
-	} else {
-		return math.NaN()
-	}
-}
-
-func (t TDist) Bounds() (float64, float64) {
-	return -4, 4
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/ttest.go b/vendor/github.com/aclements/go-moremath/stats/ttest.go
deleted file mode 100644
index 8742298..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/ttest.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"errors"
-	"math"
-)
-
-// A TTestResult is the result of a t-test.
-type TTestResult struct {
-	// N1 and N2 are the sizes of the input samples. For a
-	// one-sample t-test, N2 is 0.
-	N1, N2 int
-
-	// T is the value of the t-statistic for this t-test.
-	T float64
-
-	// DoF is the degrees of freedom for this t-test.
-	DoF float64
-
-	// AltHypothesis specifies the alternative hypothesis tested
-	// by this test against the null hypothesis that there is no
-	// difference in the means of the samples.
-	AltHypothesis LocationHypothesis
-
-	// P is p-value for this t-test for the given null hypothesis.
-	P float64
-}
-
-func newTTestResult(n1, n2 int, t, dof float64, alt LocationHypothesis) *TTestResult {
-	dist := TDist{dof}
-	var p float64
-	switch alt {
-	case LocationDiffers:
-		p = 2 * (1 - dist.CDF(math.Abs(t)))
-	case LocationLess:
-		p = dist.CDF(t)
-	case LocationGreater:
-		p = 1 - dist.CDF(t)
-	}
-	return &TTestResult{N1: n1, N2: n2, T: t, DoF: dof, AltHypothesis: alt, P: p}
-}
-
-// A TTestSample is a sample that can be used for a one or two sample
-// t-test.
-type TTestSample interface {
-	Weight() float64
-	Mean() float64
-	Variance() float64
-}
-
-var (
-	ErrSampleSize        = errors.New("sample is too small")
-	ErrZeroVariance      = errors.New("sample has zero variance")
-	ErrMismatchedSamples = errors.New("samples have different lengths")
-)
-
-// TwoSampleTTest performs a two-sample (unpaired) Student's t-test on
-// samples x1 and x2. This is a test of the null hypothesis that x1
-// and x2 are drawn from populations with equal means. It assumes x1
-// and x2 are independent samples, that the distributions have equal
-// variance, and that the populations are normally distributed.
-func TwoSampleTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {
-	n1, n2 := x1.Weight(), x2.Weight()
-	if n1 == 0 || n2 == 0 {
-		return nil, ErrSampleSize
-	}
-	v1, v2 := x1.Variance(), x2.Variance()
-	if v1 == 0 && v2 == 0 {
-		return nil, ErrZeroVariance
-	}
-
-	dof := n1 + n2 - 2
-	v12 := ((n1-1)*v1 + (n2-1)*v2) / dof
-	t := (x1.Mean() - x2.Mean()) / math.Sqrt(v12*(1/n1+1/n2))
-	return newTTestResult(int(n1), int(n2), t, dof, alt), nil
-}
-
-// TwoSampleWelchTTest performs a two-sample (unpaired) Welch's t-test
-// on samples x1 and x2. This is like TwoSampleTTest, but does not
-// assume the distributions have equal variance.
-func TwoSampleWelchTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {
-	n1, n2 := x1.Weight(), x2.Weight()
-	if n1 <= 1 || n2 <= 1 {
-		// TODO: Can we still do this with n == 1?
-		return nil, ErrSampleSize
-	}
-	v1, v2 := x1.Variance(), x2.Variance()
-	if v1 == 0 && v2 == 0 {
-		return nil, ErrZeroVariance
-	}
-
-	dof := math.Pow(v1/n1+v2/n2, 2) /
-		(math.Pow(v1/n1, 2)/(n1-1) + math.Pow(v2/n2, 2)/(n2-1))
-	s := math.Sqrt(v1/n1 + v2/n2)
-	t := (x1.Mean() - x2.Mean()) / s
-	return newTTestResult(int(n1), int(n2), t, dof, alt), nil
-}
-
-// PairedTTest performs a two-sample paired t-test on samples x1 and
-// x2. If μ0 is non-zero, this tests if the average of the difference
-// is significantly different from μ0. If x1 and x2 are identical,
-// this returns nil.
-func PairedTTest(x1, x2 []float64, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {
-	if len(x1) != len(x2) {
-		return nil, ErrMismatchedSamples
-	}
-	if len(x1) <= 1 {
-		// TODO: Can we still do this with n == 1?
-		return nil, ErrSampleSize
-	}
-
-	dof := float64(len(x1) - 1)
-
-	diff := make([]float64, len(x1))
-	for i := range x1 {
-		diff[i] = x1[i] - x2[i]
-	}
-	sd := StdDev(diff)
-	if sd == 0 {
-		// TODO: Can we still do the test?
-		return nil, ErrZeroVariance
-	}
-	t := (Mean(diff) - μ0) * math.Sqrt(float64(len(x1))) / sd
-	return newTTestResult(len(x1), len(x2), t, dof, alt), nil
-}
-
-// OneSampleTTest performs a one-sample t-test on sample x. This tests
-// the null hypothesis that the population mean is equal to μ0. This
-// assumes the distribution of the population of sample means is
-// normal.
-func OneSampleTTest(x TTestSample, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {
-	n, v := x.Weight(), x.Variance()
-	if n == 0 {
-		return nil, ErrSampleSize
-	}
-	if v == 0 {
-		// TODO: Can we still do the test?
-		return nil, ErrZeroVariance
-	}
-	dof := n - 1
-	t := (x.Mean() - μ0) * math.Sqrt(n) / math.Sqrt(v)
-	return newTTestResult(int(n), 0, t, dof, alt), nil
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/udist.go b/vendor/github.com/aclements/go-moremath/stats/udist.go
deleted file mode 100644
index 06c34ad..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/udist.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-
-	"github.com/aclements/go-moremath/mathx"
-)
-
-// A UDist is the discrete probability distribution of the
-// Mann-Whitney U statistic for a pair of samples of sizes N1 and N2.
-//
-// The details of computing this distribution with no ties can be
-// found in Mann, Henry B.; Whitney, Donald R. (1947). "On a Test of
-// Whether one of Two Random Variables is Stochastically Larger than
-// the Other". Annals of Mathematical Statistics 18 (1): 50–60.
-// Computing this distribution in the presence of ties is described in
-// Klotz, J. H. (1966). "The Wilcoxon, Ties, and the Computer".
-// Journal of the American Statistical Association 61 (315): 772-787
-// and Cheung, Ying Kuen; Klotz, Jerome H. (1997). "The Mann Whitney
-// Wilcoxon Distribution Using Linked Lists". Statistica Sinica 7:
-// 805-813 (the former paper contains details that are glossed over in
-// the latter paper but has mathematical typesetting issues, so it's
-// easiest to get the context from the former paper and the details
-// from the latter).
-type UDist struct {
-	N1, N2 int
-
-	// T is the count of the number of ties at each rank in the
-	// input distributions. T may be nil, in which case it is
-	// assumed there are no ties (which is equivalent to an M+N
-	// slice of 1s). It must be the case that Sum(T) == M+N.
-	T []int
-}
-
-// hasTies returns true if d has any tied samples.
-func (d UDist) hasTies() bool {
-	for _, t := range d.T {
-		if t > 1 {
-			return true
-		}
-	}
-	return false
-}
-
-// p returns the p_{d.N1,d.N2} function defined by Mann, Whitney 1947
-// for values of U from 0 up to and including the U argument.
-//
-// This algorithm runs in Θ(N1*N2*U) = O(N1²N2²) time and is quite
-// fast for small values of N1 and N2. However, it does not handle ties.
-func (d UDist) p(U int) []float64 {
-	// This is a dynamic programming implementation of the
-	// recursive recurrence definition given by Mann and Whitney:
-	//
-	//   p_{n,m}(U) = (n * p_{n-1,m}(U-m) + m * p_{n,m-1}(U)) / (n+m)
-	//   p_{n,m}(U) = 0                           if U < 0
-	//   p_{0,m}(U) = p{n,0}(U) = 1 / nCr(m+n, n) if U = 0
-	//                          = 0               if U > 0
-	//
-	// (Note that there is a typo in the original paper. The first
-	// recursive application of p should be for U-m, not U-M.)
-	//
-	// Since p{n,m} only depends on p{n-1,m} and p{n,m-1}, we only
-	// need to store one "plane" of the three dimensional space at
-	// a time.
-	//
-	// Furthermore, p_{n,m} = p_{m,n}, so we only construct values
-	// for n <= m and obtain the rest through symmetry.
-	//
-	// We organize the computed values of p as followed:
-	//
-	//       n →   N
-	//     m *
-	//     ↓ * *
-	//       * * *
-	//       * * * *
-	//       * * * *
-	//     M * * * *
-	//
-	// where each * is a slice indexed by U. The code below
-	// computes these left-to-right, top-to-bottom, so it only
-	// stores one row of this matrix at a time. Furthermore,
-	// computing an element in a given U slice only depends on the
-	// same and smaller values of U, so we can overwrite the U
-	// slice we're computing in place as long as we start with the
-	// largest value of U. Finally, even though the recurrence
-	// depends on (n,m) above the diagonal and we use symmetry to
-	// mirror those across the diagonal to (m,n), the mirrored
-	// indexes are always available in the current row, so this
-	// mirroring does not interfere with our ability to recycle
-	// state.
-
-	N, M := d.N1, d.N2
-	if N > M {
-		N, M = M, N
-	}
-
-	memo := make([][]float64, N+1)
-	for n := range memo {
-		memo[n] = make([]float64, U+1)
-	}
-
-	for m := 0; m <= M; m++ {
-		// Compute p_{0,m}. This is zero except for U=0.
-		memo[0][0] = 1
-
-		// Compute the remainder of this row.
-		nlim := N
-		if m < nlim {
-			nlim = m
-		}
-		for n := 1; n <= nlim; n++ {
-			lp := memo[n-1] // p_{n-1,m}
-			var rp []float64
-			if n <= m-1 {
-				rp = memo[n] // p_{n,m-1}
-			} else {
-				rp = memo[m-1] // p{m-1,n} and m==n
-			}
-
-			// For a given n,m, U is at most n*m.
-			//
-			// TODO: Actually, it's at most ⌈n*m/2⌉, but
-			// then we need to use more complex symmetries
-			// in the inner loop below.
-			ulim := n * m
-			if U < ulim {
-				ulim = U
-			}
-
-			out := memo[n] // p_{n,m}
-			nplusm := float64(n + m)
-			for U1 := ulim; U1 >= 0; U1-- {
-				l := 0.0
-				if U1-m >= 0 {
-					l = float64(n) * lp[U1-m]
-				}
-				r := float64(m) * rp[U1]
-				out[U1] = (l + r) / nplusm
-			}
-		}
-	}
-	return memo[N]
-}
-
-type ukey struct {
-	n1   int // size of first sample
-	twoU int // 2*U statistic for this permutation
-}
-
-// This computes the cumulative counts of the Mann-Whitney U
-// distribution in the presence of ties using the computation from
-// Cheung, Ying Kuen; Klotz, Jerome H. (1997). "The Mann Whitney
-// Wilcoxon Distribution Using Linked Lists". Statistica Sinica 7:
-// 805-813, with much guidance from appendix L of Klotz, A
-// Computational Approach to Statistics.
-//
-// makeUmemo constructs a table memo[K][ukey{n1, 2*U}], where K is the
-// number of ranks (up to len(t)), n1 is the size of the first sample
-// (up to the n1 argument), and U is the U statistic (up to the
-// argument twoU/2). The value of an entry in the memo table is the
-// number of permutations of a sample of size n1 in a ranking with tie
-// vector t[:K] having a U statistic <= U.
-func makeUmemo(twoU, n1 int, t []int) []map[ukey]float64 {
-	// Another candidate for a fast implementation is van de Wiel,
-	// "The split-up algorithm: a fast symbolic method for
-	// computing p-values of distribution-free statistics". This
-	// is what's used by R's coin package. It's a comparatively
-	// recent publication, so it's presumably faster (or perhaps
-	// just more general) than previous techniques, but I can't
-	// get my hands on the paper.
-	//
-	// TODO: ~40% of this function's time is spent in mapassign on
-	// the assignment lines in the two loops and another ~20% in
-	// map access and iteration. Improving map behavior or
-	// replacing the maps altogether with some other constant-time
-	// structure could double performance.
-	//
-	// TODO: The worst case for this function is when there are
-	// few ties. Yet the best case overall is when there are *no*
-	// ties. Can we get the best of both worlds? Use the fast
-	// algorithm for the most part when there are few ties and mix
-	// in the general algorithm just where we need it? That's
-	// certainly possible for sub-problems where t[:k] has no
-	// ties, but that doesn't help if t[0] has a tie but nothing
-	// else does. Is it possible to rearrange the ranks without
-	// messing up our computation of the U statistic for
-	// sub-problems?
-
-	K := len(t)
-
-	// Compute a coefficients. The a slice is indexed by k (a[0]
-	// is unused).
-	a := make([]int, K+1)
-	a[1] = t[0]
-	for k := 2; k <= K; k++ {
-		a[k] = a[k-1] + t[k-2] + t[k-1]
-	}
-
-	// Create the memo table for the counts function, A. The A
-	// slice is indexed by k (A[0] is unused).
-	//
-	// In "The Mann Whitney Distribution Using Linked Lists", they
-	// use linked lists (*gasp*) for this, but within each K it's
-	// really just a memoization table, so it's faster to use a
-	// map. The outer structure is a slice indexed by k because we
-	// need to find all memo entries with certain values of k.
-	//
-	// TODO: The n1 and twoU values in the ukeys follow strict
-	// patterns. For each K value, the n1 values are every integer
-	// between two bounds. For each (K, n1) value, the twoU values
-	// are every integer multiple of a certain base between two
-	// bounds. It might be worth turning these into directly
-	// indexible slices.
-	A := make([]map[ukey]float64, K+1)
-	A[K] = map[ukey]float64{ukey{n1: n1, twoU: twoU}: 0}
-
-	// Compute memo table (k, n1, twoU) triples from high K values
-	// to low K values. This drives the recurrence relation
-	// downward to figure out all of the needed argument triples.
-	//
-	// TODO: Is it possible to generate this table bottom-up? If
-	// so, this could be a pure dynamic programming algorithm and
-	// we could discard the K dimension. We could at least store
-	// the inputs in a more compact representation that replaces
-	// the twoU dimension with an interval and a step size (as
-	// suggested by Cheung, Klotz, not that they make it at all
-	// clear *why* they're suggesting this).
-	tsum := sumint(t) // always ∑ t[0:k]
-	for k := K - 1; k >= 2; k-- {
-		tsum -= t[k]
-		A[k] = make(map[ukey]float64)
-
-		// Construct A[k] from A[k+1].
-		for A_kplus1 := range A[k+1] {
-			rkLow := maxint(0, A_kplus1.n1-tsum)
-			rkHigh := minint(A_kplus1.n1, t[k])
-			for rk := rkLow; rk <= rkHigh; rk++ {
-				twoU_k := A_kplus1.twoU - rk*(a[k+1]-2*A_kplus1.n1+rk)
-				n1_k := A_kplus1.n1 - rk
-				if twoUmin(n1_k, t[:k], a) <= twoU_k && twoU_k <= twoUmax(n1_k, t[:k], a) {
-					key := ukey{n1: n1_k, twoU: twoU_k}
-					A[k][key] = 0
-				}
-			}
-		}
-	}
-
-	// Fill counts in memo table from low K values to high K
-	// values. This unwinds the recurrence relation.
-
-	// Start with K==2 base case.
-	//
-	// TODO: Later computations depend on these, but these don't
-	// depend on anything (including each other), so if K==2, we
-	// can skip the memo table altogether.
-	if K < 2 {
-		panic("K < 2")
-	}
-	N_2 := t[0] + t[1]
-	for A_2i := range A[2] {
-		Asum := 0.0
-		r2Low := maxint(0, A_2i.n1-t[0])
-		r2High := (A_2i.twoU - A_2i.n1*(t[0]-A_2i.n1)) / N_2
-		for r2 := r2Low; r2 <= r2High; r2++ {
-			Asum += mathx.Choose(t[0], A_2i.n1-r2) *
-				mathx.Choose(t[1], r2)
-		}
-		A[2][A_2i] = Asum
-	}
-
-	// Derive counts for the rest of the memo table.
-	tsum = t[0] // always ∑ t[0:k-1]
-	for k := 3; k <= K; k++ {
-		tsum += t[k-2]
-
-		// Compute A[k] counts from A[k-1] counts.
-		for A_ki := range A[k] {
-			Asum := 0.0
-			rkLow := maxint(0, A_ki.n1-tsum)
-			rkHigh := minint(A_ki.n1, t[k-1])
-			for rk := rkLow; rk <= rkHigh; rk++ {
-				twoU_kminus1 := A_ki.twoU - rk*(a[k]-2*A_ki.n1+rk)
-				n1_kminus1 := A_ki.n1 - rk
-				x, ok := A[k-1][ukey{n1: n1_kminus1, twoU: twoU_kminus1}]
-				if !ok && twoUmax(n1_kminus1, t[:k-1], a) < twoU_kminus1 {
-					x = mathx.Choose(tsum, n1_kminus1)
-				}
-				Asum += x * mathx.Choose(t[k-1], rk)
-			}
-			A[k][A_ki] = Asum
-		}
-	}
-
-	return A
-}
-
-func twoUmin(n1 int, t, a []int) int {
-	K := len(t)
-	twoU := -n1 * n1
-	n1_k := n1
-	for k := 1; k <= K; k++ {
-		twoU_k := minint(n1_k, t[k-1])
-		twoU += twoU_k * a[k]
-		n1_k -= twoU_k
-	}
-	return twoU
-}
-
-func twoUmax(n1 int, t, a []int) int {
-	K := len(t)
-	twoU := -n1 * n1
-	n1_k := n1
-	for k := K; k > 0; k-- {
-		twoU_k := minint(n1_k, t[k-1])
-		twoU += twoU_k * a[k]
-		n1_k -= twoU_k
-	}
-	return twoU
-}
-
-func (d UDist) PMF(U float64) float64 {
-	if U < 0 || U >= 0.5+float64(d.N1*d.N2) {
-		return 0
-	}
-
-	if d.hasTies() {
-		// makeUmemo computes the CDF directly. Take its
-		// difference to get the PMF.
-		p1, ok1 := makeUmemo(int(2*U)-1, d.N1, d.T)[len(d.T)][ukey{d.N1, int(2*U) - 1}]
-		p2, ok2 := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]
-		if !ok1 || !ok2 {
-			panic("makeUmemo did not return expected memoization table")
-		}
-		return (p2 - p1) / mathx.Choose(d.N1+d.N2, d.N1)
-	}
-
-	// There are no ties. Use the fast algorithm. U must be integral.
-	Ui := int(math.Floor(U))
-	// TODO: Use symmetry to minimize U
-	return d.p(Ui)[Ui]
-}
-
-func (d UDist) CDF(U float64) float64 {
-	if U < 0 {
-		return 0
-	} else if U >= float64(d.N1*d.N2) {
-		return 1
-	}
-
-	if d.hasTies() {
-		// TODO: Minimize U?
-		p, ok := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]
-		if !ok {
-			panic("makeUmemo did not return expected memoization table")
-		}
-		return p / mathx.Choose(d.N1+d.N2, d.N1)
-	}
-
-	// There are no ties. Use the fast algorithm. U must be integral.
-	Ui := int(math.Floor(U))
-	// The distribution is symmetric around U = m * n / 2. Sum up
-	// whichever tail is smaller.
-	flip := Ui >= (d.N1*d.N2+1)/2
-	if flip {
-		Ui = d.N1*d.N2 - Ui - 1
-	}
-	pdfs := d.p(Ui)
-	p := 0.0
-	for _, pdf := range pdfs[:Ui+1] {
-		p += pdf
-	}
-	if flip {
-		p = 1 - p
-	}
-	return p
-}
-
-func (d UDist) Step() float64 {
-	return 0.5
-}
-
-func (d UDist) Bounds() (float64, float64) {
-	// TODO: More precise bounds when there are ties.
-	return 0, float64(d.N1 * d.N2)
-}
diff --git a/vendor/github.com/aclements/go-moremath/stats/utest.go b/vendor/github.com/aclements/go-moremath/stats/utest.go
deleted file mode 100644
index c31e54c..0000000
--- a/vendor/github.com/aclements/go-moremath/stats/utest.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stats
-
-import (
-	"math"
-	"sort"
-
-	"github.com/aclements/go-moremath/mathx"
-)
-
-// A LocationHypothesis specifies the alternative hypothesis of a
-// location test such as a t-test or a Mann-Whitney U-test. The
-// default (zero) value is to test against the alternative hypothesis
-// that they differ.
-type LocationHypothesis int
-
-//go:generate stringer -type LocationHypothesis
-
-const (
-	// LocationLess specifies the alternative hypothesis that the
-	// location of the first sample is less than the second. This
-	// is a one-tailed test.
-	LocationLess LocationHypothesis = -1
-
-	// LocationDiffers specifies the alternative hypothesis that
-	// the locations of the two samples are not equal. This is a
-	// two-tailed test.
-	LocationDiffers LocationHypothesis = 0
-
-	// LocationGreater specifies the alternative hypothesis that
-	// the location of the first sample is greater than the
-	// second. This is a one-tailed test.
-	LocationGreater LocationHypothesis = 1
-)
-
-// A MannWhitneyUTestResult is the result of a Mann-Whitney U-test.
-type MannWhitneyUTestResult struct {
-	// N1 and N2 are the sizes of the input samples.
-	N1, N2 int
-
-	// U is the value of the Mann-Whitney U statistic for this
-	// test, generalized by counting ties as 0.5.
-	//
-	// Given the Cartesian product of the two samples, this is the
-	// number of pairs in which the value from the first sample is
-	// greater than the value of the second, plus 0.5 times the
-	// number of pairs where the values from the two samples are
-	// equal. Hence, U is always an integer multiple of 0.5 (it is
-	// a whole integer if there are no ties) in the range [0, N1*N2].
-	//
-	// U statistics always come in pairs, depending on which
-	// sample is "first". The mirror U for the other sample can be
-	// calculated as N1*N2 - U.
-	//
-	// There are many equivalent statistics with slightly
-	// different definitions. The Wilcoxon (1945) W statistic
-	// (generalized for ties) is U + (N1(N1+1))/2. It is also
-	// common to use 2U to eliminate the half steps and Smid
-	// (1956) uses N1*N2 - 2U to additionally center the
-	// distribution.
-	U float64
-
-	// AltHypothesis specifies the alternative hypothesis tested
-	// by this test against the null hypothesis that there is no
-	// difference in the locations of the samples.
-	AltHypothesis LocationHypothesis
-
-	// P is the p-value of the Mann-Whitney test for the given
-	// null hypothesis.
-	P float64
-}
-
-// MannWhitneyExactLimit gives the largest sample size for which the
-// exact U distribution will be used for the Mann-Whitney U-test.
-//
-// Using the exact distribution is necessary for small sample sizes
-// because the distribution is highly irregular. However, computing
-// the distribution for large sample sizes is both computationally
-// expensive and unnecessary because it quickly approaches a normal
-// approximation. Computing the distribution for two 50 value samples
-// takes a few milliseconds on a 2014 laptop.
-var MannWhitneyExactLimit = 50
-
-// MannWhitneyTiesExactLimit gives the largest sample size for which
-// the exact U distribution will be used for the Mann-Whitney U-test
-// in the presence of ties.
-//
-// Computing this distribution is more expensive than computing the
-// distribution without ties, so this is set lower. Computing this
-// distribution for two 25 value samples takes about ten milliseconds
-// on a 2014 laptop.
-var MannWhitneyTiesExactLimit = 25
-
-// MannWhitneyUTest performs a Mann-Whitney U-test [1,2] of the null
-// hypothesis that two samples come from the same population against
-// the alternative hypothesis that one sample tends to have larger or
-// smaller values than the other.
-//
-// This is similar to a t-test, but unlike the t-test, the
-// Mann-Whitney U-test is non-parametric (it does not assume a normal
-// distribution). It has very slightly lower efficiency than the
-// t-test on normal distributions.
-//
-// Computing the exact U distribution is expensive for large sample
-// sizes, so this uses a normal approximation for sample sizes larger
-// than MannWhitneyExactLimit if there are no ties or
-// MannWhitneyTiesExactLimit if there are ties. This normal
-// approximation uses both the tie correction and the continuity
-// correction.
-//
-// This can fail with ErrSampleSize if either sample is empty or
-// ErrSamplesEqual if all sample values are equal.
-//
-// This is also known as a Mann-Whitney-Wilcoxon test and is
-// equivalent to the Wilcoxon rank-sum test, though the Wilcoxon
-// rank-sum test differs in nomenclature.
-//
-// [1] Mann, Henry B.; Whitney, Donald R. (1947). "On a Test of
-// Whether one of Two Random Variables is Stochastically Larger than
-// the Other". Annals of Mathematical Statistics 18 (1): 50–60.
-//
-// [2] Klotz, J. H. (1966). "The Wilcoxon, Ties, and the Computer".
-// Journal of the American Statistical Association 61 (315): 772-787.
-func MannWhitneyUTest(x1, x2 []float64, alt LocationHypothesis) (*MannWhitneyUTestResult, error) {
-	n1, n2 := len(x1), len(x2)
-	if n1 == 0 || n2 == 0 {
-		return nil, ErrSampleSize
-	}
-
-	// Compute the U statistic and tie vector T.
-	x1 = append([]float64(nil), x1...)
-	x2 = append([]float64(nil), x2...)
-	sort.Float64s(x1)
-	sort.Float64s(x2)
-	merged, labels := labeledMerge(x1, x2)
-
-	R1 := 0.0
-	T, hasTies := []int{}, false
-	for i := 0; i < len(merged); {
-		rank1, nx1, v1 := i+1, 0, merged[i]
-		// Consume samples that tie this sample (including itself).
-		for ; i < len(merged) && merged[i] == v1; i++ {
-			if labels[i] == 1 {
-				nx1++
-			}
-		}
-		// Assign all tied samples the average rank of the
-		// samples, where merged[0] has rank 1.
-		if nx1 != 0 {
-			rank := float64(i+rank1) / 2
-			R1 += rank * float64(nx1)
-		}
-		T = append(T, i-rank1+1)
-		if i > rank1 {
-			hasTies = true
-		}
-	}
-	U1 := R1 - float64(n1*(n1+1))/2
-
-	// Compute the smaller of U1 and U2
-	U2 := float64(n1*n2) - U1
-	Usmall := math.Min(U1, U2)
-
-	var p float64
-	if !hasTies && n1 <= MannWhitneyExactLimit && n2 <= MannWhitneyExactLimit ||
-		hasTies && n1 <= MannWhitneyTiesExactLimit && n2 <= MannWhitneyTiesExactLimit {
-		// Use exact U distribution. U1 will be an integer.
-		if len(T) == 1 {
-			// All values are equal. Test is meaningless.
-			return nil, ErrSamplesEqual
-		}
-
-		dist := UDist{N1: n1, N2: n2, T: T}
-		switch alt {
-		case LocationDiffers:
-			if U1 == U2 {
-				// The distribution is symmetric about
-				// Usmall. Since the distribution is
-				// discrete, the CDF is discontinuous
-				// and if simply double CDF(Usmall),
-				// we'll double count the
-				// (non-infinitesimal) probability
-				// mass at Usmall. What we want is
-				// just the integral of the whole CDF,
-				// which is 1.
-				p = 1
-			} else {
-				p = dist.CDF(Usmall) * 2
-			}
-
-		case LocationLess:
-			p = dist.CDF(U1)
-
-		case LocationGreater:
-			p = 1 - dist.CDF(U1-1)
-		}
-	} else {
-		// Use normal approximation (with tie and continuity
-		// correction).
-		t := tieCorrection(T)
-		N := float64(n1 + n2)
-		μ_U := float64(n1*n2) / 2
-		σ_U := math.Sqrt(float64(n1*n2) * ((N + 1) - t/(N*(N-1))) / 12)
-		if σ_U == 0 {
-			return nil, ErrSamplesEqual
-		}
-		numer := U1 - μ_U
-		// Perform continuity correction.
-		switch alt {
-		case LocationDiffers:
-			numer -= mathx.Sign(numer) * 0.5
-		case LocationLess:
-			numer += 0.5
-		case LocationGreater:
-			numer -= 0.5
-		}
-		z := numer / σ_U
-		switch alt {
-		case LocationDiffers:
-			p = 2 * math.Min(StdNormal.CDF(z), 1-StdNormal.CDF(z))
-		case LocationLess:
-			p = StdNormal.CDF(z)
-		case LocationGreater:
-			p = 1 - StdNormal.CDF(z)
-		}
-	}
-
-	return &MannWhitneyUTestResult{N1: n1, N2: n2, U: U1,
-		AltHypothesis: alt, P: p}, nil
-}
-
-// labeledMerge merges sorted lists x1 and x2 into sorted list merged.
-// labels[i] is 1 or 2 depending on whether merged[i] is a value from
-// x1 or x2, respectively.
-func labeledMerge(x1, x2 []float64) (merged []float64, labels []byte) {
-	merged = make([]float64, len(x1)+len(x2))
-	labels = make([]byte, len(x1)+len(x2))
-
-	i, j, o := 0, 0, 0
-	for i < len(x1) && j < len(x2) {
-		if x1[i] < x2[j] {
-			merged[o] = x1[i]
-			labels[o] = 1
-			i++
-		} else {
-			merged[o] = x2[j]
-			labels[o] = 2
-			j++
-		}
-		o++
-	}
-	for ; i < len(x1); i++ {
-		merged[o] = x1[i]
-		labels[o] = 1
-		o++
-	}
-	for ; j < len(x2); j++ {
-		merged[o] = x2[j]
-		labels[o] = 2
-		o++
-	}
-	return
-}
-
-// tieCorrection computes the tie correction factor Σ_j (t_j³ - t_j)
-// where t_j is the number of ties in the j'th rank.
-func tieCorrection(ties []int) float64 {
-	t := 0
-	for _, tie := range ties {
-		t += tie*tie*tie - tie
-	}
-	return float64(t)
-}
diff --git a/vendor/github.com/aclements/go-moremath/vec/package.go b/vendor/github.com/aclements/go-moremath/vec/package.go
deleted file mode 100644
index 6bd2061..0000000
--- a/vendor/github.com/aclements/go-moremath/vec/package.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package vec provides functions for float64 vectors.
-package vec // import "github.com/aclements/go-moremath/vec"
diff --git a/vendor/github.com/aclements/go-moremath/vec/vec.go b/vendor/github.com/aclements/go-moremath/vec/vec.go
deleted file mode 100644
index 970d441..0000000
--- a/vendor/github.com/aclements/go-moremath/vec/vec.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package vec
-
-import "math"
-
-// Vectorize returns a function g(xs) that applies f to each x in xs.
-//
-// f may be evaluated in parallel and in any order.
-func Vectorize(f func(float64) float64) func(xs []float64) []float64 {
-	return func(xs []float64) []float64 {
-		return Map(f, xs)
-	}
-}
-
-// Map returns f(x) for each x in xs.
-//
-// f may be evaluated in parallel and in any order.
-func Map(f func(float64) float64, xs []float64) []float64 {
-	// TODO(austin) Parallelize
-	res := make([]float64, len(xs))
-	for i, x := range xs {
-		res[i] = f(x)
-	}
-	return res
-}
-
-// Linspace returns num values spaced evenly between lo and hi,
-// inclusive. If num is 1, this returns an array consisting of lo.
-func Linspace(lo, hi float64, num int) []float64 {
-	res := make([]float64, num)
-	if num == 1 {
-		res[0] = lo
-		return res
-	}
-	for i := 0; i < num; i++ {
-		res[i] = lo + float64(i)*(hi-lo)/float64(num-1)
-	}
-	return res
-}
-
-// Logspace returns num values spaced evenly on a logarithmic scale
-// between base**lo and base**hi, inclusive.
-func Logspace(lo, hi float64, num int, base float64) []float64 {
-	res := Linspace(lo, hi, num)
-	for i, x := range res {
-		res[i] = math.Pow(base, x)
-	}
-	return res
-}
-
-// Sum returns the sum of xs.
-func Sum(xs []float64) float64 {
-	sum := 0.0
-	for _, x := range xs {
-		sum += x
-	}
-	return sum
-}
-
-// Concat returns the concatenation of its arguments. It does not
-// modify its inputs.
-func Concat(xss ...[]float64) []float64 {
-	total := 0
-	for _, xs := range xss {
-		total += len(xs)
-	}
-	out := make([]float64, total)
-	pos := 0
-	for _, xs := range xss {
-		pos += copy(out[pos:], xs)
-	}
-	return out
-}
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
deleted file mode 100644
index 1b1b192..0000000
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-Go support for Protocol Buffers - Google's data interchange format
-
-Copyright 2010 The Go Authors.  All rights reserved.
-https://github.com/golang/protobuf
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651..0000000
--- a/vendor/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors.  All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
-	go install
-
-test: install generate-test-pbs
-	go test
-
-
-generate-test-pbs:
-	make install
-	make -C testdata
-	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
-	make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index e392575..0000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
-	"log"
-	"reflect"
-	"strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
-	if in.IsNil() {
-		return pb
-	}
-
-	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
-	in := reflect.ValueOf(src)
-	out := reflect.ValueOf(dst)
-	if out.IsNil() {
-		panic("proto: nil destination")
-	}
-	if in.Type() != out.Type() {
-		// Explicit test prior to mergeStruct so that mistyped nils will fail
-		panic("proto: type mismatch")
-	}
-	if in.IsNil() {
-		// Merging nil into non-nil is a quiet no-op
-		return
-	}
-	mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
-	sprop := GetProperties(in.Type())
-	for i := 0; i < in.NumField(); i++ {
-		f := in.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
-	}
-
-	if emIn, ok := extendable(in.Addr().Interface()); ok {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	uf := in.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return
-	}
-	uin := uf.Bytes()
-	if len(uin) > 0 {
-		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
-	}
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
-	if in.Type() == protoMessageType {
-		if !in.IsNil() {
-			if out.IsNil() {
-				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
-			} else {
-				Merge(out.Interface().(Message), in.Interface().(Message))
-			}
-		}
-		return
-	}
-	switch in.Kind() {
-	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-		reflect.String, reflect.Uint32, reflect.Uint64:
-		if !viaPtr && isProto3Zero(in) {
-			return
-		}
-		out.Set(in)
-	case reflect.Interface:
-		// Probably a oneof field; copy non-nil values.
-		if in.IsNil() {
-			return
-		}
-		// Allocate destination if it is not set, or set to a different type.
-		// Otherwise we will merge as normal.
-		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
-			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
-		}
-		mergeAny(out.Elem(), in.Elem(), false, nil)
-	case reflect.Map:
-		if in.Len() == 0 {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.MakeMap(in.Type()))
-		}
-		// For maps with value types of *T or []byte we need to deep copy each value.
-		elemKind := in.Type().Elem().Kind()
-		for _, key := range in.MapKeys() {
-			var val reflect.Value
-			switch elemKind {
-			case reflect.Ptr:
-				val = reflect.New(in.Type().Elem().Elem())
-				mergeAny(val, in.MapIndex(key), false, nil)
-			case reflect.Slice:
-				val = in.MapIndex(key)
-				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
-			default:
-				val = in.MapIndex(key)
-			}
-			out.SetMapIndex(key, val)
-		}
-	case reflect.Ptr:
-		if in.IsNil() {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.New(in.Elem().Type()))
-		}
-		mergeAny(out.Elem(), in.Elem(), true, nil)
-	case reflect.Slice:
-		if in.IsNil() {
-			return
-		}
-		if in.Type().Elem().Kind() == reflect.Uint8 {
-			// []byte is a scalar bytes field, not a repeated field.
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value, and should not
-			// be merged.
-			if prop != nil && prop.proto3 && in.Len() == 0 {
-				return
-			}
-
-			// Make a deep copy.
-			// Append to []byte{} instead of []byte(nil) so that we never end up
-			// with a nil result.
-			out.SetBytes(append([]byte{}, in.Bytes()...))
-			return
-		}
-		n := in.Len()
-		if out.IsNil() {
-			out.Set(reflect.MakeSlice(in.Type(), 0, n))
-		}
-		switch in.Type().Elem().Kind() {
-		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-			reflect.String, reflect.Uint32, reflect.Uint64:
-			out.Set(reflect.AppendSlice(out, in))
-		default:
-			for i := 0; i < n; i++ {
-				x := reflect.Indirect(reflect.New(in.Type().Elem()))
-				mergeAny(x, in.Index(i), false, nil)
-				out.Set(reflect.Append(out, x))
-			}
-		}
-	case reflect.Struct:
-		mergeStruct(out, in)
-	default:
-		// unknown type, so not a protocol buffer
-		log.Printf("proto: don't know how to copy %v", in)
-	}
-}
-
-func mergeExtension(out, in map[int32]Extension) {
-	for extNum, eIn := range in {
-		eOut := Extension{desc: eIn.desc}
-		if eIn.value != nil {
-			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
-			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
-			eOut.value = v.Interface()
-		}
-		if eIn.enc != nil {
-			eOut.enc = make([]byte, len(eIn.enc))
-			copy(eOut.enc, eIn.enc)
-		}
-
-		out[extNum] = eOut
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index aa20729..0000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,970 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
-	for shift := uint(0); shift < 64; shift += 7 {
-		if n >= len(buf) {
-			return 0, 0
-		}
-		b := uint64(buf[n])
-		n++
-		x |= (b & 0x7F) << shift
-		if (b & 0x80) == 0 {
-			return x, n
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
-	i := p.index
-	l := len(p.buf)
-
-	for shift := uint(0); shift < 64; shift += 7 {
-		if i >= l {
-			err = io.ErrUnexpectedEOF
-			return
-		}
-		b := p.buf[i]
-		i++
-		x |= (uint64(b) & 0x7F) << shift
-		if b < 0x80 {
-			p.index = i
-			return
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	err = errOverflow
-	return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	i := p.index
-	buf := p.buf
-
-	if i >= len(buf) {
-		return 0, io.ErrUnexpectedEOF
-	} else if buf[i] < 0x80 {
-		p.index++
-		return uint64(buf[i]), nil
-	} else if len(buf)-i < 10 {
-		return p.decodeVarintSlow()
-	}
-
-	var b uint64
-	// we already checked the first byte
-	x = uint64(buf[i]) - 0x80
-	i++
-
-	b = uint64(buf[i])
-	i++
-	x += b << 7
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 7
-
-	b = uint64(buf[i])
-	i++
-	x += b << 14
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 14
-
-	b = uint64(buf[i])
-	i++
-	x += b << 21
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 21
-
-	b = uint64(buf[i])
-	i++
-	x += b << 28
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 28
-
-	b = uint64(buf[i])
-	i++
-	x += b << 35
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 35
-
-	b = uint64(buf[i])
-	i++
-	x += b << 42
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 42
-
-	b = uint64(buf[i])
-	i++
-	x += b << 49
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 49
-
-	b = uint64(buf[i])
-	i++
-	x += b << 56
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 56
-
-	b = uint64(buf[i])
-	i++
-	x += b << 63
-	if b&0x80 == 0 {
-		goto done
-	}
-	// x -= 0x80 << 63 // Always zero.
-
-	return 0, errOverflow
-
-done:
-	p.index = i
-	return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 8
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-8])
-	x |= uint64(p.buf[i-7]) << 8
-	x |= uint64(p.buf[i-6]) << 16
-	x |= uint64(p.buf[i-5]) << 24
-	x |= uint64(p.buf[i-4]) << 32
-	x |= uint64(p.buf[i-3]) << 40
-	x |= uint64(p.buf[i-2]) << 48
-	x |= uint64(p.buf[i-1]) << 56
-	return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 4
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-4])
-	x |= uint64(p.buf[i-3]) << 8
-	x |= uint64(p.buf[i-2]) << 16
-	x |= uint64(p.buf[i-1]) << 24
-	return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
-	return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from  the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
-	return
-}
-
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
-	n, err := p.DecodeVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	nb := int(n)
-	if nb < 0 {
-		return nil, fmt.Errorf("proto: bad byte length %d", nb)
-	}
-	end := p.index + nb
-	if end < p.index || end > len(p.buf) {
-		return nil, io.ErrUnexpectedEOF
-	}
-
-	if !alloc {
-		// todo: check if can get more uses of alloc=false
-		buf = p.buf[p.index:end]
-		p.index += nb
-		return
-	}
-
-	buf = make([]byte, nb)
-	copy(buf, p.buf[p.index:])
-	p.index += nb
-	return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
-	buf, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return
-	}
-	return string(buf), nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
-	oi := o.index
-
-	err := o.skip(t, tag, wire)
-	if err != nil {
-		return err
-	}
-
-	if !unrecField.IsValid() {
-		return nil
-	}
-
-	ptr := structPointer_Bytes(base, unrecField)
-
-	// Add the skipped field to struct field
-	obuf := o.buf
-
-	o.buf = *ptr
-	o.EncodeVarint(uint64(tag<<3 | wire))
-	*ptr = append(o.buf, obuf[oi:o.index]...)
-
-	o.buf = obuf
-
-	return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
-	var u uint64
-	var err error
-
-	switch wire {
-	case WireVarint:
-		_, err = o.DecodeVarint()
-	case WireFixed64:
-		_, err = o.DecodeFixed64()
-	case WireBytes:
-		_, err = o.DecodeRawBytes(false)
-	case WireFixed32:
-		_, err = o.DecodeFixed32()
-	case WireStartGroup:
-		for {
-			u, err = o.DecodeVarint()
-			if err != nil {
-				break
-			}
-			fwire := int(u & 0x7)
-			if fwire == WireEndGroup {
-				break
-			}
-			ftag := int(u >> 3)
-			err = o.skip(t, ftag, fwire)
-			if err != nil {
-				break
-			}
-		}
-	default:
-		err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
-	}
-	return err
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves.  The method should reset the receiver before
-// decoding starts.  The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-type Unmarshaler interface {
-	Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
-	pb.Reset()
-	return UnmarshalMerge(buf, pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
-	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(Unmarshaler); ok {
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
-	enc, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-func (p *Buffer) DecodeGroup(pb Message) error {
-	typ, base, err := getbase(pb)
-	if err != nil {
-		return err
-	}
-	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb.  If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
-	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(Unmarshaler); ok {
-		err := u.Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-
-	typ, base, err := getbase(pb)
-	if err != nil {
-		return err
-	}
-
-	err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
-	if collectStats {
-		stats.Decode++
-	}
-
-	return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
-	var state errorState
-	required, reqFields := prop.reqCount, uint64(0)
-
-	var err error
-	for err == nil && o.index < len(o.buf) {
-		oi := o.index
-		var u uint64
-		u, err = o.DecodeVarint()
-		if err != nil {
-			break
-		}
-		wire := int(u & 0x7)
-		if wire == WireEndGroup {
-			if is_group {
-				if required > 0 {
-					// Not enough information to determine the exact field.
-					// (See below.)
-					return &RequiredNotSetError{"{Unknown}"}
-				}
-				return nil // input is satisfied
-			}
-			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
-		}
-		tag := int(u >> 3)
-		if tag <= 0 {
-			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
-		}
-		fieldnum, ok := prop.decoderTags.get(tag)
-		if !ok {
-			// Maybe it's an extension?
-			if prop.extendable {
-				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
-					if err = o.skip(st, tag, wire); err == nil {
-						extmap := e.extensionsWrite()
-						ext := extmap[int32(tag)] // may be missing
-						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
-						extmap[int32(tag)] = ext
-					}
-					continue
-				}
-			}
-			// Maybe it's a oneof?
-			if prop.oneofUnmarshaler != nil {
-				m := structPointer_Interface(base, st).(Message)
-				// First return value indicates whether tag is a oneof field.
-				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
-				if err == ErrInternalBadWireType {
-					// Map the error to something more descriptive.
-					// Do the formatting here to save generated code space.
-					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
-				}
-				if ok {
-					continue
-				}
-			}
-			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
-			continue
-		}
-		p := prop.Prop[fieldnum]
-
-		if p.dec == nil {
-			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
-			continue
-		}
-		dec := p.dec
-		if wire != WireStartGroup && wire != p.WireType {
-			if wire == WireBytes && p.packedDec != nil {
-				// a packable field
-				dec = p.packedDec
-			} else {
-				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
-				continue
-			}
-		}
-		decErr := dec(o, p, base)
-		if decErr != nil && !state.shouldContinue(decErr, p) {
-			err = decErr
-		}
-		if err == nil && p.Required {
-			// Successfully decoded a required field.
-			if tag <= 64 {
-				// use bitmap for fields 1-64 to catch field reuse.
-				var mask uint64 = 1 << uint64(tag-1)
-				if reqFields&mask == 0 {
-					// new required field
-					reqFields |= mask
-					required--
-				}
-			} else {
-				// This is imprecise. It can be fooled by a required field
-				// with a tag > 64 that is encoded twice; that's very rare.
-				// A fully correct implementation would require allocating
-				// a data structure, which we would like to avoid.
-				required--
-			}
-		}
-	}
-	if err == nil {
-		if is_group {
-			return io.ErrUnexpectedEOF
-		}
-		if state.err != nil {
-			return state.err
-		}
-		if required > 0 {
-			// Not enough information to determine the exact field. If we use extra
-			// CPU, we could determine the field only if the missing required field
-			// has a tag <= 64 and we check reqFields.
-			return &RequiredNotSetError{"{Unknown}"}
-		}
-	}
-	return err
-}
-
-// Individual type decoders
-// For each,
-//	u is the decoded value,
-//	v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
-	boolPoolSize   = 16
-	uint32PoolSize = 8
-	uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	if len(o.bools) == 0 {
-		o.bools = make([]bool, boolPoolSize)
-	}
-	o.bools[0] = u != 0
-	*structPointer_Bool(base, p.field) = &o.bools[0]
-	o.bools = o.bools[1:]
-	return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	*structPointer_BoolVal(base, p.field) = u != 0
-	return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
-	return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
-	return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word64_Set(structPointer_Word64(base, p.field), o, u)
-	return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
-	return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	*structPointer_String(base, p.field) = &s
-	return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	*structPointer_StringVal(base, p.field) = s
-	return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	*structPointer_Bytes(base, p.field) = b
-	return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	v := structPointer_BoolSlice(base, p.field)
-	*v = append(*v, u != 0)
-	return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
-	v := structPointer_BoolSlice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded bools
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-
-	y := *v
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		y = append(y, u != 0)
-	}
-
-	*v = y
-	return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	structPointer_Word32Slice(base, p.field).Append(uint32(u))
-	return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Slice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded int32s
-
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		v.Append(uint32(u))
-	}
-	return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-
-	structPointer_Word64Slice(base, p.field).Append(u)
-	return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64Slice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded int64s
-
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		v.Append(u)
-	}
-	return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	v := structPointer_StringSlice(base, p.field)
-	*v = append(*v, s)
-	return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	v := structPointer_BytesSlice(base, p.field)
-	*v = append(*v, b)
-	return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
-	raw, err := o.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	oi := o.index       // index at the end of this map entry
-	o.index -= len(raw) // move buffer back to start of map entry
-
-	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
-	if mptr.Elem().IsNil() {
-		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
-	}
-	v := mptr.Elem() // map[K]V
-
-	// Prepare addressable doubly-indirect placeholders for the key and value types.
-	// See enc_new_map for why.
-	keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
-	keybase := toStructPointer(keyptr.Addr())                  // **K
-
-	var valbase structPointer
-	var valptr reflect.Value
-	switch p.mtype.Elem().Kind() {
-	case reflect.Slice:
-		// []byte
-		var dummy []byte
-		valptr = reflect.ValueOf(&dummy)  // *[]byte
-		valbase = toStructPointer(valptr) // *[]byte
-	case reflect.Ptr:
-		// message; valptr is **Msg; need to allocate the intermediate pointer
-		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
-		valptr.Set(reflect.New(valptr.Type().Elem()))
-		valbase = toStructPointer(valptr)
-	default:
-		// everything else
-		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
-		valbase = toStructPointer(valptr.Addr())                   // **V
-	}
-
-	// Decode.
-	// This parses a restricted wire format, namely the encoding of a message
-	// with two fields. See enc_new_map for the format.
-	for o.index < oi {
-		// tagcode for key and value properties are always a single byte
-		// because they have tags 1 and 2.
-		tagcode := o.buf[o.index]
-		o.index++
-		switch tagcode {
-		case p.mkeyprop.tagcode[0]:
-			if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
-				return err
-			}
-		case p.mvalprop.tagcode[0]:
-			if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
-				return err
-			}
-		default:
-			// TODO: Should we silently skip this instead?
-			return fmt.Errorf("proto: bad map data tag %d", raw[0])
-		}
-	}
-	keyelem, valelem := keyptr.Elem(), valptr.Elem()
-	if !keyelem.IsValid() {
-		keyelem = reflect.Zero(p.mtype.Key())
-	}
-	if !valelem.IsValid() {
-		valelem = reflect.Zero(p.mtype.Elem())
-	}
-
-	v.SetMapIndex(keyelem, valelem)
-	return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
-	bas := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(bas) {
-		// allocate new nested message
-		bas = toStructPointer(reflect.New(p.stype))
-		structPointer_SetStructPointer(base, p.field, bas)
-	}
-	return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
-	raw, e := o.DecodeRawBytes(false)
-	if e != nil {
-		return e
-	}
-
-	bas := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(bas) {
-		// allocate new nested message
-		bas = toStructPointer(reflect.New(p.stype))
-		structPointer_SetStructPointer(base, p.field, bas)
-	}
-
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		iv := structPointer_Interface(bas, p.stype)
-		return iv.(Unmarshaler).Unmarshal(raw)
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	err = o.unmarshalType(p.stype, p.sprop, false, bas)
-	o.buf = obuf
-	o.index = oi
-
-	return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
-	return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
-	return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
-	v := reflect.New(p.stype)
-	bas := toStructPointer(v)
-	structPointer_StructPointerSlice(base, p.field).Append(bas)
-
-	if is_group {
-		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
-		return err
-	}
-
-	raw, err := o.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		iv := v.Interface()
-		return iv.(Unmarshaler).Unmarshal(raw)
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
-	o.buf = obuf
-	o.index = oi
-
-	return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index 2b30f84..0000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,1362 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-)
-
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
-	field string
-}
-
-func (e *RequiredNotSetError) Error() string {
-	return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
-var (
-	// errRepeatedHasNil is the error returned if Marshal is called with
-	// a struct with a repeated field containing a nil element.
-	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
-	// errOneofHasNil is the error returned if Marshal is called with
-	// a struct with a oneof field containing a nil element.
-	errOneofHasNil = errors.New("proto: oneof field has nil value")
-
-	// ErrNil is the error returned if Marshal is called with nil.
-	ErrNil = errors.New("proto: Marshal called with nil")
-
-	// ErrTooLarge is the error returned if Marshal is called with a
-	// message that encodes to >2GB.
-	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
-	var buf [maxVarintBytes]byte
-	var n int
-	for n = 0; x > 127; n++ {
-		buf[n] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	buf[n] = uint8(x)
-	n++
-	return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
-	for x >= 1<<7 {
-		p.buf = append(p.buf, uint8(x&0x7f|0x80))
-		x >>= 7
-	}
-	p.buf = append(p.buf, uint8(x))
-	return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
-	return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24),
-		uint8(x>>32),
-		uint8(x>>40),
-		uint8(x>>48),
-		uint8(x>>56))
-	return nil
-}
-
-func sizeFixed64(x uint64) int {
-	return 8
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24))
-	return nil
-}
-
-func sizeFixed32(x uint64) int {
-	return 4
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-func sizeZigzag64(x uint64) int {
-	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-func sizeZigzag32(x uint64) int {
-	return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
-	p.EncodeVarint(uint64(len(b)))
-	p.buf = append(p.buf, b...)
-	return nil
-}
-
-func sizeRawBytes(b []byte) int {
-	return sizeVarint(uint64(len(b))) +
-		len(b)
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
-	p.EncodeVarint(uint64(len(s)))
-	p.buf = append(p.buf, s...)
-	return nil
-}
-
-func sizeStringBytes(s string) int {
-	return sizeVarint(uint64(len(s))) +
-		len(s)
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
-	Marshal() ([]byte, error)
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
-	// Can the object marshal itself?
-	if m, ok := pb.(Marshaler); ok {
-		return m.Marshal()
-	}
-	p := NewBuffer(nil)
-	err := p.Marshal(pb)
-	if p.buf == nil && err == nil {
-		// Return a non-nil slice on success.
-		return []byte{}, nil
-	}
-	return p.buf, err
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return ErrNil
-	}
-	if err == nil {
-		var state errorState
-		err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
-	}
-	return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
-	// Can the object marshal itself?
-	if m, ok := pb.(Marshaler); ok {
-		data, err := m.Marshal()
-		p.buf = append(p.buf, data...)
-		return err
-	}
-
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return ErrNil
-	}
-	if err == nil {
-		err = p.enc_struct(GetProperties(t.Elem()), base)
-	}
-
-	if collectStats {
-		(stats).Encode++ // Parens are to work around a goimports bug.
-	}
-
-	if len(p.buf) > maxMarshalSize {
-		return ErrTooLarge
-	}
-	return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
-	// Can the object marshal itself?  If so, Size is slow.
-	// TODO: add Size to Marshaler, or add a Sizer interface.
-	if m, ok := pb.(Marshaler); ok {
-		b, _ := m.Marshal()
-		return len(b)
-	}
-
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return 0
-	}
-	if err == nil {
-		n = size_struct(GetProperties(t.Elem()), base)
-	}
-
-	if collectStats {
-		(stats).Size++ // Parens are to work around a goimports bug.
-	}
-
-	return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
-	v := *structPointer_Bool(base, p.field)
-	if v == nil {
-		return ErrNil
-	}
-	x := 0
-	if *v {
-		x = 1
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
-	v := *structPointer_BoolVal(base, p.field)
-	if !v {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, 1)
-	return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
-	v := *structPointer_Bool(base, p.field)
-	if v == nil {
-		return 0
-	}
-	return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
-	v := *structPointer_BoolVal(base, p.field)
-	if !v && !p.oneof {
-		return 0
-	}
-	return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return ErrNil
-	}
-	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return 0
-	}
-	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return ErrNil
-	}
-	x := word32_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return 0
-	}
-	x := word32_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64(base, p.field)
-	if word64_IsNil(v) {
-		return ErrNil
-	}
-	x := word64_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, x)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, x)
-	return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word64(base, p.field)
-	if word64_IsNil(v) {
-		return 0
-	}
-	x := word64_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(x)
-	return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(x)
-	return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
-	v := *structPointer_String(base, p.field)
-	if v == nil {
-		return ErrNil
-	}
-	x := *v
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeStringBytes(x)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
-	v := *structPointer_StringVal(base, p.field)
-	if v == "" {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeStringBytes(v)
-	return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
-	v := *structPointer_String(base, p.field)
-	if v == nil {
-		return 0
-	}
-	x := *v
-	n += len(p.tagcode)
-	n += sizeStringBytes(x)
-	return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
-	v := *structPointer_StringVal(base, p.field)
-	if v == "" && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeStringBytes(v)
-	return
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return v.IsNil()
-	}
-	return false
-}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return ErrNil
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, err := m.Marshal()
-		if err != nil && !state.shouldContinue(err, nil) {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-		return state.err
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return 0
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, _ := m.Marshal()
-		n0 := len(p.tagcode)
-		n1 := sizeRawBytes(data)
-		return n0 + n1
-	}
-
-	n0 := len(p.tagcode)
-	n1 := size_struct(p.sprop, structp)
-	n2 := sizeVarint(uint64(n1)) // size of encoded length
-	return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
-	var state errorState
-	b := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(b) {
-		return ErrNil
-	}
-
-	o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-	err := o.enc_struct(p.sprop, b)
-	if err != nil && !state.shouldContinue(err, nil) {
-		return err
-	}
-	o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
-	b := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(b) {
-		return 0
-	}
-
-	n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
-	n += size_struct(p.sprop, b)
-	n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return ErrNil
-	}
-	for _, x := range s {
-		o.buf = append(o.buf, p.tagcode...)
-		v := uint64(0)
-		if x {
-			v = 1
-		}
-		p.valEnc(o, v)
-	}
-	return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return 0
-	}
-	return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
-	for _, x := range s {
-		v := uint64(0)
-		if x {
-			v = 1
-		}
-		p.valEnc(o, v)
-	}
-	return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(l))
-	n += l // each bool takes exactly one byte
-	return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(s)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
-	s := *structPointer_Bytes(base, p.field)
-	if len(s) == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(s)
-	return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeRawBytes(s)
-	return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
-	s := *structPointer_Bytes(base, p.field)
-	if len(s) == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeRawBytes(s)
-	return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		p.valEnc(o, uint64(x))
-	}
-	return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		n += p.valSize(uint64(x))
-	}
-	return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		p.valEnc(buf, uint64(x))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		bufSize += p.valSize(uint64(x))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		x := s.Index(i)
-		p.valEnc(o, uint64(x))
-	}
-	return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		x := s.Index(i)
-		n += p.valSize(uint64(x))
-	}
-	return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		p.valEnc(buf, uint64(s.Index(i)))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		bufSize += p.valSize(uint64(s.Index(i)))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		p.valEnc(o, s.Index(i))
-	}
-	return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		n += p.valSize(s.Index(i))
-	}
-	return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		p.valEnc(buf, s.Index(i))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		bufSize += p.valSize(s.Index(i))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
-	ss := *structPointer_BytesSlice(base, p.field)
-	l := len(ss)
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(ss[i])
-	}
-	return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
-	ss := *structPointer_BytesSlice(base, p.field)
-	l := len(ss)
-	if l == 0 {
-		return 0
-	}
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		n += sizeRawBytes(ss[i])
-	}
-	return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
-	ss := *structPointer_StringSlice(base, p.field)
-	l := len(ss)
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeStringBytes(ss[i])
-	}
-	return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
-	ss := *structPointer_StringSlice(base, p.field)
-	l := len(ss)
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		n += sizeStringBytes(ss[i])
-	}
-	return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	for i := 0; i < l; i++ {
-		structp := s.Index(i)
-		if structPointer_IsNil(structp) {
-			return errRepeatedHasNil
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, err := m.Marshal()
-			if err != nil && !state.shouldContinue(err, nil) {
-				return err
-			}
-			o.buf = append(o.buf, p.tagcode...)
-			o.EncodeRawBytes(data)
-			continue
-		}
-
-		o.buf = append(o.buf, p.tagcode...)
-		err := o.enc_len_struct(p.sprop, structp, &state)
-		if err != nil && !state.shouldContinue(err, nil) {
-			if err == ErrNil {
-				return errRepeatedHasNil
-			}
-			return err
-		}
-	}
-	return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		structp := s.Index(i)
-		if structPointer_IsNil(structp) {
-			return // return the size up to this point
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, _ := m.Marshal()
-			n += sizeRawBytes(data)
-			continue
-		}
-
-		n0 := size_struct(p.sprop, structp)
-		n1 := sizeVarint(uint64(n0)) // size of encoded length
-		n += n0 + n1
-	}
-	return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
-	var state errorState
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	for i := 0; i < l; i++ {
-		b := s.Index(i)
-		if structPointer_IsNil(b) {
-			return errRepeatedHasNil
-		}
-
-		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
-		err := o.enc_struct(p.sprop, b)
-
-		if err != nil && !state.shouldContinue(err, nil) {
-			if err == ErrNil {
-				return errRepeatedHasNil
-			}
-			return err
-		}
-
-		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	}
-	return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
-	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
-	for i := 0; i < l; i++ {
-		b := s.Index(i)
-		if structPointer_IsNil(b) {
-			return // return size up to this point
-		}
-
-		n += size_struct(p.sprop, b)
-	}
-	return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
-	exts := structPointer_ExtMap(base, p.field)
-	if err := encodeExtensionsMap(*exts); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
-	exts := structPointer_Extensions(base, p.field)
-
-	v, mu := exts.extensionsRead()
-	if v == nil {
-		return nil
-	}
-
-	mu.Lock()
-	defer mu.Unlock()
-	if err := encodeExtensionsMap(v); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
-	// Fast-path for common cases: zero or one extensions.
-	if len(v) <= 1 {
-		for _, e := range v {
-			o.buf = append(o.buf, e.enc...)
-		}
-		return nil
-	}
-
-	// Sort keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(v))
-	for k := range v {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, k := range keys {
-		o.buf = append(o.buf, v[int32(k)].enc...)
-	}
-	return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
-	v := structPointer_ExtMap(base, p.field)
-	return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
-	v := structPointer_Extensions(base, p.field)
-	return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
-	var state errorState // XXX: or do we need to plumb this through?
-
-	/*
-		A map defined as
-			map<key_type, value_type> map_field = N;
-		is encoded in the same way as
-			message MapFieldEntry {
-				key_type key = 1;
-				value_type value = 2;
-			}
-			repeated MapFieldEntry map_field = N;
-	*/
-
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-	if v.Len() == 0 {
-		return nil
-	}
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	enc := func() error {
-		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
-			return err
-		}
-		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
-			return err
-		}
-		return nil
-	}
-
-	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		o.buf = append(o.buf, p.tagcode...)
-		if err := o.enc_len_thing(enc, &state); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	n := 0
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		// Tag codes for key and val are the responsibility of the sub-sizer.
-		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
-		valsize := p.mvalprop.size(p.mvalprop, valbase)
-		entry := keysize + valsize
-		// Add on tag code and length of map entry itself.
-		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
-	}
-	return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
-	// Prepare addressable doubly-indirect placeholders for the key and value types.
-	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
-	keycopy = reflect.New(mapType.Key()).Elem()                 // addressable K
-	keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
-	keyptr.Set(keycopy.Addr())                                  //
-	keybase = toStructPointer(keyptr.Addr())                    // **K
-
-	// Value types are more varied and require special handling.
-	switch mapType.Elem().Kind() {
-	case reflect.Slice:
-		// []byte
-		var dummy []byte
-		valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
-		valbase = toStructPointer(valcopy.Addr())
-	case reflect.Ptr:
-		// message; the generated field type is map[K]*Msg (so V is *Msg),
-		// so we only need one level of indirection.
-		valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
-		valbase = toStructPointer(valcopy.Addr())
-	default:
-		// everything else
-		valcopy = reflect.New(mapType.Elem()).Elem()                // addressable V
-		valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
-		valptr.Set(valcopy.Addr())                                  //
-		valbase = toStructPointer(valptr.Addr())                    // **V
-	}
-	return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
-	var state errorState
-	// Encode fields in tag order so that decoders may use optimizations
-	// that depend on the ordering.
-	// https://developers.google.com/protocol-buffers/docs/encoding#order
-	for _, i := range prop.order {
-		p := prop.Prop[i]
-		if p.enc != nil {
-			err := p.enc(o, p, base)
-			if err != nil {
-				if err == ErrNil {
-					if p.Required && state.err == nil {
-						state.err = &RequiredNotSetError{p.Name}
-					}
-				} else if err == errRepeatedHasNil {
-					// Give more context to nil values in repeated fields.
-					return errors.New("repeated field " + p.OrigName + " has nil element")
-				} else if !state.shouldContinue(err, p) {
-					return err
-				}
-			}
-			if len(o.buf) > maxMarshalSize {
-				return ErrTooLarge
-			}
-		}
-	}
-
-	// Do oneof fields.
-	if prop.oneofMarshaler != nil {
-		m := structPointer_Interface(base, prop.stype).(Message)
-		if err := prop.oneofMarshaler(m, o); err == ErrNil {
-			return errOneofHasNil
-		} else if err != nil {
-			return err
-		}
-	}
-
-	// Add unrecognized fields at the end.
-	if prop.unrecField.IsValid() {
-		v := *structPointer_Bytes(base, prop.unrecField)
-		if len(o.buf)+len(v) > maxMarshalSize {
-			return ErrTooLarge
-		}
-		if len(v) > 0 {
-			o.buf = append(o.buf, v...)
-		}
-	}
-
-	return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
-	for _, i := range prop.order {
-		p := prop.Prop[i]
-		if p.size != nil {
-			n += p.size(p, base)
-		}
-	}
-
-	// Add unrecognized fields at the end.
-	if prop.unrecField.IsValid() {
-		v := *structPointer_Bytes(base, prop.unrecField)
-		n += len(v)
-	}
-
-	// Factor in any oneof fields.
-	if prop.oneofSizer != nil {
-		m := structPointer_Interface(base, prop.stype).(Message)
-		n += prop.oneofSizer(m)
-	}
-
-	return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
-	return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
-	iLen := len(o.buf)
-	o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
-	iMsg := len(o.buf)
-	err := enc()
-	if err != nil && !state.shouldContinue(err, nil) {
-		return err
-	}
-	lMsg := len(o.buf) - iMsg
-	lLen := sizeVarint(uint64(lMsg))
-	switch x := lLen - (iMsg - iLen); {
-	case x > 0: // actual length is x bytes larger than the space we reserved
-		// Move msg x bytes right.
-		o.buf = append(o.buf, zeroes[:x]...)
-		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
-	case x < 0: // actual length is x bytes smaller than the space we reserved
-		// Move msg x bytes left.
-		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
-		o.buf = o.buf[:len(o.buf)+x] // x is negative
-	}
-	// Encode the length in the reserved space.
-	o.buf = o.buf[:iLen]
-	o.EncodeVarint(uint64(lMsg))
-	o.buf = o.buf[:len(o.buf)+lMsg]
-	return state.err
-}
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
-	err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
-	// Ignore unset required fields.
-	reqNotSet, ok := err.(*RequiredNotSetError)
-	if !ok {
-		return false
-	}
-	if s.err == nil {
-		if prop != nil {
-			err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
-		}
-		s.err = err
-	}
-	return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index 2ed1cf5..0000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
-	"bytes"
-	"log"
-	"reflect"
-	"strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
-  - Two messages are equal iff they are the same type,
-    corresponding fields are equal, unknown field sets
-    are equal, and extensions sets are equal.
-  - Two set scalar fields are equal iff their values are equal.
-    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN. If the message is defined
-    in a proto3 .proto file, fields are not "set"; specifically,
-    zero length proto3 "bytes" fields are equal (nil == {}).
-  - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal. Note a "bytes" field,
-    although represented by []byte, is not a repeated field and the
-    rule for the scalar fields described above applies.
-  - Two unset fields are equal.
-  - Two unknown field sets are equal if their current
-    encoded state is equal.
-  - Two extension sets are equal iff they have corresponding
-    elements that are pairwise equal.
-  - Two map fields are equal iff their lengths are the same,
-    and they contain the same set of elements. Zero-length map
-    fields are equal.
-  - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
-	if a == nil || b == nil {
-		return a == b
-	}
-	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
-	if v1.Type() != v2.Type() {
-		return false
-	}
-	if v1.Kind() == reflect.Ptr {
-		if v1.IsNil() {
-			return v2.IsNil()
-		}
-		if v2.IsNil() {
-			return false
-		}
-		v1, v2 = v1.Elem(), v2.Elem()
-	}
-	if v1.Kind() != reflect.Struct {
-		return false
-	}
-	return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
-	sprop := GetProperties(v1.Type())
-	for i := 0; i < v1.NumField(); i++ {
-		f := v1.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		f1, f2 := v1.Field(i), v2.Field(i)
-		if f.Type.Kind() == reflect.Ptr {
-			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
-				// both unset
-				continue
-			} else if n1 != n2 {
-				// set/unset mismatch
-				return false
-			}
-			b1, ok := f1.Interface().(raw)
-			if ok {
-				b2 := f2.Interface().(raw)
-				// RawMessage
-				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
-					return false
-				}
-				continue
-			}
-			f1, f2 = f1.Elem(), f2.Elem()
-		}
-		if !equalAny(f1, f2, sprop.Prop[i]) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_InternalExtensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
-			return false
-		}
-	}
-
-	uf := v1.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return true
-	}
-
-	u1 := uf.Bytes()
-	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
-	if !bytes.Equal(u1, u2) {
-		return false
-	}
-
-	return true
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
-	if v1.Type() == protoMessageType {
-		m1, _ := v1.Interface().(Message)
-		m2, _ := v2.Interface().(Message)
-		return Equal(m1, m2)
-	}
-	switch v1.Kind() {
-	case reflect.Bool:
-		return v1.Bool() == v2.Bool()
-	case reflect.Float32, reflect.Float64:
-		return v1.Float() == v2.Float()
-	case reflect.Int32, reflect.Int64:
-		return v1.Int() == v2.Int()
-	case reflect.Interface:
-		// Probably a oneof field; compare the inner values.
-		n1, n2 := v1.IsNil(), v2.IsNil()
-		if n1 || n2 {
-			return n1 == n2
-		}
-		e1, e2 := v1.Elem(), v2.Elem()
-		if e1.Type() != e2.Type() {
-			return false
-		}
-		return equalAny(e1, e2, nil)
-	case reflect.Map:
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for _, key := range v1.MapKeys() {
-			val2 := v2.MapIndex(key)
-			if !val2.IsValid() {
-				// This key was not found in the second map.
-				return false
-			}
-			if !equalAny(v1.MapIndex(key), val2, nil) {
-				return false
-			}
-		}
-		return true
-	case reflect.Ptr:
-		// Maps may have nil values in them, so check for nil.
-		if v1.IsNil() && v2.IsNil() {
-			return true
-		}
-		if v1.IsNil() != v2.IsNil() {
-			return false
-		}
-		return equalAny(v1.Elem(), v2.Elem(), prop)
-	case reflect.Slice:
-		if v1.Type().Elem().Kind() == reflect.Uint8 {
-			// short circuit: []byte
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value.
-			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
-				return true
-			}
-			if v1.IsNil() != v2.IsNil() {
-				return false
-			}
-			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
-		}
-
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for i := 0; i < v1.Len(); i++ {
-			if !equalAny(v1.Index(i), v2.Index(i), prop) {
-				return false
-			}
-		}
-		return true
-	case reflect.String:
-		return v1.Interface().(string) == v2.Interface().(string)
-	case reflect.Struct:
-		return equalStruct(v1, v2)
-	case reflect.Uint32, reflect.Uint64:
-		return v1.Uint() == v2.Uint()
-	}
-
-	// unknown type, so not a protocol buffer
-	log.Printf("proto: don't know how to compare %v", v1)
-	return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
-	em1, _ := x1.extensionsRead()
-	em2, _ := x2.extensionsRead()
-	return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
-	if len(em1) != len(em2) {
-		return false
-	}
-
-	for extNum, e1 := range em1 {
-		e2, ok := em2[extNum]
-		if !ok {
-			return false
-		}
-
-		m1, m2 := e1.value, e2.value
-
-		if m1 != nil && m2 != nil {
-			// Both are unencoded.
-			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-				return false
-			}
-			continue
-		}
-
-		// At least one is encoded. To do a semantically correct comparison
-		// we need to unmarshal them first.
-		var desc *ExtensionDesc
-		if m := extensionMaps[base]; m != nil {
-			desc = m[extNum]
-		}
-		if desc == nil {
-			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
-			continue
-		}
-		var err error
-		if m1 == nil {
-			m1, err = decodeExtension(e1.enc, desc)
-		}
-		if m2 == nil && err == nil {
-			m2, err = decodeExtension(e2.enc, desc)
-		}
-		if err != nil {
-			// The encoded form is invalid.
-			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
-			return false
-		}
-		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-			return false
-		}
-	}
-
-	return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index eaad218..0000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,587 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
-	Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	extensionsWrite() map[int32]Extension
-	extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	ExtensionMap() map[int32]Extension
-}
-
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
-	extendableProtoV1
-}
-
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
-	return e.ExtensionMap()
-}
-
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
-	return e.ExtensionMap(), notLocker{}
-}
-
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
-
-func (n notLocker) Lock()   {}
-func (n notLocker) Unlock() {}
-
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, bool) {
-	if ep, ok := p.(extendableProto); ok {
-		return ep, ok
-	}
-	if ep, ok := p.(extendableProtoV1); ok {
-		return extensionAdapter{ep}, ok
-	}
-	return nil, false
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
-	// The struct must be indirect so that if a user inadvertently copies a
-	// generated message and its embedded XXX_InternalExtensions, they
-	// avoid the mayhem of a copied mutex.
-	//
-	// The mutex serializes all logically read-only operations to p.extensionMap.
-	// It is up to the client to ensure that write operations to p.extensionMap are
-	// mutually exclusive with other accesses.
-	p *struct {
-		mu           sync.Mutex
-		extensionMap map[int32]Extension
-	}
-}
-
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
-	if e.p == nil {
-		e.p = new(struct {
-			mu           sync.Mutex
-			extensionMap map[int32]Extension
-		})
-		e.p.extensionMap = make(map[int32]Extension)
-	}
-	return e.p.extensionMap
-}
-
-// extensionsRead returns the extensions map for read-only use.  It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
-	if e.p == nil {
-		return nil, nil
-	}
-	return e.p.extensionMap, &e.p.mu
-}
-
-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
-	ExtendedType  Message     // nil pointer to the type that is being extended
-	ExtensionType interface{} // nil pointer to the extension type
-	Field         int32       // field number
-	Name          string      // fully-qualified name of extension, for text formatting
-	Tag           string      // protobuf tag style
-	Filename      string      // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
-	t := reflect.TypeOf(ed.ExtensionType)
-	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
-	// When an extension is stored in a message using SetExtension
-	// only desc and value are set. When the message is marshaled
-	// enc will be set to the encoded form of the message.
-	//
-	// When a message is unmarshaled and contains extensions, each
-	// extension will have only enc set. When such an extension is
-	// accessed using GetExtension (or GetExtensions) desc and value
-	// will be set.
-	desc  *ExtensionDesc
-	value interface{}
-	enc   []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
-	epb, ok := extendable(base)
-	if !ok {
-		return
-	}
-	extmap := epb.extensionsWrite()
-	extmap[id] = Extension{enc: b}
-}
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
-	for _, er := range pb.ExtensionRangeArray() {
-		if er.Start <= field && field <= er.End {
-			return true
-		}
-	}
-	return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
-	var pbi interface{} = pb
-	// Check the extended type.
-	if ea, ok := pbi.(extensionAdapter); ok {
-		pbi = ea.extendableProtoV1
-	}
-	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
-		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
-	}
-	// Check the range.
-	if !isExtensionField(pb, extension.Field) {
-		return errors.New("proto: bad extension number; not in declared ranges")
-	}
-	return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
-	base  reflect.Type
-	field int32
-}
-
-var extProp = struct {
-	sync.RWMutex
-	m map[extPropKey]*Properties
-}{
-	m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
-	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
-	extProp.RLock()
-	if prop, ok := extProp.m[key]; ok {
-		extProp.RUnlock()
-		return prop
-	}
-	extProp.RUnlock()
-
-	extProp.Lock()
-	defer extProp.Unlock()
-	// Check again.
-	if prop, ok := extProp.m[key]; ok {
-		return prop
-	}
-
-	prop := new(Properties)
-	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
-	extProp.m[key] = prop
-	return prop
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensions(e *XXX_InternalExtensions) error {
-	m, mu := e.extensionsRead()
-	if m == nil {
-		return nil // fast path
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	return encodeExtensionsMap(m)
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensionsMap(m map[int32]Extension) error {
-	for k, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		et := reflect.TypeOf(e.desc.ExtensionType)
-		props := extensionProperties(e.desc)
-
-		p := NewBuffer(nil)
-		// If e.value has type T, the encoder expects a *struct{ X T }.
-		// Pass a *T with a zero field and hope it all works out.
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(e.value))
-		if err := props.enc(p, props, toStructPointer(x)); err != nil {
-			return err
-		}
-		e.enc = p.buf
-		m[k] = e
-	}
-	return nil
-}
-
-func extensionsSize(e *XXX_InternalExtensions) (n int) {
-	m, mu := e.extensionsRead()
-	if m == nil {
-		return 0
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	return extensionsMapSize(m)
-}
-
-func extensionsMapSize(m map[int32]Extension) (n int) {
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		et := reflect.TypeOf(e.desc.ExtensionType)
-		props := extensionProperties(e.desc)
-
-		// If e.value has type T, the encoder expects a *struct{ X T }.
-		// Pass a *T with a zero field and hope it all works out.
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(e.value))
-		n += props.size(props, toStructPointer(x))
-	}
-	return
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
-	// TODO: Check types, field numbers, etc.?
-	epb, ok := extendable(pb)
-	if !ok {
-		return false
-	}
-	extmap, mu := epb.extensionsRead()
-	if extmap == nil {
-		return false
-	}
-	mu.Lock()
-	_, ok = extmap[extension.Field]
-	mu.Unlock()
-	return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return
-	}
-	// TODO: Check types, field numbers, etc.?
-	extmap := epb.extensionsWrite()
-	delete(extmap, extension.Field)
-}
-
-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return nil, errors.New("proto: not an extendable proto")
-	}
-
-	if err := checkExtensionTypes(epb, extension); err != nil {
-		return nil, err
-	}
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return defaultExtensionValue(extension)
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	e, ok := emap[extension.Field]
-	if !ok {
-		// defaultExtensionValue returns the default value or
-		// ErrMissingExtension if there is no default.
-		return defaultExtensionValue(extension)
-	}
-
-	if e.value != nil {
-		// Already decoded. Check the descriptor, though.
-		if e.desc != extension {
-			// This shouldn't happen. If it does, it means that
-			// GetExtension was called twice with two different
-			// descriptors with the same field number.
-			return nil, errors.New("proto: descriptor conflict")
-		}
-		return e.value, nil
-	}
-
-	v, err := decodeExtension(e.enc, extension)
-	if err != nil {
-		return nil, err
-	}
-
-	// Remember the decoded version and drop the encoded version.
-	// That way it is safe to mutate what we return.
-	e.value = v
-	e.desc = extension
-	e.enc = nil
-	emap[extension.Field] = e
-	return e.value, nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
-	t := reflect.TypeOf(extension.ExtensionType)
-	props := extensionProperties(extension)
-
-	sf, _, err := fieldDefault(t, props)
-	if err != nil {
-		return nil, err
-	}
-
-	if sf == nil || sf.value == nil {
-		// There is no default value.
-		return nil, ErrMissingExtension
-	}
-
-	if t.Kind() != reflect.Ptr {
-		// We do not need to return a Ptr, we can directly return sf.value.
-		return sf.value, nil
-	}
-
-	// We need to return an interface{} that is a pointer to sf.value.
-	value := reflect.New(t).Elem()
-	value.Set(reflect.New(value.Type().Elem()))
-	if sf.kind == reflect.Int32 {
-		// We may have an int32 or an enum, but the underlying data is int32.
-		// Since we can't set an int32 into a non int32 reflect.value directly
-		// set it as a int32.
-		value.Elem().SetInt(int64(sf.value.(int32)))
-	} else {
-		value.Elem().Set(reflect.ValueOf(sf.value))
-	}
-	return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
-	o := NewBuffer(b)
-
-	t := reflect.TypeOf(extension.ExtensionType)
-
-	props := extensionProperties(extension)
-
-	// t is a pointer to a struct, pointer to basic type or a slice.
-	// Allocate a "field" to store the pointer/slice itself; the
-	// pointer/slice will be stored here. We pass
-	// the address of this field to props.dec.
-	// This passes a zero field and a *t and lets props.dec
-	// interpret it as a *struct{ x t }.
-	value := reflect.New(t).Elem()
-
-	for {
-		// Discard wire type and field number varint. It isn't needed.
-		if _, err := o.DecodeVarint(); err != nil {
-			return nil, err
-		}
-
-		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
-			return nil, err
-		}
-
-		if o.index >= len(o.buf) {
-			break
-		}
-	}
-	return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return nil, errors.New("proto: not an extendable proto")
-	}
-	extensions = make([]interface{}, len(es))
-	for i, e := range es {
-		extensions[i], err = GetExtension(epb, e)
-		if err == ErrMissingExtension {
-			err = nil
-		}
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
-	}
-	registeredExtensions := RegisteredExtensions(pb)
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return nil, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	extensions := make([]*ExtensionDesc, 0, len(emap))
-	for extid, e := range emap {
-		desc := e.desc
-		if desc == nil {
-			desc = registeredExtensions[extid]
-			if desc == nil {
-				desc = &ExtensionDesc{Field: extid}
-			}
-		}
-
-		extensions = append(extensions, desc)
-	}
-	return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
-	epb, ok := extendable(pb)
-	if !ok {
-		return errors.New("proto: not an extendable proto")
-	}
-	if err := checkExtensionTypes(epb, extension); err != nil {
-		return err
-	}
-	typ := reflect.TypeOf(extension.ExtensionType)
-	if typ != reflect.TypeOf(value) {
-		return errors.New("proto: bad extension value type")
-	}
-	// nil extension values need to be caught early, because the
-	// encoder can't distinguish an ErrNil due to a nil extension
-	// from an ErrNil due to a missing field. Extensions are
-	// always optional, so the encoder would just swallow the error
-	// and drop all the extensions from the encoded message.
-	if reflect.ValueOf(value).IsNil() {
-		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
-	}
-
-	extmap := epb.extensionsWrite()
-	extmap[extension.Field] = Extension{desc: extension, value: value}
-	return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return
-	}
-	m := epb.extensionsWrite()
-	for k := range m {
-		delete(m, k)
-	}
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
-	st := reflect.TypeOf(desc.ExtendedType).Elem()
-	m := extensionMaps[st]
-	if m == nil {
-		m = make(map[int32]*ExtensionDesc)
-		extensionMaps[st] = m
-	}
-	if _, ok := m[desc.Field]; ok {
-		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
-	}
-	m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
-	return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index ac4ddbc..0000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,898 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers.  It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
-  - Names are turned from camel_case to CamelCase for export.
-  - There are no methods on v to set fields; just treat
-	them as structure fields.
-  - There are getters that return a field's value if set,
-	and return the field's default value if unset.
-	The getters work even if the receiver is a nil message.
-  - The zero value for a struct is its correct initialization state.
-	All desired fields must be set before marshaling.
-  - A Reset() method will restore a protobuf struct to its zero state.
-  - Non-repeated fields are pointers to the values; nil means unset.
-	That is, optional or required field int32 f becomes F *int32.
-  - Repeated fields are slices.
-  - Helper functions are available to aid the setting of fields.
-	msg.Foo = proto.String("hello") // set field
-  - Constants are defined to hold the default values of all fields that
-	have them.  They have the form Default_StructName_FieldName.
-	Because the getter methods handle defaulted values,
-	direct use of these constants should be rare.
-  - Enums are given type names and maps from names to values.
-	Enum values are prefixed by the enclosing message's name, or by the
-	enum's type name if it is a top-level enum. Enum types have a String
-	method, and a Enum method to assist in message construction.
-  - Nested messages, groups and enums have type names prefixed with the name of
-	the surrounding message type.
-  - Extensions are given descriptor names that start with E_,
-	followed by an underscore-delimited list of the nested messages
-	that contain it (if any) followed by the CamelCased name of the
-	extension field itself.  HasExtension, ClearExtension, GetExtension
-	and SetExtension are functions for manipulating extensions.
-  - Oneof field sets are given a single field in their message,
-	with distinguished wrapper types for each possible field value.
-  - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
-  - Non-repeated fields of non-message type are values instead of pointers.
-  - Getters are only generated for message and oneof fields.
-  - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
-	package example;
-
-	enum FOO { X = 17; }
-
-	message Test {
-	  required string label = 1;
-	  optional int32 type = 2 [default=77];
-	  repeated int64 reps = 3;
-	  optional group OptionalGroup = 4 {
-	    required string RequiredField = 5;
-	  }
-	  oneof union {
-	    int32 number = 6;
-	    string name = 7;
-	  }
-	}
-
-The resulting file, test.pb.go, is:
-
-	package example
-
-	import proto "github.com/golang/protobuf/proto"
-	import math "math"
-
-	type FOO int32
-	const (
-		FOO_X FOO = 17
-	)
-	var FOO_name = map[int32]string{
-		17: "X",
-	}
-	var FOO_value = map[string]int32{
-		"X": 17,
-	}
-
-	func (x FOO) Enum() *FOO {
-		p := new(FOO)
-		*p = x
-		return p
-	}
-	func (x FOO) String() string {
-		return proto.EnumName(FOO_name, int32(x))
-	}
-	func (x *FOO) UnmarshalJSON(data []byte) error {
-		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
-		if err != nil {
-			return err
-		}
-		*x = FOO(value)
-		return nil
-	}
-
-	type Test struct {
-		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
-		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
-		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
-		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
-		// Types that are valid to be assigned to Union:
-		//	*Test_Number
-		//	*Test_Name
-		Union            isTest_Union `protobuf_oneof:"union"`
-		XXX_unrecognized []byte       `json:"-"`
-	}
-	func (m *Test) Reset()         { *m = Test{} }
-	func (m *Test) String() string { return proto.CompactTextString(m) }
-	func (*Test) ProtoMessage() {}
-
-	type isTest_Union interface {
-		isTest_Union()
-	}
-
-	type Test_Number struct {
-		Number int32 `protobuf:"varint,6,opt,name=number"`
-	}
-	type Test_Name struct {
-		Name string `protobuf:"bytes,7,opt,name=name"`
-	}
-
-	func (*Test_Number) isTest_Union() {}
-	func (*Test_Name) isTest_Union()   {}
-
-	func (m *Test) GetUnion() isTest_Union {
-		if m != nil {
-			return m.Union
-		}
-		return nil
-	}
-	const Default_Test_Type int32 = 77
-
-	func (m *Test) GetLabel() string {
-		if m != nil && m.Label != nil {
-			return *m.Label
-		}
-		return ""
-	}
-
-	func (m *Test) GetType() int32 {
-		if m != nil && m.Type != nil {
-			return *m.Type
-		}
-		return Default_Test_Type
-	}
-
-	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
-		if m != nil {
-			return m.Optionalgroup
-		}
-		return nil
-	}
-
-	type Test_OptionalGroup struct {
-		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
-	}
-	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
-	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
-	func (m *Test_OptionalGroup) GetRequiredField() string {
-		if m != nil && m.RequiredField != nil {
-			return *m.RequiredField
-		}
-		return ""
-	}
-
-	func (m *Test) GetNumber() int32 {
-		if x, ok := m.GetUnion().(*Test_Number); ok {
-			return x.Number
-		}
-		return 0
-	}
-
-	func (m *Test) GetName() string {
-		if x, ok := m.GetUnion().(*Test_Name); ok {
-			return x.Name
-		}
-		return ""
-	}
-
-	func init() {
-		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
-	}
-
-To create and play with a Test object:
-
-	package main
-
-	import (
-		"log"
-
-		"github.com/golang/protobuf/proto"
-		pb "./example.pb"
-	)
-
-	func main() {
-		test := &pb.Test{
-			Label: proto.String("hello"),
-			Type:  proto.Int32(17),
-			Reps:  []int64{1, 2, 3},
-			Optionalgroup: &pb.Test_OptionalGroup{
-				RequiredField: proto.String("good bye"),
-			},
-			Union: &pb.Test_Name{"fred"},
-		}
-		data, err := proto.Marshal(test)
-		if err != nil {
-			log.Fatal("marshaling error: ", err)
-		}
-		newTest := &pb.Test{}
-		err = proto.Unmarshal(data, newTest)
-		if err != nil {
-			log.Fatal("unmarshaling error: ", err)
-		}
-		// Now test and newTest contain the same data.
-		if test.GetLabel() != newTest.GetLabel() {
-			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
-		}
-		// Use a type switch to determine which oneof was set.
-		switch u := test.Union.(type) {
-		case *pb.Test_Number: // u.Number contains the number.
-		case *pb.Test_Name: // u.Name contains the string.
-		}
-		// etc.
-	}
-*/
-package proto
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"reflect"
-	"sort"
-	"strconv"
-	"sync"
-)
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
-	Reset()
-	String() string
-	ProtoMessage()
-}
-
-// Stats records allocation details about the protocol buffer encoders
-// and decoders.  Useful for tuning the library itself.
-type Stats struct {
-	Emalloc uint64 // mallocs in encode
-	Dmalloc uint64 // mallocs in decode
-	Encode  uint64 // number of encodes
-	Decode  uint64 // number of decodes
-	Chit    uint64 // number of cache hits
-	Cmiss   uint64 // number of cache misses
-	Size    uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers.  It may be reused between invocations to
-// reduce memory usage.  It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
-	buf   []byte // encode/decode byte stream
-	index int    // read point
-
-	// pools of basic types to amortize allocation.
-	bools   []bool
-	uint32s []uint32
-	uint64s []uint64
-
-	// extra pools, only used with pointer_reflect.go
-	int32s   []int32
-	int64s   []int64
-	float32s []float32
-	float64s []float64
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
-	return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
-	p.buf = p.buf[0:0] // for reading/writing
-	p.index = 0        // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
-	p.buf = s
-	p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
-	return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
-	return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
-	p := new(int32)
-	*p = int32(v)
-	return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
-	return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
-	return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
-	return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
-	return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
-	return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
-	return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name.  Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
-	s, ok := m[v]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
-	if data[0] == '"' {
-		// New style: enums are strings.
-		var repr string
-		if err := json.Unmarshal(data, &repr); err != nil {
-			return -1, err
-		}
-		val, ok := m[repr]
-		if !ok {
-			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
-		}
-		return val, nil
-	}
-	// Old style: enums are ints.
-	var val int32
-	if err := json.Unmarshal(data, &val); err != nil {
-		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
-	}
-	return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
-	var u uint64
-
-	obuf := p.buf
-	index := p.index
-	p.buf = b
-	p.index = 0
-	depth := 0
-
-	fmt.Printf("\n--- %s ---\n", s)
-
-out:
-	for {
-		for i := 0; i < depth; i++ {
-			fmt.Print("  ")
-		}
-
-		index := p.index
-		if index == len(p.buf) {
-			break
-		}
-
-		op, err := p.DecodeVarint()
-		if err != nil {
-			fmt.Printf("%3d: fetching op err %v\n", index, err)
-			break out
-		}
-		tag := op >> 3
-		wire := op & 7
-
-		switch wire {
-		default:
-			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
-				index, tag, wire)
-			break out
-
-		case WireBytes:
-			var r []byte
-
-			r, err = p.DecodeRawBytes(false)
-			if err != nil {
-				break out
-			}
-			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
-			if len(r) <= 6 {
-				for i := 0; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			} else {
-				for i := 0; i < 3; i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-				fmt.Printf(" ..")
-				for i := len(r) - 3; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			}
-			fmt.Printf("\n")
-
-		case WireFixed32:
-			u, err = p.DecodeFixed32()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
-		case WireFixed64:
-			u, err = p.DecodeFixed64()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
-		case WireVarint:
-			u, err = p.DecodeVarint()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
-		case WireStartGroup:
-			fmt.Printf("%3d: t=%3d start\n", index, tag)
-			depth++
-
-		case WireEndGroup:
-			depth--
-			fmt.Printf("%3d: t=%3d end\n", index, tag)
-		}
-	}
-
-	if depth != 0 {
-		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
-	}
-	fmt.Printf("\n")
-
-	p.buf = obuf
-	p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
-	setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
-	v = v.Elem()
-
-	defaultMu.RLock()
-	dm, ok := defaults[v.Type()]
-	defaultMu.RUnlock()
-	if !ok {
-		dm = buildDefaultMessage(v.Type())
-		defaultMu.Lock()
-		defaults[v.Type()] = dm
-		defaultMu.Unlock()
-	}
-
-	for _, sf := range dm.scalars {
-		f := v.Field(sf.index)
-		if !f.IsNil() {
-			// field already set
-			continue
-		}
-		dv := sf.value
-		if dv == nil && !zeros {
-			// no explicit default, and don't want to set zeros
-			continue
-		}
-		fptr := f.Addr().Interface() // **T
-		// TODO: Consider batching the allocations we do here.
-		switch sf.kind {
-		case reflect.Bool:
-			b := new(bool)
-			if dv != nil {
-				*b = dv.(bool)
-			}
-			*(fptr.(**bool)) = b
-		case reflect.Float32:
-			f := new(float32)
-			if dv != nil {
-				*f = dv.(float32)
-			}
-			*(fptr.(**float32)) = f
-		case reflect.Float64:
-			f := new(float64)
-			if dv != nil {
-				*f = dv.(float64)
-			}
-			*(fptr.(**float64)) = f
-		case reflect.Int32:
-			// might be an enum
-			if ft := f.Type(); ft != int32PtrType {
-				// enum
-				f.Set(reflect.New(ft.Elem()))
-				if dv != nil {
-					f.Elem().SetInt(int64(dv.(int32)))
-				}
-			} else {
-				// int32 field
-				i := new(int32)
-				if dv != nil {
-					*i = dv.(int32)
-				}
-				*(fptr.(**int32)) = i
-			}
-		case reflect.Int64:
-			i := new(int64)
-			if dv != nil {
-				*i = dv.(int64)
-			}
-			*(fptr.(**int64)) = i
-		case reflect.String:
-			s := new(string)
-			if dv != nil {
-				*s = dv.(string)
-			}
-			*(fptr.(**string)) = s
-		case reflect.Uint8:
-			// exceptional case: []byte
-			var b []byte
-			if dv != nil {
-				db := dv.([]byte)
-				b = make([]byte, len(db))
-				copy(b, db)
-			} else {
-				b = []byte{}
-			}
-			*(fptr.(*[]byte)) = b
-		case reflect.Uint32:
-			u := new(uint32)
-			if dv != nil {
-				*u = dv.(uint32)
-			}
-			*(fptr.(**uint32)) = u
-		case reflect.Uint64:
-			u := new(uint64)
-			if dv != nil {
-				*u = dv.(uint64)
-			}
-			*(fptr.(**uint64)) = u
-		default:
-			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
-		}
-	}
-
-	for _, ni := range dm.nested {
-		f := v.Field(ni)
-		// f is *T or []*T or map[T]*T
-		switch f.Kind() {
-		case reflect.Ptr:
-			if f.IsNil() {
-				continue
-			}
-			setDefaults(f, recur, zeros)
-
-		case reflect.Slice:
-			for i := 0; i < f.Len(); i++ {
-				e := f.Index(i)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-
-		case reflect.Map:
-			for _, k := range f.MapKeys() {
-				e := f.MapIndex(k)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-		}
-	}
-}
-
-var (
-	// defaults maps a protocol buffer struct type to a slice of the fields,
-	// with its scalar fields set to their proto-declared non-zero default values.
-	defaultMu sync.RWMutex
-	defaults  = make(map[reflect.Type]defaultMessage)
-
-	int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
-	scalars []scalarField
-	nested  []int // struct field index of nested messages
-}
-
-type scalarField struct {
-	index int          // struct field index
-	kind  reflect.Kind // element type (the T in *T or []T)
-	value interface{}  // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
-	sprop := GetProperties(t)
-	for _, prop := range sprop.Prop {
-		fi, ok := sprop.decoderTags.get(prop.Tag)
-		if !ok {
-			// XXX_unrecognized
-			continue
-		}
-		ft := t.Field(fi).Type
-
-		sf, nested, err := fieldDefault(ft, prop)
-		switch {
-		case err != nil:
-			log.Print(err)
-		case nested:
-			dm.nested = append(dm.nested, fi)
-		case sf != nil:
-			sf.index = fi
-			dm.scalars = append(dm.scalars, *sf)
-		}
-	}
-
-	return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
-	var canHaveDefault bool
-	switch ft.Kind() {
-	case reflect.Ptr:
-		if ft.Elem().Kind() == reflect.Struct {
-			nestedMessage = true
-		} else {
-			canHaveDefault = true // proto2 scalar field
-		}
-
-	case reflect.Slice:
-		switch ft.Elem().Kind() {
-		case reflect.Ptr:
-			nestedMessage = true // repeated message
-		case reflect.Uint8:
-			canHaveDefault = true // bytes field
-		}
-
-	case reflect.Map:
-		if ft.Elem().Kind() == reflect.Ptr {
-			nestedMessage = true // map with message values
-		}
-	}
-
-	if !canHaveDefault {
-		if nestedMessage {
-			return nil, true, nil
-		}
-		return nil, false, nil
-	}
-
-	// We now know that ft is a pointer or slice.
-	sf = &scalarField{kind: ft.Elem().Kind()}
-
-	// scalar fields without defaults
-	if !prop.HasDefault {
-		return sf, false, nil
-	}
-
-	// a scalar field: either *T or []byte
-	switch ft.Elem().Kind() {
-	case reflect.Bool:
-		x, err := strconv.ParseBool(prop.Default)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Float32:
-		x, err := strconv.ParseFloat(prop.Default, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
-		}
-		sf.value = float32(x)
-	case reflect.Float64:
-		x, err := strconv.ParseFloat(prop.Default, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Int32:
-		x, err := strconv.ParseInt(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
-		}
-		sf.value = int32(x)
-	case reflect.Int64:
-		x, err := strconv.ParseInt(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.String:
-		sf.value = prop.Default
-	case reflect.Uint8:
-		// []byte (not *uint8)
-		sf.value = []byte(prop.Default)
-	case reflect.Uint32:
-		x, err := strconv.ParseUint(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
-		}
-		sf.value = uint32(x)
-	case reflect.Uint64:
-		x, err := strconv.ParseUint(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	default:
-		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
-	}
-
-	return sf, false, nil
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
-func mapKeys(vs []reflect.Value) sort.Interface {
-	s := mapKeySorter{
-		vs: vs,
-		// default Less function: textual comparison
-		less: func(a, b reflect.Value) bool {
-			return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
-		},
-	}
-
-	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
-	// numeric keys are sorted numerically.
-	if len(vs) == 0 {
-		return s
-	}
-	switch vs[0].Kind() {
-	case reflect.Int32, reflect.Int64:
-		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
-	case reflect.Uint32, reflect.Uint64:
-		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
-	}
-
-	return s
-}
-
-type mapKeySorter struct {
-	vs   []reflect.Value
-	less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int      { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
-	return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint32, reflect.Uint64:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.String:
-		return v.String() == ""
-	}
-	return false
-}
-
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
-
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index fd982de..0000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-//   message MessageSet {
-//     repeated group Item = 1 {
-//       required int32 type_id = 2;
-//       required string message = 3;
-//     };
-//   }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
-	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
-	Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
-	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
-	XXX_unrecognized []byte
-	// TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
-	MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return nil
-	}
-	id := mti.MessageTypeId()
-	for _, item := range ms.Item {
-		if *item.TypeId == id {
-			return item
-		}
-	}
-	return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
-	if ms.find(pb) != nil {
-		return true
-	}
-	return false
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
-	if item := ms.find(pb); item != nil {
-		return Unmarshal(item.Message, pb)
-	}
-	if _, ok := pb.(messageTypeIder); !ok {
-		return errNoMessageTypeID
-	}
-	return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
-	msg, err := Marshal(pb)
-	if err != nil {
-		return err
-	}
-	if item := ms.find(pb); item != nil {
-		// reuse existing item
-		item.Message = msg
-		return nil
-	}
-
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return errNoMessageTypeID
-	}
-
-	mtid := mti.MessageTypeId()
-	ms.Item = append(ms.Item, &_MessageSet_Item{
-		TypeId:  &mtid,
-		Message: msg,
-	})
-	return nil
-}
-
-func (ms *messageSet) Reset()         { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage()     {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
-	i := 0
-	for ; buf[i]&0x80 != 0; i++ {
-	}
-	return buf[i+1:]
-}
-
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		if err := encodeExtensions(exts); err != nil {
-			return nil, err
-		}
-		m, _ = exts.extensionsRead()
-	case map[int32]Extension:
-		if err := encodeExtensionsMap(exts); err != nil {
-			return nil, err
-		}
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-
-	// Sort extension IDs to provide a deterministic encoding.
-	// See also enc_map in encode.go.
-	ids := make([]int, 0, len(m))
-	for id := range m {
-		ids = append(ids, int(id))
-	}
-	sort.Ints(ids)
-
-	ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
-	for _, id := range ids {
-		e := m[int32(id)]
-		// Remove the wire type and field number varint, as well as the length varint.
-		msg := skipVarint(skipVarint(e.enc))
-
-		ms.Item = append(ms.Item, &_MessageSet_Item{
-			TypeId:  Int32(int32(id)),
-			Message: msg,
-		})
-	}
-	return Marshal(ms)
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m = exts.extensionsWrite()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return errors.New("proto: not an extension map")
-	}
-
-	ms := new(messageSet)
-	if err := Unmarshal(buf, ms); err != nil {
-		return err
-	}
-	for _, item := range ms.Item {
-		id := *item.TypeId
-		msg := item.Message
-
-		// Restore wire type and field number varint, plus length varint.
-		// Be careful to preserve duplicate items.
-		b := EncodeVarint(uint64(id)<<3 | WireBytes)
-		if ext, ok := m[id]; ok {
-			// Existing data; rip off the tag and length varint
-			// so we join the new data correctly.
-			// We can assume that ext.enc is set because we are unmarshaling.
-			o := ext.enc[len(b):]   // skip wire type and field number
-			_, n := DecodeVarint(o) // calculate length of length varint
-			o = o[n:]               // skip length varint
-			msg = append(o, msg...) // join old data and new data
-		}
-		b = append(b, EncodeVarint(uint64(len(msg)))...)
-		b = append(b, msg...)
-
-		m[id] = Extension{enc: b}
-	}
-	return nil
-}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m, _ = exts.extensionsRead()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-	var b bytes.Buffer
-	b.WriteByte('{')
-
-	// Process the map in key order for deterministic output.
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-	for i, id := range ids {
-		ext := m[id]
-		if i > 0 {
-			b.WriteByte(',')
-		}
-
-		msd, ok := messageSetMap[id]
-		if !ok {
-			// Unknown type; we can't render it, so skip it.
-			continue
-		}
-		fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-		x := ext.value
-		if x == nil {
-			x = reflect.New(msd.t.Elem()).Interface()
-			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-				return nil, err
-			}
-		}
-		d, err := json.Marshal(x)
-		if err != nil {
-			return nil, err
-		}
-		b.Write(d)
-	}
-	b.WriteByte('}')
-	return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-	// Common-case fast path.
-	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-		return nil
-	}
-
-	// This is fairly tricky, and it's not clear that it is needed.
-	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-	t    reflect.Type // pointer to struct
-	name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-	messageSetMap[fieldNum] = messageSetDesc{
-		t:    reflect.TypeOf(m),
-		name: name,
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index 6b5567d..0000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-//	type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
-	return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
-	return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
-	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
-	return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
-	return f != ^field(0)
-}
-
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
-	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
-	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
-	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
-	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
-	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
-	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
-	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
-	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
-	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
-	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
-	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
-	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
-	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
-	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int                  { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *v is nil.
-func word32_IsNil(p word32) bool {
-	return *p == nil
-}
-
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
-	if len(o.uint32s) == 0 {
-		o.uint32s = make([]uint32, uint32PoolSize)
-	}
-	o.uint32s[0] = x
-	*p = &o.uint32s[0]
-	o.uint32s = o.uint32s[1:]
-}
-
-// Get gets the value pointed at by *v.
-func word32_Get(p word32) uint32 {
-	return **p
-}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
-	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
-	*p = x
-}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
-	return *p
-}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
-	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
-func (v *word32Slice) Len() int           { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
-	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
-	if len(o.uint64s) == 0 {
-		o.uint64s = make([]uint64, uint64PoolSize)
-	}
-	o.uint64s[0] = x
-	*p = &o.uint64s[0]
-	o.uint64s = o.uint64s[1:]
-}
-
-func word64_IsNil(p word64) bool {
-	return *p == nil
-}
-
-func word64_Get(p word64) uint64 {
-	return **p
-}
-
-func structPointer_Word64(p structPointer, f field) word64 {
-	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
-	*p = x
-}
-
-func word64Val_Get(p word64Val) uint64 {
-	return *p
-}
-
-func structPointer_Word64Val(p structPointer, f field) word64Val {
-	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
-
-func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
-func (v *word64Slice) Len() int           { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
-
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
-	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index ec2289c..0000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
-	WireVarint     = 0
-	WireFixed64    = 1
-	WireBytes      = 2
-	WireStartGroup = 3
-	WireEndGroup   = 4
-	WireFixed32    = 5
-)
-
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
-	fastTags []int
-	slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
-	if t > 0 && t < tagMapFastLimit {
-		if t >= len(p.fastTags) {
-			return 0, false
-		}
-		fi := p.fastTags[t]
-		return fi, fi >= 0
-	}
-	fi, ok := p.slowTags[t]
-	return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
-	if t > 0 && t < tagMapFastLimit {
-		for len(p.fastTags) < t+1 {
-			p.fastTags = append(p.fastTags, -1)
-		}
-		p.fastTags[t] = fi
-		return
-	}
-	if p.slowTags == nil {
-		p.slowTags = make(map[int]int)
-	}
-	p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
-	Prop             []*Properties  // properties for each field
-	reqCount         int            // required count
-	decoderTags      tagMap         // map from proto tag to struct field number
-	decoderOrigNames map[string]int // map from original name to struct field number
-	order            []int          // list of struct field numbers in tag order
-	unrecField       field          // field id of the XXX_unrecognized []byte field
-	extendable       bool           // is this an extendable proto
-
-	oneofMarshaler   oneofMarshaler
-	oneofUnmarshaler oneofUnmarshaler
-	oneofSizer       oneofSizer
-	stype            reflect.Type
-
-	// OneofTypes contains information about the oneof fields in this message.
-	// It is keyed by the original name of a field.
-	OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
-	Type  reflect.Type // pointer to generated struct type for this oneof field
-	Field int          // struct field number of the containing oneof in the message
-	Prop  *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
-	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
-	Name     string // name of the field, for error messages
-	OrigName string // original name before protocol compiler (always set)
-	JSONName string // name to use for JSON; determined by protoc
-	Wire     string
-	WireType int
-	Tag      int
-	Required bool
-	Optional bool
-	Repeated bool
-	Packed   bool   // relevant for repeated primitives only
-	Enum     string // set for enum types only
-	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
-	oneof    bool   // whether this is a oneof field
-
-	Default    string // default value
-	HasDefault bool   // whether an explicit default was provided
-	def_uint64 uint64
-
-	enc           encoder
-	valEnc        valueEncoder // set for bool and numeric types only
-	field         field
-	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
-	tagbuf        [8]byte
-	stype         reflect.Type      // set for struct types only
-	sprop         *StructProperties // set for struct types only
-	isMarshaler   bool
-	isUnmarshaler bool
-
-	mtype    reflect.Type // set for map types only
-	mkeyprop *Properties  // set for map types only
-	mvalprop *Properties  // set for map types only
-
-	size    sizer
-	valSize valueSizer // set for bool and numeric types only
-
-	dec    decoder
-	valDec valueDecoder // set for bool and numeric types only
-
-	// If this is a packable field, this will be the decoder for the packed version of the field.
-	packedDec decoder
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
-	s := p.Wire
-	s = ","
-	s += strconv.Itoa(p.Tag)
-	if p.Required {
-		s += ",req"
-	}
-	if p.Optional {
-		s += ",opt"
-	}
-	if p.Repeated {
-		s += ",rep"
-	}
-	if p.Packed {
-		s += ",packed"
-	}
-	s += ",name=" + p.OrigName
-	if p.JSONName != p.OrigName {
-		s += ",json=" + p.JSONName
-	}
-	if p.proto3 {
-		s += ",proto3"
-	}
-	if p.oneof {
-		s += ",oneof"
-	}
-	if len(p.Enum) > 0 {
-		s += ",enum=" + p.Enum
-	}
-	if p.HasDefault {
-		s += ",def=" + p.Default
-	}
-	return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
-	// "bytes,49,opt,name=foo,def=hello!"
-	fields := strings.Split(s, ",") // breaks def=, but handled below.
-	if len(fields) < 2 {
-		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
-		return
-	}
-
-	p.Wire = fields[0]
-	switch p.Wire {
-	case "varint":
-		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeVarint
-		p.valDec = (*Buffer).DecodeVarint
-		p.valSize = sizeVarint
-	case "fixed32":
-		p.WireType = WireFixed32
-		p.valEnc = (*Buffer).EncodeFixed32
-		p.valDec = (*Buffer).DecodeFixed32
-		p.valSize = sizeFixed32
-	case "fixed64":
-		p.WireType = WireFixed64
-		p.valEnc = (*Buffer).EncodeFixed64
-		p.valDec = (*Buffer).DecodeFixed64
-		p.valSize = sizeFixed64
-	case "zigzag32":
-		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeZigzag32
-		p.valDec = (*Buffer).DecodeZigzag32
-		p.valSize = sizeZigzag32
-	case "zigzag64":
-		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeZigzag64
-		p.valDec = (*Buffer).DecodeZigzag64
-		p.valSize = sizeZigzag64
-	case "bytes", "group":
-		p.WireType = WireBytes
-		// no numeric converter for non-numeric types
-	default:
-		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
-		return
-	}
-
-	var err error
-	p.Tag, err = strconv.Atoi(fields[1])
-	if err != nil {
-		return
-	}
-
-	for i := 2; i < len(fields); i++ {
-		f := fields[i]
-		switch {
-		case f == "req":
-			p.Required = true
-		case f == "opt":
-			p.Optional = true
-		case f == "rep":
-			p.Repeated = true
-		case f == "packed":
-			p.Packed = true
-		case strings.HasPrefix(f, "name="):
-			p.OrigName = f[5:]
-		case strings.HasPrefix(f, "json="):
-			p.JSONName = f[5:]
-		case strings.HasPrefix(f, "enum="):
-			p.Enum = f[5:]
-		case f == "proto3":
-			p.proto3 = true
-		case f == "oneof":
-			p.oneof = true
-		case strings.HasPrefix(f, "def="):
-			p.HasDefault = true
-			p.Default = f[4:] // rest of string
-			if i+1 < len(fields) {
-				// Commas aren't escaped, and def is always last.
-				p.Default += "," + strings.Join(fields[i+1:], ",")
-				break
-			}
-		}
-	}
-}
-
-func logNoSliceEnc(t1, t2 reflect.Type) {
-	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
-	p.enc = nil
-	p.dec = nil
-	p.size = nil
-
-	switch t1 := typ; t1.Kind() {
-	default:
-		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
-	// proto3 scalar types
-
-	case reflect.Bool:
-		p.enc = (*Buffer).enc_proto3_bool
-		p.dec = (*Buffer).dec_proto3_bool
-		p.size = size_proto3_bool
-	case reflect.Int32:
-		p.enc = (*Buffer).enc_proto3_int32
-		p.dec = (*Buffer).dec_proto3_int32
-		p.size = size_proto3_int32
-	case reflect.Uint32:
-		p.enc = (*Buffer).enc_proto3_uint32
-		p.dec = (*Buffer).dec_proto3_int32 // can reuse
-		p.size = size_proto3_uint32
-	case reflect.Int64, reflect.Uint64:
-		p.enc = (*Buffer).enc_proto3_int64
-		p.dec = (*Buffer).dec_proto3_int64
-		p.size = size_proto3_int64
-	case reflect.Float32:
-		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
-		p.dec = (*Buffer).dec_proto3_int32
-		p.size = size_proto3_uint32
-	case reflect.Float64:
-		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
-		p.dec = (*Buffer).dec_proto3_int64
-		p.size = size_proto3_int64
-	case reflect.String:
-		p.enc = (*Buffer).enc_proto3_string
-		p.dec = (*Buffer).dec_proto3_string
-		p.size = size_proto3_string
-
-	case reflect.Ptr:
-		switch t2 := t1.Elem(); t2.Kind() {
-		default:
-			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
-			break
-		case reflect.Bool:
-			p.enc = (*Buffer).enc_bool
-			p.dec = (*Buffer).dec_bool
-			p.size = size_bool
-		case reflect.Int32:
-			p.enc = (*Buffer).enc_int32
-			p.dec = (*Buffer).dec_int32
-			p.size = size_int32
-		case reflect.Uint32:
-			p.enc = (*Buffer).enc_uint32
-			p.dec = (*Buffer).dec_int32 // can reuse
-			p.size = size_uint32
-		case reflect.Int64, reflect.Uint64:
-			p.enc = (*Buffer).enc_int64
-			p.dec = (*Buffer).dec_int64
-			p.size = size_int64
-		case reflect.Float32:
-			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
-			p.dec = (*Buffer).dec_int32
-			p.size = size_uint32
-		case reflect.Float64:
-			p.enc = (*Buffer).enc_int64 // can just treat them as bits
-			p.dec = (*Buffer).dec_int64
-			p.size = size_int64
-		case reflect.String:
-			p.enc = (*Buffer).enc_string
-			p.dec = (*Buffer).dec_string
-			p.size = size_string
-		case reflect.Struct:
-			p.stype = t1.Elem()
-			p.isMarshaler = isMarshaler(t1)
-			p.isUnmarshaler = isUnmarshaler(t1)
-			if p.Wire == "bytes" {
-				p.enc = (*Buffer).enc_struct_message
-				p.dec = (*Buffer).dec_struct_message
-				p.size = size_struct_message
-			} else {
-				p.enc = (*Buffer).enc_struct_group
-				p.dec = (*Buffer).dec_struct_group
-				p.size = size_struct_group
-			}
-		}
-
-	case reflect.Slice:
-		switch t2 := t1.Elem(); t2.Kind() {
-		default:
-			logNoSliceEnc(t1, t2)
-			break
-		case reflect.Bool:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_bool
-				p.size = size_slice_packed_bool
-			} else {
-				p.enc = (*Buffer).enc_slice_bool
-				p.size = size_slice_bool
-			}
-			p.dec = (*Buffer).dec_slice_bool
-			p.packedDec = (*Buffer).dec_slice_packed_bool
-		case reflect.Int32:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_int32
-				p.size = size_slice_packed_int32
-			} else {
-				p.enc = (*Buffer).enc_slice_int32
-				p.size = size_slice_int32
-			}
-			p.dec = (*Buffer).dec_slice_int32
-			p.packedDec = (*Buffer).dec_slice_packed_int32
-		case reflect.Uint32:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_uint32
-				p.size = size_slice_packed_uint32
-			} else {
-				p.enc = (*Buffer).enc_slice_uint32
-				p.size = size_slice_uint32
-			}
-			p.dec = (*Buffer).dec_slice_int32
-			p.packedDec = (*Buffer).dec_slice_packed_int32
-		case reflect.Int64, reflect.Uint64:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_int64
-				p.size = size_slice_packed_int64
-			} else {
-				p.enc = (*Buffer).enc_slice_int64
-				p.size = size_slice_int64
-			}
-			p.dec = (*Buffer).dec_slice_int64
-			p.packedDec = (*Buffer).dec_slice_packed_int64
-		case reflect.Uint8:
-			p.dec = (*Buffer).dec_slice_byte
-			if p.proto3 {
-				p.enc = (*Buffer).enc_proto3_slice_byte
-				p.size = size_proto3_slice_byte
-			} else {
-				p.enc = (*Buffer).enc_slice_byte
-				p.size = size_slice_byte
-			}
-		case reflect.Float32, reflect.Float64:
-			switch t2.Bits() {
-			case 32:
-				// can just treat them as bits
-				if p.Packed {
-					p.enc = (*Buffer).enc_slice_packed_uint32
-					p.size = size_slice_packed_uint32
-				} else {
-					p.enc = (*Buffer).enc_slice_uint32
-					p.size = size_slice_uint32
-				}
-				p.dec = (*Buffer).dec_slice_int32
-				p.packedDec = (*Buffer).dec_slice_packed_int32
-			case 64:
-				// can just treat them as bits
-				if p.Packed {
-					p.enc = (*Buffer).enc_slice_packed_int64
-					p.size = size_slice_packed_int64
-				} else {
-					p.enc = (*Buffer).enc_slice_int64
-					p.size = size_slice_int64
-				}
-				p.dec = (*Buffer).dec_slice_int64
-				p.packedDec = (*Buffer).dec_slice_packed_int64
-			default:
-				logNoSliceEnc(t1, t2)
-				break
-			}
-		case reflect.String:
-			p.enc = (*Buffer).enc_slice_string
-			p.dec = (*Buffer).dec_slice_string
-			p.size = size_slice_string
-		case reflect.Ptr:
-			switch t3 := t2.Elem(); t3.Kind() {
-			default:
-				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
-				break
-			case reflect.Struct:
-				p.stype = t2.Elem()
-				p.isMarshaler = isMarshaler(t2)
-				p.isUnmarshaler = isUnmarshaler(t2)
-				if p.Wire == "bytes" {
-					p.enc = (*Buffer).enc_slice_struct_message
-					p.dec = (*Buffer).dec_slice_struct_message
-					p.size = size_slice_struct_message
-				} else {
-					p.enc = (*Buffer).enc_slice_struct_group
-					p.dec = (*Buffer).dec_slice_struct_group
-					p.size = size_slice_struct_group
-				}
-			}
-		case reflect.Slice:
-			switch t2.Elem().Kind() {
-			default:
-				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
-				break
-			case reflect.Uint8:
-				p.enc = (*Buffer).enc_slice_slice_byte
-				p.dec = (*Buffer).dec_slice_slice_byte
-				p.size = size_slice_slice_byte
-			}
-		}
-
-	case reflect.Map:
-		p.enc = (*Buffer).enc_new_map
-		p.dec = (*Buffer).dec_new_map
-		p.size = size_new_map
-
-		p.mtype = t1
-		p.mkeyprop = &Properties{}
-		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.mvalprop = &Properties{}
-		vtype := p.mtype.Elem()
-		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
-			// The value type is not a message (*T) or bytes ([]byte),
-			// so we need encoders for the pointer to this type.
-			vtype = reflect.PtrTo(vtype)
-		}
-		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
-	}
-
-	// precalculate tag code
-	wire := p.WireType
-	if p.Packed {
-		wire = WireBytes
-	}
-	x := uint32(p.Tag)<<3 | uint32(wire)
-	i := 0
-	for i = 0; x > 127; i++ {
-		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	p.tagbuf[i] = uint8(x)
-	p.tagcode = p.tagbuf[0 : i+1]
-
-	if p.stype != nil {
-		if lockGetProp {
-			p.sprop = GetProperties(p.stype)
-		} else {
-			p.sprop = getPropertiesLocked(p.stype)
-		}
-	}
-}
-
-var (
-	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
-	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-)
-
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
-	// We're checking for (likely) pointer-receiver methods
-	// so if t is not a pointer, something is very wrong.
-	// The calls above only invoke isMarshaler on pointer types.
-	if t.Kind() != reflect.Ptr {
-		panic("proto: misuse of isMarshaler")
-	}
-	return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
-	// We're checking for (likely) pointer-receiver methods
-	// so if t is not a pointer, something is very wrong.
-	// The calls above only invoke isUnmarshaler on pointer types.
-	if t.Kind() != reflect.Ptr {
-		panic("proto: misuse of isUnmarshaler")
-	}
-	return t.Implements(unmarshalerType)
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
-	p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
-	// "bytes,49,opt,def=hello!"
-	p.Name = name
-	p.OrigName = name
-	if f != nil {
-		p.field = toField(f)
-	}
-	if tag == "" {
-		return
-	}
-	p.Parse(tag)
-	p.setEncAndDec(typ, f, lockGetProp)
-}
-
-var (
-	propertiesMu  sync.RWMutex
-	propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
-	if t.Kind() != reflect.Struct {
-		panic("proto: type must have kind struct")
-	}
-
-	// Most calls to GetProperties in a long-running program will be
-	// retrieving details for types we have seen before.
-	propertiesMu.RLock()
-	sprop, ok := propertiesMap[t]
-	propertiesMu.RUnlock()
-	if ok {
-		if collectStats {
-			stats.Chit++
-		}
-		return sprop
-	}
-
-	propertiesMu.Lock()
-	sprop = getPropertiesLocked(t)
-	propertiesMu.Unlock()
-	return sprop
-}
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
-	if prop, ok := propertiesMap[t]; ok {
-		if collectStats {
-			stats.Chit++
-		}
-		return prop
-	}
-	if collectStats {
-		stats.Cmiss++
-	}
-
-	prop := new(StructProperties)
-	// in case of recursive protos, fill this in now.
-	propertiesMap[t] = prop
-
-	// build properties
-	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
-		reflect.PtrTo(t).Implements(extendableProtoV1Type)
-	prop.unrecField = invalidField
-	prop.Prop = make([]*Properties, t.NumField())
-	prop.order = make([]int, t.NumField())
-
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		p := new(Properties)
-		name := f.Name
-		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
-		if f.Name == "XXX_InternalExtensions" { // special case
-			p.enc = (*Buffer).enc_exts
-			p.dec = nil // not needed
-			p.size = size_exts
-		} else if f.Name == "XXX_extensions" { // special case
-			p.enc = (*Buffer).enc_map
-			p.dec = nil // not needed
-			p.size = size_map
-		} else if f.Name == "XXX_unrecognized" { // special case
-			prop.unrecField = toField(&f)
-		}
-		oneof := f.Tag.Get("protobuf_oneof") // special case
-		if oneof != "" {
-			// Oneof fields don't use the traditional protobuf tag.
-			p.OrigName = oneof
-		}
-		prop.Prop[i] = p
-		prop.order[i] = i
-		if debug {
-			print(i, " ", f.Name, " ", t.String(), " ")
-			if p.Tag > 0 {
-				print(p.String())
-			}
-			print("\n")
-		}
-		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
-			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
-		}
-	}
-
-	// Re-order prop.order.
-	sort.Sort(prop)
-
-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-		var oots []interface{}
-		prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
-		prop.stype = t
-
-		// Interpret oneof metadata.
-		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
-			oop := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
-				Prop: new(Properties),
-			}
-			sft := oop.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
-				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
-				}
-				oop.Field = i
-				break
-			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
-		}
-	}
-
-	// build required counts
-	// build tags
-	reqCount := 0
-	prop.decoderOrigNames = make(map[string]int)
-	for i, p := range prop.Prop {
-		if strings.HasPrefix(p.Name, "XXX_") {
-			// Internal fields should not appear in tags/origNames maps.
-			// They are handled specially when encoding and decoding.
-			continue
-		}
-		if p.Required {
-			reqCount++
-		}
-		prop.decoderTags.put(p.Tag, i)
-		prop.decoderOrigNames[p.OrigName] = i
-	}
-	prop.reqCount = reqCount
-
-	return prop
-}
-
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
-	if len(x) != 1 {
-		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
-		return nil
-	}
-	prop := GetProperties(t)
-	return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
-	if pb == nil {
-		err = ErrNil
-		return
-	}
-	// get the reflect type of the pointer to the struct.
-	t = reflect.TypeOf(pb)
-	// get the address of the struct.
-	value := reflect.ValueOf(pb)
-	b = toStructPointer(value)
-	return
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
-	if _, ok := enumValueMaps[typeName]; ok {
-		panic("proto: duplicate enum registered: " + typeName)
-	}
-	enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
-	return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
-	protoTypes    = make(map[string]reflect.Type)
-	revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
-	if _, ok := protoTypes[name]; ok {
-		// TODO: Some day, make this a panic.
-		log.Printf("proto: duplicate proto type registered: %s", name)
-		return
-	}
-	t := reflect.TypeOf(x)
-	protoTypes[name] = t
-	revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
-	type xname interface {
-		XXX_MessageName() string
-	}
-	if m, ok := x.(xname); ok {
-		return m.XXX_MessageName()
-	}
-	return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
-
-// A registry of all linked proto files.
-var (
-	protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
-	protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 965876b..0000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,854 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
-	"bufio"
-	"bytes"
-	"encoding"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"math"
-	"reflect"
-	"sort"
-	"strings"
-)
-
-var (
-	newline         = []byte("\n")
-	spaces          = []byte("                                        ")
-	gtNewline       = []byte(">\n")
-	endBraceNewline = []byte("}\n")
-	backslashN      = []byte{'\\', 'n'}
-	backslashR      = []byte{'\\', 'r'}
-	backslashT      = []byte{'\\', 't'}
-	backslashDQ     = []byte{'\\', '"'}
-	backslashBS     = []byte{'\\', '\\'}
-	posInf          = []byte("inf")
-	negInf          = []byte("-inf")
-	nan             = []byte("nan")
-)
-
-type writer interface {
-	io.Writer
-	WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
-	ind      int
-	complete bool // if the current position is a complete line
-	compact  bool // whether to write out as a one-liner
-	w        writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
-	if !strings.Contains(s, "\n") {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		w.complete = false
-		return io.WriteString(w.w, s)
-	}
-	// WriteString is typically called without newlines, so this
-	// codepath and its copy are rare.  We copy to avoid
-	// duplicating all of Write's logic here.
-	return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
-	newlines := bytes.Count(p, newline)
-	if newlines == 0 {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		n, err = w.w.Write(p)
-		w.complete = false
-		return n, err
-	}
-
-	frags := bytes.SplitN(p, newline, newlines+1)
-	if w.compact {
-		for i, frag := range frags {
-			if i > 0 {
-				if err := w.w.WriteByte(' '); err != nil {
-					return n, err
-				}
-				n++
-			}
-			nn, err := w.w.Write(frag)
-			n += nn
-			if err != nil {
-				return n, err
-			}
-		}
-		return n, nil
-	}
-
-	for i, frag := range frags {
-		if w.complete {
-			w.writeIndent()
-		}
-		nn, err := w.w.Write(frag)
-		n += nn
-		if err != nil {
-			return n, err
-		}
-		if i+1 < len(frags) {
-			if err := w.w.WriteByte('\n'); err != nil {
-				return n, err
-			}
-			n++
-		}
-	}
-	w.complete = len(frags[len(frags)-1]) == 0
-	return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
-	if w.compact && c == '\n' {
-		c = ' '
-	}
-	if !w.compact && w.complete {
-		w.writeIndent()
-	}
-	err := w.w.WriteByte(c)
-	w.complete = c == '\n'
-	return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
-	if w.ind == 0 {
-		log.Print("proto: textWriter unindented too far")
-		return
-	}
-	w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
-	if _, err := w.WriteString(props.OrigName); err != nil {
-		return err
-	}
-	if props.Wire != "group" {
-		return w.WriteByte(':')
-	}
-	return nil
-}
-
-// raw is the interface satisfied by RawMessage.
-type raw interface {
-	Bytes() []byte
-}
-
-func requiresQuotes(u string) bool {
-	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
-	for _, ch := range u {
-		switch {
-		case ch == '.' || ch == '/' || ch == '_':
-			continue
-		case '0' <= ch && ch <= '9':
-			continue
-		case 'A' <= ch && ch <= 'Z':
-			continue
-		case 'a' <= ch && ch <= 'z':
-			continue
-		default:
-			return true
-		}
-	}
-	return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
-	type wkt interface {
-		XXX_WellKnownType() string
-	}
-	t, ok := sv.Addr().Interface().(wkt)
-	return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
-	turl := sv.FieldByName("TypeUrl")
-	val := sv.FieldByName("Value")
-	if !turl.IsValid() || !val.IsValid() {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	b, ok := val.Interface().([]byte)
-	if !ok {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	parts := strings.Split(turl.String(), "/")
-	mt := MessageType(parts[len(parts)-1])
-	if mt == nil {
-		return false, nil
-	}
-	m := reflect.New(mt.Elem())
-	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
-		return false, nil
-	}
-	w.Write([]byte("["))
-	u := turl.String()
-	if requiresQuotes(u) {
-		writeString(w, u)
-	} else {
-		w.Write([]byte(u))
-	}
-	if w.compact {
-		w.Write([]byte("]:<"))
-	} else {
-		w.Write([]byte("]: <\n"))
-		w.ind++
-	}
-	if err := tm.writeStruct(w, m.Elem()); err != nil {
-		return true, err
-	}
-	if w.compact {
-		w.Write([]byte("> "))
-	} else {
-		w.ind--
-		w.Write([]byte(">\n"))
-	}
-	return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
-	if tm.ExpandAny && isAny(sv) {
-		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
-			return err
-		}
-	}
-	st := sv.Type()
-	sprops := GetProperties(st)
-	for i := 0; i < sv.NumField(); i++ {
-		fv := sv.Field(i)
-		props := sprops.Prop[i]
-		name := st.Field(i).Name
-
-		if strings.HasPrefix(name, "XXX_") {
-			// There are two XXX_ fields:
-			//   XXX_unrecognized []byte
-			//   XXX_extensions   map[int32]proto.Extension
-			// The first is handled here;
-			// the second is handled at the bottom of this function.
-			if name == "XXX_unrecognized" && !fv.IsNil() {
-				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if fv.Kind() == reflect.Ptr && fv.IsNil() {
-			// Field not filled in. This could be an optional field or
-			// a required field that wasn't filled in. Either way, there
-			// isn't anything we can show for it.
-			continue
-		}