all: revendor to fix tests

Change-Id: Ie68ebee20918908e9eeb6d9d5c8bbba879f21d5d
Reviewed-on: https://go-review.googlesource.com/58611
Reviewed-by: Francesc Campoy Flores <campoy@golang.org>
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 5e6ee14..8eaee41 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -8,494 +8,538 @@
 	"Deps": [
 		{
 			"ImportPath": "cloud.google.com/go/compute/metadata",
-			"Comment": "v0.6.0-20-g686f0e8",
-			"Rev": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca"
+			"Comment": "v0.12.0-3-ge08da2b",
+			"Rev": "e08da2b906e55a135acb4c335baefd7422ea35b0"
 		},
 		{
-			"ImportPath": "cloud.google.com/go/internal",
-			"Comment": "v0.6.0-20-g686f0e8",
-			"Rev": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca"
+			"ImportPath": "cloud.google.com/go/internal/version",
+			"Comment": "v0.12.0-3-ge08da2b",
+			"Rev": "e08da2b906e55a135acb4c335baefd7422ea35b0"
 		},
 		{
 			"ImportPath": "cloud.google.com/go/logging",
-			"Comment": "v0.6.0-20-g686f0e8",
-			"Rev": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca"
+			"Comment": "v0.12.0-3-ge08da2b",
+			"Rev": "e08da2b906e55a135acb4c335baefd7422ea35b0"
 		},
 		{
 			"ImportPath": "cloud.google.com/go/logging/apiv2",
-			"Comment": "v0.6.0-20-g686f0e8",
-			"Rev": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca"
+			"Comment": "v0.12.0-3-ge08da2b",
+			"Rev": "e08da2b906e55a135acb4c335baefd7422ea35b0"
 		},
 		{
 			"ImportPath": "cloud.google.com/go/logging/internal",
-			"Comment": "v0.6.0-20-g686f0e8",
-			"Rev": "686f0e89858ea78eae54d4b2021e6bfc7d3a30ca"
+			"Comment": "v0.12.0-3-ge08da2b",
+			"Rev": "e08da2b906e55a135acb4c335baefd7422ea35b0"
 		},
 		{
 			"ImportPath": "github.com/bradfitz/gomemcache/memcache",
-			"Comment": "release.r60-41-gfb1f79c",
-			"Rev": "fb1f79c6b65acda83063cbc69f6bba1522558bfc"
+			"Comment": "release.r60-46-g1952afa",
+			"Rev": "1952afaa557dc08e8e0d89eafab110fb501c1a2b"
 		},
 		{
 			"ImportPath": "github.com/fsnotify/fsnotify",
-			"Comment": "v1.4.2-2-gfd9ec7d",
-			"Rev": "fd9ec7deca8bf46ecd2a795baaacf2b3a9be1197"
+			"Comment": "v1.4.2-6-g4da3e2c",
+			"Rev": "4da3e2cfbabc9f751898f250b49f2439785783a1"
 		},
 		{
 			"ImportPath": "github.com/garyburd/redigo/internal",
-			"Comment": "v1.0.0-5-gffa8d46",
-			"Rev": "ffa8d46ada782d81cfda81a0fbd9f45ceae448e8"
+			"Comment": "v1.1.0-9-gb925df3",
+			"Rev": "b925df3cc15d8646e9b5b333ebaf3011385aba11"
 		},
 		{
 			"ImportPath": "github.com/garyburd/redigo/redis",
-			"Comment": "v1.0.0-5-gffa8d46",
-			"Rev": "ffa8d46ada782d81cfda81a0fbd9f45ceae448e8"
+			"Comment": "v1.1.0-9-gb925df3",
+			"Rev": "b925df3cc15d8646e9b5b333ebaf3011385aba11"
 		},
 		{
 			"ImportPath": "github.com/go-stack/stack",
-			"Comment": "v1.5.2",
-			"Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82"
+			"Comment": "v1.6.0",
+			"Rev": "817915b46b97fd7bb80e8ab6b69f01a53ac3eebf"
 		},
 		{
 			"ImportPath": "github.com/golang/lint",
-			"Rev": "3390df4df2787994aea98de825b964ac7944b817"
+			"Rev": "c5fb716d6688a859aae56d26d3e6070808df29f7"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/proto",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/any",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/duration",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/empty",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/struct",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
-			"Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+			"Rev": "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
 		},
 		{
 			"ImportPath": "github.com/golang/snappy",
-			"Rev": "d9eb7a3d35ec988b8585d4a0068e462c27d28380"
+			"Rev": "553a641470496b2327abcac10b36396bd98e45c9"
 		},
 		{
 			"ImportPath": "github.com/googleapis/gax-go",
-			"Rev": "da06d194a00e19ce00d9011a13931c3f6f6887c7"
+			"Rev": "2cadd475a3e966ec9b77a21afc530dbacec6d613"
 		},
 		{
 			"ImportPath": "github.com/gregjones/httpcache",
-			"Rev": "413781778738c08fdbb98e1dd65f5abffe8832d0"
+			"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
 		},
 		{
 			"ImportPath": "github.com/gregjones/httpcache/memcache",
-			"Rev": "413781778738c08fdbb98e1dd65f5abffe8832d0"
+			"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/token",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/parser",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/scanner",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/token",
-			"Rev": "c3e054bfd4dcf77b9965ed2b79b22afa2f41d4eb"
+			"Rev": "392dba7d905ed5d04a5794ba89f558b27e2ba1ca"
 		},
 		{
 			"ImportPath": "github.com/inconshreveable/log15",
-			"Comment": "v2.3-88-g46a701a",
-			"Rev": "46a701a619de90c65a78c04d1a58bf02585e9701"
+			"Comment": "v2.3-94-g74a0988",
+			"Rev": "74a0988b5f804e8ce9ff74fca4f16980776dff29"
 		},
 		{
 			"ImportPath": "github.com/inconshreveable/log15/term",
-			"Comment": "v2.3-88-g46a701a",
-			"Rev": "46a701a619de90c65a78c04d1a58bf02585e9701"
+			"Comment": "v2.3-94-g74a0988",
+			"Rev": "74a0988b5f804e8ce9ff74fca4f16980776dff29"
 		},
 		{
 			"ImportPath": "github.com/magiconair/properties",
-			"Comment": "v1.7.0-5-g0723e35",
-			"Rev": "0723e352fa358f9322c938cc2dadda874e9151a9"
+			"Comment": "v1.7.3",
+			"Rev": "be5ece7dd465ab0765a9682137865547526d1dfb"
 		},
 		{
 			"ImportPath": "github.com/mattn/go-colorable",
-			"Comment": "v0.0.6-6-g6c903ff",
-			"Rev": "6c903ff4aa50920ca86087a280590b36b3152b9c"
+			"Comment": "v0.0.9-6-gad5389d",
+			"Rev": "ad5389df28cdac544c99bd7b9161a0b5b6ca9d1b"
 		},
 		{
 			"ImportPath": "github.com/mattn/go-isatty",
-			"Rev": "66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8"
+			"Comment": "v0.0.2",
+			"Rev": "fc9e8d8ef48496124e79ae0df75490096eccf6fe"
 		},
 		{
 			"ImportPath": "github.com/mitchellh/mapstructure",
-			"Rev": "f3009df150dadf309fdee4a54ed65c124afad715"
-		},
-		{
-			"ImportPath": "github.com/pelletier/go-buffruneio",
-			"Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d"
+			"Rev": "d0303fe809921458f417bcf828397a65db30a7e4"
 		},
 		{
 			"ImportPath": "github.com/pelletier/go-toml",
-			"Comment": "v0.3.5-16-g45932ad",
-			"Rev": "45932ad32dfdd20826f5671da37a5f3ce9f26a8d"
+			"Comment": "v1.0.0-8-g4692b8f",
+			"Rev": "4692b8f9babfc93db58cc592ba2689d8736781de"
 		},
 		{
 			"ImportPath": "github.com/spf13/afero",
-			"Rev": "06b7e5f50606ecd49148a01a6008942d9b669217"
+			"Rev": "9be650865eab0c12963d8753212f4f9c66cdcf12"
 		},
 		{
 			"ImportPath": "github.com/spf13/afero/mem",
-			"Rev": "06b7e5f50606ecd49148a01a6008942d9b669217"
+			"Rev": "9be650865eab0c12963d8753212f4f9c66cdcf12"
 		},
 		{
 			"ImportPath": "github.com/spf13/cast",
-			"Rev": "24b6558033ffe202bf42f0f3b870dcc798dd2ba8"
+			"Comment": "v1.1.0",
+			"Rev": "acbeb36b902d72a7a4c18e8f3241075e7ab763e4"
 		},
 		{
 			"ImportPath": "github.com/spf13/jwalterweatherman",
-			"Rev": "33c24e77fb80341fe7130ee7c594256ff08ccc46"
+			"Rev": "0efa5202c04663c757d84f90f5219c1250baf94f"
 		},
 		{
 			"ImportPath": "github.com/spf13/pflag",
-			"Rev": "5ccb023bc27df288a957c5e994cd44fd19619465"
+			"Comment": "v1.0.0",
+			"Rev": "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
 		},
 		{
 			"ImportPath": "github.com/spf13/viper",
-			"Rev": "651d9d916abc3c3d6a91a12549495caba5edffd2"
+			"Comment": "v1.0.0",
+			"Rev": "25b30aa063fc18e48662b86996252eabdcf2f0c7"
 		},
 		{
 			"ImportPath": "golang.org/x/net/context",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/context/ctxhttp",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/http2",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/http2/hpack",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/idna",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/internal/timeseries",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/lex/httplex",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/net/trace",
-			"Rev": "1a26cf06691746ee35aa7113c9b37289afc7ea28"
+			"Rev": "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2",
-			"Rev": "96382aa079b72d8c014eb0c50f6c223d1e6a2de0"
+			"Rev": "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2/google",
-			"Rev": "96382aa079b72d8c014eb0c50f6c223d1e6a2de0"
+			"Rev": "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2/internal",
-			"Rev": "96382aa079b72d8c014eb0c50f6c223d1e6a2de0"
+			"Rev": "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2/jws",
-			"Rev": "96382aa079b72d8c014eb0c50f6c223d1e6a2de0"
+			"Rev": "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
 		},
 		{
 			"ImportPath": "golang.org/x/oauth2/jwt",
-			"Rev": "96382aa079b72d8c014eb0c50f6c223d1e6a2de0"
+			"Rev": "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
+		},
+		{
+			"ImportPath": "golang.org/x/sync/semaphore",
+			"Rev": "f52d1811a62927559de87708c8913c1650ce4f26"
 		},
 		{
 			"ImportPath": "golang.org/x/sys/unix",
-			"Rev": "b699b7032584f0953262cb2788a0ca19bb494703"
+			"Rev": "07c182904dbd53199946ba614a412c61d3c548f5"
 		},
 		{
 			"ImportPath": "golang.org/x/text/secure/bidirule",
-			"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68"
+			"Rev": "cc24f0397b10b6321b1a1322b6bd592984fdabf2"
 		},
 		{
 			"ImportPath": "golang.org/x/text/transform",
-			"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68"
+			"Rev": "cc24f0397b10b6321b1a1322b6bd592984fdabf2"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/bidi",
-			"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68"
+			"Rev": "cc24f0397b10b6321b1a1322b6bd592984fdabf2"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/norm",
-			"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68"
+			"Rev": "cc24f0397b10b6321b1a1322b6bd592984fdabf2"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/go/gcexportdata",
+			"Rev": "1807494da808122833b9bd8e3e5fa179ef237d41"
 		},
 		{
 			"ImportPath": "golang.org/x/tools/go/gcimporter15",
-			"Rev": "ce1291533bf047e29f9e188b3f697d3a5157d142"
+			"Rev": "1807494da808122833b9bd8e3e5fa179ef237d41"
 		},
 		{
 			"ImportPath": "golang.org/x/tools/present",
-			"Rev": "ce1291533bf047e29f9e188b3f697d3a5157d142"
+			"Rev": "1807494da808122833b9bd8e3e5fa179ef237d41"
 		},
 		{
 			"ImportPath": "google.golang.org/api/googleapi/transport",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/api/internal",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/api/iterator",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/api/option",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/api/support/bundler",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/api/transport",
-			"Rev": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb"
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
+		},
+		{
+			"ImportPath": "google.golang.org/api/transport/grpc",
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
+		},
+		{
+			"ImportPath": "google.golang.org/api/transport/http",
+			"Rev": "dd6bdadc5852eae2d133075a3690d6ad744add48"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/aetest",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/datastore",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/app_identity",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/base",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/datastore",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/log",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/memcache",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/modules",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/remote_api",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/search",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/urlfetch",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/internal/user",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/log",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/memcache",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/remote_api",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/search",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/urlfetch",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/appengine/user",
-			"Comment": "v1.0.0-37-ga2f4131",
-			"Rev": "a2f4131514e563cedfdb6e7d267df9ad48591e93"
+			"Comment": "v1.0.0-42-gd9a072c",
+			"Rev": "d9a072cfa7b9736e44311ef77b3e09d804bfa599"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/api/annotations",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/distribution",
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/api/label",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
+		},
+		{
+			"ImportPath": "google.golang.org/genproto/googleapis/api/metric",
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/api/monitoredres",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/logging/type",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/logging/v2",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
-			"Rev": "b3e7c2fb04031add52c4817f53f43757ccbf9c18"
+			"Rev": "ee236bd376b077c7a89f260c026c4735b195e459"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/codes",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/connectivity",
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/credentials",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/credentials/oauth",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/grpclb/messages_only/grpc_lb_v1",
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/grpclog",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/internal",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/keepalive",
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/metadata",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/naming",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/peer",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/stats",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc/status",
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/tap",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "google.golang.org/grpc/transport",
-			"Comment": "v1.0.5-3-g8712952",
-			"Rev": "8712952b7d646dbbbc6fb73a782174f3115060f3"
+			"Comment": "v1.2.0-222-g7db1564",
+			"Rev": "7db1564ba1229bc42919bb1f6d9c4186f3aa8678"
 		},
 		{
 			"ImportPath": "gopkg.in/yaml.v2",
-			"Rev": "a3f3340b5840cee44f372bddb5880fcbc419b46a"
+			"Rev": "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
 		}
 	]
 }
diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS
index 07509cc..d4b376c 100644
--- a/vendor/cloud.google.com/go/CONTRIBUTORS
+++ b/vendor/cloud.google.com/go/CONTRIBUTORS
@@ -11,6 +11,7 @@
 
 # Keep the list alphabetically sorted.
 
+Alexis Hunt <lexer@google.com>
 Andreas Litt <andreas.litt@gmail.com>
 Andrew Gerrand <adg@golang.org>
 Brad Fitzpatrick <bradfitz@golang.org>
@@ -24,11 +25,13 @@
 Johan Euphrosine <proppy@google.com>
 Jonathan Amsterdam <jba@google.com>
 Luna Duclos <luna.duclos@palmstonegames.com>
+Magnus Hiie <magnus.hiie@gmail.com>
 Michael McGreevy <mcgreevy@golang.org>
 Omar Jarjur <ojarjur@google.com>
 Paweł Knap <pawelknap88@gmail.com>
 Péter Szilágyi <peterke@gmail.com>
 Sarah Adams <shadams@google.com>
+Thanatat Tamtan <acoshift@gmail.com>
 Toby Burress <kurin@google.com>
 Tuo Shan <shantuo@google.com>
 Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 5c6f3bf..3a7cb28 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -34,8 +34,6 @@
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context/ctxhttp"
-
-	"cloud.google.com/go/internal"
 )
 
 const (
@@ -48,6 +46,8 @@
 	// This is variable name is not defined by any spec, as far as
 	// I know; it was made up for the Go package.
 	metadataHostEnv = "GCE_METADATA_HOST"
+
+	userAgent = "gcloud-golang/0.1"
 )
 
 type cachedValue struct {
@@ -65,24 +65,20 @@
 
 var (
 	metaClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-				ResponseHeaderTimeout: 2 * time.Second,
-			},
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			ResponseHeaderTimeout: 2 * time.Second,
 		},
 	}
 	subscribeClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-			},
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
 		},
 	}
 )
@@ -132,6 +128,7 @@
 	url := "http://" + host + "/computeMetadata/v1/" + suffix
 	req, _ := http.NewRequest("GET", url, nil)
 	req.Header.Set("Metadata-Flavor", "Google")
+	req.Header.Set("User-Agent", userAgent)
 	res, err := client.Do(req)
 	if err != nil {
 		return "", "", err
@@ -202,7 +199,9 @@
 	// Try two strategies in parallel.
 	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
 	go func() {
-		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+		req.Header.Set("User-Agent", userAgent)
+		res, err := ctxhttp.Do(ctx, metaClient, req)
 		if err != nil {
 			resc <- false
 			return
diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go
deleted file mode 100644
index 8e0c8f8..0000000
--- a/vendor/cloud.google.com/go/internal/cloud.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides support for the cloud packages.
-//
-// Users should not import this package directly.
-package internal
-
-import (
-	"fmt"
-	"net/http"
-)
-
-const userAgent = "gcloud-golang/0.1"
-
-// Transport is an http.RoundTripper that appends Google Cloud client's
-// user-agent to the original request's user-agent header.
-type Transport struct {
-	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
-	// Do User-Agent some other way.
-
-	// Base is the actual http.RoundTripper
-	// requests will use. It must not be nil.
-	Base http.RoundTripper
-}
-
-// RoundTrip appends a user-agent to the existing user-agent
-// header and delegates the request to the base http.RoundTripper.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	req = cloneRequest(req)
-	ua := req.Header.Get("User-Agent")
-	if ua == "" {
-		ua = userAgent
-	} else {
-		ua = fmt.Sprintf("%s %s", ua, userAgent)
-	}
-	req.Header.Set("User-Agent", ua)
-	return t.Base.RoundTrip(req)
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
-	// shallow copy of the struct
-	r2 := new(http.Request)
-	*r2 = *r
-	// deep copy of the Header
-	r2.Header = make(http.Header)
-	for k, s := range r.Header {
-		r2.Header[k] = s
-	}
-	return r2
-}
diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go
deleted file mode 100644
index 79995be..0000000
--- a/vendor/cloud.google.com/go/internal/retry.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"fmt"
-	"time"
-
-	gax "github.com/googleapis/gax-go"
-
-	"golang.org/x/net/context"
-)
-
-// Retry calls the supplied function f repeatedly according to the provided
-// backoff parameters. It returns when one of the following occurs:
-// When f's first return value is true, Retry immediately returns with f's second
-// return value.
-// When the provided context is done, Retry returns with ctx.Err().
-func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
-	return retry(ctx, bo, f, gax.Sleep)
-}
-
-func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
-	sleep func(context.Context, time.Duration) error) error {
-	var lastErr error
-	for {
-		stop, err := f()
-		if stop {
-			return err
-		}
-		// Remember the last "real" error from f.
-		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
-			lastErr = err
-		}
-		p := bo.Pause()
-		if cerr := sleep(ctx, p); cerr != nil {
-			if lastErr != nil {
-				return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
-			}
-			return cerr
-		}
-	}
-}
diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh
new file mode 100755
index 0000000..fecf1f0
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/version/update_version.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+today=$(date +%Y%m%d)
+
+sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
+
diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go
new file mode 100644
index 0000000..5eb06ba
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/version/version.go
@@ -0,0 +1,71 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate ./update_version.sh
+
+// Package version contains version information for Google Cloud Client
+// Libraries for Go, as reported in request headers.
+package version
+
+import (
+	"runtime"
+	"strings"
+	"unicode"
+)
+
+// Repo is the current version of the client libraries in this
+// repo. It should be a date in YYYYMMDD format.
+const Repo = "20170621"
+
+// Go returns the Go runtime version. The returned string
+// has no whitespace.
+func Go() string {
+	return goVersion
+}
+
+var goVersion = goVer(runtime.Version())
+
+const develPrefix = "devel +"
+
+func goVer(s string) string {
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return ""
+}
+
+func notSemverRune(r rune) bool {
+	return strings.IndexRune("0123456789.", r) < 0
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
index d67be97..3df8653 100644
--- a/vendor/cloud.google.com/go/logging/apiv2/config_client.go
+++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google Inc. All rights reserved.
+// Copyright 2017, Google Inc. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,12 +17,10 @@
 package logging
 
 import (
-	"fmt"
 	"math"
-	"runtime"
-	"strings"
 	"time"
 
+	"cloud.google.com/go/internal/version"
 	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
@@ -31,12 +29,6 @@
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-var (
-	configProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
-	configSinkPathTemplate    = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}")
 )
 
 // ConfigCallOptions contains the retry settings for each method of ConfigClient.
@@ -51,13 +43,7 @@
 func defaultConfigClientOptions() []option.ClientOption {
 	return []option.ClientOption{
 		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(
-			"https://www.googleapis.com/auth/cloud-platform",
-			"https://www.googleapis.com/auth/cloud-platform.read-only",
-			"https://www.googleapis.com/auth/logging.admin",
-			"https://www.googleapis.com/auth/logging.read",
-			"https://www.googleapis.com/auth/logging.write",
-		),
+		option.WithScopes(DefaultAuthScopes()...),
 	}
 }
 
@@ -67,6 +53,7 @@
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
+					codes.Internal,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
@@ -97,7 +84,7 @@
 	CallOptions *ConfigCallOptions
 
 	// The metadata to be sent with each request.
-	metadata metadata.MD
+	xGoogHeader []string
 }
 
 // NewConfigClient creates a new config service v2 client.
@@ -115,7 +102,7 @@
 
 		configClient: loggingpb.NewConfigServiceV2Client(conn),
 	}
-	c.SetGoogleClientInfo("gax", gax.Version)
+	c.SetGoogleClientInfo()
 	return c, nil
 }
 
@@ -133,39 +120,34 @@
 // SetGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *ConfigClient) SetGoogleClientInfo(name, version string) {
-	goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
-	v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
-	c.metadata = metadata.Pairs("x-goog-api-client", v)
+func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
 }
 
 // ConfigProjectPath returns the path for the project resource.
 func ConfigProjectPath(project string) string {
-	path, err := configProjectPathTemplate.Render(map[string]string{
-		"project": project,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		""
 }
 
 // ConfigSinkPath returns the path for the sink resource.
 func ConfigSinkPath(project, sink string) string {
-	path, err := configSinkPathTemplate.Render(map[string]string{
-		"project": project,
-		"sink":    sink,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		"/sinks/" +
+		sink +
+		""
 }
 
 // ListSinks lists sinks.
-func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...)
 	it := &LogSinkIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) {
 		var resp *loggingpb.ListSinksResponse
@@ -175,11 +157,11 @@
 		} else {
 			req.PageSize = int32(pageSize)
 		}
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 			var err error
-			resp, err = c.configClient.ListSinks(ctx, req)
+			resp, err = c.configClient.ListSinks(ctx, req, settings.GRPC...)
 			return err
-		}, c.CallOptions.ListSinks...)
+		}, opts...)
 		if err != nil {
 			return nil, "", err
 		}
@@ -198,15 +180,15 @@
 }
 
 // GetSink gets a sink.
-func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...)
 	var resp *loggingpb.LogSink
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.configClient.GetSink(ctx, req)
+		resp, err = c.configClient.GetSink(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.GetSink...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -216,17 +198,17 @@
 // CreateSink creates a sink that exports specified log entries to a destination.  The
 // export of newly-ingested log entries begins immediately, unless the current
 // time is outside the sink's start and end times or the sink's
-// `writer_identity` is not permitted to write to the destination.  A sink can
+// writer_identity is not permitted to write to the destination.  A sink can
 // export log entries only from the resource owning the sink.
-func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...)
 	var resp *loggingpb.LogSink
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.configClient.CreateSink(ctx, req)
+		resp, err = c.configClient.CreateSink(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.CreateSink...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -235,37 +217,37 @@
 
 // UpdateSink updates a sink. If the named sink doesn't exist, then this method is
 // identical to
-// [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create).
+// sinks.create (at /logging/docs/api/reference/rest/v2/projects.sinks/create).
 // If the named sink does exist, then this method replaces the following
-// fields in the existing sink with values from the new sink: `destination`,
-// `filter`, `output_version_format`, `start_time`, and `end_time`.
-// The updated filter might also have a new `writer_identity`; see the
-// `unique_writer_identity` field.
-func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+// fields in the existing sink with values from the new sink: destination,
+// filter, output_version_format, start_time, and end_time.
+// The updated filter might also have a new writer_identity; see the
+// unique_writer_identity field.
+func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
 	var resp *loggingpb.LogSink
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.configClient.UpdateSink(ctx, req)
+		resp, err = c.configClient.UpdateSink(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.UpdateSink...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return resp, nil
 }
 
-// DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that
+// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
 // service account is also deleted.
-func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		_, err = c.configClient.DeleteSink(ctx, req)
+		_, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.DeleteSink...)
+	}, opts...)
 	return err
 }
 
diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go
index 10c7439..116bb68 100644
--- a/vendor/cloud.google.com/go/logging/apiv2/doc.go
+++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google Inc. All rights reserved.
+// Copyright 2017, Google Inc. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
 // Package logging is an experimental, auto-generated package for the
-// logging API.
+// Stackdriver Logging API.
 //
 // The Stackdriver Logging API lets you write log entries and manage your
 // logs, log sinks and logs-based metrics.
@@ -23,4 +23,25 @@
 // Use the client at cloud.google.com/go/logging in preference to this.
 package logging
 
-const gapicNameVersion = "gapic/0.1.0"
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+func insertXGoog(ctx context.Context, val []string) context.Context {
+	md, _ := metadata.FromOutgoingContext(ctx)
+	md = md.Copy()
+	md["x-goog-api-client"] = val
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+		"https://www.googleapis.com/auth/cloud-platform.read-only",
+		"https://www.googleapis.com/auth/logging.admin",
+		"https://www.googleapis.com/auth/logging.read",
+		"https://www.googleapis.com/auth/logging.write",
+	}
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
index 81adc24..4f64ff0 100644
--- a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
+++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google Inc. All rights reserved.
+// Copyright 2017, Google Inc. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,12 +17,10 @@
 package logging
 
 import (
-	"fmt"
 	"math"
-	"runtime"
-	"strings"
 	"time"
 
+	"cloud.google.com/go/internal/version"
 	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
@@ -32,12 +30,6 @@
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-var (
-	loggingProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
-	loggingLogPathTemplate     = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")
 )
 
 // CallOptions contains the retry settings for each method of Client.
@@ -52,13 +44,7 @@
 func defaultClientOptions() []option.ClientOption {
 	return []option.ClientOption{
 		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(
-			"https://www.googleapis.com/auth/cloud-platform",
-			"https://www.googleapis.com/auth/cloud-platform.read-only",
-			"https://www.googleapis.com/auth/logging.admin",
-			"https://www.googleapis.com/auth/logging.read",
-			"https://www.googleapis.com/auth/logging.write",
-		),
+		option.WithScopes(DefaultAuthScopes()...),
 	}
 }
 
@@ -68,6 +54,7 @@
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
+					codes.Internal,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
@@ -80,6 +67,7 @@
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
+					codes.Internal,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
@@ -110,7 +98,7 @@
 	CallOptions *CallOptions
 
 	// The metadata to be sent with each request.
-	metadata metadata.MD
+	xGoogHeader []string
 }
 
 // NewClient creates a new logging service v2 client.
@@ -127,7 +115,7 @@
 
 		client: loggingpb.NewLoggingServiceV2Client(conn),
 	}
-	c.SetGoogleClientInfo("gax", gax.Version)
+	c.SetGoogleClientInfo()
 	return c, nil
 }
 
@@ -145,59 +133,55 @@
 // SetGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *Client) SetGoogleClientInfo(name, version string) {
-	goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
-	v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
-	c.metadata = metadata.Pairs("x-goog-api-client", v)
+func (c *Client) SetGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
 }
 
-// LoggingProjectPath returns the path for the project resource.
-func LoggingProjectPath(project string) string {
-	path, err := loggingProjectPathTemplate.Render(map[string]string{
-		"project": project,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+// ProjectPath returns the path for the project resource.
+func ProjectPath(project string) string {
+	return "" +
+		"projects/" +
+		project +
+		""
 }
 
-// LoggingLogPath returns the path for the log resource.
-func LoggingLogPath(project, log string) string {
-	path, err := loggingLogPathTemplate.Render(map[string]string{
-		"project": project,
-		"log":     log,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+// LogPath returns the path for the log resource.
+func LogPath(project, log string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/logs/" +
+		log +
+		""
 }
 
 // DeleteLog deletes all the log entries in a log.
 // The log reappears if it receives new entries.
-func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+// Log entries written shortly before the delete operation might not be
+// deleted.
+func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		_, err = c.client.DeleteLog(ctx, req)
+		_, err = c.client.DeleteLog(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.DeleteLog...)
+	}, opts...)
 	return err
 }
 
-// WriteLogEntries writes log entries to Stackdriver Logging.  All log entries are
-// written by this method.
-func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+// WriteLogEntries writes log entries to Stackdriver Logging.
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
 	var resp *loggingpb.WriteLogEntriesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.client.WriteLogEntries(ctx, req)
+		resp, err = c.client.WriteLogEntries(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.WriteLogEntries...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -206,10 +190,10 @@
 
 // ListLogEntries lists log entries.  Use this method to retrieve log entries from
 // Stackdriver Logging.  For ways to export log entries, see
-// [Exporting Logs](/logging/docs/export).
-func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+// Exporting Logs (at /logging/docs/export).
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
 	it := &LogEntryIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
 		var resp *loggingpb.ListLogEntriesResponse
@@ -219,11 +203,11 @@
 		} else {
 			req.PageSize = int32(pageSize)
 		}
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 			var err error
-			resp, err = c.client.ListLogEntries(ctx, req)
+			resp, err = c.client.ListLogEntries(ctx, req, settings.GRPC...)
 			return err
-		}, c.CallOptions.ListLogEntries...)
+		}, opts...)
 		if err != nil {
 			return nil, "", err
 		}
@@ -243,9 +227,9 @@
 
 // ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver
 // Logging.
-func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
 	it := &MonitoredResourceDescriptorIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
 		var resp *loggingpb.ListMonitoredResourceDescriptorsResponse
@@ -255,11 +239,11 @@
 		} else {
 			req.PageSize = int32(pageSize)
 		}
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 			var err error
-			resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req)
+			resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...)
 			return err
-		}, c.CallOptions.ListMonitoredResourceDescriptors...)
+		}, opts...)
 		if err != nil {
 			return nil, "", err
 		}
@@ -277,11 +261,11 @@
 	return it
 }
 
-// ListLogs lists the logs in projects or organizations.
+// ListLogs lists the logs in projects, organizations, folders, or billing accounts.
 // Only logs that have entries are listed.
-func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) *StringIterator {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...)
 	it := &StringIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
 		var resp *loggingpb.ListLogsResponse
@@ -291,11 +275,11 @@
 		} else {
 			req.PageSize = int32(pageSize)
 		}
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 			var err error
-			resp, err = c.client.ListLogs(ctx, req)
+			resp, err = c.client.ListLogs(ctx, req, settings.GRPC...)
 			return err
-		}, c.CallOptions.ListLogs...)
+		}, opts...)
 		if err != nil {
 			return nil, "", err
 		}
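
Note on the regenerated client surface above: every RPC now accepts per-call gax.CallOption values, which are appended after the defaults in c.CallOptions. A minimal caller-side sketch (not part of the vendored diff; the project ID and the FailFast option are placeholders):

    package main

    import (
    	"log"

    	vkit "cloud.google.com/go/logging/apiv2"
    	gax "github.com/googleapis/gax-go"
    	"golang.org/x/net/context"
    	"google.golang.org/api/iterator"
    	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
    	"google.golang.org/grpc"
    )

    func main() {
    	ctx := context.Background()
    	c, err := vkit.NewClient(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Per-call options ride along after the client's configured defaults.
    	it := c.ListLogs(ctx, &loggingpb.ListLogsRequest{Parent: "projects/my-project"},
    		gax.WithGRPCOptions(grpc.FailFast(false)))
    	for {
    		name, err := it.Next()
    		if err == iterator.Done {
    			break
    		}
    		if err != nil {
    			log.Fatal(err)
    		}
    		log.Println(name)
    	}
    }
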
diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
index ac838e8..e4cc57f 100644
--- a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
+++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google Inc. All rights reserved.
+// Copyright 2017, Google Inc. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,12 +17,10 @@
 package logging
 
 import (
-	"fmt"
 	"math"
-	"runtime"
-	"strings"
 	"time"
 
+	"cloud.google.com/go/internal/version"
 	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
@@ -31,12 +29,6 @@
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-var (
-	metricsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
-	metricsMetricPathTemplate  = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}")
 )
 
 // MetricsCallOptions contains the retry settings for each method of MetricsClient.
@@ -51,13 +43,7 @@
 func defaultMetricsClientOptions() []option.ClientOption {
 	return []option.ClientOption{
 		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(
-			"https://www.googleapis.com/auth/cloud-platform",
-			"https://www.googleapis.com/auth/cloud-platform.read-only",
-			"https://www.googleapis.com/auth/logging.admin",
-			"https://www.googleapis.com/auth/logging.read",
-			"https://www.googleapis.com/auth/logging.write",
-		),
+		option.WithScopes(DefaultAuthScopes()...),
 	}
 }
 
@@ -67,6 +53,7 @@
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
+					codes.Internal,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
@@ -97,7 +84,7 @@
 	CallOptions *MetricsCallOptions
 
 	// The metadata to be sent with each request.
-	metadata metadata.MD
+	xGoogHeader []string
 }
 
 // NewMetricsClient creates a new metrics service v2 client.
@@ -114,7 +101,7 @@
 
 		metricsClient: loggingpb.NewMetricsServiceV2Client(conn),
 	}
-	c.SetGoogleClientInfo("gax", gax.Version)
+	c.SetGoogleClientInfo()
 	return c, nil
 }
 
@@ -132,39 +119,34 @@
 // SetGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *MetricsClient) SetGoogleClientInfo(name, version string) {
-	goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
-	v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
-	c.metadata = metadata.Pairs("x-goog-api-client", v)
+func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
 }
 
 // MetricsProjectPath returns the path for the project resource.
 func MetricsProjectPath(project string) string {
-	path, err := metricsProjectPathTemplate.Render(map[string]string{
-		"project": project,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		""
 }
 
 // MetricsMetricPath returns the path for the metric resource.
 func MetricsMetricPath(project, metric string) string {
-	path, err := metricsMetricPathTemplate.Render(map[string]string{
-		"project": project,
-		"metric":  metric,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		"/metrics/" +
+		metric +
+		""
 }
 
 // ListLogMetrics lists logs-based metrics.
-func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...)
 	it := &LogMetricIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) {
 		var resp *loggingpb.ListLogMetricsResponse
@@ -174,11 +156,11 @@
 		} else {
 			req.PageSize = int32(pageSize)
 		}
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 			var err error
-			resp, err = c.metricsClient.ListLogMetrics(ctx, req)
+			resp, err = c.metricsClient.ListLogMetrics(ctx, req, settings.GRPC...)
 			return err
-		}, c.CallOptions.ListLogMetrics...)
+		}, opts...)
 		if err != nil {
 			return nil, "", err
 		}
@@ -197,15 +179,15 @@
 }
 
 // GetLogMetric gets a logs-based metric.
-func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...)
 	var resp *loggingpb.LogMetric
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.metricsClient.GetLogMetric(ctx, req)
+		resp, err = c.metricsClient.GetLogMetric(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.GetLogMetric...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -213,15 +195,15 @@
 }
 
 // CreateLogMetric creates a logs-based metric.
-func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...)
 	var resp *loggingpb.LogMetric
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.metricsClient.CreateLogMetric(ctx, req)
+		resp, err = c.metricsClient.CreateLogMetric(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.CreateLogMetric...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -229,15 +211,15 @@
 }
 
 // UpdateLogMetric creates or updates a logs-based metric.
-func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...)
 	var resp *loggingpb.LogMetric
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = c.metricsClient.UpdateLogMetric(ctx, req)
+		resp, err = c.metricsClient.UpdateLogMetric(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.UpdateLogMetric...)
+	}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -245,14 +227,14 @@
 }
 
 // DeleteLogMetric deletes a logs-based metric.
-func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error {
-	md, _ := metadata.FromContext(ctx)
-	ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error {
+	ctx = insertXGoog(ctx, c.xGoogHeader)
+	opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		_, err = c.metricsClient.DeleteLogMetric(ctx, req)
+		_, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...)
 		return err
-	}, c.CallOptions.DeleteLogMetric...)
+	}, opts...)
 	return err
 }
 
diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go
index 7d8ece0..38cfbb5 100644
--- a/vendor/cloud.google.com/go/logging/internal/common.go
+++ b/vendor/cloud.google.com/go/logging/internal/common.go
@@ -28,3 +28,12 @@
 	logID = strings.Replace(logID, "/", "%2F", -1)
 	return fmt.Sprintf("%s/logs/%s", parent, logID)
 }
+
+func LogIDFromPath(parent, path string) string {
+	start := len(parent) + len("/logs/")
+	if len(path) < start {
+		return ""
+	}
+	logID := path[start:]
+	return strings.Replace(logID, "%2F", "/", -1)
+}
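
The new LogIDFromPath is the inverse of LogPath above. A quick round-trip sketch, written as an in-package test since logging/internal is only importable from within cloud.google.com/go (the project and log names are placeholders):

    package internal

    import "testing"

    func TestLogIDRoundTrip(t *testing.T) {
    	const parent = "projects/my-project"
    	// LogPath escapes the "/" in the log ID: "projects/my-project/logs/syslog%2Ferrors".
    	path := LogPath(parent, "syslog/errors")
    	// LogIDFromPath strips the parent prefix and unescapes the ID again.
    	if got := LogIDFromPath(parent, path); got != "syslog/errors" {
    		t.Errorf("LogIDFromPath = %q, want %q", got, "syslog/errors")
    	}
    }
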
diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go
index 51d92eb..46d06dd 100644
--- a/vendor/cloud.google.com/go/logging/logging.go
+++ b/vendor/cloud.google.com/go/logging/logging.go
@@ -36,6 +36,8 @@
 	"sync"
 	"time"
 
+	"cloud.google.com/go/compute/metadata"
+	"cloud.google.com/go/internal/version"
 	vkit "cloud.google.com/go/logging/apiv2"
 	"cloud.google.com/go/logging/internal"
 	"github.com/golang/protobuf/proto"
@@ -95,6 +97,10 @@
 	loggers   sync.WaitGroup // so we can wait for loggers to close
 	closed    bool
 
+	mu      sync.Mutex
+	nErrs   int   // number of errors we saw
+	lastErr error // last error we saw
+
 	// OnError is called when an error occurs in a call to Log or Flush. The
 	// error may be due to an invalid Entry, an overflow because BufferLimit
 	// was reached (in which case the error will be ErrOverflow) or an error
@@ -125,7 +131,7 @@
 	if err != nil {
 		return nil, err
 	}
-	c.SetGoogleClientInfo("logging", internal.Version)
+	c.SetGoogleClientInfo("gccl", version.Repo)
 	client := &Client{
 		client:    c,
 		projectID: projectID,
@@ -170,18 +176,43 @@
 // log entry "ping" to a log named "ping".
 func (c *Client) Ping(ctx context.Context) error {
 	ent := &logpb.LogEntry{
-		Payload:   &logpb.LogEntry_TextPayload{"ping"},
+		Payload:   &logpb.LogEntry_TextPayload{TextPayload: "ping"},
 		Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
 		InsertId:  "ping",            // necessary for the service to dedup these entries.
 	}
 	_, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
 		LogName:  internal.LogPath(c.parent(), "ping"),
-		Resource: &mrpb.MonitoredResource{Type: "global"},
+		Resource: globalResource(c.projectID),
 		Entries:  []*logpb.LogEntry{ent},
 	})
 	return err
 }
 
+// error puts the error on the client's error channel
+// without blocking, and records summary error info.
+func (c *Client) error(err error) {
+	select {
+	case c.errc <- err:
+	default:
+	}
+	c.mu.Lock()
+	c.lastErr = err
+	c.nErrs++
+	c.mu.Unlock()
+}
+
+func (c *Client) extractErrorInfo() error {
+	var err error
+	c.mu.Lock()
+	if c.lastErr != nil {
+		err = fmt.Errorf("saw %d errors; last: %v", c.nErrs, c.lastErr)
+		c.nErrs = 0
+		c.lastErr = nil
+	}
+	c.mu.Unlock()
+	return err
+}
+
 // A Logger is used to write log messages to a single log. It can be configured
 // with a log ID, common monitored resource, and a set of common labels.
 type Logger struct {
@@ -201,14 +232,58 @@
 }
 
 // CommonResource sets the monitored resource associated with all log entries
-// written from a Logger. If not provided, a resource of type "global" is used.
-// This value can be overridden by setting an Entry's Resource field.
+// written from a Logger. If not provided, the resource is automatically
+// detected based on the running environment.  This value can be overridden
+// per-entry by setting an Entry's Resource field.
 func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
 
 type commonResource struct{ *mrpb.MonitoredResource }
 
 func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
 
+var detectedResource struct {
+	pb   *mrpb.MonitoredResource
+	once sync.Once
+}
+
+func detectResource() *mrpb.MonitoredResource {
+	detectedResource.once.Do(func() {
+		if !metadata.OnGCE() {
+			return
+		}
+		projectID, err := metadata.ProjectID()
+		if err != nil {
+			return
+		}
+		id, err := metadata.InstanceID()
+		if err != nil {
+			return
+		}
+		zone, err := metadata.Zone()
+		if err != nil {
+			return
+		}
+		detectedResource.pb = &mrpb.MonitoredResource{
+			Type: "gce_instance",
+			Labels: map[string]string{
+				"project_id":  projectID,
+				"instance_id": id,
+				"zone":        zone,
+			},
+		}
+	})
+	return detectedResource.pb
+}
+
+func globalResource(projectID string) *mrpb.MonitoredResource {
+	return &mrpb.MonitoredResource{
+		Type: "global",
+		Labels: map[string]string{
+			"project_id": projectID,
+		},
+	}
+}
+
 // CommonLabels are labels that apply to all log entries written from a Logger,
 // so that you don't have to repeat them in each log entry's Labels field. If
 // any of the log entries contains a (key, value) with the same key that is in
@@ -287,10 +362,14 @@
 // characters: [A-Za-z0-9]; and punctuation characters: forward-slash,
 // underscore, hyphen, and period.
 func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger {
+	r := detectResource()
+	if r == nil {
+		r = globalResource(c.projectID)
+	}
 	l := &Logger{
 		client:         c,
 		logName:        internal.LogPath(c.parent(), logID),
-		commonResource: &mrpb.MonitoredResource{Type: "global"},
+		commonResource: r,
 	}
 	// TODO(jba): determine the right context for the bundle handler.
 	ctx := context.TODO()
@@ -313,7 +392,7 @@
 	go func() {
 		defer c.loggers.Done()
 		<-c.donec
-		l.bundler.Stop()
+		l.bundler.Flush()
 	}()
 	return l
 }
@@ -331,7 +410,7 @@
 	return len(p), nil
 }
 
-// Close closes the client.
+// Close waits for all opened loggers to be flushed and closes the client.
 func (c *Client) Close() error {
 	if c.closed {
 		return nil
@@ -340,9 +419,12 @@
 	c.loggers.Wait() // wait for all bundlers to flush and close
 	// Now there can be no more errors.
 	close(c.errc) // terminate error goroutine
-	// Return only the first error. Since all clients share an underlying connection,
-	// Closes after the first always report a "connection is closing" error.
-	err := c.client.Close()
+	// Prefer logging errors to close errors.
+	err := c.extractErrorInfo()
+	err2 := c.client.Close()
+	if err == nil {
+		err = err2
+	}
 	c.closed = true
 	return err
 }
@@ -452,6 +534,11 @@
 	// by the client when reading entries. It is an error to set it when
 	// writing entries.
 	Resource *mrpb.MonitoredResource
+
+	// Trace is the resource name of the trace associated with the log entry,
+	// if any. If it contains a relative resource name, the name is assumed to
+	// be relative to //tracing.googleapis.com.
+	Trace string
 }
 
 // HTTPRequest contains an http.Request as well as additional
@@ -476,6 +563,10 @@
 	// received until the response was sent.
 	Latency time.Duration
 
+	// LocalIP is the IP address (IPv4 or IPv6) of the origin server that the request
+	// was sent to.
+	LocalIP string
+
 	// RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the
 	// HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329".
 	RemoteIP string
@@ -506,6 +597,7 @@
 		Status:                         int32(r.Status),
 		ResponseSize:                   r.ResponseSize,
 		UserAgent:                      r.Request.UserAgent(),
+		ServerIp:                       r.LocalIP,
 		RemoteIp:                       r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr?
 		Referer:                        r.Request.Referer(),
 		CacheHit:                       r.CacheHit,
@@ -551,21 +643,21 @@
 func jsonValueToStructValue(v interface{}) *structpb.Value {
 	switch x := v.(type) {
 	case bool:
-		return &structpb.Value{Kind: &structpb.Value_BoolValue{x}}
+		return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: x}}
 	case float64:
-		return &structpb.Value{Kind: &structpb.Value_NumberValue{x}}
+		return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: x}}
 	case string:
-		return &structpb.Value{Kind: &structpb.Value_StringValue{x}}
+		return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: x}}
 	case nil:
 		return &structpb.Value{Kind: &structpb.Value_NullValue{}}
 	case map[string]interface{}:
-		return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}}
+		return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: jsonMapToProtoStruct(x)}}
 	case []interface{}:
 		var vals []*structpb.Value
 		for _, e := range x {
 			vals = append(vals, jsonValueToStructValue(e))
 		}
-		return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}}
+		return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}}
 	default:
 		panic(fmt.Sprintf("bad type %T for JSON value", v))
 	}
@@ -593,17 +685,23 @@
 func (l *Logger) Log(e Entry) {
 	ent, err := toLogEntry(e)
 	if err != nil {
-		l.error(err)
+		l.client.error(err)
 		return
 	}
 	if err := l.bundler.Add(ent, proto.Size(ent)); err != nil {
-		l.error(err)
+		l.client.error(err)
 	}
 }
 
 // Flush blocks until all currently buffered log entries are sent.
-func (l *Logger) Flush() {
+//
+// If any errors occurred since the last call to Flush from any Logger, or the
+// creation of the client if this is the first call, then Flush returns a non-nil
+// error with summary information about the errors. This information is unlikely to
+// be actionable. For more accurate error reporting, set Client.OnError.
+func (l *Logger) Flush() error {
 	l.bundler.Flush()
+	return l.client.extractErrorInfo()
 }
 
 func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) {
@@ -615,16 +713,7 @@
 	}
 	_, err := l.client.client.WriteLogEntries(ctx, req)
 	if err != nil {
-		l.error(err)
-	}
-}
-
-// error puts the error on the client's error channel
-// without blocking.
-func (l *Logger) error(err error) {
-	select {
-	case l.client.errc <- err:
-	default:
+		l.client.error(err)
 	}
 }
 
@@ -661,17 +750,18 @@
 		HttpRequest: fromHTTPRequest(e.HTTPRequest),
 		Operation:   e.Operation,
 		Labels:      e.Labels,
+		Trace:       e.Trace,
 	}
 
 	switch p := e.Payload.(type) {
 	case string:
-		ent.Payload = &logpb.LogEntry_TextPayload{p}
+		ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p}
 	default:
 		s, err := toProtoStruct(p)
 		if err != nil {
 			return nil, err
 		}
-		ent.Payload = &logpb.LogEntry_JsonPayload{s}
+		ent.Payload = &logpb.LogEntry_JsonPayload{JsonPayload: s}
 	}
 	return ent, nil
 }
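
Taken together, the logging.go changes mean a Logger now defaults to the auto-detected resource (gce_instance on GCE, global elsewhere) and Flush reports a summary of buffered errors. A rough caller-side sketch; the project ID, log name, and resource labels are placeholders, not from this change:

    package main

    import (
    	"log"

    	"cloud.google.com/go/logging"
    	"golang.org/x/net/context"
    	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
    )

    func main() {
    	ctx := context.Background()
    	client, err := logging.NewClient(ctx, "my-project")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// CommonResource still overrides the detected default per logger.
    	lg := client.Logger("my-log", logging.CommonResource(&mrpb.MonitoredResource{
    		Type:   "global",
    		Labels: map[string]string{"project_id": "my-project"},
    	}))
    	lg.Log(logging.Entry{Payload: "hello"})
    	// Flush now returns summary error info gathered since the last Flush.
    	if err := lg.Flush(); err != nil {
    		log.Printf("flush: %v", err)
    	}
    	if err := client.Close(); err != nil {
    		log.Printf("close: %v", err)
    	}
    }
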
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
index 7b5442d..b98a765 100644
--- a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -56,7 +56,7 @@
 	ErrNoStats = errors.New("memcache: no statistics available")
 
 	// ErrMalformedKey is returned when an invalid key is used.
-	// Keys must be at maximum 250 bytes long, ASCII, and not
+	// Keys must be at maximum 250 bytes long and not
 	// contain whitespace or control characters.
 	ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
 
@@ -64,14 +64,17 @@
 	ErrNoServers = errors.New("memcache: no servers configured or available")
 )
 
-// DefaultTimeout is the default socket read/write timeout.
-const DefaultTimeout = 100 * time.Millisecond
-
 const (
-	buffered            = 8 // arbitrary buffered channel size, for readability
-	maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable?
+	// DefaultTimeout is the default socket read/write timeout.
+	DefaultTimeout = 100 * time.Millisecond
+
+	// DefaultMaxIdleConns is the default maximum number of idle connections
+	// kept for any single address.
+	DefaultMaxIdleConns = 2
 )
 
+const buffered = 8 // arbitrary buffered channel size, for readability
+
 // resumableError returns true if err is only a protocol-level cache error.
 // This is used to determine whether or not a server connection should
 // be re-used or not. If an error occurs, by default we don't reuse the
@@ -89,7 +92,7 @@
 		return false
 	}
 	for i := 0; i < len(key); i++ {
-		if key[i] <= ' ' || key[i] > 0x7e {
+		if key[i] <= ' ' || key[i] == 0x7f {
 			return false
 		}
 	}
@@ -133,6 +136,14 @@
 	// If zero, DefaultTimeout is used.
 	Timeout time.Duration
 
+	// MaxIdleConns specifies the maximum number of idle connections that will
+	// be maintained per address. If less than one, DefaultMaxIdleConns will be
+	// used.
+	//
+	// Consider your expected traffic rates and latency carefully. This should
+	// be set to a number higher than your peak parallel requests.
+	MaxIdleConns int
+
 	selector ServerSelector
 
 	lk       sync.Mutex
@@ -196,7 +207,7 @@
 		c.freeconn = make(map[string][]*conn)
 	}
 	freelist := c.freeconn[addr.String()]
-	if len(freelist) >= maxIdleConnsPerAddr {
+	if len(freelist) >= c.maxIdleConns() {
 		cn.nc.Close()
 		return
 	}
@@ -225,6 +236,13 @@
 	return DefaultTimeout
 }
 
+func (c *Client) maxIdleConns() int {
+	if c.MaxIdleConns > 0 {
+		return c.MaxIdleConns
+	}
+	return DefaultMaxIdleConns
+}
+
 // ConnectTimeoutError is the error type used when it takes
 // too long to connect to the desired host. This level of
 // detail can generally be ignored.
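
MaxIdleConns is now an exported knob on Client, falling back to DefaultMaxIdleConns (2) when left at zero. A short sketch of setting it; the server address and value are placeholders:

    package main

    import (
    	"log"

    	"github.com/bradfitz/gomemcache/memcache"
    )

    func main() {
    	mc := memcache.New("127.0.0.1:11211")
    	// Raise the per-address idle connection cap above the default of 2.
    	mc.MaxIdleConns = 16

    	if err := mc.Set(&memcache.Item{Key: "greeting", Value: []byte("hello")}); err != nil {
    		log.Fatal(err)
    	}
    	it, err := mc.Get("greeting")
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("%s", it.Value)
    }
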
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
index 10b04d3..89ad81e 100644
--- a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -41,6 +41,21 @@
 	addrs []net.Addr
 }
 
+// staticAddr caches the Network() and String() values from any net.Addr.
+type staticAddr struct {
+	ntw, str string
+}
+
+func newStaticAddr(a net.Addr) net.Addr {
+	return &staticAddr{
+		ntw: a.Network(),
+		str: a.String(),
+	}
+}
+
+func (s *staticAddr) Network() string { return s.ntw }
+func (s *staticAddr) String() string  { return s.str }
+
 // SetServers changes a ServerList's set of servers at runtime and is
 // safe for concurrent use by multiple goroutines.
 //
@@ -58,13 +73,13 @@
 			if err != nil {
 				return err
 			}
-			naddr[i] = addr
+			naddr[i] = newStaticAddr(addr)
 		} else {
 			tcpaddr, err := net.ResolveTCPAddr("tcp", server)
 			if err != nil {
 				return err
 			}
-			naddr[i] = tcpaddr
+			naddr[i] = newStaticAddr(tcpaddr)
 		}
 	}
 
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
index 3a5c933..22e5fa4 100644
--- a/vendor/github.com/fsnotify/fsnotify/.travis.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -2,12 +2,14 @@
 language: go
 
 go:
-  - 1.6.3
+  - 1.8
+  - 1.7.x
   - tip
 
 matrix:
   allow_failures:
     - go: tip
+  fast_finish: true
 
 before_script:
   - go get -u github.com/golang/lint/golint
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index 25180c6..3993207 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -23,7 +23,7 @@
 
 \* Android and iOS are untested.
 
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
 
 ## API stability
 
@@ -41,6 +41,35 @@
 
 See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
 
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
 [contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
 
 ## Related Projects
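
For the FAQ entry above about reading the Events and Errors channels, a small sketch of the usual separate-goroutine pattern; the watched path is a placeholder:

    package main

    import (
    	"log"

    	"github.com/fsnotify/fsnotify"
    )

    func main() {
    	w, err := fsnotify.NewWatcher()
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer w.Close()

    	// Drain both channels in their own goroutine, as the FAQ recommends.
    	go func() {
    		for {
    			select {
    			case ev := <-w.Events:
    				log.Println("event:", ev)
    			case err := <-w.Errors:
    				log.Println("error:", err)
    			}
    		}
    	}()

    	// Watch a single directory; watches are not recursive.
    	if err := w.Add("/tmp"); err != nil {
    		log.Fatal(err)
    	}
    	done := make(chan bool)
    	<-done
    }
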
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index bfa9dbc..d9fd1b8 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -24,7 +24,6 @@
 	Events   chan Event
 	Errors   chan error
 	mu       sync.Mutex // Map access
-	cv       *sync.Cond // sync removing on rm_watch with IN_IGNORE
 	fd       int
 	poller   *fdPoller
 	watches  map[string]*watch // Map of inotify watches (key: path)
@@ -56,7 +55,6 @@
 		done:     make(chan struct{}),
 		doneResp: make(chan struct{}),
 	}
-	w.cv = sync.NewCond(&w.mu)
 
 	go w.readEvents()
 	return w, nil
@@ -103,21 +101,23 @@
 	var flags uint32 = agnosticEvents
 
 	w.mu.Lock()
-	watchEntry, found := w.watches[name]
-	w.mu.Unlock()
-	if found {
-		watchEntry.flags |= flags
-		flags |= unix.IN_MASK_ADD
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
 	}
 	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
 	if wd == -1 {
 		return errno
 	}
 
-	w.mu.Lock()
-	w.watches[name] = &watch{wd: uint32(wd), flags: flags}
-	w.paths[wd] = name
-	w.mu.Unlock()
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
 
 	return nil
 }
@@ -135,6 +135,13 @@
 	if !ok {
 		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
 	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error, we need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
 	// inotify_rm_watch will return EINVAL if the file has been deleted;
 	// the inotify will already have been removed.
 	// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
@@ -152,13 +159,6 @@
 		return errno
 	}
 
-	// wait until ignoreLinux() deleting maps
-	exists := true
-	for exists {
-		w.cv.Wait()
-		_, exists = w.watches[name]
-	}
-
 	return nil
 }
 
@@ -259,8 +259,17 @@
 			// the "Name" field with a valid filename. We retrieve the path of the watch from
 			// the "paths" map.
 			w.mu.Lock()
-			name := w.paths[int(raw.Wd)]
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
 			w.mu.Unlock()
+
 			if nameLen > 0 {
 				// Point "bytes" at the first byte of the filename
 				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@@ -271,7 +280,7 @@
 			event := newEvent(name, mask)
 
 			// Send the events that are not ignored on the events channel
-			if !event.ignoreLinux(w, raw.Wd, mask) {
+			if !event.ignoreLinux(mask) {
 				select {
 				case w.Events <- event:
 				case <-w.done:
@@ -288,15 +297,9 @@
 // Certain types of events can be "ignored" and not sent over the Events
 // channel. Such as events marked ignore by the kernel, or MODIFY events
 // against files that do not exist.
-func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+func (e *Event) ignoreLinux(mask uint32) bool {
 	// Ignore anything the inotify API says to ignore
 	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
-		w.mu.Lock()
-		defer w.mu.Unlock()
-		name := w.paths[int(wd)]
-		delete(w.paths, int(wd))
-		delete(w.watches, name)
-		w.cv.Broadcast()
 		return true
 	}
 
diff --git a/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/garyburd/redigo/redis/conn.go
index ed358c6..dfd40c5 100644
--- a/vendor/github.com/garyburd/redigo/redis/conn.go
+++ b/vendor/github.com/garyburd/redigo/redis/conn.go
@@ -17,6 +17,7 @@
 import (
 	"bufio"
 	"bytes"
+	"crypto/tls"
 	"errors"
 	"fmt"
 	"io"
@@ -75,6 +76,9 @@
 	dial         func(network, addr string) (net.Conn, error)
 	db           int
 	password     string
+	dialTLS      bool
+	skipVerify   bool
+	tlsConfig    *tls.Config
 }
 
 // DialReadTimeout specifies the timeout for reading a single command reply.
@@ -123,6 +127,22 @@
 	}}
 }
 
+// DialTLSConfig specifies the config to use when a TLS connection is dialed.
+// Has no effect when not dialing a TLS connection.
+func DialTLSConfig(c *tls.Config) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.tlsConfig = c
+	}}
+}
+
+// DialTLSSkipVerify to disable server name verification when connecting
+// over TLS. Has no effect when not dialing a TLS connection.
+func DialTLSSkipVerify(skip bool) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.skipVerify = skip
+	}}
+}
+
 // Dial connects to the Redis server at the given network and
 // address using the specified options.
 func Dial(network, address string, options ...DialOption) (Conn, error) {
@@ -137,6 +157,26 @@
 	if err != nil {
 		return nil, err
 	}
+
+	if do.dialTLS {
+		tlsConfig := cloneTLSClientConfig(do.tlsConfig, do.skipVerify)
+		if tlsConfig.ServerName == "" {
+			host, _, err := net.SplitHostPort(address)
+			if err != nil {
+				netConn.Close()
+				return nil, err
+			}
+			tlsConfig.ServerName = host
+		}
+
+		tlsConn := tls.Client(netConn, tlsConfig)
+		if err := tlsConn.Handshake(); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+		netConn = tlsConn
+	}
+
 	c := &conn{
 		conn:         netConn,
 		bw:           bufio.NewWriter(netConn),
@@ -162,6 +202,10 @@
 	return c, nil
 }
 
+func dialTLS(do *dialOptions) {
+	do.dialTLS = true
+}
+
 var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
 
 // DialURL connects to a Redis server at the given URL using the Redis
@@ -173,7 +217,7 @@
 		return nil, err
 	}
 
-	if u.Scheme != "redis" {
+	if u.Scheme != "redis" && u.Scheme != "rediss" {
 		return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
 	}
 
@@ -213,6 +257,10 @@
 		return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
 	}
 
+	if u.Scheme == "rediss" {
+		options = append([]DialOption{{dialTLS}}, options...)
+	}
+
 	return Dial("tcp", address, options...)
 }
 
@@ -322,6 +370,10 @@
 			}
 		case nil:
 			err = c.writeString("")
+		case Argument:
+			var buf bytes.Buffer
+			fmt.Fprint(&buf, arg.RedisArg())
+			err = c.writeBytes(buf.Bytes())
 		default:
 			var buf bytes.Buffer
 			fmt.Fprint(&buf, arg)
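
With the rediss:// scheme and the TLS dial options added above, a TLS connection can be dialed roughly like this; the host and the skip-verify setting are placeholders:

    package main

    import (
    	"log"

    	"github.com/garyburd/redigo/redis"
    )

    func main() {
    	// rediss:// dials over TLS; DialTLSSkipVerify and DialTLSConfig tune
    	// certificate verification for that connection.
    	c, err := redis.DialURL("rediss://redis.example.com:6380",
    		redis.DialTLSSkipVerify(true))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()

    	if _, err := c.Do("PING"); err != nil {
    		log.Fatal(err)
    	}
    }
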
diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go
index e6fecca..bf5131a 100644
--- a/vendor/github.com/garyburd/redigo/redis/doc.go
+++ b/vendor/github.com/garyburd/redigo/redis/doc.go
@@ -127,7 +127,7 @@
 // send and flush a subscription management command. The receive method
 // converts a pushed message to convenient types for use in a type switch.
 //
-//  psc := redis.PubSubConn{c}
+//  psc := redis.PubSubConn{Conn: c}
 //  psc.Subscribe("example")
 //  for {
 //      switch v := psc.Receive().(type) {
@@ -165,4 +165,13 @@
 //   if _, err := redis.Scan(reply, &value1, &value2); err != nil {
 //      // handle error
 //  }
+//
+// Errors
+//
+// Connection methods return error replies from the server as type redis.Error.
+//
+// Call the connection Err() method to determine if the connection encountered
+// non-recoverable error such as a network error or protocol parsing error. If
+// Err() returns a non-nil value, then the connection is not usable and should
+// be closed.
 package redis
diff --git a/vendor/github.com/garyburd/redigo/redis/go17.go b/vendor/github.com/garyburd/redigo/redis/go17.go
new file mode 100644
index 0000000..3f951e5
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go17.go
@@ -0,0 +1,33 @@
+// +build go1.7
+
+package redis
+
+import "crypto/tls"
+
+// similar cloneTLSClientConfig in the stdlib, but also honor skipVerify for the nil case
+func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{InsecureSkipVerify: skipVerify}
+	}
+	return &tls.Config{
+		Rand:                        cfg.Rand,
+		Time:                        cfg.Time,
+		Certificates:                cfg.Certificates,
+		NameToCertificate:           cfg.NameToCertificate,
+		GetCertificate:              cfg.GetCertificate,
+		RootCAs:                     cfg.RootCAs,
+		NextProtos:                  cfg.NextProtos,
+		ServerName:                  cfg.ServerName,
+		ClientAuth:                  cfg.ClientAuth,
+		ClientCAs:                   cfg.ClientCAs,
+		InsecureSkipVerify:          cfg.InsecureSkipVerify,
+		CipherSuites:                cfg.CipherSuites,
+		PreferServerCipherSuites:    cfg.PreferServerCipherSuites,
+		ClientSessionCache:          cfg.ClientSessionCache,
+		MinVersion:                  cfg.MinVersion,
+		MaxVersion:                  cfg.MaxVersion,
+		CurvePreferences:            cfg.CurvePreferences,
+		DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+		Renegotiation:               cfg.Renegotiation,
+	}
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go
index 3d23360..258bf79 100644
--- a/vendor/github.com/garyburd/redigo/redis/pool.go
+++ b/vendor/github.com/garyburd/redigo/redis/pool.go
@@ -46,44 +46,26 @@
 //
 // The following example shows how to use a pool in a web application. The
 // application creates a pool at application startup and makes it available to
-// request handlers using a global variable. The pool configuration used here
-// is an example, not a recommendation.
+// request handlers using a package level variable. The pool configuration used
+// here is an example, not a recommendation.
 //
-//  func newPool(server, password string) *redis.Pool {
-//      return &redis.Pool{
-//          MaxIdle: 3,
-//          IdleTimeout: 240 * time.Second,
-//          Dial: func () (redis.Conn, error) {
-//              c, err := redis.Dial("tcp", server)
-//              if err != nil {
-//                  return nil, err
-//              }
-//              if _, err := c.Do("AUTH", password); err != nil {
-//                  c.Close()
-//                  return nil, err
-//              }
-//              return c, err
-//          },
-//          TestOnBorrow: func(c redis.Conn, t time.Time) error {
-//              if time.Since(t) < time.Minute {
-//                  return nil
-//              }
-//              _, err := c.Do("PING")
-//              return err
-//          },
-//      }
+//  func newPool(addr string) *redis.Pool {
+//    return &redis.Pool{
+//      MaxIdle: 3,
+//      IdleTimeout: 240 * time.Second,
+//      Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) },
+//    }
 //  }
 //
 //  var (
-//      pool *redis.Pool
-//      redisServer = flag.String("redisServer", ":6379", "")
-//      redisPassword = flag.String("redisPassword", "", "")
+//    pool *redis.Pool
+//    redisServer = flag.String("redisServer", ":6379", "")
 //  )
 //
 //  func main() {
-//      flag.Parse()
-//      pool = newPool(*redisServer, *redisPassword)
-//      ...
+//    flag.Parse()
+//    pool = newPool(*redisServer)
+//    ...
 //  }
 //
 // A request handler gets a connection from the pool and closes the connection
@@ -92,7 +74,44 @@
 //  func serveHome(w http.ResponseWriter, r *http.Request) {
 //      conn := pool.Get()
 //      defer conn.Close()
-//      ....
+//      ...
+//  }
+//
+// Use the Dial function to authenticate connections with the AUTH command or
+// select a database with the SELECT command:
+//
+//  pool := &redis.Pool{
+//    // Other pool configuration not shown in this example.
+//    Dial: func () (redis.Conn, error) {
+//      c, err := redis.Dial("tcp", server)
+//      if err != nil {
+//        return nil, err
+//      }
+//      if _, err := c.Do("AUTH", password); err != nil {
+//        c.Close()
+//        return nil, err
+//      }
+//      if _, err := c.Do("SELECT", db); err != nil {
+//        c.Close()
+//        return nil, err
+//      }
+//      return c, nil
+//    }
+//  }
+//
+// Use the TestOnBorrow function to check the health of an idle connection
+// before the connection is returned to the application. This example PINGs
+// connections that have been idle more than a minute:
+//
+//  pool := &redis.Pool{
+//    // Other pool configuration not shown in this example.
+//    TestOnBorrow: func(c redis.Conn, t time.Time) error {
+//      if time.Since(t) < time.Minute {
+//        return nil
+//      }
+//      _, err := c.Do("PING")
+//      return err
+//    },
 //  }
 //
 type Pool struct {
@@ -162,7 +181,7 @@
 	return &pooledConnection{p: p, c: c}
 }
 
-// ActiveCount returns the number of active connections in the pool.
+// ActiveCount returns the number of connections in the pool. The count includes idle connections and connections in use.
 func (p *Pool) ActiveCount() int {
 	p.mu.Lock()
 	active := p.active
@@ -170,6 +189,14 @@
 	return active
 }
 
+// IdleCount returns the number of idle connections in the pool.
+func (p *Pool) IdleCount() int {
+	p.mu.Lock()
+	idle := p.idle.Len()
+	p.mu.Unlock()
+	return idle
+}
+
 // Close releases the resources used by the pool.
 func (p *Pool) Close() error {
 	p.mu.Lock()
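
ActiveCount's meaning is clarified above (idle plus in-use) and IdleCount is new. A small sketch reading both; the pool settings mirror the package documentation's example and the address is a placeholder:

    package main

    import (
    	"log"
    	"time"

    	"github.com/garyburd/redigo/redis"
    )

    func main() {
    	pool := &redis.Pool{
    		MaxIdle:     3,
    		IdleTimeout: 240 * time.Second,
    		Dial:        func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
    	}
    	conn := pool.Get()
    	conn.Do("PING")
    	conn.Close() // returns the connection to the idle list

    	log.Printf("active=%d idle=%d", pool.ActiveCount(), pool.IdleCount())
    }
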
diff --git a/vendor/github.com/garyburd/redigo/redis/pre_go17.go b/vendor/github.com/garyburd/redigo/redis/pre_go17.go
new file mode 100644
index 0000000..0212f60
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pre_go17.go
@@ -0,0 +1,31 @@
+// +build !go1.7
+
+package redis
+
+import "crypto/tls"
+
+// similar cloneTLSClientConfig in the stdlib, but also honor skipVerify for the nil case
+func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{InsecureSkipVerify: skipVerify}
+	}
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/garyburd/redigo/redis/pubsub.go
index c0ecce8..5019e7b 100644
--- a/vendor/github.com/garyburd/redigo/redis/pubsub.go
+++ b/vendor/github.com/garyburd/redigo/redis/pubsub.go
@@ -94,6 +94,9 @@
 }
 
 // Ping sends a PING to the server with the specified data.
+//
+// The connection must be subscribed to at least one channel or pattern when
+// calling this method.
 func (c PubSubConn) Ping(data string) error {
 	c.Conn.Send("PING", data)
 	return c.Conn.Flush()
diff --git a/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/garyburd/redigo/redis/redis.go
index c90a48e..7b4db7c 100644
--- a/vendor/github.com/garyburd/redigo/redis/redis.go
+++ b/vendor/github.com/garyburd/redigo/redis/redis.go
@@ -24,10 +24,7 @@
 	// Close closes the connection.
 	Close() error
 
-	// Err returns a non-nil value if the connection is broken. The returned
-	// value is either the first non-nil value returned from the underlying
-	// network connection or a protocol parsing error. Applications should
-	// close broken connections.
+	// Err returns a non-nil value when the connection is not usable.
 	Err() error
 
 	// Do sends a command to the server and returns the received reply.
@@ -42,3 +39,21 @@
 	// Receive receives a single reply from the Redis server
 	Receive() (reply interface{}, err error)
 }
+
+// Argument is implemented by types which want to control how their value is
+// interpreted when used as an argument to a redis command.
+type Argument interface {
+	// RedisArg returns the interface that represents the value to be used
+	// in redis commands.
+	RedisArg() interface{}
+}
+
+// Scanner is implemented by types which want to control how their value is
+// interpreted when read from redis.
+type Scanner interface {
+	// RedisScan assigns a value from a redis value.
+	//
+	// An error should be returned if the value cannot be stored without
+	// loss of information.
+	RedisScan(src interface{}) error
+}
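
A sketch of a type opting into the new Argument and Scanner interfaces; the latencyMS type and the key name are invented for illustration:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/garyburd/redigo/redis"
    )

    // latencyMS is stored in Redis as an integer millisecond count.
    type latencyMS int

    // RedisArg controls how the value is written as a command argument.
    func (l latencyMS) RedisArg() interface{} { return int64(l) }

    // RedisScan controls how a reply is converted back into the type.
    func (l *latencyMS) RedisScan(src interface{}) error {
    	n, err := redis.Int64(src, nil)
    	if err != nil {
    		return err
    	}
    	*l = latencyMS(n)
    	return nil
    }

    func main() {
    	c, err := redis.Dial("tcp", "127.0.0.1:6379")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()

    	if _, err := c.Do("SET", "latency", latencyMS(42)); err != nil { // uses RedisArg
    		log.Fatal(err)
    	}
    	reply, err := c.Do("GET", "latency")
    	if err != nil {
    		log.Fatal(err)
    	}
    	var l latencyMS
    	if _, err := redis.Scan([]interface{}{reply}, &l); err != nil { // uses RedisScan
    		log.Fatal(err)
    	}
    	fmt.Println(l)
    }
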
diff --git a/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/garyburd/redigo/redis/reply.go
index 5789614..a4dd7c8 100644
--- a/vendor/github.com/garyburd/redigo/redis/reply.go
+++ b/vendor/github.com/garyburd/redigo/redis/reply.go
@@ -333,7 +333,7 @@
 		key, okKey := values[i].([]byte)
 		value, okValue := values[i+1].([]byte)
 		if !okKey || !okValue {
-			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+			return nil, errors.New("redigo: StringMap key not a bulk string value")
 		}
 		m[string(key)] = string(value)
 	}
@@ -355,7 +355,7 @@
 	for i := 0; i < len(values); i += 2 {
 		key, ok := values[i].([]byte)
 		if !ok {
-			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+			return nil, errors.New("redigo: IntMap key not a bulk string value")
 		}
 		value, err := Int(values[i+1], nil)
 		if err != nil {
@@ -381,7 +381,7 @@
 	for i := 0; i < len(values); i += 2 {
 		key, ok := values[i].([]byte)
 		if !ok {
-			return nil, errors.New("redigo: ScanMap key not a bulk string value")
+			return nil, errors.New("redigo: Int64Map key not a bulk string value")
 		}
 		value, err := Int64(values[i+1], nil)
 		if err != nil {
@@ -391,3 +391,35 @@
 	}
 	return m, nil
 }
+
+// Positions is a helper that converts an array of positions (lat, long)
+// into a [][2]float64. The GEOPOS command returns replies in this format.
+func Positions(result interface{}, err error) ([]*[2]float64, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	positions := make([]*[2]float64, len(values))
+	for i := range values {
+		if values[i] == nil {
+			continue
+		}
+		p, ok := values[i].([]interface{})
+		if !ok {
+			return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i])
+		}
+		if len(p) != 2 {
+			return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p))
+		}
+		lat, err := Float64(p[0], nil)
+		if err != nil {
+			return nil, err
+		}
+		long, err := Float64(p[1], nil)
+		if err != nil {
+			return nil, err
+		}
+		positions[i] = &[2]float64{lat, long}
+	}
+	return positions, nil
+}
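
A sketch of the new Positions helper applied to a GEOPOS reply; the key and member names are placeholders:

    package main

    import (
    	"log"

    	"github.com/garyburd/redigo/redis"
    )

    func main() {
    	c, err := redis.Dial("tcp", "127.0.0.1:6379")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Close()

    	// Each member yields a nullable coordinate pair; nil means no position.
    	positions, err := redis.Positions(c.Do("GEOPOS", "fleet", "truck-1", "truck-7"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	for i, p := range positions {
    		if p == nil {
    			log.Printf("member %d: no position", i)
    			continue
    		}
    		log.Printf("member %d: %v", i, *p)
    	}
    }
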
diff --git a/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/garyburd/redigo/redis/scan.go
index 962e94b..ef9551b 100644
--- a/vendor/github.com/garyburd/redigo/redis/scan.go
+++ b/vendor/github.com/garyburd/redigo/redis/scan.go
@@ -110,6 +110,25 @@
 }
 
 func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+	if d.Kind() != reflect.Ptr {
+		if d.CanAddr() {
+			d2 := d.Addr()
+			if d2.CanInterface() {
+				if scanner, ok := d2.Interface().(Scanner); ok {
+					return scanner.RedisScan(s)
+				}
+			}
+		}
+	} else if d.CanInterface() {
+		// Already a reflect.Ptr
+		if d.IsNil() {
+			d.Set(reflect.New(d.Type().Elem()))
+		}
+		if scanner, ok := d.Interface().(Scanner); ok {
+			return scanner.RedisScan(s)
+		}
+	}
+
 	switch s := s.(type) {
 	case []byte:
 		err = convertAssignBulkString(d, s)
@@ -135,11 +154,15 @@
 }
 
 func convertAssign(d interface{}, s interface{}) (err error) {
+	if scanner, ok := d.(Scanner); ok {
+		return scanner.RedisScan(s)
+	}
+
 	// Handle the most common destination types using type switches and
 	// fall back to reflection for all other types.
 	switch s := s.(type) {
 	case nil:
-		// ingore
+		// ignore
 	case []byte:
 		switch d := d.(type) {
 		case *string:
@@ -186,7 +209,11 @@
 	case string:
 		switch d := d.(type) {
 		case *string:
-			*d = string(s)
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
 		default:
 			err = cannotConvert(reflect.ValueOf(d), s)
 		}
@@ -215,6 +242,8 @@
 
 // Scan copies from src to the values pointed at by dest.
 //
+// Scan uses RedisScan if available otherwise:
+//
 // The values pointed at by dest must be an integer, float, boolean, string,
 // []byte, interface{} or slices of these types. Scan uses the standard strconv
 // package to convert bulk strings to numeric and boolean types.
@@ -355,6 +384,7 @@
 //
 // Fields with the tag redis:"-" are ignored.
 //
+// Each field uses RedisScan if available otherwise:
 // Integer, float, boolean, string and []byte fields are supported. Scan uses the
 // standard strconv package to convert bulk string values to numeric and
 // boolean types.
diff --git a/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/garyburd/redigo/redis/script.go
index 78605a9..0ef1c82 100644
--- a/vendor/github.com/garyburd/redigo/redis/script.go
+++ b/vendor/github.com/garyburd/redigo/redis/script.go
@@ -55,6 +55,11 @@
 	return args
 }
 
+// Hash returns the script hash.
+func (s *Script) Hash() string {
+	return s.hash
+}
+
 // Do evaluates the script. Under the covers, Do optimistically evaluates the
 // script using the EVALSHA command. If the command fails because the script is
 // not loaded, then Do evaluates the script using the EVAL command (thus
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md
index c8ca66c..2abf98e 100644
--- a/vendor/github.com/go-stack/stack/LICENSE.md
+++ b/vendor/github.com/go-stack/stack/LICENSE.md
@@ -1,13 +1,21 @@
-Copyright 2014 Chris Hines
+The MIT License (MIT)
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+Copyright (c) 2014 Chris Hines
 
-   http://www.apache.org/licenses/LICENSE-2.0
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
 
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go
index a614eee..79840ed 100644
--- a/vendor/github.com/go-stack/stack/stack.go
+++ b/vendor/github.com/go-stack/stack/stack.go
@@ -39,7 +39,7 @@
 	}
 
 	c.pc = pcs[1]
-	if runtime.FuncForPC(pcs[0]) != sigpanic {
+	if runtime.FuncForPC(pcs[0]).Name() != "runtime.sigpanic" {
 		c.pc--
 	}
 	c.fn = runtime.FuncForPC(c.pc)
@@ -71,6 +71,7 @@
 //    %s    source file
 //    %d    line number
 //    %n    function name
+//    %k    last segment of the package path
 //    %v    equivalent to %s:%d
 //
 // It accepts the '+' and '#' flags for most of the verbs as follows.
@@ -78,6 +79,7 @@
 //    %+s   path of source file relative to the compile time GOPATH
 //    %#s   full path of source file
 //    %+n   import path qualified function name
+//    %+k   full package path
 //    %+v   equivalent to %+s:%d
 //    %#v   equivalent to %#s:%d
 func (c Call) Format(s fmt.State, verb rune) {
@@ -111,6 +113,22 @@
 		buf := [6]byte{}
 		s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
 
+	case 'k':
+		name := c.fn.Name()
+		const pathSep = "/"
+		start, end := 0, len(name)
+		if i := strings.LastIndex(name, pathSep); i != -1 {
+			start = i + len(pathSep)
+		}
+		const pkgSep = "."
+		if i := strings.Index(name[start:], pkgSep); i != -1 {
+			end = start + i
+		}
+		if s.Flag('+') {
+			start = 0
+		}
+		io.WriteString(s, name[start:end])
+
 	case 'n':
 		name := c.fn.Name()
 		if !s.Flag('+') {
@@ -205,33 +223,6 @@
 	s.Write(closeBracketBytes)
 }
 
-// findSigpanic intentionally executes faulting code to generate a stack trace
-// containing an entry for runtime.sigpanic.
-func findSigpanic() *runtime.Func {
-	var fn *runtime.Func
-	var p *int
-	func() int {
-		defer func() {
-			if p := recover(); p != nil {
-				var pcs [512]uintptr
-				n := runtime.Callers(2, pcs[:])
-				for _, pc := range pcs[:n] {
-					f := runtime.FuncForPC(pc)
-					if f.Name() == "runtime.sigpanic" {
-						fn = f
-						break
-					}
-				}
-			}
-		}()
-		// intentional nil pointer dereference to trigger sigpanic
-		return *p
-	}()
-	return fn
-}
-
-var sigpanic = findSigpanic()
-
 // Trace returns a CallStack for the current goroutine with element 0
 // identifying the calling function.
 func Trace() CallStack {
@@ -241,7 +232,7 @@
 
 	for i, pc := range pcs[:n] {
 		pcFix := pc
-		if i > 0 && cs[i-1].fn != sigpanic {
+		if i > 0 && cs[i-1].fn.Name() != "runtime.sigpanic" {
 			pcFix--
 		}
 		cs[i] = Call{
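
A sketch of the new %k and %+k verbs documented above; the exact output depends on the package the code lives in:

    package main

    import (
    	"fmt"

    	"github.com/go-stack/stack"
    )

    func main() {
    	c := stack.Caller(0)
    	// Prints the function name, the last segment of its package path,
    	// and the full package path, respectively.
    	fmt.Printf("%n in %k (%+k)\n", c, c, c)
    }
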
diff --git a/vendor/github.com/golang/lint/.travis.yml b/vendor/github.com/golang/lint/.travis.yml
index 3865f0a..09fb310 100644
--- a/vendor/github.com/golang/lint/.travis.yml
+++ b/vendor/github.com/golang/lint/.travis.yml
@@ -1,10 +1,17 @@
+sudo: false
 language: go
 go:
-  - 1.6
-  - 1.7
+  - 1.7.x
+  - 1.8.x
+  - master
 
 install:
   - go get -t -v ./...
 
 script:
-  - go test -v ./...
+  - go test -v -race ./...
+
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
diff --git a/vendor/github.com/golang/lint/README.md b/vendor/github.com/golang/lint/README.md
index 2906b68..3593ddd 100644
--- a/vendor/github.com/golang/lint/README.md
+++ b/vendor/github.com/golang/lint/README.md
@@ -10,7 +10,7 @@
 
 ## Usage
 
-Invoke `golint` with one or more filenames, a directory, or a package named
+Invoke `golint` with one or more filenames, directories, or packages named
 by its import path. Golint uses the same
 [import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
 the `go` command and therefore
diff --git a/vendor/github.com/golang/lint/lint.go b/vendor/github.com/golang/lint/lint.go
index 59fea7c..fb47da0 100644
--- a/vendor/github.com/golang/lint/lint.go
+++ b/vendor/github.com/golang/lint/lint.go
@@ -8,6 +8,7 @@
 package lint
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"go/ast"
@@ -22,7 +23,7 @@
 	"unicode"
 	"unicode/utf8"
 
-	"golang.org/x/tools/go/gcimporter15"
+	"golang.org/x/tools/go/gcexportdata"
 )
 
 const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
@@ -81,15 +82,15 @@
 // LintFiles lints a set of files of a single package.
 // The argument is a map of filename to source.
 func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
-	if len(files) == 0 {
-		return nil, nil
-	}
 	pkg := &pkg{
 		fset:  token.NewFileSet(),
 		files: make(map[string]*file),
 	}
 	var pkgName string
 	for filename, src := range files {
+		if isGenerated(src) {
+			continue // See issue #239
+		}
 		f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
 		if err != nil {
 			return nil, err
@@ -107,9 +108,30 @@
 			filename: filename,
 		}
 	}
+	if len(pkg.files) == 0 {
+		return nil, nil
+	}
 	return pkg.lint(), nil
 }
 
+var (
+	genHdr = []byte("// Code generated ")
+	genFtr = []byte(" DO NOT EDIT.")
+)
+
+// isGenerated reports whether the source file is generated code
+// according the rules from https://golang.org/s/generatedcode.
+func isGenerated(src []byte) bool {
+	sc := bufio.NewScanner(bytes.NewReader(src))
+	for sc.Scan() {
+		b := sc.Bytes()
+		if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
+			return true
+		}
+	}
+	return false
+}
+
 // pkg represents a package being linted.
 type pkg struct {
 	fset  *token.FileSet
@@ -235,30 +257,15 @@
 	return &p.problems[len(p.problems)-1]
 }
 
-var gcImporter = gcimporter.Import
-
-// importer implements go/types.Importer{,From}.
-type importer struct {
-	impFn    func(packages map[string]*types.Package, path, srcDir string) (*types.Package, error)
-	packages map[string]*types.Package
-}
-
-func (i importer) Import(path string) (*types.Package, error) {
-	return i.impFn(i.packages, path, "")
-}
-
-func (i importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
-	return i.impFn(i.packages, path, srcDir)
+var newImporter = func(fset *token.FileSet) types.ImporterFrom {
+	return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
 }
 
 func (p *pkg) typeCheck() error {
 	config := &types.Config{
 		// By setting a no-op error reporter, the type checker does as much work as possible.
-		Error: func(error) {},
-		Importer: importer{
-			impFn:    gcImporter,
-			packages: make(map[string]*types.Package),
-		},
+		Error:    func(error) {},
+		Importer: newImporter(p.fset),
 	}
 	info := &types.Info{
 		Types:  make(map[ast.Expr]types.TypeAndValue),
@@ -454,7 +461,6 @@
 
 // lintImports examines import blocks.
 func (f *file) lintImports() {
-
 	for i, is := range f.f.Imports {
 		_ = i
 		if is.Name != nil && is.Name.Name == "." && !f.isTest() {
@@ -462,7 +468,6 @@
 		}
 
 	}
-
 }
 
 const docCommentsLink = styleGuideBase + "#doc-comments"
@@ -561,6 +566,7 @@
 		if id.Name == should {
 			return
 		}
+
 		if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
 			f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
 			return
@@ -598,7 +604,12 @@
 				thing = "method"
 			}
 
-			check(v.Name, thing)
+			// Exclude naming warnings for functions that are exported to C but
+			// not exported in the Go API.
+			// See https://github.com/golang/lint/issues/144.
+			if ast.IsExported(v.Name.Name) || !isCgoExported(v) {
+				check(v.Name, thing)
+			}
 
 			checkList(v.Type.Params, thing+" parameter")
 			checkList(v.Type.Results, thing+" result")
@@ -1155,21 +1166,30 @@
 	}
 }
 
-func lintCapAndPunct(s string) (isCap, isPunct bool) {
+func lintErrorString(s string) (isClean bool, conf float64) {
+	const basicConfidence = 0.8
+	const capConfidence = basicConfidence - 0.2
 	first, firstN := utf8.DecodeRuneInString(s)
 	last, _ := utf8.DecodeLastRuneInString(s)
-	isPunct = last == '.' || last == ':' || last == '!'
-	isCap = unicode.IsUpper(first)
-	if isCap && len(s) > firstN {
-		// Don't flag strings starting with something that looks like an initialism.
-		if second, _ := utf8.DecodeRuneInString(s[firstN:]); unicode.IsUpper(second) {
-			isCap = false
+	if last == '.' || last == ':' || last == '!' || last == '\n' {
+		return false, basicConfidence
+	}
+	if unicode.IsUpper(first) {
+		// People use proper nouns and exported Go identifiers in error strings,
+		// so decrease the confidence of warnings for capitalization.
+		if len(s) <= firstN {
+			return false, capConfidence
+		}
+		// Flag strings starting with something that doesn't look like an initialism.
+		if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) {
+			return false, capConfidence
 		}
 	}
-	return
+	return true, 0
 }
 
-// lintErrorStrings examines error strings. It complains if they are capitalized or end in punctuation.
+// lintErrorStrings examines error strings.
+// It complains if they are capitalized or end in punctuation or a newline.
 func (f *file) lintErrorStrings() {
 	f.walk(func(node ast.Node) bool {
 		ce, ok := node.(*ast.CallExpr)
@@ -1190,25 +1210,13 @@
 		if s == "" {
 			return true
 		}
-		isCap, isPunct := lintCapAndPunct(s)
-		var msg string
-		switch {
-		case isCap && isPunct:
-			msg = "error strings should not be capitalized and should not end with punctuation"
-		case isCap:
-			msg = "error strings should not be capitalized"
-		case isPunct:
-			msg = "error strings should not end with punctuation"
-		default:
+		clean, conf := lintErrorString(s)
+		if clean {
 			return true
 		}
-		// People use proper nouns and exported Go identifiers in error strings,
-		// so decrease the confidence of warnings for capitalization.
-		conf := 0.8
-		if isCap {
-			conf = 0.6
-		}
-		f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), msg)
+
+		f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"),
+			"error strings should not be capitalized or end with punctuation or a newline")
 		return true
 	})
 }
@@ -1432,7 +1440,7 @@
 	}
 	key := f.pkg.typesInfo.Types[x.Args[1]]
 
-	if _, ok := key.Type.(*types.Basic); ok {
+	if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid {
 		f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type))
 	}
 }
@@ -1528,6 +1536,20 @@
 	return ok && lit.Kind == token.INT && lit.Value == "1"
 }
 
+func isCgoExported(f *ast.FuncDecl) bool {
+	if f.Recv != nil || f.Doc == nil {
+		return false
+	}
+
+	cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
+	for _, c := range f.Doc.List {
+		if cgoExport.MatchString(c.Text) {
+			return true
+		}
+	}
+	return false
+}
+
 var basicTypeKinds = map[types.BasicKind]string{
 	types.UntypedBool:    "bool",
 	types.UntypedInt:     "int",
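The isGenerated check added above follows the convention from https://golang.org/s/generatedcode: a line that begins with "// Code generated " and ends with " DO NOT EDIT.". A minimal standalone sketch of the same check, for illustration only and not part of the vendored patch (the sample source below is made up):

    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"
    )

    var (
    	genHdr = []byte("// Code generated ")
    	genFtr = []byte(" DO NOT EDIT.")
    )

    // isGenerated reports whether src contains a line that matches the
    // generated-code marker convention.
    func isGenerated(src []byte) bool {
    	sc := bufio.NewScanner(bytes.NewReader(src))
    	for sc.Scan() {
    		b := sc.Bytes()
    		if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	src := []byte("// Code generated by protoc-gen-go. DO NOT EDIT.\n\npackage example\n")
    	fmt.Println(isGenerated(src)) // prints true; golint now skips such files
    }

Generated files are skipped before parsing, which is also why the empty-input check moved below the loop in LintFiles: a package consisting only of generated files now lints as empty.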
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index 2b30f84..8b84d1b 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -174,11 +174,11 @@
 // This is the format used for the sint64 protocol buffer type.
 func (p *Buffer) EncodeZigzag64(x uint64) error {
 	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+	return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
 }
 
 func sizeZigzag64(x uint64) int {
-	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+	return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
 }
 
 // EncodeZigzag32 writes a zigzag-encoded 32-bit integer
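The EncodeZigzag64 change above only drops a redundant uint64 conversion; the encoding itself is unchanged. As a reminder, zigzag encoding maps a signed value n to (n << 1) ^ (n >> 63) so that values of small magnitude become short varints. A brief illustrative sketch, not part of the patch:

    package main

    import "fmt"

    // zigzag64 maps a signed 64-bit integer onto an unsigned one so that
    // small magnitudes produce small varints: 0->0, -1->1, 1->2, -2->3, ...
    func zigzag64(n int64) uint64 {
    	return uint64(n<<1) ^ uint64(n>>63) // arithmetic shift copies the sign bit
    }

    // unzigzag64 inverts zigzag64.
    func unzigzag64(u uint64) int64 {
    	return int64(u>>1) ^ -int64(u&1)
    }

    func main() {
    	for _, n := range []int64{0, -1, 1, -2, 2, -64} {
    		u := zigzag64(n)
    		fmt.Printf("%4d -> %3d -> %4d\n", n, u, unzigzag64(u))
    	}
    }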
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index ac4ddbc..1c22550 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -73,7 +73,6 @@
 When the .proto file specifies `syntax="proto3"`, there are some differences:
 
   - Non-repeated fields of non-message type are values instead of pointers.
-  - Getters are only generated for message and oneof fields.
   - Enum types do not get an Enum method.
 
 The simplest way to describe this is to see an example.
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 61f83c1..5e14513 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -865,7 +865,7 @@
 		return p.readStruct(fv, terminator)
 	case reflect.Uint32:
 		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
-			fv.SetUint(uint64(x))
+			fv.SetUint(x)
 			return nil
 		}
 	case reflect.Uint64:
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
index 41a2d04..f706871 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
@@ -33,4 +33,5 @@
 # at src/google/protobuf/descriptor.proto
 regenerate:
 	@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
+	cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
 	protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index 63cf2c8..1d92cb2 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -974,6 +974,7 @@
 	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
 	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
 	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
 	// Is this file deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for everything in the file, or it will be completely ignored; in the very
@@ -995,6 +996,10 @@
 	// Sets the php class prefix which is prepended to all php generated classes
 	// from this .proto. Default is empty.
 	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 	proto.XXX_InternalExtensions `json:"-"`
@@ -1020,6 +1025,7 @@
 const Default_FileOptions_CcGenericServices bool = false
 const Default_FileOptions_JavaGenericServices bool = false
 const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
 const Default_FileOptions_Deprecated bool = false
 const Default_FileOptions_CcEnableArenas bool = false
 
@@ -1093,6 +1099,13 @@
 	return Default_FileOptions_PyGenericServices
 }
 
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
 func (m *FileOptions) GetDeprecated() bool {
 	if m != nil && m.Deprecated != nil {
 		return *m.Deprecated
@@ -1135,6 +1148,13 @@
 	return ""
 }
 
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
 func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if m != nil {
 		return m.UninterpretedOption
@@ -1994,159 +2014,161 @@
 func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 2460 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5b, 0x6f, 0xdb, 0xc8,
-	0x15, 0x5e, 0x5d, 0x2d, 0x1d, 0xc9, 0xf2, 0x78, 0xec, 0x4d, 0x18, 0xef, 0x25, 0x8e, 0xf6, 0x12,
-	0x6f, 0xd2, 0xc8, 0x0b, 0xe7, 0xb2, 0x59, 0xa7, 0x48, 0x21, 0x4b, 0x8c, 0x57, 0xa9, 0x2c, 0xa9,
-	0x94, 0xdc, 0x4d, 0xf6, 0x85, 0x18, 0x93, 0x23, 0x99, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, 0xf7,
-	0x29, 0x40, 0x9f, 0x0a, 0xf4, 0x07, 0x14, 0x45, 0xd1, 0x87, 0x7d, 0x59, 0xa0, 0x3f, 0xa0, 0xcf,
-	0xfd, 0x05, 0x05, 0xf6, 0xb9, 0x2f, 0x45, 0x51, 0xa0, 0xfd, 0x07, 0x7d, 0x2d, 0x66, 0x86, 0xa4,
-	0x48, 0x5d, 0x12, 0x77, 0x81, 0xec, 0x3e, 0xd9, 0x73, 0xce, 0x77, 0x0e, 0xcf, 0x9c, 0xf9, 0x66,
-	0xce, 0x99, 0x11, 0x6c, 0x8f, 0x6c, 0x7b, 0x64, 0xd2, 0x5d, 0xc7, 0xb5, 0x7d, 0xfb, 0x64, 0x32,
-	0xdc, 0xd5, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0x8d, 0xcb, 0xf0, 0x9a, 0x40, 0xd4, 0x42,
-	0x44, 0xf5, 0x08, 0xd6, 0x1f, 0x18, 0x26, 0x6d, 0x46, 0xc0, 0x3e, 0xf5, 0xf1, 0x5d, 0xc8, 0x0e,
-	0x0d, 0x93, 0x4a, 0xa9, 0xed, 0xcc, 0x4e, 0x69, 0xef, 0xc3, 0xda, 0x8c, 0x51, 0x2d, 0x69, 0xd1,
-	0x63, 0x62, 0x85, 0x5b, 0x54, 0xff, 0x95, 0x85, 0x8d, 0x05, 0x5a, 0x8c, 0x21, 0x6b, 0x91, 0x31,
-	0xf3, 0x98, 0xda, 0x29, 0x2a, 0xfc, 0x7f, 0x2c, 0xc1, 0x8a, 0x43, 0xb4, 0xa7, 0x64, 0x44, 0xa5,
-	0x34, 0x17, 0x87, 0x43, 0xfc, 0x3e, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa4, 0xcc,
-	0x76, 0x66, 0xa7, 0xa8, 0xc4, 0x24, 0xf8, 0x3a, 0xac, 0x3b, 0x93, 0x13, 0xd3, 0xd0, 0xd4, 0x18,
-	0x0c, 0xb6, 0x33, 0x3b, 0x39, 0x05, 0x09, 0x45, 0x73, 0x0a, 0xbe, 0x0a, 0x6b, 0xcf, 0x29, 0x79,
-	0x1a, 0x87, 0x96, 0x38, 0xb4, 0xc2, 0xc4, 0x31, 0x60, 0x03, 0xca, 0x63, 0xea, 0x79, 0x64, 0x44,
-	0x55, 0xff, 0xcc, 0xa1, 0x52, 0x96, 0xcf, 0x7e, 0x7b, 0x6e, 0xf6, 0xb3, 0x33, 0x2f, 0x05, 0x56,
-	0x83, 0x33, 0x87, 0xe2, 0x3a, 0x14, 0xa9, 0x35, 0x19, 0x0b, 0x0f, 0xb9, 0x25, 0xf9, 0x93, 0xad,
-	0xc9, 0x78, 0xd6, 0x4b, 0x81, 0x99, 0x05, 0x2e, 0x56, 0x3c, 0xea, 0x3e, 0x33, 0x34, 0x2a, 0xe5,
-	0xb9, 0x83, 0xab, 0x73, 0x0e, 0xfa, 0x42, 0x3f, 0xeb, 0x23, 0xb4, 0xc3, 0x0d, 0x28, 0xd2, 0x17,
-	0x3e, 0xb5, 0x3c, 0xc3, 0xb6, 0xa4, 0x15, 0xee, 0xe4, 0xa3, 0x05, 0xab, 0x48, 0x4d, 0x7d, 0xd6,
-	0xc5, 0xd4, 0x0e, 0xdf, 0x81, 0x15, 0xdb, 0xf1, 0x0d, 0xdb, 0xf2, 0xa4, 0xc2, 0x76, 0x6a, 0xa7,
-	0xb4, 0xf7, 0xee, 0x42, 0x22, 0x74, 0x05, 0x46, 0x09, 0xc1, 0xb8, 0x05, 0xc8, 0xb3, 0x27, 0xae,
-	0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x07, 0x97, 0xe7, 0x27, 0xc2,
-	0x81, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0x31, 0xc6, 0x17, 0x20, 0xef, 0x9d,
-	0x59, 0x3e, 0x79, 0x21, 0x95, 0x39, 0x43, 0x82, 0x51, 0xf5, 0xbf, 0x39, 0x58, 0x3b, 0x0f, 0xc5,
-	0xee, 0x41, 0x6e, 0xc8, 0x66, 0x29, 0xa5, 0xff, 0x9f, 0x1c, 0x08, 0x9b, 0x64, 0x12, 0xf3, 0x3f,
-	0x30, 0x89, 0x75, 0x28, 0x59, 0xd4, 0xf3, 0xa9, 0x2e, 0x18, 0x91, 0x39, 0x27, 0xa7, 0x40, 0x18,
-	0xcd, 0x53, 0x2a, 0xfb, 0x83, 0x28, 0xf5, 0x08, 0xd6, 0xa2, 0x90, 0x54, 0x97, 0x58, 0xa3, 0x90,
-	0x9b, 0xbb, 0xaf, 0x8b, 0xa4, 0x26, 0x87, 0x76, 0x0a, 0x33, 0x53, 0x2a, 0x34, 0x31, 0xc6, 0x4d,
-	0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x25, 0x59, 0xea, 0x32, 0xc8, 0x5c,
-	0x96, 0x6c, 0x21, 0xd5, 0x4c, 0xfc, 0xf9, 0x94, 0x6a, 0x2b, 0x4b, 0x98, 0x72, 0x24, 0x36, 0xd9,
-	0x1c, 0xdb, 0x8e, 0xa1, 0xe2, 0x52, 0xc6, 0x7b, 0xaa, 0x07, 0x33, 0x2b, 0xf2, 0x20, 0x6a, 0xaf,
-	0x9d, 0x99, 0x12, 0x98, 0x89, 0x89, 0xad, 0xba, 0xf1, 0x21, 0xfe, 0x00, 0x22, 0x81, 0xca, 0x69,
-	0x05, 0xfc, 0x14, 0x2a, 0x87, 0xc2, 0x0e, 0x19, 0xd3, 0xad, 0xbb, 0x50, 0x49, 0xa6, 0x07, 0x6f,
-	0x42, 0xce, 0xf3, 0x89, 0xeb, 0x73, 0x16, 0xe6, 0x14, 0x31, 0xc0, 0x08, 0x32, 0xd4, 0xd2, 0xf9,
-	0x29, 0x97, 0x53, 0xd8, 0xbf, 0x5b, 0x9f, 0xc1, 0x6a, 0xe2, 0xf3, 0xe7, 0x35, 0xac, 0xfe, 0x3e,
-	0x0f, 0x9b, 0x8b, 0x38, 0xb7, 0x90, 0xfe, 0x17, 0x20, 0x6f, 0x4d, 0xc6, 0x27, 0xd4, 0x95, 0x32,
-	0xdc, 0x43, 0x30, 0xc2, 0x75, 0xc8, 0x99, 0xe4, 0x84, 0x9a, 0x52, 0x76, 0x3b, 0xb5, 0x53, 0xd9,
-	0xbb, 0x7e, 0x2e, 0x56, 0xd7, 0xda, 0xcc, 0x44, 0x11, 0x96, 0xf8, 0x3e, 0x64, 0x83, 0x23, 0x8e,
-	0x79, 0xb8, 0x76, 0x3e, 0x0f, 0x8c, 0x8b, 0x0a, 0xb7, 0xc3, 0xef, 0x40, 0x91, 0xfd, 0x15, 0xb9,
-	0xcd, 0xf3, 0x98, 0x0b, 0x4c, 0xc0, 0xf2, 0x8a, 0xb7, 0xa0, 0xc0, 0x69, 0xa6, 0xd3, 0xb0, 0x34,
-	0x44, 0x63, 0xb6, 0x30, 0x3a, 0x1d, 0x92, 0x89, 0xe9, 0xab, 0xcf, 0x88, 0x39, 0xa1, 0x9c, 0x30,
-	0x45, 0xa5, 0x1c, 0x08, 0x7f, 0xcd, 0x64, 0xf8, 0x32, 0x94, 0x04, 0x2b, 0x0d, 0x4b, 0xa7, 0x2f,
-	0xf8, 0xe9, 0x93, 0x53, 0x04, 0x51, 0x5b, 0x4c, 0xc2, 0x3e, 0xff, 0xc4, 0xb3, 0xad, 0x70, 0x69,
-	0xf9, 0x27, 0x98, 0x80, 0x7f, 0xfe, 0xb3, 0xd9, 0x83, 0xef, 0xbd, 0xc5, 0xd3, 0x9b, 0xe5, 0x62,
-	0xf5, 0x2f, 0x69, 0xc8, 0xf2, 0xfd, 0xb6, 0x06, 0xa5, 0xc1, 0xe3, 0x9e, 0xac, 0x36, 0xbb, 0xc7,
-	0x07, 0x6d, 0x19, 0xa5, 0x70, 0x05, 0x80, 0x0b, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, 0xe3,
-	0x56, 0x67, 0x70, 0xe7, 0x16, 0xca, 0x44, 0x06, 0xc7, 0x42, 0x90, 0x8d, 0x03, 0x6e, 0xee, 0xa1,
-	0x1c, 0x46, 0x50, 0x16, 0x0e, 0x5a, 0x8f, 0xe4, 0xe6, 0x9d, 0x5b, 0x28, 0x9f, 0x94, 0xdc, 0xdc,
-	0x43, 0x2b, 0x78, 0x15, 0x8a, 0x5c, 0x72, 0xd0, 0xed, 0xb6, 0x51, 0x21, 0xf2, 0xd9, 0x1f, 0x28,
-	0xad, 0xce, 0x21, 0x2a, 0x46, 0x3e, 0x0f, 0x95, 0xee, 0x71, 0x0f, 0x41, 0xe4, 0xe1, 0x48, 0xee,
-	0xf7, 0xeb, 0x87, 0x32, 0x2a, 0x45, 0x88, 0x83, 0xc7, 0x03, 0xb9, 0x8f, 0xca, 0x89, 0xb0, 0x6e,
-	0xee, 0xa1, 0xd5, 0xe8, 0x13, 0x72, 0xe7, 0xf8, 0x08, 0x55, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84,
-	0x41, 0xac, 0xcd, 0x88, 0xee, 0xdc, 0x42, 0x68, 0x1a, 0x88, 0xf0, 0xb2, 0x9e, 0x10, 0xdc, 0xb9,
-	0x85, 0x70, 0xb5, 0x01, 0x39, 0xce, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc8, 0x6d, 0xb5, 0xdb,
-	0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0xa6, 0x32, 0x45, 0xfe, 0xd5, 0x71, 0x4b, 0x91, 0x9b,
-	0x28, 0x1d, 0x97, 0xf5, 0xe4, 0xfa, 0x40, 0x6e, 0xa2, 0x4c, 0x55, 0x83, 0xcd, 0x45, 0xe7, 0xcc,
-	0xc2, 0x9d, 0x11, 0x5b, 0xe2, 0xf4, 0x92, 0x25, 0xe6, 0xbe, 0xe6, 0x96, 0xf8, 0xdb, 0x14, 0x6c,
-	0x2c, 0x38, 0x6b, 0x17, 0x7e, 0xe4, 0x17, 0x90, 0x13, 0x14, 0x15, 0xd5, 0xe7, 0x93, 0x85, 0x87,
-	0x36, 0x27, 0xec, 0x5c, 0x05, 0xe2, 0x76, 0xf1, 0x0a, 0x9c, 0x59, 0x52, 0x81, 0x99, 0x8b, 0xb9,
-	0x20, 0x7f, 0x93, 0x02, 0x69, 0x99, 0xef, 0xd7, 0x1c, 0x14, 0xe9, 0xc4, 0x41, 0x71, 0x6f, 0x36,
-	0x80, 0x2b, 0xcb, 0xe7, 0x30, 0x17, 0xc5, 0x77, 0x29, 0xb8, 0xb0, 0xb8, 0x51, 0x59, 0x18, 0xc3,
-	0x7d, 0xc8, 0x8f, 0xa9, 0x7f, 0x6a, 0x87, 0xc5, 0xfa, 0xe3, 0x05, 0x25, 0x80, 0xa9, 0x67, 0x73,
-	0x15, 0x58, 0xc5, 0x6b, 0x48, 0x66, 0x59, 0xb7, 0x21, 0xa2, 0x99, 0x8b, 0xf4, 0xb7, 0x69, 0x78,
-	0x7b, 0xa1, 0xf3, 0x85, 0x81, 0xbe, 0x07, 0x60, 0x58, 0xce, 0xc4, 0x17, 0x05, 0x59, 0x9c, 0x4f,
-	0x45, 0x2e, 0xe1, 0x7b, 0x9f, 0x9d, 0x3d, 0x13, 0x3f, 0xd2, 0x67, 0xb8, 0x1e, 0x84, 0x88, 0x03,
-	0xee, 0x4e, 0x03, 0xcd, 0xf2, 0x40, 0xdf, 0x5f, 0x32, 0xd3, 0xb9, 0x5a, 0xf7, 0x29, 0x20, 0xcd,
-	0x34, 0xa8, 0xe5, 0xab, 0x9e, 0xef, 0x52, 0x32, 0x36, 0xac, 0x11, 0x3f, 0x80, 0x0b, 0xfb, 0xb9,
-	0x21, 0x31, 0x3d, 0xaa, 0xac, 0x09, 0x75, 0x3f, 0xd4, 0x32, 0x0b, 0x5e, 0x65, 0xdc, 0x98, 0x45,
-	0x3e, 0x61, 0x21, 0xd4, 0x91, 0x45, 0xf5, 0xef, 0x2b, 0x50, 0x8a, 0xb5, 0x75, 0xf8, 0x0a, 0x94,
-	0x9f, 0x90, 0x67, 0x44, 0x0d, 0x5b, 0x75, 0x91, 0x89, 0x12, 0x93, 0xf5, 0x82, 0x76, 0xfd, 0x53,
-	0xd8, 0xe4, 0x10, 0x7b, 0xe2, 0x53, 0x57, 0xd5, 0x4c, 0xe2, 0x79, 0x3c, 0x69, 0x05, 0x0e, 0xc5,
-	0x4c, 0xd7, 0x65, 0xaa, 0x46, 0xa8, 0xc1, 0xb7, 0x61, 0x83, 0x5b, 0x8c, 0x27, 0xa6, 0x6f, 0x38,
-	0x26, 0x55, 0xd9, 0xe5, 0xc1, 0xe3, 0x07, 0x71, 0x14, 0xd9, 0x3a, 0x43, 0x1c, 0x05, 0x00, 0x16,
-	0x91, 0x87, 0x9b, 0xf0, 0x1e, 0x37, 0x1b, 0x51, 0x8b, 0xba, 0xc4, 0xa7, 0x2a, 0xfd, 0x7a, 0x42,
-	0x4c, 0x4f, 0x25, 0x96, 0xae, 0x9e, 0x12, 0xef, 0x54, 0xda, 0x64, 0x0e, 0x0e, 0xd2, 0x52, 0x4a,
-	0xb9, 0xc4, 0x80, 0x87, 0x01, 0x4e, 0xe6, 0xb0, 0xba, 0xa5, 0x7f, 0x41, 0xbc, 0x53, 0xbc, 0x0f,
-	0x17, 0xb8, 0x17, 0xcf, 0x77, 0x0d, 0x6b, 0xa4, 0x6a, 0xa7, 0x54, 0x7b, 0xaa, 0x4e, 0xfc, 0xe1,
-	0x5d, 0xe9, 0x9d, 0xf8, 0xf7, 0x79, 0x84, 0x7d, 0x8e, 0x69, 0x30, 0xc8, 0xb1, 0x3f, 0xbc, 0x8b,
-	0xfb, 0x50, 0x66, 0x8b, 0x31, 0x36, 0xbe, 0xa1, 0xea, 0xd0, 0x76, 0x79, 0x65, 0xa9, 0x2c, 0xd8,
-	0xd9, 0xb1, 0x0c, 0xd6, 0xba, 0x81, 0xc1, 0x91, 0xad, 0xd3, 0xfd, 0x5c, 0xbf, 0x27, 0xcb, 0x4d,
-	0xa5, 0x14, 0x7a, 0x79, 0x60, 0xbb, 0x8c, 0x50, 0x23, 0x3b, 0x4a, 0x70, 0x49, 0x10, 0x6a, 0x64,
-	0x87, 0xe9, 0xbd, 0x0d, 0x1b, 0x9a, 0x26, 0xe6, 0x6c, 0x68, 0x6a, 0xd0, 0xe2, 0x7b, 0x12, 0x4a,
-	0x24, 0x4b, 0xd3, 0x0e, 0x05, 0x20, 0xe0, 0xb8, 0x87, 0x3f, 0x87, 0xb7, 0xa7, 0xc9, 0x8a, 0x1b,
-	0xae, 0xcf, 0xcd, 0x72, 0xd6, 0xf4, 0x36, 0x6c, 0x38, 0x67, 0xf3, 0x86, 0x38, 0xf1, 0x45, 0xe7,
-	0x6c, 0xd6, 0xec, 0x23, 0x7e, 0x6d, 0x73, 0xa9, 0x46, 0x7c, 0xaa, 0x4b, 0x17, 0xe3, 0xe8, 0x98,
-	0x02, 0xef, 0x02, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9,
-	0x72, 0x1c, 0x5c, 0xd1, 0x34, 0x99, 0x6b, 0xeb, 0x5c, 0x89, 0xaf, 0xc1, 0xba, 0x7d, 0xf2, 0x44,
-	0x13, 0xcc, 0x52, 0x1d, 0x97, 0x0e, 0x8d, 0x17, 0xd2, 0x87, 0x3c, 0x4d, 0x6b, 0x4c, 0xc1, 0x79,
-	0xd5, 0xe3, 0x62, 0xfc, 0x09, 0x20, 0xcd, 0x3b, 0x25, 0xae, 0xc3, 0x4b, 0xbb, 0xe7, 0x10, 0x8d,
-	0x4a, 0x1f, 0x09, 0xa8, 0x90, 0x77, 0x42, 0x31, 0x63, 0xb6, 0xf7, 0xdc, 0x18, 0xfa, 0xa1, 0xc7,
-	0xab, 0x82, 0xd9, 0x5c, 0x16, 0x78, 0xdb, 0x01, 0xe4, 0x9c, 0x3a, 0xc9, 0x0f, 0xef, 0x70, 0x58,
-	0xc5, 0x39, 0x75, 0xe2, 0xdf, 0x7d, 0x04, 0x9b, 0x13, 0xcb, 0xb0, 0x7c, 0xea, 0x3a, 0x2e, 0x65,
-	0xed, 0xbe, 0xd8, 0xb3, 0xd2, 0xbf, 0x57, 0x96, 0x34, 0xec, 0xc7, 0x71, 0xb4, 0xa0, 0x8a, 0xb2,
-	0x31, 0x99, 0x17, 0x56, 0xf7, 0xa1, 0x1c, 0x67, 0x10, 0x2e, 0x82, 0xe0, 0x10, 0x4a, 0xb1, 0x6a,
-	0xdc, 0xe8, 0x36, 0x59, 0x1d, 0xfd, 0x4a, 0x46, 0x69, 0x56, 0xcf, 0xdb, 0xad, 0x81, 0xac, 0x2a,
-	0xc7, 0x9d, 0x41, 0xeb, 0x48, 0x46, 0x99, 0x6b, 0xc5, 0xc2, 0x7f, 0x56, 0xd0, 0xcb, 0x97, 0x2f,
-	0x5f, 0xa6, 0x1f, 0x66, 0x0b, 0x1f, 0xa3, 0xab, 0xd5, 0xef, 0xd3, 0x50, 0x49, 0x76, 0xd2, 0xf8,
-	0xe7, 0x70, 0x31, 0xbc, 0xf6, 0x7a, 0xd4, 0x57, 0x9f, 0x1b, 0x2e, 0xa7, 0xf6, 0x98, 0x88, 0x5e,
-	0x34, 0x5a, 0x95, 0xcd, 0x00, 0xd5, 0xa7, 0xfe, 0x97, 0x86, 0xcb, 0x88, 0x3b, 0x26, 0x3e, 0x6e,
-	0xc3, 0x65, 0xcb, 0x56, 0x3d, 0x9f, 0x58, 0x3a, 0x71, 0x75, 0x75, 0xfa, 0xe0, 0xa0, 0x12, 0x4d,
-	0xa3, 0x9e, 0x67, 0x8b, 0x92, 0x12, 0x79, 0x79, 0xd7, 0xb2, 0xfb, 0x01, 0x78, 0x7a, 0xd6, 0xd6,
-	0x03, 0xe8, 0x0c, 0x83, 0x32, 0xcb, 0x18, 0xf4, 0x0e, 0x14, 0xc7, 0xc4, 0x51, 0xa9, 0xe5, 0xbb,
-	0x67, 0xbc, 0xff, 0x2b, 0x28, 0x85, 0x31, 0x71, 0x64, 0x36, 0x7e, 0x73, 0x2b, 0x91, 0xcc, 0x66,
-	0x01, 0x15, 0x1f, 0x66, 0x0b, 0x45, 0x04, 0xd5, 0x7f, 0x66, 0xa0, 0x1c, 0xef, 0x07, 0x59, 0x7b,
-	0xad, 0xf1, 0xb3, 0x3f, 0xc5, 0x4f, 0x87, 0x0f, 0x5e, 0xd9, 0x3d, 0xd6, 0x1a, 0xac, 0x28, 0xec,
-	0xe7, 0x45, 0x97, 0xa6, 0x08, 0x4b, 0x56, 0x90, 0xd9, 0x79, 0x40, 0x45, 0xef, 0x5f, 0x50, 0x82,
-	0x11, 0x3e, 0x84, 0xfc, 0x13, 0x8f, 0xfb, 0xce, 0x73, 0xdf, 0x1f, 0xbe, 0xda, 0xf7, 0xc3, 0x3e,
-	0x77, 0x5e, 0x7c, 0xd8, 0x57, 0x3b, 0x5d, 0xe5, 0xa8, 0xde, 0x56, 0x02, 0x73, 0x7c, 0x09, 0xb2,
-	0x26, 0xf9, 0xe6, 0x2c, 0x59, 0x3e, 0xb8, 0xe8, 0xbc, 0x8b, 0x70, 0x09, 0xb2, 0xcf, 0x29, 0x79,
-	0x9a, 0x3c, 0xb4, 0xb9, 0xe8, 0x0d, 0x6e, 0x86, 0x5d, 0xc8, 0xf1, 0x7c, 0x61, 0x80, 0x20, 0x63,
-	0xe8, 0x2d, 0x5c, 0x80, 0x6c, 0xa3, 0xab, 0xb0, 0x0d, 0x81, 0xa0, 0x2c, 0xa4, 0x6a, 0xaf, 0x25,
-	0x37, 0x64, 0x94, 0xae, 0xde, 0x86, 0xbc, 0x48, 0x02, 0xdb, 0x2c, 0x51, 0x1a, 0xd0, 0x5b, 0xc1,
-	0x30, 0xf0, 0x91, 0x0a, 0xb5, 0xc7, 0x47, 0x07, 0xb2, 0x82, 0xd2, 0xc9, 0xa5, 0xce, 0xa2, 0x5c,
-	0xd5, 0x83, 0x72, 0xbc, 0x21, 0xfc, 0x51, 0x58, 0x56, 0xfd, 0x6b, 0x0a, 0x4a, 0xb1, 0x06, 0x8f,
-	0xb5, 0x16, 0xc4, 0x34, 0xed, 0xe7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0xd4, 0x00, 0x2e, 0xaa, 0x33,
-	0xc9, 0x79, 0x97, 0xee, 0x47, 0xda, 0x22, 0x39, 0x94, 0xaf, 0xfe, 0x29, 0x05, 0x68, 0xb6, 0x45,
-	0x9c, 0x09, 0x33, 0xf5, 0x53, 0x86, 0x59, 0xfd, 0x63, 0x0a, 0x2a, 0xc9, 0xbe, 0x70, 0x26, 0xbc,
-	0x2b, 0x3f, 0x69, 0x78, 0xff, 0x48, 0xc3, 0x6a, 0xa2, 0x1b, 0x3c, 0x6f, 0x74, 0x5f, 0xc3, 0xba,
-	0xa1, 0xd3, 0xb1, 0x63, 0xfb, 0xd4, 0xd2, 0xce, 0x54, 0x93, 0x3e, 0xa3, 0xa6, 0x54, 0xe5, 0x87,
-	0xc6, 0xee, 0xab, 0xfb, 0xcd, 0x5a, 0x6b, 0x6a, 0xd7, 0x66, 0x66, 0xfb, 0x1b, 0xad, 0xa6, 0x7c,
-	0xd4, 0xeb, 0x0e, 0xe4, 0x4e, 0xe3, 0xb1, 0x7a, 0xdc, 0xf9, 0x65, 0xa7, 0xfb, 0x65, 0x47, 0x41,
-	0xc6, 0x0c, 0xec, 0x0d, 0x6e, 0xfb, 0x1e, 0xa0, 0xd9, 0xa0, 0xf0, 0x45, 0x58, 0x14, 0x16, 0x7a,
-	0x0b, 0x6f, 0xc0, 0x5a, 0xa7, 0xab, 0xf6, 0x5b, 0x4d, 0x59, 0x95, 0x1f, 0x3c, 0x90, 0x1b, 0x83,
-	0xbe, 0xb8, 0x80, 0x47, 0xe8, 0x41, 0x62, 0x83, 0x57, 0xff, 0x90, 0x81, 0x8d, 0x05, 0x91, 0xe0,
-	0x7a, 0xd0, 0xfb, 0x8b, 0xeb, 0xc8, 0x8d, 0xf3, 0x44, 0x5f, 0x63, 0xdd, 0x45, 0x8f, 0xb8, 0x7e,
-	0x70, 0x55, 0xf8, 0x04, 0x58, 0x96, 0x2c, 0xdf, 0x18, 0x1a, 0xd4, 0x0d, 0xde, 0x2b, 0xc4, 0x85,
-	0x60, 0x6d, 0x2a, 0x17, 0x4f, 0x16, 0x3f, 0x03, 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0xcf, 0xa8, 0x6a,
-	0x58, 0xe1, 0xe3, 0x06, 0xbb, 0x20, 0x64, 0x15, 0x14, 0x6a, 0x5a, 0x96, 0x1f, 0xa1, 0x2d, 0x3a,
-	0x22, 0x33, 0x68, 0x76, 0x98, 0x67, 0x14, 0x14, 0x6a, 0x22, 0xf4, 0x15, 0x28, 0xeb, 0xf6, 0x84,
-	0xb5, 0x5b, 0x02, 0xc7, 0x6a, 0x47, 0x4a, 0x29, 0x09, 0x59, 0x04, 0x09, 0xfa, 0xe1, 0xe9, 0xab,
-	0x4a, 0x59, 0x29, 0x09, 0x99, 0x80, 0x5c, 0x85, 0x35, 0x32, 0x1a, 0xb9, 0xcc, 0x79, 0xe8, 0x48,
-	0x74, 0xf8, 0x95, 0x48, 0xcc, 0x81, 0x5b, 0x0f, 0xa1, 0x10, 0xe6, 0x81, 0x95, 0x6a, 0x96, 0x09,
-	0xd5, 0x11, 0x6f, 0x5b, 0xe9, 0x9d, 0xa2, 0x52, 0xb0, 0x42, 0xe5, 0x15, 0x28, 0x1b, 0x9e, 0x3a,
-	0x7d, 0x64, 0x4d, 0x6f, 0xa7, 0x77, 0x0a, 0x4a, 0xc9, 0xf0, 0xa2, 0x57, 0xb5, 0xea, 0x77, 0x69,
-	0xa8, 0x24, 0x1f, 0x89, 0x71, 0x13, 0x0a, 0xa6, 0xad, 0x11, 0x4e, 0x2d, 0xf1, 0x0b, 0xc5, 0xce,
-	0x6b, 0xde, 0x95, 0x6b, 0xed, 0x00, 0xaf, 0x44, 0x96, 0x5b, 0x7f, 0x4b, 0x41, 0x21, 0x14, 0xe3,
-	0x0b, 0x90, 0x75, 0x88, 0x7f, 0xca, 0xdd, 0xe5, 0x0e, 0xd2, 0x28, 0xa5, 0xf0, 0x31, 0x93, 0x7b,
-	0x0e, 0xb1, 0x38, 0x05, 0x02, 0x39, 0x1b, 0xb3, 0x75, 0x35, 0x29, 0xd1, 0xf9, 0xf5, 0xc1, 0x1e,
-	0x8f, 0xa9, 0xe5, 0x7b, 0xe1, 0xba, 0x06, 0xf2, 0x46, 0x20, 0xc6, 0xd7, 0x61, 0xdd, 0x77, 0x89,
-	0x61, 0x26, 0xb0, 0x59, 0x8e, 0x45, 0xa1, 0x22, 0x02, 0xef, 0xc3, 0xa5, 0xd0, 0xaf, 0x4e, 0x7d,
-	0xa2, 0x9d, 0x52, 0x7d, 0x6a, 0x94, 0xe7, 0x2f, 0x90, 0x17, 0x03, 0x40, 0x33, 0xd0, 0x87, 0xb6,
-	0xd5, 0xef, 0x53, 0xb0, 0x1e, 0x5e, 0x78, 0xf4, 0x28, 0x59, 0x47, 0x00, 0xc4, 0xb2, 0x6c, 0x3f,
-	0x9e, 0xae, 0x79, 0x2a, 0xcf, 0xd9, 0xd5, 0xea, 0x91, 0x91, 0x12, 0x73, 0xb0, 0x35, 0x06, 0x98,
-	0x6a, 0x96, 0xa6, 0xed, 0x32, 0x94, 0x82, 0x5f, 0x00, 0xf8, 0xcf, 0x48, 0xe2, 0x8a, 0x0c, 0x42,
-	0xc4, 0x6e, 0x46, 0x78, 0x13, 0x72, 0x27, 0x74, 0x64, 0x58, 0xc1, 0xbb, 0xa4, 0x18, 0x84, 0xaf,
-	0x9d, 0xd9, 0xe8, 0xb5, 0xf3, 0xe0, 0x77, 0x29, 0xd8, 0xd0, 0xec, 0xf1, 0x6c, 0xbc, 0x07, 0x68,
-	0xe6, 0x9e, 0xee, 0x7d, 0x91, 0xfa, 0xea, 0xfe, 0xc8, 0xf0, 0x4f, 0x27, 0x27, 0x35, 0xcd, 0x1e,
-	0xef, 0x8e, 0x6c, 0x93, 0x58, 0xa3, 0xe9, 0xef, 0x60, 0xfc, 0x1f, 0xed, 0xc6, 0x88, 0x5a, 0x37,
-	0x46, 0x76, 0xec, 0x57, 0xb1, 0x7b, 0xd3, 0x7f, 0xbf, 0x4d, 0x67, 0x0e, 0x7b, 0x07, 0x7f, 0x4e,
-	0x6f, 0x1d, 0x8a, 0x6f, 0xf5, 0xc2, 0xdc, 0x28, 0x74, 0x68, 0x52, 0x8d, 0xcd, 0xf7, 0x7f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0x8e, 0x54, 0xe7, 0xef, 0x60, 0x1b, 0x00, 0x00,
+	// 2490 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+	0x15, 0x8e, 0x7e, 0x57, 0x3a, 0xd2, 0x6a, 0x67, 0x67, 0x37, 0x36, 0xbd, 0xf9, 0xf1, 0x5a, 0xf9,
+	0xf1, 0x3a, 0x69, 0xb4, 0xc1, 0xc6, 0x76, 0x9c, 0x4d, 0xe1, 0x42, 0x2b, 0xd1, 0x1b, 0xb9, 0x5a,
+	0x49, 0xa5, 0xb4, 0x8d, 0x9d, 0x1b, 0x62, 0x96, 0x1c, 0x49, 0xb4, 0x29, 0x92, 0x21, 0x29, 0xdb,
+	0x9b, 0x2b, 0x03, 0xbd, 0x2a, 0xd0, 0x07, 0x28, 0x8a, 0xa2, 0x17, 0xb9, 0x09, 0xd0, 0x07, 0x28,
+	0xd0, 0xbb, 0x3e, 0x41, 0x81, 0xbc, 0x41, 0x51, 0x14, 0x68, 0xdf, 0xa0, 0xb7, 0xc5, 0xcc, 0x90,
+	0x14, 0xa9, 0x1f, 0x7b, 0x1b, 0xc0, 0xc9, 0x95, 0x34, 0xdf, 0xf9, 0xce, 0x99, 0x33, 0x67, 0xce,
+	0xcc, 0x9c, 0x19, 0xc2, 0xee, 0xc8, 0xb6, 0x47, 0x26, 0xdd, 0x77, 0x5c, 0xdb, 0xb7, 0xcf, 0xa6,
+	0xc3, 0x7d, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0xd6, 0x38, 0x86, 0x37, 0x04, 0xa3, 0x16,
+	0x32, 0xaa, 0x27, 0xb0, 0x79, 0xcf, 0x30, 0x69, 0x33, 0x22, 0xf6, 0xa9, 0x8f, 0xef, 0x40, 0x76,
+	0x68, 0x98, 0x54, 0x4a, 0xed, 0x66, 0xf6, 0x4a, 0x07, 0xef, 0xd6, 0xe6, 0x94, 0x6a, 0x49, 0x8d,
+	0x1e, 0x83, 0x15, 0xae, 0x51, 0xfd, 0x57, 0x16, 0xb6, 0x96, 0x48, 0x31, 0x86, 0xac, 0x45, 0x26,
+	0xcc, 0x62, 0x6a, 0xaf, 0xa8, 0xf0, 0xff, 0x58, 0x82, 0x35, 0x87, 0x68, 0x8f, 0xc9, 0x88, 0x4a,
+	0x69, 0x0e, 0x87, 0x4d, 0xfc, 0x36, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa5, 0xcc,
+	0x6e, 0x66, 0xaf, 0xa8, 0xc4, 0x10, 0xfc, 0x21, 0x6c, 0x3a, 0xd3, 0x33, 0xd3, 0xd0, 0xd4, 0x18,
+	0x0d, 0x76, 0x33, 0x7b, 0x39, 0x05, 0x09, 0x41, 0x73, 0x46, 0xbe, 0x0e, 0x1b, 0x4f, 0x29, 0x79,
+	0x1c, 0xa7, 0x96, 0x38, 0xb5, 0xc2, 0xe0, 0x18, 0xb1, 0x01, 0xe5, 0x09, 0xf5, 0x3c, 0x32, 0xa2,
+	0xaa, 0x7f, 0xee, 0x50, 0x29, 0xcb, 0x47, 0xbf, 0xbb, 0x30, 0xfa, 0xf9, 0x91, 0x97, 0x02, 0xad,
+	0xc1, 0xb9, 0x43, 0x71, 0x1d, 0x8a, 0xd4, 0x9a, 0x4e, 0x84, 0x85, 0xdc, 0x8a, 0xf8, 0xc9, 0xd6,
+	0x74, 0x32, 0x6f, 0xa5, 0xc0, 0xd4, 0x02, 0x13, 0x6b, 0x1e, 0x75, 0x9f, 0x18, 0x1a, 0x95, 0xf2,
+	0xdc, 0xc0, 0xf5, 0x05, 0x03, 0x7d, 0x21, 0x9f, 0xb7, 0x11, 0xea, 0xe1, 0x06, 0x14, 0xe9, 0x33,
+	0x9f, 0x5a, 0x9e, 0x61, 0x5b, 0xd2, 0x1a, 0x37, 0xf2, 0xde, 0x92, 0x59, 0xa4, 0xa6, 0x3e, 0x6f,
+	0x62, 0xa6, 0x87, 0x6f, 0xc3, 0x9a, 0xed, 0xf8, 0x86, 0x6d, 0x79, 0x52, 0x61, 0x37, 0xb5, 0x57,
+	0x3a, 0x78, 0x73, 0x69, 0x22, 0x74, 0x05, 0x47, 0x09, 0xc9, 0xb8, 0x05, 0xc8, 0xb3, 0xa7, 0xae,
+	0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x03, 0x57, 0x17, 0x07, 0xc2,
+	0x89, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0xd1, 0xc6, 0x97, 0x20, 0xef, 0x9d,
+	0x5b, 0x3e, 0x79, 0x26, 0x95, 0x79, 0x86, 0x04, 0xad, 0xea, 0x7f, 0x73, 0xb0, 0x71, 0x91, 0x14,
+	0xfb, 0x1c, 0x72, 0x43, 0x36, 0x4a, 0x29, 0xfd, 0xff, 0xc4, 0x40, 0xe8, 0x24, 0x83, 0x98, 0xff,
+	0x81, 0x41, 0xac, 0x43, 0xc9, 0xa2, 0x9e, 0x4f, 0x75, 0x91, 0x11, 0x99, 0x0b, 0xe6, 0x14, 0x08,
+	0xa5, 0xc5, 0x94, 0xca, 0xfe, 0xa0, 0x94, 0x7a, 0x00, 0x1b, 0x91, 0x4b, 0xaa, 0x4b, 0xac, 0x51,
+	0x98, 0x9b, 0xfb, 0x2f, 0xf3, 0xa4, 0x26, 0x87, 0x7a, 0x0a, 0x53, 0x53, 0x2a, 0x34, 0xd1, 0xc6,
+	0x4d, 0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x15, 0x51, 0xea, 0x32, 0xca,
+	0x42, 0x94, 0x6c, 0x81, 0x6a, 0x26, 0xfe, 0x6c, 0x96, 0x6a, 0x6b, 0x2b, 0x32, 0xe5, 0x44, 0x2c,
+	0xb2, 0x85, 0x6c, 0x3b, 0x85, 0x8a, 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x18, 0x59, 0x91, 0x3b, 0x51,
+	0x7b, 0xe9, 0xc8, 0x94, 0x40, 0x4d, 0x0c, 0x6c, 0xdd, 0x8d, 0x37, 0xf1, 0x3b, 0x10, 0x01, 0x2a,
+	0x4f, 0x2b, 0xe0, 0xbb, 0x50, 0x39, 0x04, 0x3b, 0x64, 0x42, 0x77, 0xee, 0x40, 0x25, 0x19, 0x1e,
+	0xbc, 0x0d, 0x39, 0xcf, 0x27, 0xae, 0xcf, 0xb3, 0x30, 0xa7, 0x88, 0x06, 0x46, 0x90, 0xa1, 0x96,
+	0xce, 0x77, 0xb9, 0x9c, 0xc2, 0xfe, 0xee, 0x7c, 0x0a, 0xeb, 0x89, 0xee, 0x2f, 0xaa, 0x58, 0xfd,
+	0x7d, 0x1e, 0xb6, 0x97, 0xe5, 0xdc, 0xd2, 0xf4, 0xbf, 0x04, 0x79, 0x6b, 0x3a, 0x39, 0xa3, 0xae,
+	0x94, 0xe1, 0x16, 0x82, 0x16, 0xae, 0x43, 0xce, 0x24, 0x67, 0xd4, 0x94, 0xb2, 0xbb, 0xa9, 0xbd,
+	0xca, 0xc1, 0x87, 0x17, 0xca, 0xea, 0x5a, 0x9b, 0xa9, 0x28, 0x42, 0x13, 0xdf, 0x85, 0x6c, 0xb0,
+	0xc5, 0x31, 0x0b, 0x1f, 0x5c, 0xcc, 0x02, 0xcb, 0x45, 0x85, 0xeb, 0xe1, 0x37, 0xa0, 0xc8, 0x7e,
+	0x45, 0x6c, 0xf3, 0xdc, 0xe7, 0x02, 0x03, 0x58, 0x5c, 0xf1, 0x0e, 0x14, 0x78, 0x9a, 0xe9, 0x34,
+	0x3c, 0x1a, 0xa2, 0x36, 0x9b, 0x18, 0x9d, 0x0e, 0xc9, 0xd4, 0xf4, 0xd5, 0x27, 0xc4, 0x9c, 0x52,
+	0x9e, 0x30, 0x45, 0xa5, 0x1c, 0x80, 0xbf, 0x66, 0x18, 0xbe, 0x0a, 0x25, 0x91, 0x95, 0x86, 0xa5,
+	0xd3, 0x67, 0x7c, 0xf7, 0xc9, 0x29, 0x22, 0x51, 0x5b, 0x0c, 0x61, 0xdd, 0x3f, 0xf2, 0x6c, 0x2b,
+	0x9c, 0x5a, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x9f, 0xce, 0x6f, 0x7c, 0x6f, 0x2d, 0x1f, 0xde, 0x7c,
+	0x2e, 0x56, 0xff, 0x92, 0x86, 0x2c, 0x5f, 0x6f, 0x1b, 0x50, 0x1a, 0x3c, 0xec, 0xc9, 0x6a, 0xb3,
+	0x7b, 0x7a, 0xd4, 0x96, 0x51, 0x0a, 0x57, 0x00, 0x38, 0x70, 0xaf, 0xdd, 0xad, 0x0f, 0x50, 0x3a,
+	0x6a, 0xb7, 0x3a, 0x83, 0xdb, 0x37, 0x51, 0x26, 0x52, 0x38, 0x15, 0x40, 0x36, 0x4e, 0xf8, 0xe4,
+	0x00, 0xe5, 0x30, 0x82, 0xb2, 0x30, 0xd0, 0x7a, 0x20, 0x37, 0x6f, 0xdf, 0x44, 0xf9, 0x24, 0xf2,
+	0xc9, 0x01, 0x5a, 0xc3, 0xeb, 0x50, 0xe4, 0xc8, 0x51, 0xb7, 0xdb, 0x46, 0x85, 0xc8, 0x66, 0x7f,
+	0xa0, 0xb4, 0x3a, 0xc7, 0xa8, 0x18, 0xd9, 0x3c, 0x56, 0xba, 0xa7, 0x3d, 0x04, 0x91, 0x85, 0x13,
+	0xb9, 0xdf, 0xaf, 0x1f, 0xcb, 0xa8, 0x14, 0x31, 0x8e, 0x1e, 0x0e, 0xe4, 0x3e, 0x2a, 0x27, 0xdc,
+	0xfa, 0xe4, 0x00, 0xad, 0x47, 0x5d, 0xc8, 0x9d, 0xd3, 0x13, 0x54, 0xc1, 0x9b, 0xb0, 0x2e, 0xba,
+	0x08, 0x9d, 0xd8, 0x98, 0x83, 0x6e, 0xdf, 0x44, 0x68, 0xe6, 0x88, 0xb0, 0xb2, 0x99, 0x00, 0x6e,
+	0xdf, 0x44, 0xb8, 0xda, 0x80, 0x1c, 0xcf, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc9, 0x6d, 0xb5,
+	0xdb, 0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0x66, 0x98, 0x22, 0xff, 0xea, 0xb4, 0xa5, 0xc8,
+	0x4d, 0x94, 0x8e, 0x63, 0x3d, 0xb9, 0x3e, 0x90, 0x9b, 0x28, 0x53, 0xd5, 0x60, 0x7b, 0xd9, 0x3e,
+	0xb3, 0x74, 0x65, 0xc4, 0xa6, 0x38, 0xbd, 0x62, 0x8a, 0xb9, 0xad, 0x85, 0x29, 0xfe, 0x36, 0x05,
+	0x5b, 0x4b, 0xf6, 0xda, 0xa5, 0x9d, 0xfc, 0x02, 0x72, 0x22, 0x45, 0xc5, 0xe9, 0x73, 0x63, 0xe9,
+	0xa6, 0xcd, 0x13, 0x76, 0xe1, 0x04, 0xe2, 0x7a, 0xf1, 0x13, 0x38, 0xb3, 0xe2, 0x04, 0x66, 0x26,
+	0x16, 0x9c, 0xfc, 0x4d, 0x0a, 0xa4, 0x55, 0xb6, 0x5f, 0xb2, 0x51, 0xa4, 0x13, 0x1b, 0xc5, 0xe7,
+	0xf3, 0x0e, 0x5c, 0x5b, 0x3d, 0x86, 0x05, 0x2f, 0xbe, 0x4b, 0xc1, 0xa5, 0xe5, 0x85, 0xca, 0x52,
+	0x1f, 0xee, 0x42, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x1e, 0xd6, 0xef, 0x2f, 0x39, 0x02, 0x98, 0x78,
+	0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xc9, 0xac, 0xaa, 0x36, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x36,
+	0x0d, 0xaf, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xb7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xe2, 0x40, 0x16,
+	0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80,
+	0x38, 0xe1, 0xce, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xed, 0x15, 0x23, 0x5d, 0x38, 0xeb, 0x3e, 0x06,
+	0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1,
+	0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0x4f, 0x19, 0x37,
+	0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0x6b, 0x01, 0x4a, 0xb1, 0xb2, 0x0e, 0x5f,
+	0x83, 0xf2, 0x23, 0xf2, 0x84, 0xa8, 0x61, 0xa9, 0x2e, 0x22, 0x51, 0x62, 0x58, 0x2f, 0x28, 0xd7,
+	0x3f, 0x86, 0x6d, 0x4e, 0xb1, 0xa7, 0x3e, 0x75, 0x55, 0xcd, 0x24, 0x9e, 0xc7, 0x83, 0x56, 0xe0,
+	0x54, 0xcc, 0x64, 0x5d, 0x26, 0x6a, 0x84, 0x12, 0x7c, 0x0b, 0xb6, 0xb8, 0xc6, 0x64, 0x6a, 0xfa,
+	0x86, 0x63, 0x52, 0x95, 0x5d, 0x1e, 0x3c, 0xbe, 0x11, 0x47, 0x9e, 0x6d, 0x32, 0xc6, 0x49, 0x40,
+	0x60, 0x1e, 0x79, 0xb8, 0x09, 0x6f, 0x71, 0xb5, 0x11, 0xb5, 0xa8, 0x4b, 0x7c, 0xaa, 0xd2, 0xaf,
+	0xa7, 0xc4, 0xf4, 0x54, 0x62, 0xe9, 0xea, 0x98, 0x78, 0x63, 0x69, 0x9b, 0x19, 0x38, 0x4a, 0x4b,
+	0x29, 0xe5, 0x0a, 0x23, 0x1e, 0x07, 0x3c, 0x99, 0xd3, 0xea, 0x96, 0xfe, 0x05, 0xf1, 0xc6, 0xf8,
+	0x10, 0x2e, 0x71, 0x2b, 0x9e, 0xef, 0x1a, 0xd6, 0x48, 0xd5, 0xc6, 0x54, 0x7b, 0xac, 0x4e, 0xfd,
+	0xe1, 0x1d, 0xe9, 0x8d, 0x78, 0xff, 0xdc, 0xc3, 0x3e, 0xe7, 0x34, 0x18, 0xe5, 0xd4, 0x1f, 0xde,
+	0xc1, 0x7d, 0x28, 0xb3, 0xc9, 0x98, 0x18, 0xdf, 0x50, 0x75, 0x68, 0xbb, 0xfc, 0x64, 0xa9, 0x2c,
+	0x59, 0xd9, 0xb1, 0x08, 0xd6, 0xba, 0x81, 0xc2, 0x89, 0xad, 0xd3, 0xc3, 0x5c, 0xbf, 0x27, 0xcb,
+	0x4d, 0xa5, 0x14, 0x5a, 0xb9, 0x67, 0xbb, 0x2c, 0xa1, 0x46, 0x76, 0x14, 0xe0, 0x92, 0x48, 0xa8,
+	0x91, 0x1d, 0x86, 0xf7, 0x16, 0x6c, 0x69, 0x9a, 0x18, 0xb3, 0xa1, 0xa9, 0x41, 0x89, 0xef, 0x49,
+	0x28, 0x11, 0x2c, 0x4d, 0x3b, 0x16, 0x84, 0x20, 0xc7, 0x3d, 0xfc, 0x19, 0xbc, 0x3e, 0x0b, 0x56,
+	0x5c, 0x71, 0x73, 0x61, 0x94, 0xf3, 0xaa, 0xb7, 0x60, 0xcb, 0x39, 0x5f, 0x54, 0xc4, 0x89, 0x1e,
+	0x9d, 0xf3, 0x79, 0xb5, 0x4f, 0x61, 0xdb, 0x19, 0x3b, 0x8b, 0x7a, 0x5b, 0x71, 0x3d, 0xec, 0x8c,
+	0x9d, 0x79, 0xc5, 0xf7, 0xf8, 0x7d, 0xcf, 0xa5, 0x1a, 0xf1, 0xa9, 0x2e, 0x5d, 0x8e, 0xd3, 0x63,
+	0x02, 0xbc, 0x0f, 0x48, 0xd3, 0x54, 0x6a, 0x91, 0x33, 0x93, 0xaa, 0xc4, 0xa5, 0x16, 0xf1, 0xa4,
+	0xab, 0x71, 0x72, 0x45, 0xd3, 0x64, 0x2e, 0xad, 0x73, 0x21, 0xfe, 0x00, 0x36, 0xed, 0xb3, 0x47,
+	0x9a, 0x48, 0x49, 0xd5, 0x71, 0xe9, 0xd0, 0x78, 0x26, 0xbd, 0xcb, 0xe3, 0xbb, 0xc1, 0x04, 0x3c,
+	0x21, 0x7b, 0x1c, 0xc6, 0x37, 0x00, 0x69, 0xde, 0x98, 0xb8, 0x0e, 0xaf, 0x09, 0x3c, 0x87, 0x68,
+	0x54, 0x7a, 0x4f, 0x50, 0x05, 0xde, 0x09, 0x61, 0xb6, 0x24, 0xbc, 0xa7, 0xc6, 0xd0, 0x0f, 0x2d,
+	0x5e, 0x17, 0x4b, 0x82, 0x63, 0x81, 0xb5, 0x3d, 0x40, 0x2c, 0x14, 0x89, 0x8e, 0xf7, 0x38, 0xad,
+	0xe2, 0x8c, 0x9d, 0x78, 0xbf, 0xef, 0xc0, 0x3a, 0x63, 0xce, 0x3a, 0xbd, 0x21, 0xea, 0x19, 0x67,
+	0x1c, 0xeb, 0xf1, 0x01, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x5d, 0x26, 0xc4,
+	0x8e, 0x20, 0xfd, 0x7b, 0x6d, 0xc5, 0x75, 0xe0, 0x34, 0xce, 0x16, 0x89, 0xa8, 0x6c, 0x4d, 0x17,
+	0xc1, 0xea, 0x21, 0x94, 0xe3, 0xf9, 0x89, 0x8b, 0x20, 0x32, 0x14, 0xa5, 0xd8, 0x59, 0xdf, 0xe8,
+	0x36, 0xd9, 0x29, 0xfd, 0x95, 0x8c, 0xd2, 0xac, 0x5a, 0x68, 0xb7, 0x06, 0xb2, 0xaa, 0x9c, 0x76,
+	0x06, 0xad, 0x13, 0x19, 0x65, 0x3e, 0x28, 0x16, 0xfe, 0xb3, 0x86, 0x9e, 0x3f, 0x7f, 0xfe, 0x3c,
+	0x7d, 0x3f, 0x5b, 0x78, 0x1f, 0x5d, 0xaf, 0x7e, 0x9f, 0x86, 0x4a, 0xb2, 0x4e, 0xc7, 0x3f, 0x87,
+	0xcb, 0xe1, 0xa5, 0xda, 0xa3, 0xbe, 0xfa, 0xd4, 0x70, 0xf9, 0xc2, 0x99, 0x10, 0x51, 0xe9, 0x46,
+	0x53, 0xb7, 0x1d, 0xb0, 0xfa, 0xd4, 0xff, 0xd2, 0x70, 0xd9, 0xb2, 0x98, 0x10, 0x1f, 0xb7, 0xe1,
+	0xaa, 0x65, 0xab, 0x9e, 0x4f, 0x2c, 0x9d, 0xb8, 0xba, 0x3a, 0x7b, 0xce, 0x50, 0x89, 0xa6, 0x51,
+	0xcf, 0xb3, 0xc5, 0x81, 0x15, 0x59, 0x79, 0xd3, 0xb2, 0xfb, 0x01, 0x79, 0xb6, 0x93, 0xd7, 0x03,
+	0xea, 0x5c, 0x9a, 0x65, 0x56, 0xa5, 0xd9, 0x1b, 0x50, 0x9c, 0x10, 0x47, 0xa5, 0x96, 0xef, 0x9e,
+	0xf3, 0xea, 0xb2, 0xa0, 0x14, 0x26, 0xc4, 0x91, 0x59, 0xfb, 0xd5, 0xcd, 0x44, 0x32, 0x9a, 0x05,
+	0x54, 0xbc, 0x9f, 0x2d, 0x14, 0x11, 0x54, 0xff, 0x99, 0x81, 0x72, 0xbc, 0xda, 0x64, 0xc5, 0xbb,
+	0xc6, 0x4f, 0x96, 0x14, 0xdf, 0x7b, 0xde, 0x79, 0x61, 0x6d, 0x5a, 0x6b, 0xb0, 0x23, 0xe7, 0x30,
+	0x2f, 0x6a, 0x40, 0x45, 0x68, 0xb2, 0xe3, 0x9e, 0xed, 0x36, 0x54, 0xdc, 0x2c, 0x0a, 0x4a, 0xd0,
+	0xc2, 0xc7, 0x90, 0x7f, 0xe4, 0x71, 0xdb, 0x79, 0x6e, 0xfb, 0xdd, 0x17, 0xdb, 0xbe, 0xdf, 0xe7,
+	0xc6, 0x8b, 0xf7, 0xfb, 0x6a, 0xa7, 0xab, 0x9c, 0xd4, 0xdb, 0x4a, 0xa0, 0x8e, 0xaf, 0x40, 0xd6,
+	0x24, 0xdf, 0x9c, 0x27, 0x0f, 0x27, 0x0e, 0x5d, 0x74, 0x12, 0xae, 0x40, 0xf6, 0x29, 0x25, 0x8f,
+	0x93, 0x47, 0x02, 0x87, 0x5e, 0xe1, 0x62, 0xd8, 0x87, 0x1c, 0x8f, 0x17, 0x06, 0x08, 0x22, 0x86,
+	0x5e, 0xc3, 0x05, 0xc8, 0x36, 0xba, 0x0a, 0x5b, 0x10, 0x08, 0xca, 0x02, 0x55, 0x7b, 0x2d, 0xb9,
+	0x21, 0xa3, 0x74, 0xf5, 0x16, 0xe4, 0x45, 0x10, 0xd8, 0x62, 0x89, 0xc2, 0x80, 0x5e, 0x0b, 0x9a,
+	0x81, 0x8d, 0x54, 0x28, 0x3d, 0x3d, 0x39, 0x92, 0x15, 0x94, 0x4e, 0x4e, 0x75, 0x16, 0xe5, 0xaa,
+	0x1e, 0x94, 0xe3, 0xe5, 0xe6, 0x8f, 0x92, 0x65, 0xd5, 0xbf, 0xa5, 0xa0, 0x14, 0x2b, 0x1f, 0x59,
+	0xe1, 0x42, 0x4c, 0xd3, 0x7e, 0xaa, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43,
+	0x2e, 0x3a, 0x75, 0x3f, 0xd2, 0x12, 0xc9, 0xa1, 0x7c, 0xf5, 0x4f, 0x29, 0x40, 0xf3, 0x05, 0xe8,
+	0x9c, 0x9b, 0xa9, 0x9f, 0xd2, 0xcd, 0xea, 0x1f, 0x53, 0x50, 0x49, 0x56, 0x9d, 0x73, 0xee, 0x5d,
+	0xfb, 0x49, 0xdd, 0xfb, 0x47, 0x1a, 0xd6, 0x13, 0xb5, 0xe6, 0x45, 0xbd, 0xfb, 0x1a, 0x36, 0x0d,
+	0x9d, 0x4e, 0x1c, 0xdb, 0xa7, 0x96, 0x76, 0xae, 0x9a, 0xf4, 0x09, 0x35, 0xa5, 0x2a, 0xdf, 0x34,
+	0xf6, 0x5f, 0x5c, 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0x93,
+	0x5e, 0x77, 0x20, 0x77, 0x1a, 0x0f, 0xd5, 0xd3, 0xce, 0x2f, 0x3b, 0xdd, 0x2f, 0x3b, 0x0a, 0x32,
+	0xe6, 0x68, 0xaf, 0x70, 0xd9, 0xf7, 0x00, 0xcd, 0x3b, 0x85, 0x2f, 0xc3, 0x32, 0xb7, 0xd0, 0x6b,
+	0x78, 0x0b, 0x36, 0x3a, 0x5d, 0xb5, 0xdf, 0x6a, 0xca, 0xaa, 0x7c, 0xef, 0x9e, 0xdc, 0x18, 0xf4,
+	0xc5, 0xf5, 0x3e, 0x62, 0x0f, 0x12, 0x0b, 0xbc, 0xfa, 0x87, 0x0c, 0x6c, 0x2d, 0xf1, 0x04, 0xd7,
+	0x83, 0x9b, 0x85, 0xb8, 0xec, 0x7c, 0x74, 0x11, 0xef, 0x6b, 0xac, 0x20, 0xe8, 0x11, 0xd7, 0x0f,
+	0x2e, 0x22, 0x37, 0x80, 0x45, 0xc9, 0xf2, 0x8d, 0xa1, 0x41, 0xdd, 0xe0, 0x35, 0x44, 0x5c, 0x37,
+	0x36, 0x66, 0xb8, 0x78, 0x10, 0xf9, 0x19, 0x60, 0xc7, 0xf6, 0x0c, 0xdf, 0x78, 0x42, 0x55, 0xc3,
+	0x0a, 0x9f, 0x4e, 0xd8, 0xf5, 0x23, 0xab, 0xa0, 0x50, 0xd2, 0xb2, 0xfc, 0x88, 0x6d, 0xd1, 0x11,
+	0x99, 0x63, 0xb3, 0xcd, 0x3c, 0xa3, 0xa0, 0x50, 0x12, 0xb1, 0xaf, 0x41, 0x59, 0xb7, 0xa7, 0xac,
+	0x26, 0x13, 0x3c, 0x76, 0x76, 0xa4, 0x94, 0x92, 0xc0, 0x22, 0x4a, 0x50, 0x6d, 0xcf, 0xde, 0x6c,
+	0xca, 0x4a, 0x49, 0x60, 0x82, 0x72, 0x1d, 0x36, 0xc8, 0x68, 0xe4, 0x32, 0xe3, 0xa1, 0x21, 0x71,
+	0x7f, 0xa8, 0x44, 0x30, 0x27, 0xee, 0xdc, 0x87, 0x42, 0x18, 0x07, 0x76, 0x54, 0xb3, 0x48, 0xa8,
+	0x8e, 0x78, 0x39, 0x4b, 0xef, 0x15, 0x95, 0x82, 0x15, 0x0a, 0xaf, 0x41, 0xd9, 0xf0, 0xd4, 0xd9,
+	0x13, 0x6e, 0x7a, 0x37, 0xbd, 0x57, 0x50, 0x4a, 0x86, 0x17, 0xbd, 0xd9, 0x55, 0xbf, 0x4b, 0x43,
+	0x25, 0xf9, 0x04, 0x8d, 0x9b, 0x50, 0x30, 0x6d, 0x8d, 0xf0, 0xd4, 0x12, 0xdf, 0x3f, 0xf6, 0x5e,
+	0xf2, 0x6a, 0x5d, 0x6b, 0x07, 0x7c, 0x25, 0xd2, 0xdc, 0xf9, 0x7b, 0x0a, 0x0a, 0x21, 0x8c, 0x2f,
+	0x41, 0xd6, 0x21, 0xfe, 0x98, 0x9b, 0xcb, 0x1d, 0xa5, 0x51, 0x4a, 0xe1, 0x6d, 0x86, 0x7b, 0x0e,
+	0xb1, 0x78, 0x0a, 0x04, 0x38, 0x6b, 0xb3, 0x79, 0x35, 0x29, 0xd1, 0xf9, 0xe5, 0xc4, 0x9e, 0x4c,
+	0xa8, 0xe5, 0x7b, 0xe1, 0xbc, 0x06, 0x78, 0x23, 0x80, 0xf1, 0x87, 0xb0, 0xe9, 0xbb, 0xc4, 0x30,
+	0x13, 0xdc, 0x2c, 0xe7, 0xa2, 0x50, 0x10, 0x91, 0x0f, 0xe1, 0x4a, 0x68, 0x57, 0xa7, 0x3e, 0xd1,
+	0xc6, 0x54, 0x9f, 0x29, 0xe5, 0xf9, 0xfb, 0xe6, 0xe5, 0x80, 0xd0, 0x0c, 0xe4, 0xa1, 0x6e, 0xf5,
+	0xfb, 0x14, 0x6c, 0x86, 0xd7, 0x29, 0x3d, 0x0a, 0xd6, 0x09, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87,
+	0x6b, 0x31, 0x95, 0x17, 0xf4, 0x6a, 0xf5, 0x48, 0x49, 0x89, 0x19, 0xd8, 0x99, 0x00, 0xcc, 0x24,
+	0x2b, 0xc3, 0x76, 0x15, 0x4a, 0xc1, 0xf7, 0x05, 0xfe, 0x91, 0x4a, 0x5c, 0xc0, 0x41, 0x40, 0xec,
+	0xde, 0x85, 0xb7, 0x21, 0x77, 0x46, 0x47, 0x86, 0x15, 0xbc, 0x7a, 0x8a, 0x46, 0xf8, 0x96, 0x9a,
+	0x8d, 0xde, 0x52, 0x8f, 0x7e, 0x97, 0x82, 0x2d, 0xcd, 0x9e, 0xcc, 0xfb, 0x7b, 0x84, 0xe6, 0x5e,
+	0x01, 0xbc, 0x2f, 0x52, 0x5f, 0xdd, 0x1d, 0x19, 0xfe, 0x78, 0x7a, 0x56, 0xd3, 0xec, 0xc9, 0xfe,
+	0xc8, 0x36, 0x89, 0x35, 0x9a, 0x7d, 0x65, 0xe3, 0x7f, 0xb4, 0x8f, 0x46, 0xd4, 0xfa, 0x68, 0x64,
+	0xc7, 0xbe, 0xb9, 0x7d, 0x3e, 0xfb, 0xfb, 0x6d, 0x3a, 0x73, 0xdc, 0x3b, 0xfa, 0x73, 0x7a, 0xe7,
+	0x58, 0xf4, 0xd5, 0x0b, 0x63, 0xa3, 0xd0, 0xa1, 0x49, 0x35, 0x36, 0xde, 0xff, 0x05, 0x00, 0x00,
+	0xff, 0xff, 0xa2, 0xc3, 0x4e, 0x18, 0xbe, 0x1b, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..70b82a4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,837 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies if client streams multiple client messages
+  optional bool client_streaming = 5 [default=false];
+  // Identifies if server streams multiple server messages
+  optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 19 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  By default these types are
+  // represented as JavaScript strings.  This avoids loss of precision that can
+  // happen when a large value is converted to a floating point JavaScript
+  // number.  Specifying JS_NUMBER for the jstype causes the generated
+  // JavaScript code to use the JavaScript "number" type instead of strings.
+  // This option is an enum to permit additional types to be added,
+  // e.g. goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP-based RPC implementations may choose the GET verb for safe
+  // methods, and the PUT verb for idempotent methods, instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements is
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified offset. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 89e07ae..b2af97f 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -51,6 +51,9 @@
 // function. AnyMessageName is provided for less common use cases like filtering a
 // sequence of Any messages based on a set of allowed message type names.
 func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
 	slash := strings.LastIndex(any.TypeUrl, "/")
 	if slash < 0 {
 		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 1fbaa44..6c9a6cf 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,11 +1,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/any/any.proto
+// source: google/protobuf/any.proto
 
 /*
 Package any is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/any/any.proto
+	google/protobuf/any.proto
 
 It has these top-level messages:
 	Any
@@ -149,20 +149,20 @@
 	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
 }
 
-func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 184 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
-	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
-	0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
-	0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
-	0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
-	0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
-	0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
-	0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
-	0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
-	0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
-	0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index fe3350b..b2410a0 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,11 +1,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+// source: google/protobuf/duration.proto
 
 /*
 Package duration is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/duration/duration.proto
+	google/protobuf/duration.proto
 
 It has these top-level messages:
 	Duration
@@ -125,22 +125,20 @@
 	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
 }
 
-func init() {
-	proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
-}
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 189 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
-	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
-	0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
-	0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
-	0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
-	0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
-	0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
-	0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13,
-	0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
-	0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
-	0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00,
+	// 190 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
index ae15941..e877b72 100644
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -1,11 +1,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+// source: google/protobuf/empty.proto
 
 /*
 Package empty is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/empty/empty.proto
+	google/protobuf/empty.proto
 
 It has these top-level messages:
 	Empty
@@ -49,20 +49,18 @@
 	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
 }
 
-func init() {
-	proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0)
-}
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 147 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
-	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd,
-	0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54,
-	0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e,
-	0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2,
-	0xce, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c,
-	0x0c, 0x80, 0xaa, 0xd3, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9,
-	0x4f, 0x62, 0x03, 0x1b, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x8e, 0x0a, 0x06, 0xcf,
-	0x00, 0x00, 0x00,
+	// 148 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+	0xb7, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
index 2a5b4e8..b50a941 100755
--- a/vendor/github.com/golang/protobuf/ptypes/regen.sh
+++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh
@@ -8,14 +8,7 @@
 PKG=github.com/golang/protobuf/ptypes
 UPSTREAM=https://github.com/google/protobuf
 UPSTREAM_SUBDIR=src/google/protobuf
-PROTO_FILES='
-  any.proto
-  duration.proto
-  empty.proto
-  struct.proto
-  timestamp.proto
-  wrappers.proto
-'
+PROTO_FILES=(any duration empty struct timestamp wrappers)
 
 function die() {
   echo 1>&2 $*
@@ -36,31 +29,15 @@
 echo 1>&2 $pkgdir
 base=$(echo $pkgdir | sed "s,/$PKG\$,,")
 echo 1>&2 "base: $base"
-cd $base
+cd "$base"
 
 echo 1>&2 "fetching latest protos... "
 git clone -q $UPSTREAM $tmpdir
-# Pass 1: build mapping from upstream filename to our filename.
-declare -A filename_map
-for f in $(cd $PKG && find * -name '*.proto'); do
-  echo -n 1>&2 "looking for latest version of $f... "
-  up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
-  echo 1>&2 $up
-  if [ $(echo $up | wc -w) != "1" ]; then
-    die "not exactly one match"
-  fi
-  filename_map[$up]=$f
-done
-# Pass 2: copy files
-for up in "${!filename_map[@]}"; do
-  f=${filename_map[$up]}
-  shortname=$(basename $f | sed 's,\.proto$,,')
-  cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
+
+for file in ${PROTO_FILES[@]}; do
+  echo 1>&2 "* $file"
+  protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
+  cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
 done
 
-# Run protoc once per package.
-for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
-  echo 1>&2 "* $dir"
-  protoc --go_out=. $dir/*.proto
-done
 echo 1>&2 "All OK"
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
index 35a8ec5..4cfe608 100644
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -1,11 +1,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/struct/struct.proto
+// source: google/protobuf/struct.proto
 
 /*
 Package structpb is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/struct/struct.proto
+	google/protobuf/struct.proto
 
 It has these top-level messages:
 	Struct
@@ -346,37 +346,35 @@
 	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
 }
 
-func init() {
-	proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0)
-}
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
 	// 417 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
-	0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09,
-	0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52,
-	0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x7b, 0xf4, 0xe8,
-	0xaf, 0xf3, 0x28, 0x33, 0x93, 0x44, 0x69, 0x29, 0x78, 0x9a, 0xbe, 0x37, 0xdf, 0xfb, 0xe6, 0xbd,
-	0xd7, 0xc0, 0xf3, 0xb2, 0xe2, 0x9f, 0x45, 0xe6, 0xe7, 0x6c, 0x13, 0x94, 0x8c, 0x2c, 0x68, 0x19,
-	0xd4, 0x0d, 0xe3, 0x2c, 0x13, 0xab, 0xa0, 0xe6, 0xdb, 0xba, 0x68, 0x83, 0x96, 0x37, 0x22, 0xe7,
-	0xdd, 0xe1, 0xab, 0x5b, 0x7c, 0xa7, 0x64, 0xac, 0x24, 0x85, 0xdf, 0xb3, 0xd3, 0xef, 0x08, 0xac,
-	0xf7, 0x8a, 0xc0, 0x21, 0x58, 0xab, 0xaa, 0x20, 0xcb, 0x76, 0x82, 0x5c, 0xd3, 0x73, 0x66, 0x67,
-	0xfe, 0x0e, 0xec, 0x6b, 0xd0, 0x7f, 0xa3, 0xa8, 0x73, 0xca, 0x9b, 0x6d, 0xda, 0x95, 0x9c, 0xbe,
-	0x03, 0xe7, 0x9f, 0x34, 0x3e, 0x01, 0x73, 0x5d, 0x6c, 0x27, 0xc8, 0x45, 0x9e, 0x9d, 0xca, 0x9f,
-	0xf8, 0x09, 0x8c, 0xbf, 0x2c, 0x88, 0x28, 0x26, 0x86, 0x8b, 0x3c, 0x67, 0x76, 0x6f, 0x4f, 0x7e,
-	0x23, 0x6f, 0x53, 0x0d, 0xbd, 0x34, 0x5e, 0xa0, 0xe9, 0x2f, 0x03, 0xc6, 0x2a, 0x89, 0x43, 0x00,
-	0x2a, 0x08, 0x99, 0x6b, 0x81, 0x94, 0x1e, 0xcf, 0x4e, 0xf7, 0x04, 0x57, 0x82, 0x10, 0xc5, 0x5f,
-	0x8e, 0x52, 0x9b, 0xf6, 0x01, 0x3e, 0x83, 0xdb, 0x54, 0x6c, 0xb2, 0xa2, 0x99, 0xff, 0x7d, 0x1f,
-	0x5d, 0x8e, 0x52, 0x47, 0x67, 0x07, 0xa8, 0xe5, 0x4d, 0x45, 0xcb, 0x0e, 0x32, 0x65, 0xe3, 0x12,
-	0xd2, 0x59, 0x0d, 0x3d, 0x02, 0xc8, 0x18, 0xeb, 0xdb, 0x38, 0x72, 0x91, 0x77, 0x4b, 0x3e, 0x25,
-	0x73, 0x1a, 0x78, 0xa5, 0x2c, 0x22, 0xe7, 0x1d, 0x32, 0x56, 0xa3, 0xde, 0x3f, 0xb0, 0xc7, 0x4e,
-	0x2f, 0x72, 0x3e, 0x4c, 0x49, 0xaa, 0xb6, 0xaf, 0xb5, 0x54, 0xed, 0xfe, 0x94, 0x71, 0xd5, 0xf2,
-	0x61, 0x4a, 0xd2, 0x07, 0x91, 0x05, 0x47, 0xeb, 0x8a, 0x2e, 0xa7, 0x21, 0xd8, 0x03, 0x81, 0x7d,
-	0xb0, 0x94, 0xac, 0xff, 0x47, 0x0f, 0x2d, 0xbd, 0xa3, 0x1e, 0x3f, 0x00, 0x7b, 0x58, 0x22, 0x3e,
-	0x06, 0xb8, 0xba, 0x8e, 0xe3, 0xf9, 0xcd, 0xeb, 0xf8, 0xfa, 0xfc, 0x64, 0x14, 0x7d, 0x43, 0x70,
-	0x37, 0x67, 0x9b, 0x5d, 0x45, 0xe4, 0xe8, 0x69, 0x12, 0x19, 0x27, 0xe8, 0xd3, 0xd3, 0xff, 0xfd,
-	0x30, 0x43, 0x7d, 0xd4, 0xd9, 0x6f, 0x84, 0x7e, 0x18, 0xe6, 0x45, 0x12, 0xfd, 0x34, 0x1e, 0x5e,
-	0x68, 0x79, 0xd2, 0xf7, 0xf7, 0xb1, 0x20, 0xe4, 0x2d, 0x65, 0x5f, 0xe9, 0x07, 0x59, 0x99, 0x59,
-	0x4a, 0xf5, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x6e, 0x5d, 0x3c, 0xfe, 0x02, 0x00,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+	0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+	0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+	0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+	0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+	0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+	0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+	0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+	0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+	0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+	0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+	0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+	0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+	0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+	0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+	0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+	0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+	0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+	0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+	0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+	0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+	0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
 	0x00,
 }
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 1b36576..47f10db 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -99,6 +99,15 @@
 	return t, validateTimestamp(ts)
 }
 
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
 func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
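
A small sketch of how the new TimestampNow helper fits alongside the existing conversions; this is illustrative only and just round-trips the current time:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// TimestampNow wraps TimestampProto(time.Now()) and panics only if the
	// current time falls outside the representable Timestamp range.
	ts := ptypes.TimestampNow()

	// Convert back to time.Time with the existing helper.
	t, err := ptypes.Timestamp(ts)
	fmt.Println(t, err)
}
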
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 3b76261..e23e4a2 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,11 +1,11 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+// source: google/protobuf/timestamp.proto
 
 /*
 Package timestamp is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+	google/protobuf/timestamp.proto
 
 It has these top-level messages:
 	Timestamp
@@ -141,22 +141,20 @@
 	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
 }
 
-func init() {
-	proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
-}
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 190 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
-	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
-	0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
-	0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
-	0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
-	0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
-	0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x8e, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x4c, 0x27, 0x3e,
-	0xb8, 0x89, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0xdc, 0xfc, 0x83, 0x91, 0x71, 0x11,
-	0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, 0xe1,
-	0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, 0x8c,
-	0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x59, 0x0a, 0x4d, 0x13, 0x01, 0x00, 0x00,
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
 }
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
index 8749689..8d393e9 100644
--- a/vendor/github.com/golang/snappy/encode.go
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -138,7 +138,7 @@
 	}
 }
 
-// Writer is an io.Writer than can write Snappy-compressed bytes.
+// Writer is an io.Writer that can write Snappy-compressed bytes.
 type Writer struct {
 	w   io.Writer
 	err error
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
index 4ba1cdf..7b62164 100644
--- a/vendor/github.com/googleapis/gax-go/call_option.go
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -35,6 +35,7 @@
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 // CallOption is an option used by Invoke to control behaviors of RPC calls.
@@ -80,7 +81,11 @@
 }
 
 func (r *boRetryer) Retry(err error) (time.Duration, bool) {
-	c := grpc.Code(err)
+	st, ok := status.FromError(err)
+	if !ok {
+		return 0, false
+	}
+	c := st.Code()
 	for _, rc := range r.codes {
 		if c == rc {
 			return r.backoff.Pause(), true
@@ -121,6 +126,9 @@
 	if bo.Multiplier < 1 {
 		bo.Multiplier = 2
 	}
+	// Select a duration between zero and the current max. It might seem counterintuitive to
+	// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
+	// argues that that is the best strategy.
 	d := time.Duration(rand.Int63n(int64(bo.cur)))
 	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
 	if bo.cur > bo.Max {
@@ -129,8 +137,21 @@
 	return d
 }
 
+type grpcOpt []grpc.CallOption
+
+func (o grpcOpt) Resolve(s *CallSettings) {
+	s.GRPC = o
+}
+
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
+	return grpcOpt(append([]grpc.CallOption(nil), opt...))
+}
+
 type CallSettings struct {
 	// Retry returns a Retryer to be used to control retry logic of a method call.
 	// If Retry is nil or the returned Retryer is nil, the call will not be retried.
 	Retry func() Retryer
+
+	// CallOptions to be forwarded to GRPC.
+	GRPC []grpc.CallOption
 }
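
These changes move error classification to status.FromError and let callers forward per-call gRPC options through CallSettings.GRPC. A rough sketch of building such options, assuming the package's existing WithRetry/OnCodes/Backoff helpers and using purely illustrative codes and durations:

package example

import (
	"time"

	"github.com/googleapis/gax-go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func callOptions() []gax.CallOption {
	return []gax.CallOption{
		// Retry Unavailable errors with jittered exponential backoff; the
		// status-based Retry above only fires for errors carrying a gRPC status.
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        5 * time.Second,
				Multiplier: 2,
			})
		}),
		// Forward arbitrary per-call gRPC options through CallSettings.GRPC.
		gax.WithGRPCOptions(grpc.FailFast(false)),
	}
}
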
diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go
index 5ebedff..d56c0c7 100644
--- a/vendor/github.com/googleapis/gax-go/gax.go
+++ b/vendor/github.com/googleapis/gax-go/gax.go
@@ -37,4 +37,4 @@
 // This project is currently experimental and not supported.
 package gax
 
-const Version = "0.1.0"
+const Version = "0.2.0"
diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go
new file mode 100644
index 0000000..d81455e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/header.go
@@ -0,0 +1,24 @@
+package gax
+
+import "bytes"
+
+// XGoogHeader is for use by the Google Cloud Libraries only.
+//
+// XGoogHeader formats key-value pairs.
+// The resulting string is suitable for the x-goog-api-client header.

+func XGoogHeader(keyval ...string) string {
+	if len(keyval) == 0 {
+		return ""
+	}
+	if len(keyval)%2 != 0 {
+		panic("gax.Header: odd argument count")
+	}
+	var buf bytes.Buffer
+	for i := 0; i < len(keyval); i += 2 {
+		buf.WriteByte(' ')
+		buf.WriteString(keyval[i])
+		buf.WriteByte('/')
+		buf.WriteString(keyval[i+1])
+	}
+	return buf.String()[1:]
+}
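
XGoogHeader space-joins the pairs, with a slash between each key and its value; for example (the version strings here are made up):

package example

import "github.com/googleapis/gax-go"

// xGoogExample shows the header produced for two illustrative key/value
// pairs: "gl-go/1.8.3 gax/0.2.0".
func xGoogExample() string {
	return gax.XGoogHeader("gl-go", "1.8.3", "gax", "0.2.0")
}
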
diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go
index d2134e1..86049d8 100644
--- a/vendor/github.com/googleapis/gax-go/invoke.go
+++ b/vendor/github.com/googleapis/gax-go/invoke.go
@@ -36,7 +36,7 @@
 )
 
 // A user defined call stub.
-type APICall func(context.Context) error
+type APICall func(context.Context, CallSettings) error
 
 // Invoke calls the given APICall,
 // performing retries as specified by opts, if any.
@@ -67,7 +67,7 @@
 func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
 	var retryer Retryer
 	for {
-		err := call(ctx)
+		err := call(ctx, settings)
 		if err == nil {
 			return nil
 		}
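
Since APICall now also receives the resolved CallSettings, call sites are expected to pass settings.GRPC to the underlying stub. A hedged sketch of the new shape, where doRPC is a placeholder for a generated stub method:

package example

import (
	"github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// doRPC stands in for a generated gRPC stub method; it exists only to keep
// the sketch self-contained.
func doRPC(ctx context.Context, _ ...grpc.CallOption) error { return nil }

// invokeSketch shows the two-argument APICall introduced above, with the
// per-call gRPC options from CallSettings forwarded to the stub.
func invokeSketch(ctx context.Context, opts ...gax.CallOption) error {
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		return doRPC(ctx, settings.GRPC...)
	}, opts...)
}
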
diff --git a/vendor/github.com/googleapis/gax-go/path_template.go b/vendor/github.com/googleapis/gax-go/path_template.go
deleted file mode 100644
index 41bda94..0000000
--- a/vendor/github.com/googleapis/gax-go/path_template.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-)
-
-type matcher interface {
-	match([]string) (int, error)
-	String() string
-}
-
-type segment struct {
-	matcher
-	name string
-}
-
-type labelMatcher string
-
-func (ls labelMatcher) match(segments []string) (int, error) {
-	if len(segments) == 0 {
-		return 0, fmt.Errorf("expected %s but no more segments found", ls)
-	}
-	if segments[0] != string(ls) {
-		return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
-	}
-	return 1, nil
-}
-
-func (ls labelMatcher) String() string {
-	return string(ls)
-}
-
-type wildcardMatcher int
-
-func (wm wildcardMatcher) match(segments []string) (int, error) {
-	if len(segments) == 0 {
-		return 0, errors.New("no more segments found")
-	}
-	return 1, nil
-}
-
-func (wm wildcardMatcher) String() string {
-	return "*"
-}
-
-type pathWildcardMatcher int
-
-func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
-	length := len(segments) - int(pwm)
-	if length <= 0 {
-		return 0, errors.New("not sufficient segments are supplied for path wildcard")
-	}
-	return length, nil
-}
-
-func (pwm pathWildcardMatcher) String() string {
-	return "**"
-}
-
-type ParseError struct {
-	Pos      int
-	Template string
-	Message  string
-}
-
-func (pe ParseError) Error() string {
-	return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
-}
-
-// PathTemplate manages the template to build and match with paths used
-// by API services. It holds a template and variable names in it, and
-// it can extract matched patterns from a path string or build a path
-// string from a binding.
-//
-// See http.proto in github.com/googleapis/googleapis/ for the details of
-// the template syntax.
-type PathTemplate struct {
-	segments []segment
-}
-
-// NewPathTemplate parses a path template, and returns a PathTemplate
-// instance if successful.
-func NewPathTemplate(template string) (*PathTemplate, error) {
-	return parsePathTemplate(template)
-}
-
-// MustCompilePathTemplate is like NewPathTemplate but panics if the
-// expression cannot be parsed. It simplifies safe initialization of
-// global variables holding compiled regular expressions.
-func MustCompilePathTemplate(template string) *PathTemplate {
-	pt, err := NewPathTemplate(template)
-	if err != nil {
-		panic(err)
-	}
-	return pt
-}
-
-// Match attempts to match the given path with the template, and returns
-// the mapping of the variable name to the matched pattern string.
-func (pt *PathTemplate) Match(path string) (map[string]string, error) {
-	paths := strings.Split(path, "/")
-	values := map[string]string{}
-	for _, segment := range pt.segments {
-		length, err := segment.match(paths)
-		if err != nil {
-			return nil, err
-		}
-		if segment.name != "" {
-			value := strings.Join(paths[:length], "/")
-			if oldValue, ok := values[segment.name]; ok {
-				values[segment.name] = oldValue + "/" + value
-			} else {
-				values[segment.name] = value
-			}
-		}
-		paths = paths[length:]
-	}
-	if len(paths) != 0 {
-		return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
-	}
-	return values, nil
-}
-
-// Render creates a path string from its template and the binding from
-// the variable name to the value.
-func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
-	result := make([]string, 0, len(pt.segments))
-	var lastVariableName string
-	for _, segment := range pt.segments {
-		name := segment.name
-		if lastVariableName != "" && name == lastVariableName {
-			continue
-		}
-		lastVariableName = name
-		if name == "" {
-			result = append(result, segment.String())
-		} else if value, ok := binding[name]; ok {
-			result = append(result, value)
-		} else {
-			return "", fmt.Errorf("%s is not found", name)
-		}
-	}
-	built := strings.Join(result, "/")
-	return built, nil
-}
diff --git a/vendor/github.com/googleapis/gax-go/path_template_parser.go b/vendor/github.com/googleapis/gax-go/path_template_parser.go
deleted file mode 100644
index 79c8e75..0000000
--- a/vendor/github.com/googleapis/gax-go/path_template_parser.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
-	"fmt"
-	"io"
-	"strings"
-)
-
-// This parser follows the syntax of path templates, from
-// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
-// The differences are that there is no custom verb, we allow the initial slash
-// to be absent, and that we are not strict as
-// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
-// literals.
-
-type pathTemplateParser struct {
-	r                *strings.Reader
-	runeCount        int             // the number of the current rune in the original string
-	nextVar          int             // the number to use for the next unnamed variable
-	seenName         map[string]bool // names we've seen already
-	seenPathWildcard bool            // have we seen "**" already?
-}
-
-func parsePathTemplate(template string) (pt *PathTemplate, err error) {
-	p := &pathTemplateParser{
-		r:        strings.NewReader(template),
-		seenName: map[string]bool{},
-	}
-
-	// Handle panics with strings like errors.
-	// See pathTemplateParser.error, below.
-	defer func() {
-		if x := recover(); x != nil {
-			errmsg, ok := x.(errString)
-			if !ok {
-				panic(x)
-			}
-			pt = nil
-			err = ParseError{p.runeCount, template, string(errmsg)}
-		}
-	}()
-
-	segs := p.template()
-	// If there is a path wildcard, set its length. We can't do this
-	// until we know how many segments we've got all together.
-	for i, seg := range segs {
-		if _, ok := seg.matcher.(pathWildcardMatcher); ok {
-			segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
-			break
-		}
-	}
-	return &PathTemplate{segments: segs}, nil
-
-}
-
-// Used to indicate errors "thrown" by this parser. We don't use string because
-// many parts of the standard library panic with strings.
-type errString string
-
-// Terminates parsing immediately with an error.
-func (p *pathTemplateParser) error(msg string) {
-	panic(errString(msg))
-}
-
-// Template = [ "/" ] Segments
-func (p *pathTemplateParser) template() []segment {
-	var segs []segment
-	if p.consume('/') {
-		// Initial '/' needs an initial empty matcher.
-		segs = append(segs, segment{matcher: labelMatcher("")})
-	}
-	return append(segs, p.segments("")...)
-}
-
-// Segments = Segment { "/" Segment }
-func (p *pathTemplateParser) segments(name string) []segment {
-	var segs []segment
-	for {
-		subsegs := p.segment(name)
-		segs = append(segs, subsegs...)
-		if !p.consume('/') {
-			break
-		}
-	}
-	return segs
-}
-
-// Segment  = "*" | "**" | LITERAL | Variable
-func (p *pathTemplateParser) segment(name string) []segment {
-	if p.consume('*') {
-		if name == "" {
-			name = fmt.Sprintf("$%d", p.nextVar)
-			p.nextVar++
-		}
-		if p.consume('*') {
-			if p.seenPathWildcard {
-				p.error("multiple '**' disallowed")
-			}
-			p.seenPathWildcard = true
-			// We'll change 0 to the right number at the end.
-			return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
-		}
-		return []segment{{name: name, matcher: wildcardMatcher(0)}}
-	}
-	if p.consume('{') {
-		if name != "" {
-			p.error("recursive named bindings are not allowed")
-		}
-		return p.variable()
-	}
-	return []segment{{name: name, matcher: labelMatcher(p.literal())}}
-}
-
-// Variable = "{" FieldPath [ "=" Segments ] "}"
-// "{" is already consumed.
-func (p *pathTemplateParser) variable() []segment {
-	// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
-	name := p.literal()
-	if p.seenName[name] {
-		p.error(name + " appears multiple times")
-	}
-	p.seenName[name] = true
-	var segs []segment
-	if p.consume('=') {
-		segs = p.segments(name)
-	} else {
-		// "{var}" is equivalent to "{var=*}"
-		segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
-	}
-	if !p.consume('}') {
-		p.error("expected '}'")
-	}
-	return segs
-}
-
-// A literal is any sequence of characters other than a few special ones.
-// The list of stop characters is not quite the same as in the template RFC.
-func (p *pathTemplateParser) literal() string {
-	lit := p.consumeUntil("/*}{=")
-	if lit == "" {
-		p.error("empty literal")
-	}
-	return lit
-}
-
-// Read runes until EOF or one of the runes in stopRunes is encountered.
-// If the latter, unread the stop rune. Return the accumulated runes as a string.
-func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
-	var runes []rune
-	for {
-		r, ok := p.readRune()
-		if !ok {
-			break
-		}
-		if strings.IndexRune(stopRunes, r) >= 0 {
-			p.unreadRune()
-			break
-		}
-		runes = append(runes, r)
-	}
-	return string(runes)
-}
-
-// If the next rune is r, consume it and return true.
-// Otherwise, leave the input unchanged and return false.
-func (p *pathTemplateParser) consume(r rune) bool {
-	rr, ok := p.readRune()
-	if !ok {
-		return false
-	}
-	if r == rr {
-		return true
-	}
-	p.unreadRune()
-	return false
-}
-
-// Read the next rune from the input. Return it.
-// The second return value is false at EOF.
-func (p *pathTemplateParser) readRune() (rune, bool) {
-	r, _, err := p.r.ReadRune()
-	if err == io.EOF {
-		return r, false
-	}
-	if err != nil {
-		p.error(err.Error())
-	}
-	p.runeCount++
-	return r, true
-}
-
-// Put the last rune that was read back on the input.
-func (p *pathTemplateParser) unreadRune() {
-	if err := p.r.UnreadRune(); err != nil {
-		p.error(err.Error())
-	}
-	p.runeCount--
-}
diff --git a/vendor/github.com/gregjones/httpcache/.gitignore b/vendor/github.com/gregjones/httpcache/.gitignore
deleted file mode 100644
index 0026861..0000000
--- a/vendor/github.com/gregjones/httpcache/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml
index e34612c..2bca4c5 100644
--- a/vendor/github.com/gregjones/httpcache/.travis.yml
+++ b/vendor/github.com/gregjones/httpcache/.travis.yml
@@ -1,12 +1,13 @@
 sudo: false
 language: go
 go:
-  - 1.6
-  - 1.7
-  - tip
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - master
 matrix:
   allow_failures:
-    - go: tip
+    - go: master
   fast_finish: true
 install:
   - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
index ccd0172..61bd830 100644
--- a/vendor/github.com/gregjones/httpcache/README.md
+++ b/vendor/github.com/gregjones/httpcache/README.md
@@ -1,19 +1,13 @@
 httpcache
 =========
 
-[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache)
-
-A Transport for Go's http.Client that will cache responses according to the HTTP RFC
+[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
 
 Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
 
 It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
 
-**Documentation:** http://godoc.org/github.com/gregjones/httpcache
-
-**License:** MIT (see LICENSE.txt)
-
-Cache backends
+Cache Backends
 --------------
 
 - The built-in 'memory' cache stores responses in an in-memory map.
@@ -23,3 +17,8 @@
 - [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
 - [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
 - [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
+
+License
+-------
+
+-	[MIT License](LICENSE.txt)
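The README above positions httpcache as a private, mostly RFC-compliant client-side cache. Here is a small usage sketch built only from identifiers visible in this diff (`NewMemoryCacheTransport`, `MarkCachedResponses`, the `X-From-Cache` header); the URL is a placeholder, and whether the second request is actually answered from cache depends on the response's cache headers.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/gregjones/httpcache"
)

// fetch issues a GET and drains the body; with the patched httpcache.go
// (below), reading a GET body to EOF is what triggers caching.
func fetch(c *http.Client, url string) (*http.Response, error) {
	resp, err := c.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	_, err = io.Copy(ioutil.Discard, resp.Body)
	return resp, err
}

func main() {
	t := httpcache.NewMemoryCacheTransport() // constructor shown at the end of httpcache.go below
	t.MarkCachedResponses = true
	client := &http.Client{Transport: t}

	if _, err := fetch(client, "https://example.com/"); err != nil {
		panic(err)
	}
	resp, err := fetch(client, "https://example.com/")
	if err != nil {
		panic(err)
	}
	// Non-empty when the second response was served from the cache.
	fmt.Println(resp.Header.Get("X-From-Cache"))
}
```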
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
index 69842a7..8239edc 100644
--- a/vendor/github.com/gregjones/httpcache/httpcache.go
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -12,7 +12,7 @@
 	"errors"
 	"fmt"
 	"io"
-	"log"
+	"io/ioutil"
 	"net/http"
 	"net/http/httputil"
 	"strings"
@@ -90,33 +90,6 @@
 	return c
 }
 
-// onEOFReader executes a function on reader EOF or close
-type onEOFReader struct {
-	rc io.ReadCloser
-	fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
-	n, err = r.rc.Read(p)
-	if err == io.EOF {
-		r.runFunc()
-	}
-	return
-}
-
-func (r *onEOFReader) Close() error {
-	err := r.rc.Close()
-	r.runFunc()
-	return err
-}
-
-func (r *onEOFReader) runFunc() {
-	if fn := r.fn; fn != nil {
-		fn()
-		r.fn = nil
-	}
-}
-
 // Transport is an implementation of http.RoundTripper that will return values from a cache
 // where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
 // to repeated requests allowing servers to return 304 / Not Modified
@@ -127,10 +100,6 @@
 	Cache     Cache
 	// If true, responses returned from the cache will be given an extra header, X-From-Cache
 	MarkCachedResponses bool
-	// guards modReq
-	mu sync.RWMutex
-	// Mapping of original request => cloned
-	modReq map[*http.Request]*http.Request
 }
 
 // NewTransport returns a new Transport with the
@@ -156,20 +125,6 @@
 	return true
 }
 
-// setModReq maintains a mapping between original requests and their associated cloned requests
-func (t *Transport) setModReq(orig, mod *http.Request) {
-	t.mu.Lock()
-	if t.modReq == nil {
-		t.modReq = make(map[*http.Request]*http.Request)
-	}
-	if mod == nil {
-		delete(t.modReq, orig)
-	} else {
-		t.modReq[orig] = mod
-	}
-	t.mu.Unlock()
-}
-
 // RoundTrip takes a Request and returns a Response
 //
 // If there is a fresh Response already in cache, then it will be returned without connecting to
@@ -222,23 +177,7 @@
 					req2.Header.Set("if-modified-since", lastModified)
 				}
 				if req2 != nil {
-					// Associate original request with cloned request so we can refer to
-					// it in CancelRequest()
-					t.setModReq(req, req2)
 					req = req2
-					defer func() {
-						// Release req/clone mapping on error
-						if err != nil {
-							t.setModReq(req, nil)
-						}
-						if resp != nil {
-							// Release req/clone mapping on body close/EOF
-							resp.Body = &onEOFReader{
-								rc: resp.Body,
-								fn: func() { t.setModReq(req, nil) },
-							}
-						}
-					}()
 				}
 			}
 		}
@@ -290,9 +229,25 @@
 				resp.Header.Set(fakeHeader, reqValue)
 			}
 		}
-		respBytes, err := httputil.DumpResponse(resp, true)
-		if err == nil {
-			t.Cache.Set(cacheKey, respBytes)
+		switch req.Method {
+		case "GET":
+			// Delay caching until EOF is reached.
+			resp.Body = &cachingReadCloser{
+				R: resp.Body,
+				OnEOF: func(r io.Reader) {
+					resp := *resp
+					resp.Body = ioutil.NopCloser(r)
+					respBytes, err := httputil.DumpResponse(&resp, true)
+					if err == nil {
+						t.Cache.Set(cacheKey, respBytes)
+					}
+				},
+			}
+		default:
+			respBytes, err := httputil.DumpResponse(resp, true)
+			if err == nil {
+				t.Cache.Set(cacheKey, respBytes)
+			}
 		}
 	} else {
 		t.Cache.Delete(cacheKey)
@@ -300,31 +255,6 @@
 	return resp, nil
 }
 
-// CancelRequest calls CancelRequest on the underlaying transport if implemented or
-// throw a warning otherwise.
-func (t *Transport) CancelRequest(req *http.Request) {
-	type canceler interface {
-		CancelRequest(*http.Request)
-	}
-	tr, ok := t.Transport.(canceler)
-	if !ok {
-		log.Printf("httpcache: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", t.Transport)
-		return
-	}
-
-	t.mu.RLock()
-	if modReq, ok := t.modReq[req]; ok {
-		t.mu.RUnlock()
-		t.mu.Lock()
-		delete(t.modReq, req)
-		t.mu.Unlock()
-		tr.CancelRequest(modReq)
-	} else {
-		t.mu.RUnlock()
-		tr.CancelRequest(req)
-	}
-}
-
 // ErrNoDateHeader indicates that the HTTP headers contained no Date header.
 var ErrNoDateHeader = errors.New("no Date header")
 
@@ -586,6 +516,35 @@
 	return vals
 }
 
+// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
+// handler with a full copy of the content read from R when EOF is
+// reached.
+type cachingReadCloser struct {
+	// Underlying ReadCloser.
+	R io.ReadCloser
+	// OnEOF is called with a copy of the content of R when EOF is reached.
+	OnEOF func(io.Reader)
+
+	buf bytes.Buffer // buf stores a copy of the content of R.
+}
+
+// Read reads the next len(p) bytes from R or until R is drained. The
+// return value n is the number of bytes read. If R has no data to
+// return, err is io.EOF and OnEOF is called with a full copy of what
+// has been read so far.
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
+	n, err = r.R.Read(p)
+	r.buf.Write(p[:n])
+	if err == io.EOF {
+		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
+	}
+	return n, err
+}
+
+func (r *cachingReadCloser) Close() error {
+	return r.R.Close()
+}
+
 // NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
 func NewMemoryCacheTransport() *Transport {
 	c := NewMemoryCache()
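The httpcache.go changes above drop the CancelRequest/modReq bookkeeping and, for GET requests only, defer caching until the response body has been read to EOF: the body is wrapped in a `cachingReadCloser` whose `OnEOF` callback dumps and stores the fully read response, while non-GET responses are still dumped immediately. A self-contained sketch of the same wrapper technique follows; the `teeUntilEOF` type is illustrative, not the httpcache type itself.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// teeUntilEOF mirrors the cachingReadCloser idea: copy every byte the
// caller reads into a buffer and hand the complete copy to onEOF once
// the underlying reader is drained.
type teeUntilEOF struct {
	rc    io.ReadCloser
	buf   bytes.Buffer
	onEOF func(io.Reader)
}

func (t *teeUntilEOF) Read(p []byte) (int, error) {
	n, err := t.rc.Read(p)
	t.buf.Write(p[:n])
	if err == io.EOF {
		t.onEOF(bytes.NewReader(t.buf.Bytes()))
	}
	return n, err
}

func (t *teeUntilEOF) Close() error { return t.rc.Close() }

func main() {
	body := ioutil.NopCloser(strings.NewReader("hello world"))
	wrapped := &teeUntilEOF{
		rc: body,
		onEOF: func(r io.Reader) {
			b, _ := ioutil.ReadAll(r)
			fmt.Printf("would cache %d bytes\n", len(b))
		},
	}
	io.Copy(ioutil.Discard, wrapped) // draining the body triggers onEOF
	wrapped.Close()
}
```

One consequence of this design: a GET response whose body is never read to EOF is never written to the cache.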
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
index b7063d0..3f83d90 100644
--- a/vendor/github.com/hashicorp/hcl/.travis.yml
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -3,7 +3,7 @@
 language: go
 
 go:
-  - 1.7
+  - 1.8
 
 branches:
   only:
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
index 3c8cdf8..4db0b71 100644
--- a/vendor/github.com/hashicorp/hcl/appveyor.yml
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -4,7 +4,7 @@
 environment:
   GOPATH: c:\gopath
 init:
-  - git config --global core.autocrlf true
+  - git config --global core.autocrlf false
 install:
 - cmd: >-
     echo %Path%
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index c8a077d..6e75ece 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -89,9 +89,9 @@
 	switch k.Kind() {
 	case reflect.Bool:
 		return d.decodeBool(name, node, result)
-	case reflect.Float64:
+	case reflect.Float32, reflect.Float64:
 		return d.decodeFloat(name, node, result)
-	case reflect.Int:
+	case reflect.Int, reflect.Int32, reflect.Int64:
 		return d.decodeInt(name, node, result)
 	case reflect.Interface:
 		// When we see an interface, we make our own thing
@@ -143,7 +143,7 @@
 				return err
 			}
 
-			result.Set(reflect.ValueOf(v))
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
 			return nil
 		}
 	}
@@ -164,7 +164,11 @@
 				return err
 			}
 
-			result.Set(reflect.ValueOf(int(v)))
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
 			return nil
 		case token.STRING:
 			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
@@ -172,7 +176,11 @@
 				return err
 			}
 
-			result.Set(reflect.ValueOf(int(v)))
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
 			return nil
 		}
 	}
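The decoder changes above widen the accepted kinds to Float32/Float64 and Int/Int32/Int64, convert parsed values to the destination type instead of assuming plain `float64`/`int`, and use `SetInt` when the target is a concrete integer rather than an `interface{}`. A short sketch of those reflect idioms in isolation; the `assignInt` helper is invented for illustration.

```go
package main

import (
	"fmt"
	"reflect"
)

// assignInt stores a parsed int64 into result, which may be an
// interface{} or any concrete integer kind (int, int32, int64, ...).
func assignInt(result reflect.Value, v int64) {
	if result.Kind() == reflect.Interface {
		result.Set(reflect.ValueOf(int(v))) // interface target: box a plain int
	} else {
		result.SetInt(v) // concrete target: SetInt handles int/int32/int64
	}
}

func main() {
	var i32 int32
	var any interface{}

	assignInt(reflect.ValueOf(&i32).Elem(), 42)
	assignInt(reflect.ValueOf(&any).Elem(), 42)

	// Float targets can likewise be filled via Convert to the exact type.
	var f32 float32
	reflect.ValueOf(&f32).Elem().Set(reflect.ValueOf(3.14).Convert(reflect.TypeOf(f32)))

	fmt.Println(i32, any, f32) // 42 42 3.14
}
```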
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 54a6493..b488180 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -3,6 +3,7 @@
 package parser
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"strings"
@@ -36,6 +37,11 @@
 
 // Parse returns the fully parsed source and returns the abstract syntax tree.
 func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
 	p := newParser(src)
 	return p.Parse()
 }
@@ -256,7 +262,10 @@
 			keyCount++
 			keys = append(keys, &ast.ObjectKey{Token: p.tok})
 		case token.ILLEGAL:
-			fmt.Println("illegal")
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
 		default:
 			return keys, &PosError{
 				Pos: p.tok.Pos,
@@ -343,7 +352,7 @@
 			}
 		}
 		switch tok.Type {
-		case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
 			node, err := p.literalType()
 			if err != nil {
 				return nil, err
@@ -385,12 +394,16 @@
 			}
 			l.Add(node)
 			needComma = true
-		case token.BOOL:
-			// TODO(arslan) should we support? not supported by HCL yet
 		case token.LBRACK:
-			// TODO(arslan) should we support nested lists? Even though it's
-			// written in README of HCL, it's not a part of the grammar
-			// (not defined in parse.y)
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
 		case token.RBRACK:
 			// finished
 			l.Rbrack = p.tok.Pos
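The parser changes above normalize `\r\n` to `\n` before scanning, return a positioned error on ILLEGAL tokens instead of printing to stdout, and accept BOOL literals and nested lists inside lists. A tiny sketch of the normalization step on its own, since dangling `\r` bytes would otherwise leak into parsed tokens:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The HCL scanner only understands "\n" line endings, so CRLF input
	// is rewritten up front before handing the bytes to the parser.
	src := []byte("key = \"value\"\r\nother = 1\r\n")
	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
	fmt.Printf("%q\n", src)
}
```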
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
index 0735d95..6966236 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -95,6 +95,12 @@
 		s.srcPos.Column = 0
 	}
 
+	// If we see a null character with data left, then that is an error
+	if ch == '\x00' && s.buf.Len() > 0 {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
 	// debug
 	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
 	return ch
@@ -474,7 +480,7 @@
 		// read character after quote
 		ch := s.next()
 
-		if ch < 0 || ch == eof {
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
 			s.err("literal not terminated")
 			return
 		}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
index d578769..5f981ea 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -27,6 +27,9 @@
 	if quote != '"' {
 		return "", ErrSyntax
 	}
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+		return "", ErrSyntax
+	}
 
 	// Is it trivial?  Avoid allocation.
 	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
@@ -84,6 +87,10 @@
 			}
 		}
 
+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
 		c, multibyte, ss, err := unquoteChar(s, quote)
 		if err != nil {
 			return "", err
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
index 6f46085..125a5f0 100644
--- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -147,7 +147,7 @@
 			// Done
 			return keys, nil
 		case token.ILLEGAL:
-			fmt.Println("illegal")
+			return nil, errors.New("illegal")
 		default:
 			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
 		}
diff --git a/vendor/github.com/inconshreveable/log15/.travis.yml b/vendor/github.com/inconshreveable/log15/.travis.yml
index bde823d..172ef74 100644
--- a/vendor/github.com/inconshreveable/log15/.travis.yml
+++ b/vendor/github.com/inconshreveable/log15/.travis.yml
@@ -1,8 +1,12 @@
 language: go
+go_import_path: github.com/inconshreveable/log15
+sudo: false
 
 go:
   - 1.3
   - 1.4
   - 1.5
   - 1.6
+  - 1.7
+  - 1.8
   - tip
diff --git a/vendor/github.com/inconshreveable/log15/term/terminal_darwin.go b/vendor/github.com/inconshreveable/log15/term/terminal_darwin.go
index b05de4c..d8f351b 100644
--- a/vendor/github.com/inconshreveable/log15/term/terminal_darwin.go
+++ b/vendor/github.com/inconshreveable/log15/term/terminal_darwin.go
@@ -2,6 +2,7 @@
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+// +build !appengine
 
 package term
 
diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml
index 8807fe9..60436b2 100644
--- a/vendor/github.com/magiconair/properties/.travis.yml
+++ b/vendor/github.com/magiconair/properties/.travis.yml
@@ -1,6 +1,8 @@
 language: go
 go:
-    - 1.4.3
-    - 1.5.3
-    - 1.6.3
-    - 1.7.1
+    - 1.4.x
+    - 1.5.x
+    - 1.6.x
+    - 1.7.x
+    - 1.8.x
+    - tip
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index bf49a13..4905fec 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -1,9 +1,24 @@
 ## Changelog
 
+### [1.7.3](https://github.com/magiconair/properties/tags/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tags/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tags/v1.7.1) - 13 Jan 2017
+
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
 ### [1.7.0](https://github.com/magiconair/properties/tags/v1.7.0) - 20 Mar 2016
 
- * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#Properties.LoadURL) method to load properties from a URL.
- * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#Properties.LoadString) method to load properties from an UTF8 string.
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string.
  * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
 
 ### [1.6.0](https://github.com/magiconair/properties/tags/v1.6.0) - 11 Dec 2015
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
index 1ae0035..eb3b8c4 100644
--- a/vendor/github.com/magiconair/properties/README.md
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -1,7 +1,7 @@
 Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties)
 ========
 
-#### Current version: 1.7.0
+#### Current version: 1.7.3
 
 properties is a Go library for reading and writing properties files.
 
@@ -25,6 +25,8 @@
 error handling functions can be provided. See the package documentation for
 details.
 
+Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties)   [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
+
 Getting Started
 ---------------
 
@@ -35,13 +37,38 @@
 )
 
 func main() {
+	// init from a file
 	p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
 
-	// via getters
+	// or multiple files
+	p = properties.MustLoadFiles([]string{
+			"${HOME}/config.properties",
+			"${HOME}/config-${USER}.properties",
+		}, properties.UTF8, true)
+
+	// or from a map
+	p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+	// or from a string
+	p = properties.MustLoadString("key=value\nabc=def")
+
+	// or from a URL
+	p = properties.MustLoadURL("http://host/path")
+
+	// or from multiple URLs
+	p = properties.MustLoadURL([]string{
+			"http://host/config",
+			"http://host/config-${USER}",
+		}, true)
+
+	// or from flags
+	p.MustFlag(flag.CommandLine)
+
+	// get values through getters
 	host := p.MustGetString("host")
 	port := p.GetInt("port", 8080)
 
-	// or via decode
+	// or through Decode
 	type Config struct {
 		Host    string        `properties:"host"`
 		Port    int           `properties:"port,default=9000"`
@@ -52,18 +79,10 @@
 	if err := p.Decode(&cfg); err != nil {
 		log.Fatal(err)
 	}
-
-	// or via flags
-	p.MustFlag(flag.CommandLine)
-
-	// or via url
-	p = properties.MustLoadURL("http://host/path")
 }
 
 ```
 
-Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties)   [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
-
 Installation and Upgrade
 ------------------------
 
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
index b717a64..0a961bb 100644
--- a/vendor/github.com/magiconair/properties/decode.go
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -158,16 +158,16 @@
 	// keydef returns the property key and the default value based on the
 	// name of the struct field and the options in the tag.
 	keydef := func(f reflect.StructField) (string, *string, map[string]string) {
-		key, opts := parseTag(f.Tag.Get("properties"))
+		_key, _opts := parseTag(f.Tag.Get("properties"))
 
-		var def *string
-		if d, ok := opts["default"]; ok {
-			def = &d
+		var _def *string
+		if d, ok := _opts["default"]; ok {
+			_def = &d
 		}
-		if key != "" {
-			return key, def, opts
+		if _key != "" {
+			return _key, _def, _opts
 		}
-		return f.Name, def, opts
+		return f.Name, _def, _opts
 	}
 
 	switch {
@@ -223,7 +223,7 @@
 	case isMap(t):
 		valT := t.Elem()
 		m := reflect.MakeMap(t)
-		for postfix, _ := range p.FilterStripPrefix(key + ".").m {
+		for postfix := range p.FilterStripPrefix(key + ".").m {
 			pp := strings.SplitN(postfix, ".", 2)
 			mk, mv := pp[0], reflect.New(valT)
 			if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
@@ -274,7 +274,6 @@
 func isBool(t reflect.Type) bool     { return t.Kind() == reflect.Bool }
 func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
 func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
-func isNumeric(t reflect.Type) bool  { return isInt(t) || isUint(t) || isFloat(t) }
 func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
 func isString(t reflect.Type) bool   { return t.Kind() == reflect.String }
 func isStruct(t reflect.Type) bool   { return t.Kind() == reflect.Struct }
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
index ed1ff51..36c8368 100644
--- a/vendor/github.com/magiconair/properties/doc.go
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
index 37baaad..0d775e0 100644
--- a/vendor/github.com/magiconair/properties/integrate.go
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
index 014e63f..a3cba03 100644
--- a/vendor/github.com/magiconair/properties/lex.go
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 //
@@ -72,7 +72,7 @@
 
 // next returns the next rune in the input.
 func (l *lexer) next() rune {
-	if int(l.pos) >= len(l.input) {
+	if l.pos >= len(l.input) {
 		l.width = 0
 		return eof
 	}
@@ -96,8 +96,8 @@
 
 // emit passes an item back to the client.
 func (l *lexer) emit(t itemType) {
-	item := item{t, l.start, string(l.runes)}
-	l.items <- item
+	i := item{t, l.start, string(l.runes)}
+	l.items <- i
 	l.start = l.pos
 	l.runes = l.runes[:0]
 }
@@ -114,7 +114,7 @@
 
 // accept consumes the next rune if it's from the valid set.
 func (l *lexer) accept(valid string) bool {
-	if strings.IndexRune(valid, l.next()) >= 0 {
+	if strings.ContainsRune(valid, l.next()) {
 		return true
 	}
 	l.backup()
@@ -123,7 +123,7 @@
 
 // acceptRun consumes a run of runes from the valid set.
 func (l *lexer) acceptRun(valid string) {
-	for strings.IndexRune(valid, l.next()) >= 0 {
+	for strings.ContainsRune(valid, l.next()) {
 	}
 	l.backup()
 }
@@ -156,9 +156,9 @@
 
 // nextItem returns the next item from the input.
 func (l *lexer) nextItem() item {
-	item := <-l.items
-	l.lastPos = item.pos
-	return item
+	i := <-l.items
+	l.lastPos = i.pos
+	return i
 }
 
 // lex creates a new scanner for the input string.
@@ -279,8 +279,7 @@
 	for {
 		switch r := l.next(); {
 		case isEscape(r):
-			r := l.peek()
-			if isEOL(r) {
+			if isEOL(l.peek()) {
 				l.next()
 				l.acceptRun(whitespace)
 			} else {
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
index 4300fec..278cc2e 100644
--- a/vendor/github.com/magiconair/properties/load.go
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -33,6 +33,15 @@
 	return loadBuf([]byte(s), UTF8)
 }
 
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+	p := NewProperties()
+	for k, v := range m {
+		p.Set(k, v)
+	}
+	return p
+}
+
 // LoadFile reads a file into a Properties struct.
 func LoadFile(filename string, enc Encoding) (*Properties, error) {
 	return loadAll([]string{filename}, enc, false)
@@ -98,7 +107,7 @@
 	return must(LoadURL(url))
 }
 
-// MustLoadFiles reads the content of multiple URLs in the given order into a
+// MustLoadURLs reads the content of multiple URLs in the given order into a
 // Properties struct and panics on error. If 'ignoreMissing' is true then a 404
 // status code will not be reported as error.
 func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
@@ -172,10 +181,12 @@
 		return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
 	}
 	body, err := ioutil.ReadAll(resp.Body)
-	resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
 	}
+	if err = resp.Body.Close(); err != nil {
+		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+	}
 
 	ct := resp.Header.Get("Content-Type")
 	var enc Encoding
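load.go gains a `LoadMap` constructor and corrects the doc comment on `MustLoadURLs` (note that the README example earlier in this diff passes a slice to `MustLoadURL`; the slice-taking variant is `MustLoadURLs`). A minimal usage sketch of `LoadMap`, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// LoadMap builds a Properties directly from a string map, which is
	// convenient in tests where no file or URL is involved.
	p := properties.LoadMap(map[string]string{
		"host": "localhost",
		"port": "8080",
	})
	fmt.Println(p.MustGetString("host"), p.GetInt("port", 9000)) // localhost 8080
}
```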
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
index ff0e1e1..90f555c 100644
--- a/vendor/github.com/magiconair/properties/parser.go
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index 50209d8..4f3d5a4 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -28,8 +28,10 @@
 // functions. The default is LogFatalHandler.
 var ErrorHandler ErrorHandlerFunc = LogFatalHandler
 
+// LogHandlerFunc defines the function prototype for logging errors.
 type LogHandlerFunc func(fmt string, args ...interface{})
 
+// LogPrintf defines a log handler which uses log.Printf.
 var LogPrintf LogHandlerFunc = log.Printf
 
 // LogFatalHandler handles the error by logging a fatal error and exiting.
@@ -444,6 +446,8 @@
 	pp := NewProperties()
 	for _, k := range p.k {
 		if re.MatchString(k) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
 			pp.Set(k, p.m[k])
 		}
 	}
@@ -456,6 +460,8 @@
 	pp := NewProperties()
 	for _, k := range p.k {
 		if strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
 			pp.Set(k, p.m[k])
 		}
 	}
@@ -469,6 +475,9 @@
 	n := len(prefix)
 	for _, k := range p.k {
 		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+			// TODO(fs): this function should probably return an error but the signature is fixed
 			pp.Set(k[n:], p.m[k])
 		}
 	}
@@ -483,9 +492,7 @@
 // Keys returns all keys in the same order as in the input.
 func (p *Properties) Keys() []string {
 	keys := make([]string, len(p.k))
-	for i, k := range p.k {
-		keys[i] = k
-	}
+	copy(keys, p.k)
 	return keys
 }
 
@@ -535,6 +542,13 @@
 	return prev, ok, nil
 }
 
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
+	return err
+}
+
 // MustSet sets the property key to the corresponding value.
 // If a value for key existed before then ok is true and prev
 // contains the previous value. An empty key is silently ignored.
@@ -615,6 +629,30 @@
 	return
 }
 
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+	m := make(map[string]string)
+	for k, v := range p.m {
+		m[k] = v
+	}
+	return m
+}
+
+// FilterFunc returns a copy of the properties which includes the values which passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+	pp := NewProperties()
+outer:
+	for k, v := range p.m {
+		for _, f := range filters {
+			if !f(k, v) {
+				continue outer
+			}
+			pp.Set(k, v)
+		}
+	}
+	return pp
+}
+
 // ----------------------------------------------------------------------------
 
 // Delete removes the key and its comments.
@@ -624,7 +662,7 @@
 	newKeys := []string{}
 	for _, k := range p.k {
 		if k != key {
-			newKeys = append(newKeys, key)
+			newKeys = append(newKeys, k)
 		}
 	}
 	p.k = newKeys
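Besides the Delete fix above (the old loop appended the deleted key once per surviving entry instead of the surviving keys themselves), properties.go gains `SetValue`, `Map`, and `FilterFunc`. A short usage sketch of the new methods, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	// SetValue formats any value with %v before storing it.
	p.SetValue("port", 8080)
	p.SetValue("debug", true)

	// FilterFunc copies only the entries that pass every filter;
	// Map returns the result as a plain string map.
	onlyDebug := p.FilterFunc(func(k, v string) bool { return k == "debug" })
	fmt.Println(onlyDebug.Map()) // map[debug:true]
}
```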
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
index d9ce280..2e907d5 100644
--- a/vendor/github.com/magiconair/properties/rangecheck.go
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Frank Schroeder. All rights reserved.
+// Copyright 2017 Frank Schroeder. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
index 42768b8..98db8f0 100644
--- a/vendor/github.com/mattn/go-colorable/.travis.yml
+++ b/vendor/github.com/mattn/go-colorable/.travis.yml
@@ -2,7 +2,8 @@
 go:
   - tip
 
-sudo: false
-
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
 script:
- - go test -v
+  - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
index e84226a..56729a9 100644
--- a/vendor/github.com/mattn/go-colorable/README.md
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -1,5 +1,10 @@
 # go-colorable
 
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
 Colorable writer for windows.
 
 For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
new file mode 100644
index 0000000..1f28d77
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -0,0 +1,29 @@
+// +build appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable return new instance of Writer which handle escape sequence.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
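The new colorable_appengine.go above, together with the build-tag edits to colorable_others.go and colorable_windows.go below, selects one implementation per platform: stacked `// +build` lines are ANDed, so the Windows file now requires both `windows` and `!appengine`, the generic file requires `!windows` and `!appengine`, and this file requires `appengine`. A sketch of the mechanism with a hypothetical package name:

```go
// +build windows
// +build !appengine

// Package colorsketch illustrates stacked build constraints: this file is
// compiled only for GOOS=windows when the appengine tag is absent.
// Constraint lines must sit above the package clause and be followed by a
// blank line; sibling files tagged "appengine" and "!windows,!appengine"
// would provide the other two variants, mirroring the colorable_*.go files.
package colorsketch
```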
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
index 52d6653..887f203 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_others.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -1,12 +1,16 @@
 // +build !windows
+// +build !appengine
 
 package colorable
 
 import (
 	"io"
 	"os"
+
+	_ "github.com/mattn/go-isatty"
 )
 
+// NewColorable return new instance of Writer which handle escape sequence.
 func NewColorable(file *os.File) io.Writer {
 	if file == nil {
 		panic("nil passed instead of *os.File to NewColorable()")
@@ -15,10 +19,12 @@
 	return file
 }
 
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
 func NewColorableStdout() io.Writer {
 	return os.Stdout
 }
 
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
 func NewColorableStderr() io.Writer {
 	return os.Stderr
 }
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
index bc84adf..15a014f 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -1,3 +1,6 @@
+// +build windows
+// +build !appengine
+
 package colorable
 
 import (
@@ -26,6 +29,15 @@
 	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
 )
 
+const (
+	genericRead  = 0x80000000
+	genericWrite = 0x40000000
+)
+
+const (
+	consoleTextmodeBuffer = 0x1
+)
+
 type wchar uint16
 type short int16
 type dword uint32
@@ -65,15 +77,21 @@
 	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
 	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
 	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
+	procSetConsoleTitle            = kernel32.NewProc("SetConsoleTitleW")
+	procCreateConsoleScreenBuffer  = kernel32.NewProc("CreateConsoleScreenBuffer")
 )
 
+// Writer provide colorable Writer to the console
 type Writer struct {
-	out     io.Writer
-	handle  syscall.Handle
-	lastbuf bytes.Buffer
-	oldattr word
+	out       io.Writer
+	handle    syscall.Handle
+	althandle syscall.Handle
+	oldattr   word
+	oldpos    coord
+	rest      bytes.Buffer
 }
 
+// NewColorable return new instance of Writer which handle escape sequence from File.
 func NewColorable(file *os.File) io.Writer {
 	if file == nil {
 		panic("nil passed instead of *os.File to NewColorable()")
@@ -83,16 +101,17 @@
 		var csbi consoleScreenBufferInfo
 		handle := syscall.Handle(file.Fd())
 		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
-		return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
-	} else {
-		return file
+		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
 	}
+	return file
 }
 
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
 func NewColorableStdout() io.Writer {
 	return NewColorable(os.Stdout)
 }
 
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
 func NewColorableStderr() io.Writer {
 	return NewColorable(os.Stderr)
 }
@@ -356,19 +375,65 @@
 	255: 0xeeeeee,
 }
 
+// `\033]0;TITLESTR\007`
+func doTitleSequence(er *bytes.Reader) error {
+	var c byte
+	var err error
+
+	c, err = er.ReadByte()
+	if err != nil {
+		return err
+	}
+	if c != '0' && c != '2' {
+		return nil
+	}
+	c, err = er.ReadByte()
+	if err != nil {
+		return err
+	}
+	if c != ';' {
+		return nil
+	}
+	title := make([]byte, 0, 80)
+	for {
+		c, err = er.ReadByte()
+		if err != nil {
+			return err
+		}
+		if c == 0x07 || c == '\n' {
+			break
+		}
+		title = append(title, c)
+	}
+	if len(title) > 0 {
+		title8, err := syscall.UTF16PtrFromString(string(title))
+		if err == nil {
+			procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8)))
+		}
+	}
+	return nil
+}
+
+// Write write data on console
 func (w *Writer) Write(data []byte) (n int, err error) {
 	var csbi consoleScreenBufferInfo
 	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
 
-	er := bytes.NewReader(data)
+	handle := w.handle
+
+	var er *bytes.Reader
+	if w.rest.Len() > 0 {
+		var rest bytes.Buffer
+		w.rest.WriteTo(&rest)
+		w.rest.Reset()
+		rest.Write(data)
+		er = bytes.NewReader(rest.Bytes())
+	} else {
+		er = bytes.NewReader(data)
+	}
 	var bw [1]byte
 loop:
 	for {
-		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-		if r1 == 0 {
-			break loop
-		}
-
 		c1, err := er.ReadByte()
 		if err != nil {
 			break loop
@@ -380,155 +445,190 @@
 		}
 		c2, err := er.ReadByte()
 		if err != nil {
-			w.lastbuf.WriteByte(c1)
 			break loop
 		}
+
+		if c2 == ']' {
+			w.rest.WriteByte(c1)
+			w.rest.WriteByte(c2)
+			er.WriteTo(&w.rest)
+			if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 {
+				break loop
+			}
+			er = bytes.NewReader(w.rest.Bytes()[2:])
+			err := doTitleSequence(er)
+			if err != nil {
+				break loop
+			}
+			w.rest.Reset()
+			continue
+		}
 		if c2 != 0x5b {
-			w.lastbuf.WriteByte(c1)
-			w.lastbuf.WriteByte(c2)
 			continue
 		}
 
+		w.rest.WriteByte(c1)
+		w.rest.WriteByte(c2)
+		er.WriteTo(&w.rest)
+
 		var buf bytes.Buffer
 		var m byte
-		for {
-			c, err := er.ReadByte()
-			if err != nil {
-				w.lastbuf.WriteByte(c1)
-				w.lastbuf.WriteByte(c2)
-				w.lastbuf.Write(buf.Bytes())
-				break loop
-			}
+		for i, c := range w.rest.Bytes()[2:] {
 			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
 				m = c
+				er = bytes.NewReader(w.rest.Bytes()[2+i+1:])
+				w.rest.Reset()
 				break
 			}
 			buf.Write([]byte(string(c)))
 		}
+		if m == 0 {
+			break loop
+		}
 
-		var csbi consoleScreenBufferInfo
 		switch m {
 		case 'A':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			csbi.cursorPosition.y -= short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'B':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			csbi.cursorPosition.y += short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'C':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-			csbi.cursorPosition.x -= short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'D':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			if n, err = strconv.Atoi(buf.String()); err == nil {
-				var csbi consoleScreenBufferInfo
-				procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-				csbi.cursorPosition.x += short(n)
-				procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n)
+			if csbi.cursorPosition.x < 0 {
+				csbi.cursorPosition.x = 0
 			}
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'E':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			csbi.cursorPosition.x = 0
 			csbi.cursorPosition.y += short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'F':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			csbi.cursorPosition.x = 0
 			csbi.cursorPosition.y -= short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'G':
 			n, err = strconv.Atoi(buf.String())
 			if err != nil {
 				continue
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			csbi.cursorPosition.x = short(n - 1)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
-		case 'H':
-			token := strings.Split(buf.String(), ";")
-			if len(token) != 2 {
-				continue
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H', 'f':
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+			if buf.Len() > 0 {
+				token := strings.Split(buf.String(), ";")
+				switch len(token) {
+				case 1:
+					n1, err := strconv.Atoi(token[0])
+					if err != nil {
+						continue
+					}
+					csbi.cursorPosition.y = short(n1 - 1)
+				case 2:
+					n1, err := strconv.Atoi(token[0])
+					if err != nil {
+						continue
+					}
+					n2, err := strconv.Atoi(token[1])
+					if err != nil {
+						continue
+					}
+					csbi.cursorPosition.x = short(n2 - 1)
+					csbi.cursorPosition.y = short(n1 - 1)
+				}
+			} else {
+				csbi.cursorPosition.y = 0
 			}
-			n1, err := strconv.Atoi(token[0])
-			if err != nil {
-				continue
-			}
-			n2, err := strconv.Atoi(token[1])
-			if err != nil {
-				continue
-			}
-			csbi.cursorPosition.x = short(n2 - 1)
-			csbi.cursorPosition.y = short(n1 - 1)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'J':
-			n, err := strconv.Atoi(buf.String())
-			if err != nil {
-				continue
+			n := 0
+			if buf.Len() > 0 {
+				n, err = strconv.Atoi(buf.String())
+				if err != nil {
+					continue
+				}
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var count, written dword
 			var cursor coord
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			switch n {
 			case 0:
 				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+				count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
 			case 1:
 				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+				count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x)
 			case 2:
 				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+				count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
 			}
-			var count, written dword
-			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
-			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
-			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
 		case 'K':
-			n, err := strconv.Atoi(buf.String())
-			if err != nil {
-				continue
+			n := 0
+			if buf.Len() > 0 {
+				n, err = strconv.Atoi(buf.String())
+				if err != nil {
+					continue
+				}
 			}
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			var cursor coord
+			var count, written dword
 			switch n {
 			case 0:
 				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+				count = dword(csbi.size.x - csbi.cursorPosition.x)
 			case 1:
 				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+				count = dword(csbi.size.x - csbi.cursorPosition.x)
 			case 2:
 				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+				count = dword(csbi.size.x)
 			}
-			var count, written dword
-			count = dword(csbi.size.x - csbi.cursorPosition.x)
-			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
-			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
 		case 'm':
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
 			attr := csbi.attributes
 			cs := buf.String()
 			if cs == "" {
-				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr))
 				continue
 			}
 			token := strings.Split(cs, ";")
@@ -542,7 +642,7 @@
 						attr |= foregroundIntensity
 					case n == 7:
 						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
-					case 22 == n || n == 25 || n == 25:
+					case n == 22 || n == 25:
 						attr |= foregroundIntensity
 					case n == 27:
 						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
@@ -567,6 +667,21 @@
 								attr |= n256foreAttr[n256]
 								i += 2
 							}
+						} else if len(token) == 5 && token[i+1] == "2" {
+							var r, g, b int
+							r, _ = strconv.Atoi(token[i+2])
+							g, _ = strconv.Atoi(token[i+3])
+							b, _ = strconv.Atoi(token[i+4])
+							i += 4
+							if r > 127 {
+								attr |= foregroundRed
+							}
+							if g > 127 {
+								attr |= foregroundGreen
+							}
+							if b > 127 {
+								attr |= foregroundBlue
+							}
 						} else {
 							attr = attr & (w.oldattr & backgroundMask)
 						}
@@ -594,6 +709,21 @@
 								attr |= n256backAttr[n256]
 								i += 2
 							}
+						} else if len(token) == 5 && token[i+1] == "2" {
+							var r, g, b int
+							r, _ = strconv.Atoi(token[i+2])
+							g, _ = strconv.Atoi(token[i+3])
+							b, _ = strconv.Atoi(token[i+4])
+							i += 4
+							if r > 127 {
+								attr |= backgroundRed
+							}
+							if g > 127 {
+								attr |= backgroundGreen
+							}
+							if b > 127 {
+								attr |= backgroundBlue
+							}
 						} else {
 							attr = attr & (w.oldattr & foregroundMask)
 						}
@@ -625,28 +755,56 @@
 							attr |= backgroundBlue
 						}
 					}
-					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+					procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr))
 				}
 			}
 		case 'h':
+			var ci consoleCursorInfo
 			cs := buf.String()
-			if cs == "?25" {
-				var ci consoleCursorInfo
-				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			if cs == "5>" {
+				procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 0
+				procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+			} else if cs == "?25" {
+				procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
 				ci.visible = 1
-				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+			} else if cs == "?1049" {
+				if w.althandle == 0 {
+					h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0)
+					w.althandle = syscall.Handle(h)
+					if w.althandle != 0 {
+						handle = w.althandle
+					}
+				}
 			}
 		case 'l':
+			var ci consoleCursorInfo
 			cs := buf.String()
-			if cs == "?25" {
-				var ci consoleCursorInfo
-				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			if cs == "5>" {
+				procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 1
+				procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+			} else if cs == "?25" {
+				procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
 				ci.visible = 0
-				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+			} else if cs == "?1049" {
+				if w.althandle != 0 {
+					syscall.CloseHandle(w.althandle)
+					w.althandle = 0
+					handle = w.handle
+				}
 			}
+		case 's':
+			procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+			w.oldpos = csbi.cursorPosition
+		case 'u':
+			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
 		}
 	}
-	return len(data) - w.lastbuf.Len(), nil
+
+	return len(data), nil
 }
 
 type consoleColor struct {
@@ -690,22 +848,22 @@
 }
 
 var color16 = []consoleColor{
-	consoleColor{0x000000, false, false, false, false},
-	consoleColor{0x000080, false, false, true, false},
-	consoleColor{0x008000, false, true, false, false},
-	consoleColor{0x008080, false, true, true, false},
-	consoleColor{0x800000, true, false, false, false},
-	consoleColor{0x800080, true, false, true, false},
-	consoleColor{0x808000, true, true, false, false},
-	consoleColor{0xc0c0c0, true, true, true, false},
-	consoleColor{0x808080, false, false, false, true},
-	consoleColor{0x0000ff, false, false, true, true},
-	consoleColor{0x00ff00, false, true, false, true},
-	consoleColor{0x00ffff, false, true, true, true},
-	consoleColor{0xff0000, true, false, false, true},
-	consoleColor{0xff00ff, true, false, true, true},
-	consoleColor{0xffff00, true, true, false, true},
-	consoleColor{0xffffff, true, true, true, true},
+	{0x000000, false, false, false, false},
+	{0x000080, false, false, true, false},
+	{0x008000, false, true, false, false},
+	{0x008080, false, true, true, false},
+	{0x800000, true, false, false, false},
+	{0x800080, true, false, true, false},
+	{0x808000, true, true, false, false},
+	{0xc0c0c0, true, true, true, false},
+	{0x808080, false, false, false, true},
+	{0x0000ff, false, false, true, true},
+	{0x00ff00, false, true, false, true},
+	{0x00ffff, false, true, true, true},
+	{0xff0000, true, false, false, true},
+	{0xff00ff, true, false, true, true},
+	{0xffff00, true, true, false, true},
+	{0xffffff, true, true, true, true},
 }
 
 type hsv struct {
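
For context on the handle switching above: with this revision the Windows writer tracks an alternate screen buffer (the "?1049" sequences), show/hide and save/restore of the cursor ('h'/'l', 's'/'u'), and Write now reports the full input length as consumed. A minimal usage sketch, not part of this change, assuming the package's exported NewColorableStdout helper:

```go
package main

import (
	"fmt"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// On Windows the returned writer translates ANSI escapes into console
	// API calls; elsewhere it passes data straight through to os.Stdout.
	out := colorable.NewColorableStdout()

	fmt.Fprint(out, "\x1b[s")                 // save cursor position ('s')
	fmt.Fprint(out, "\x1b[31mhello\x1b[0m\n") // red text, then reset
	fmt.Fprint(out, "\x1b[u")                 // restore cursor position ('u')
}
```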
diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go
index b60801d..9721e16 100644
--- a/vendor/github.com/mattn/go-colorable/noncolorable.go
+++ b/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -5,15 +5,17 @@
 	"io"
 )
 
+// NonColorable holds a writer and strips escape sequences from anything written to it.
 type NonColorable struct {
-	out     io.Writer
-	lastbuf bytes.Buffer
+	out io.Writer
 }
 
+// NewNonColorable returns a new instance of Writer which removes escape sequences from the wrapped Writer.
 func NewNonColorable(w io.Writer) io.Writer {
 	return &NonColorable{out: w}
 }
 
+// Write writes data to the wrapped writer, stripping escape sequences.
 func (w *NonColorable) Write(data []byte) (n int, err error) {
 	er := bytes.NewReader(data)
 	var bw [1]byte
@@ -30,12 +32,9 @@
 		}
 		c2, err := er.ReadByte()
 		if err != nil {
-			w.lastbuf.WriteByte(c1)
 			break loop
 		}
 		if c2 != 0x5b {
-			w.lastbuf.WriteByte(c1)
-			w.lastbuf.WriteByte(c2)
 			continue
 		}
 
@@ -43,9 +42,6 @@
 		for {
 			c, err := er.ReadByte()
 			if err != nil {
-				w.lastbuf.WriteByte(c1)
-				w.lastbuf.WriteByte(c2)
-				w.lastbuf.Write(buf.Bytes())
 				break loop
 			}
 			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
@@ -54,5 +50,6 @@
 			buf.Write([]byte(string(c)))
 		}
 	}
-	return len(data) - w.lastbuf.Len(), nil
+
+	return len(data), nil
 }
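
The simplification above drops the lastbuf bookkeeping, so NonColorable now always reports len(data) bytes written. A small sketch of typical use, assuming the exported NewNonColorable constructor:

```go
package main

import (
	"fmt"
	"os"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// NonColorable forwards plain text and silently drops CSI escape
	// sequences, so this prints just "hello" with no color codes.
	w := colorable.NewNonColorable(os.Stdout)
	fmt.Fprint(w, "\x1b[32mhello\x1b[0m\n")
}
```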
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 0000000..b9f8b23
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
index 74845de..1e69004 100644
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -1,5 +1,10 @@
 # go-isatty
 
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
 isatty for golang
 
 ## Usage
@@ -16,6 +21,8 @@
 func main() {
 	if isatty.IsTerminal(os.Stdout.Fd()) {
 		fmt.Println("Is Terminal")
+	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Cygwin/MSYS2 Terminal")
 	} else {
 		fmt.Println("Is Not Terminal")
 	}
@@ -28,10 +35,16 @@
 $ go get github.com/mattn/go-isatty
 ```
 
-# License
+## License
 
 MIT
 
-# Author
+## Author
 
 Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+    https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
index 83c5887..9584a98 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -7,3 +7,9 @@
 func IsTerminal(fd uintptr) bool {
 	return false
 }
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal. On App Engine this is always false.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 0000000..ff4de3d
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,10 @@
+// +build !windows
+// +build !appengine
+
+package isatty
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal. On non-Windows platforms this is always false.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
index 83c398b..af51cbc 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -4,12 +4,30 @@
 package isatty
 
 import (
+	"strings"
 	"syscall"
+	"unicode/utf16"
 	"unsafe"
 )
 
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+const (
+	fileNameInfo uintptr = 2
+	fileTypePipe         = 3
+)
+
+var (
+	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
+	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileType                  = kernel32.NewProc("GetFileType")
+)
+
+func init() {
+	// Check if GetFileInformationByHandleEx is available.
+	if procGetFileInformationByHandleEx.Find() != nil {
+		procGetFileInformationByHandleEx = nil
+	}
+}
 
 // IsTerminal return true if the file descriptor is terminal.
 func IsTerminal(fd uintptr) bool {
@@ -17,3 +35,60 @@
 	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
 	return r != 0 && e == 0
 }
+
+// isCygwinPipeName reports whether a pipe name is one used by a Cygwin/MSYS2 pty.
+// Cygwin/MSYS2 PTY has a name like:
+//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+	token := strings.Split(name, "-")
+	if len(token) < 5 {
+		return false
+	}
+
+	if token[0] != `\msys` && token[0] != `\cygwin` {
+		return false
+	}
+
+	if token[1] == "" {
+		return false
+	}
+
+	if !strings.HasPrefix(token[2], "pty") {
+		return false
+	}
+
+	if token[3] != `from` && token[3] != `to` {
+		return false
+	}
+
+	if token[4] != "master" {
+		return false
+	}
+
+	return true
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+	if procGetFileInformationByHandleEx == nil {
+		return false
+	}
+
+	// Cygwin/msys's pty is a pipe.
+	ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+	if ft != fileTypePipe || e != 0 {
+		return false
+	}
+
+	var buf [2 + syscall.MAX_PATH]uint16
+	r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+		4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+		uintptr(len(buf)*2), 0, 0)
+	if r == 0 || e != 0 {
+		return false
+	}
+
+	l := *(*uint32)(unsafe.Pointer(&buf))
+	return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
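
As an illustration of the pipe-name check added above, names must split into at least five '-' separated tokens of the form \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master. A hypothetical in-package test sketch (not part of the vendored file):

```go
package isatty

import "testing"

func TestIsCygwinPipeNameSketch(t *testing.T) {
	cases := map[string]bool{
		`\msys-dd50a72ab4668b33-pty0-to-master`:     true,
		`\cygwin-dd50a72ab4668b33-pty1-from-master`: true,
		`\msys-dd50a72ab4668b33-pty0-to-slave`:      false, // must end in "master"
		`\Device\NamedPipe\foo`:                     false, // wrong prefix and shape
	}
	for name, want := range cases {
		if got := isCygwinPipeName(name); got != want {
			t.Errorf("isCygwinPipeName(%q) = %v, want %v", name, got, want)
		}
	}
}
```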
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
index 7f3fe9a..5c14c13 100644
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -1,7 +1,7 @@
 language: go 
 
 go: 
-  - 1.4
+  - 1.8.1
   
 script:
   - go test 
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
index 115ae67..afcfd5e 100644
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -38,12 +38,6 @@
 	raw DecodeHookFunc,
 	from reflect.Type, to reflect.Type,
 	data interface{}) (interface{}, error) {
-	// Build our arguments that reflect expects
-	argVals := make([]reflect.Value, 3)
-	argVals[0] = reflect.ValueOf(from)
-	argVals[1] = reflect.ValueOf(to)
-	argVals[2] = reflect.ValueOf(data)
-
 	switch f := typedDecodeHook(raw).(type) {
 	case DecodeHookFuncType:
 		return f(from, to, data)
@@ -121,6 +115,11 @@
 	}
 }
 
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
 func WeaklyTypedHook(
 	f reflect.Kind,
 	t reflect.Kind,
@@ -132,9 +131,8 @@
 		case reflect.Bool:
 			if dataVal.Bool() {
 				return "1", nil
-			} else {
-				return "0", nil
 			}
+			return "0", nil
 		case reflect.Float32:
 			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
 		case reflect.Int:
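
A hedged sketch of how the documented WeaklyTypedHook is typically wired into a decoder, using the package's DecoderConfig and NewDecoder entry points:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Port string
}

func main() {
	var c Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.WeaklyTypedHook,
		Result:     &c,
	})
	if err != nil {
		panic(err)
	}
	// The hook converts the int 8080 to the string "8080" before decoding,
	// so Port ends up populated even though the input was numeric.
	if err := dec.Decode(map[string]interface{}{"port": 8080}); err != nil {
		panic(err)
	}
	fmt.Println(c.Port) // prints 8080
}
```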
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index b0ab89b..6ec5c33 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -1,5 +1,5 @@
-// The mapstructure package exposes functionality to convert an
-// abitrary map[string]interface{} into a native Go structure.
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
 //
 // The Go structure can be arbitrarily complex, containing slices,
 // other structs, etc. and the decoder will properly decode nested
@@ -32,7 +32,12 @@
 // both.
 type DecodeHookFunc interface{}
 
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
 type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
 type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
 
 // DecoderConfig is the configuration that is used to create a new decoder
@@ -69,6 +74,9 @@
 	//   - empty array = empty map and vice versa
 	//   - negative numbers to overflowed uint values (base 10)
 	//   - slice of maps to a merged map
+	//   - single values are converted to slices if required. Each
+	//     element is weakly decoded. For example: "4" can become []int{4}
+	//     if the target type is an int slice.
 	//
 	WeaklyTypedInput bool
 
@@ -202,7 +210,7 @@
 			d.config.DecodeHook,
 			dataVal.Type(), val.Type(), data)
 		if err != nil {
-			return err
+			return fmt.Errorf("error decoding '%s': %s", name, err)
 		}
 	}
 
@@ -229,6 +237,8 @@
 		err = d.decodePtr(name, data, val)
 	case reflect.Slice:
 		err = d.decodeSlice(name, data, val)
+	case reflect.Func:
+		err = d.decodeFunc(name, data, val)
 	default:
 		// If we reached this point then we weren't able to decode it
 		return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
@@ -431,7 +441,7 @@
 	case dataKind == reflect.Uint:
 		val.SetFloat(float64(dataVal.Uint()))
 	case dataKind == reflect.Float32:
-		val.SetFloat(float64(dataVal.Float()))
+		val.SetFloat(dataVal.Float())
 	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
 		if dataVal.Bool() {
 			val.SetFloat(1)
@@ -546,7 +556,12 @@
 	// into that. Then set the value of the pointer to this type.
 	valType := val.Type()
 	valElemType := valType.Elem()
-	realVal := reflect.New(valElemType)
+
+	realVal := val
+	if realVal.IsNil() || d.config.ZeroFields {
+		realVal = reflect.New(valElemType)
+	}
+
 	if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
 		return err
 	}
@@ -555,6 +570,19 @@
 	return nil
 }
 
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Functions are not converted: the source value must already have the
+	// exact function type of the target, and it is assigned directly.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+	val.Set(dataVal)
+	return nil
+}
+
 func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
 	dataVal := reflect.Indirect(reflect.ValueOf(data))
 	dataValKind := dataVal.Kind()
@@ -562,26 +590,44 @@
 	valElemType := valType.Elem()
 	sliceType := reflect.SliceOf(valElemType)
 
-	// Check input type
-	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
-		// Accept empty map instead of array/slice in weakly typed mode
-		if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
-			val.Set(reflect.MakeSlice(sliceType, 0, 0))
-			return nil
-		} else {
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty slices
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.MakeSlice(sliceType, 0, 0))
+						return nil
+					}
+
+				// All other types we try to convert to the slice type
+				// and "lift" it into it. i.e. a string becomes a string slice.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeSlice(name, []interface{}{data}, val)
+				}
+			}
+
 			return fmt.Errorf(
 				"'%s': source data must be an array or slice, got %s", name, dataValKind)
-		}
-	}
 
-	// Make a new slice to hold our result, same size as the original data.
-	valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+		}
+
+		// Make a new slice to hold our result, same size as the original data.
+		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+	}
 
 	// Accumulate any errors
 	errors := make([]string, 0)
 
 	for i := 0; i < dataVal.Len(); i++ {
 		currentData := dataVal.Index(i).Interface()
+		for valSlice.Len() <= i {
+			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+		}
 		currentField := valSlice.Index(i)
 
 		fieldName := fmt.Sprintf("%s[%d]", name, i)
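
The new decodeSlice branch above is what backs the added WeaklyTypedInput documentation ("4" can become []int{4}); a minimal sketch of that behavior:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Options struct {
	Ports []int
}

func main() {
	var o Options
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &o,
	})
	if err != nil {
		panic(err)
	}
	// A single scalar is lifted into a one-element slice and each element
	// is weakly decoded, so the string "4" becomes []int{4}.
	if err := dec.Decode(map[string]interface{}{"ports": "4"}); err != nil {
		panic(err)
	}
	fmt.Println(o.Ports) // prints [4]
}
```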
diff --git a/vendor/github.com/pelletier/go-buffruneio/.gitignore b/vendor/github.com/pelletier/go-buffruneio/.gitignore
deleted file mode 100644
index c56069f..0000000
--- a/vendor/github.com/pelletier/go-buffruneio/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.test
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-buffruneio/.travis.yml b/vendor/github.com/pelletier/go-buffruneio/.travis.yml
deleted file mode 100644
index 9720442..0000000
--- a/vendor/github.com/pelletier/go-buffruneio/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-sudo: false
-go:
-    - 1.3.3
-    - 1.4.3
-    - 1.5.3
-    - tip
diff --git a/vendor/github.com/pelletier/go-buffruneio/README.md b/vendor/github.com/pelletier/go-buffruneio/README.md
deleted file mode 100644
index ff608b3..0000000
--- a/vendor/github.com/pelletier/go-buffruneio/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# buffruneio
-
-[![Tests Status](https://travis-ci.org/pelletier/go-buffruneio.svg?branch=master)](https://travis-ci.org/pelletier/go-buffruneio)
-[![GoDoc](https://godoc.org/github.com/pelletier/go-buffruneio?status.svg)](https://godoc.org/github.com/pelletier/go-buffruneio)
-
-Buffruneio is a wrapper around bufio to provide buffered runes access with
-unlimited unreads.
-
-```go
-import "github.com/pelletier/go-buffruneio"
-```
-
-## Examples
-
-```go
-import (
-    "fmt"
-    "github.com/pelletier/go-buffruneio"
-    "strings"
-)
-
-reader := buffruneio.NewReader(strings.NewReader("abcd"))
-fmt.Println(reader.ReadRune()) // 'a'
-fmt.Println(reader.ReadRune()) // 'b'
-fmt.Println(reader.ReadRune()) // 'c'
-reader.UnreadRune()
-reader.UnreadRune()
-fmt.Println(reader.ReadRune()) // 'b'
-fmt.Println(reader.ReadRune()) // 'c'
-```
-
-## Documentation
-
-The documentation and additional examples are available at
-[godoc.org](http://godoc.org/github.com/pelletier/go-buffruneio).
-
-## Contribute
-
-Feel free to report bugs and patches using GitHub's pull requests system on
-[pelletier/go-toml](https://github.com/pelletier/go-buffruneio). Any feedback is
-much appreciated!
-
-## LICENSE
-
-Copyright (c) 2016 Thomas Pelletier
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
deleted file mode 100644
index 41cab87..0000000
--- a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
-package buffruneio
-
-import (
-	"bufio"
-	"container/list"
-	"errors"
-	"io"
-)
-
-// Rune to indicate end of file.
-const (
-	EOF = -(iota + 1)
-)
-
-// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
-var ErrNoRuneToUnread = errors.New("no rune to unwind")
-
-// Reader implements runes buffering for an io.Reader object.
-type Reader struct {
-	buffer  *list.List
-	current *list.Element
-	input   *bufio.Reader
-}
-
-// NewReader returns a new Reader.
-func NewReader(rd io.Reader) *Reader {
-	return &Reader{
-		buffer: list.New(),
-		input:  bufio.NewReader(rd),
-	}
-}
-
-func (rd *Reader) feedBuffer() error {
-	r, _, err := rd.input.ReadRune()
-
-	if err != nil {
-		if err != io.EOF {
-			return err
-		}
-		r = EOF
-	}
-
-	rd.buffer.PushBack(r)
-	if rd.current == nil {
-		rd.current = rd.buffer.Back()
-	}
-	return nil
-}
-
-// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
-func (rd *Reader) ReadRune() (rune, error) {
-	if rd.current == rd.buffer.Back() || rd.current == nil {
-		err := rd.feedBuffer()
-		if err != nil {
-			return EOF, err
-		}
-	}
-
-	r := rd.current.Value
-	rd.current = rd.current.Next()
-	return r.(rune), nil
-}
-
-// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
-func (rd *Reader) UnreadRune() error {
-	if rd.current == rd.buffer.Front() {
-		return ErrNoRuneToUnread
-	}
-	if rd.current == nil {
-		rd.current = rd.buffer.Back()
-	} else {
-		rd.current = rd.current.Prev()
-	}
-	return nil
-}
-
-// Forget removes runes stored before the current stream position index.
-func (rd *Reader) Forget() {
-	if rd.current == nil {
-		rd.current = rd.buffer.Back()
-	}
-	for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
-	}
-}
-
-// Peek returns at most the next n runes, reading from the uderlying source if
-// needed. Does not move the current index. It includes EOF if reached.
-func (rd *Reader) Peek(n int) []rune {
-	res := make([]rune, 0, n)
-	cursor := rd.current
-	for i := 0; i < n; i++ {
-		if cursor == nil {
-			err := rd.feedBuffer()
-			if err != nil {
-				return res
-			}
-			cursor = rd.buffer.Back()
-		}
-		if cursor != nil {
-			r := cursor.Value.(rune)
-			res = append(res, r)
-			if r == EOF {
-				return res
-			}
-			cursor = cursor.Next()
-		}
-	}
-	return res
-}
diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml
index 5307ea8..1f8b41f 100644
--- a/vendor/github.com/pelletier/go-toml/.travis.yml
+++ b/vendor/github.com/pelletier/go-toml/.travis.yml
@@ -1,18 +1,23 @@
+sudo: false
 language: go
 go:
-  - 1.5.4
-  - 1.6.3
-  - 1.7
+  - 1.6.4
+  - 1.7.6
+  - 1.8.3
   - tip
 matrix:
   allow_failures:
     - go: tip
   fast_finish: true
 script:
+  - if [ -n "$(go fmt ./...)" ]; then exit 1; fi
   - ./test.sh
+  - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
 before_install:
   - go get github.com/axw/gocov/gocov
   - go get github.com/mattn/goveralls
   - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+branches:
+  only: [master]
 after_success:
-  - $HOME/gopath/bin/goveralls -service=travis-ci
+  - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
index 5f9f53d..583bdae 100644
--- a/vendor/github.com/pelletier/go-toml/LICENSE
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2013 - 2016 Thomas Pelletier, Eric Anderton
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -19,4 +19,3 @@
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
-
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index b511f39..22da41a 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -6,7 +6,7 @@
 [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
 
 [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
-[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/goadesign/goa/blob/master/LICENSE)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
 [![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
 [![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
@@ -16,65 +16,63 @@
 Go-toml provides the following features for using data parsed from TOML documents:
 
 * Load TOML documents from files and string data
-* Easily navigate TOML structure using TomlTree
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
 * Line & column position data for all parsed elements
-* Query support similar to JSON-Path
+* [Query support similar to JSON-Path](query/)
 * Syntax errors contain line and column numbers
 
-Go-toml is designed to help cover use-cases not covered by reflection-based TOML parsing:
-
-* Semantic evaluation of parsed TOML
-* Informing a user of mistakes in the source document, after it has been parsed
-* Programatic handling of default values on a case-by-case basis
-* Using a TOML document as a flexible data-store
-
 ## Import
 
-    import "github.com/pelletier/go-toml"
-
-## Usage
-
-### Example
-
-Say you have a TOML file that looks like this:
-
-```toml
-[postgres]
-user = "pelletier"
-password = "mypassword"
+```go
+import "github.com/pelletier/go-toml"
 ```
 
-Read the username and password like this:
+## Usage example
+
+Read a TOML document:
 
 ```go
-import (
-    "fmt"
-    "github.com/pelletier/go-toml"
-)
+config, _ := toml.LoadString(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
 
-config, err := toml.LoadFile("config.toml")
-if err != nil {
-    fmt.Println("Error ", err.Error())
-} else {
-    // retrieve data directly
-    user := config.Get("postgres.user").(string)
-    password := config.Get("postgres.password").(string)
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password = postgresConfig.Get("password").(string)
+```
 
-    // or using an intermediate object
-    configTree := config.Get("postgres").(*toml.TomlTree)
-    user = configTree.Get("user").(string)
-    password = configTree.Get("password").(string)
-    fmt.Println("User is ", user, ". Password is ", password)
+Or use Unmarshal:
 
-    // show where elements are in the file
-    fmt.Println("User position: %v", configTree.GetPosition("user"))
-    fmt.Println("Password position: %v", configTree.GetPosition("password"))
+```go
+type Postgres struct {
+    User     string
+    Password string
+}
+type Config struct {
+    Postgres Postgres
+}
 
-    // use a query to gather elements without walking the tree
-    results, _ := config.Query("$..[user,password]")
-    for ii, item := range results.Values() {
-      fmt.Println("Query result %d: %v", ii, item)
-    }
+doc := []byte(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+
+config := Config{}
+Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+results, _ := config.Query("$..[user,password]")
+for ii, item := range results.Values() {
+    fmt.Println("Query result %d: %v", ii, item)
 }
 ```
 
@@ -96,7 +94,7 @@
 * `tomljson`: Reads a TOML file and outputs its JSON representation.
 
     ```
-    go install github.com/pelletier/go-toml/cmd/tomjson
+    go install github.com/pelletier/go-toml/cmd/tomljson
     tomljson --help
     ```
 
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json
new file mode 100644
index 0000000..86f99c6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.json
@@ -0,0 +1,164 @@
+{
+    "array": {
+        "key1": [
+            1,
+            2,
+            3
+        ],
+        "key2": [
+            "red",
+            "yellow",
+            "green"
+        ],
+        "key3": [
+            [
+                1,
+                2
+            ],
+            [
+                3,
+                4,
+                5
+            ]
+        ],
+        "key4": [
+            [
+                1,
+                2
+            ],
+            [
+                "a",
+                "b",
+                "c"
+            ]
+        ],
+        "key5": [
+            1,
+            2,
+            3
+        ],
+        "key6": [
+            1,
+            2
+        ]
+    },
+    "boolean": {
+        "False": false,
+        "True": true
+    },
+    "datetime": {
+        "key1": "1979-05-27T07:32:00Z",
+        "key2": "1979-05-27T00:32:00-07:00",
+        "key3": "1979-05-27T00:32:00.999999-07:00"
+    },
+    "float": {
+        "both": {
+            "key": 6.626e-34
+        },
+        "exponent": {
+            "key1": 5e+22,
+            "key2": 1000000,
+            "key3": -0.02
+        },
+        "fractional": {
+            "key1": 1,
+            "key2": 3.1415,
+            "key3": -0.01
+        },
+        "underscores": {
+            "key1": 9224617.445991227,
+            "key2": 1e+100
+        }
+    },
+    "fruit": [{
+            "name": "apple",
+            "physical": {
+                "color": "red",
+                "shape": "round"
+            },
+            "variety": [{
+                    "name": "red delicious"
+                },
+                {
+                    "name": "granny smith"
+                }
+            ]
+        },
+        {
+            "name": "banana",
+            "variety": [{
+                "name": "plantain"
+            }]
+        }
+    ],
+    "integer": {
+        "key1": 99,
+        "key2": 42,
+        "key3": 0,
+        "key4": -17,
+        "underscores": {
+            "key1": 1000,
+            "key2": 5349221,
+            "key3": 12345
+        }
+    },
+    "products": [{
+            "name": "Hammer",
+            "sku": 738594937
+        },
+        {},
+        {
+            "color": "gray",
+            "name": "Nail",
+            "sku": 284758393
+        }
+    ],
+    "string": {
+        "basic": {
+            "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+        },
+        "literal": {
+            "multiline": {
+                "lines": "The first newline is\ntrimmed in raw strings.\n   All other whitespace\n   is preserved.\n",
+                "regex2": "I [dw]on't need \\d{2} apples"
+            },
+            "quoted": "Tom \"Dubs\" Preston-Werner",
+            "regex": "\u003c\\i\\c*\\s*\u003e",
+            "winpath": "C:\\Users\\nodejs\\templates",
+            "winpath2": "\\\\ServerX\\admin$\\system32\\"
+        },
+        "multiline": {
+            "continued": {
+                "key1": "The quick brown fox jumps over the lazy dog.",
+                "key2": "The quick brown fox jumps over the lazy dog.",
+                "key3": "The quick brown fox jumps over the lazy dog."
+            },
+            "key1": "One\nTwo",
+            "key2": "One\nTwo",
+            "key3": "One\nTwo"
+        }
+    },
+    "table": {
+        "inline": {
+            "name": {
+                "first": "Tom",
+                "last": "Preston-Werner"
+            },
+            "point": {
+                "x": 1,
+                "y": 2
+            }
+        },
+        "key": "value",
+        "subtable": {
+            "key": "another value"
+        }
+    },
+    "x": {
+        "y": {
+            "z": {
+                "w": {}
+            }
+        }
+    }
+}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100755
index 0000000..8b8bb52
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! `hash benchstat 2>/dev/null`; then
+    echo "Installing benchstat"
+    go get golang.org/x/perf/cmd/benchstat
+    go install golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem  | tee ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml
new file mode 100644
index 0000000..dfd77e0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.toml
@@ -0,0 +1,244 @@
+################################################################################
+## Comment
+
+# Speak your mind with the hash symbol. They go from the symbol to the end of
+# the line.
+
+
+################################################################################
+## Table
+
+# Tables (also known as hash tables or dictionaries) are collections of
+# key/value pairs. They appear in square brackets on a line by themselves.
+
+[table]
+
+key = "value" # Yeah, you can do this.
+
+# Nested tables are denoted by table names with dots in them. Name your tables
+# whatever crap you please, just don't use #, ., [ or ].
+
+[table.subtable]
+
+key = "another value"
+
+# You don't need to specify all the super-tables if you don't want to. TOML
+# knows how to do it for you.
+
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
+
+
+################################################################################
+## Inline Table
+
+# Inline tables provide a more compact syntax for expressing tables. They are
+# especially useful for grouped data that can otherwise quickly become verbose.
+# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
+# allowed between the curly braces unless they are valid within a value.
+
+[table.inline]
+
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+
+
+################################################################################
+## String
+
+# There are four ways to express strings: basic, multi-line basic, literal, and
+# multi-line literal. All strings must contain only valid UTF-8 characters.
+
+[string.basic]
+
+basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+[string.multiline]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "One\nTwo"
+key2 = """One\nTwo"""
+key3 = """
+One
+Two"""
+
+[string.multiline.continued]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "The quick brown fox jumps over the lazy dog."
+
+key2 = """
+The quick brown \
+
+
+  fox jumps over \
+    the lazy dog."""
+
+key3 = """\
+       The quick brown \
+       fox jumps over \
+       the lazy dog.\
+       """
+
+[string.literal]
+
+# What you see is what you get.
+winpath  = 'C:\Users\nodejs\templates'
+winpath2 = '\\ServerX\admin$\system32\'
+quoted   = 'Tom "Dubs" Preston-Werner'
+regex    = '<\i\c*\s*>'
+
+
+[string.literal.multiline]
+
+regex2 = '''I [dw]on't need \d{2} apples'''
+lines  = '''
+The first newline is
+trimmed in raw strings.
+   All other whitespace
+   is preserved.
+'''
+
+
+################################################################################
+## Integer
+
+# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
+# Negative numbers are prefixed with a minus sign.
+
+[integer]
+
+key1 = +99
+key2 = 42
+key3 = 0
+key4 = -17
+
+[integer.underscores]
+
+# For large numbers, you may use underscores to enhance readability. Each
+# underscore must be surrounded by at least one digit.
+key1 = 1_000
+key2 = 5_349_221
+key3 = 1_2_3_4_5     # valid but inadvisable
+
+
+################################################################################
+## Float
+
+# A float consists of an integer part (which may be prefixed with a plus or
+# minus sign) followed by a fractional part and/or an exponent part.
+
+[float.fractional]
+
+key1 = +1.0
+key2 = 3.1415
+key3 = -0.01
+
+[float.exponent]
+
+key1 = 5e+22
+key2 = 1e6
+key3 = -2E-2
+
+[float.both]
+
+key = 6.626e-34
+
+[float.underscores]
+
+key1 = 9_224_617.445_991_228_313
+key2 = 1e1_00
+
+
+################################################################################
+## Boolean
+
+# Booleans are just the tokens you're used to. Always lowercase.
+
+[boolean]
+
+True = true
+False = false
+
+
+################################################################################
+## Datetime
+
+# Datetimes are RFC 3339 dates.
+
+[datetime]
+
+key1 = 1979-05-27T07:32:00Z
+key2 = 1979-05-27T00:32:00-07:00
+key3 = 1979-05-27T00:32:00.999999-07:00
+
+
+################################################################################
+## Array
+
+# Arrays are square brackets with other primitives inside. Whitespace is
+# ignored. Elements are separated by commas. Data types may not be mixed.
+
+[array]
+
+key1 = [ 1, 2, 3 ]
+key2 = [ "red", "yellow", "green" ]
+key3 = [ [ 1, 2 ], [3, 4, 5] ]
+#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
+
+# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
+# also ignore newlines between the brackets.  Terminating commas are ok before
+# the closing bracket.
+
+key5 = [
+  1, 2, 3
+]
+key6 = [
+  1,
+  2, # this is ok
+]
+
+
+################################################################################
+## Array of Tables
+
+# These can be expressed by using a table name in double brackets. Each table
+# with the same double bracketed name will be an element in the array. The
+# tables are inserted in the order encountered.
+
+[[products]]
+
+name = "Hammer"
+sku = 738594937
+
+[[products]]
+
+[[products]]
+
+name = "Nail"
+sku = 284758393
+color = "gray"
+
+
+# You can create nested arrays of tables as well.
+
+[[fruit]]
+  name = "apple"
+
+  [fruit.physical]
+    color = "red"
+    shape = "round"
+
+  [[fruit.variety]]
+    name = "red delicious"
+
+  [[fruit.variety]]
+    name = "granny smith"
+
+[[fruit]]
+  name = "banana"
+
+  [[fruit.variety]]
+    name = "plantain"
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml
new file mode 100644
index 0000000..0bd19f0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.yml
@@ -0,0 +1,121 @@
+---
+array:
+  key1:
+  - 1
+  - 2
+  - 3
+  key2:
+  - red
+  - yellow
+  - green
+  key3:
+  - - 1
+    - 2
+  - - 3
+    - 4
+    - 5
+  key4:
+  - - 1
+    - 2
+  - - a
+    - b
+    - c
+  key5:
+  - 1
+  - 2
+  - 3
+  key6:
+  - 1
+  - 2
+boolean:
+  'False': false
+  'True': true
+datetime:
+  key1: '1979-05-27T07:32:00Z'
+  key2: '1979-05-27T00:32:00-07:00'
+  key3: '1979-05-27T00:32:00.999999-07:00'
+float:
+  both:
+    key: 6.626e-34
+  exponent:
+    key1: 5.0e+22
+    key2: 1000000
+    key3: -0.02
+  fractional:
+    key1: 1
+    key2: 3.1415
+    key3: -0.01
+  underscores:
+    key1: 9224617.445991227
+    key2: 1.0e+100
+fruit:
+- name: apple
+  physical:
+    color: red
+    shape: round
+  variety:
+  - name: red delicious
+  - name: granny smith
+- name: banana
+  variety:
+  - name: plantain
+integer:
+  key1: 99
+  key2: 42
+  key3: 0
+  key4: -17
+  underscores:
+    key1: 1000
+    key2: 5349221
+    key3: 12345
+products:
+- name: Hammer
+  sku: 738594937
+- {}
+- color: gray
+  name: Nail
+  sku: 284758393
+string:
+  basic:
+    basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+  literal:
+    multiline:
+      lines: |
+        The first newline is
+        trimmed in raw strings.
+           All other whitespace
+           is preserved.
+      regex2: I [dw]on't need \d{2} apples
+    quoted: Tom "Dubs" Preston-Werner
+    regex: "<\\i\\c*\\s*>"
+    winpath: C:\Users\nodejs\templates
+    winpath2: "\\\\ServerX\\admin$\\system32\\"
+  multiline:
+    continued:
+      key1: The quick brown fox jumps over the lazy dog.
+      key2: The quick brown fox jumps over the lazy dog.
+      key3: The quick brown fox jumps over the lazy dog.
+    key1: |-
+      One
+      Two
+    key2: |-
+      One
+      Two
+    key3: |-
+      One
+      Two
+table:
+  inline:
+    name:
+      first: Tom
+      last: Preston-Werner
+    point:
+      x: 1
+      y: 2
+  key: value
+  subtable:
+    key: another value
+x:
+  y:
+    z:
+      w: {}
diff --git a/vendor/github.com/pelletier/go-toml/clean.sh b/vendor/github.com/pelletier/go-toml/clean.sh
deleted file mode 100755
index 44d49d9..0000000
--- a/vendor/github.com/pelletier/go-toml/clean.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-# fail out of the script if anything here fails
-set -e
-
-# clear out stuff generated by test.sh
-rm -rf src test_program_bin toml-test
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
index c8c9add..3c89619 100644
--- a/vendor/github.com/pelletier/go-toml/doc.go
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -1,250 +1,23 @@
-// Package toml is a TOML markup language parser.
+// Package toml is a TOML parser and manipulation library.
 //
 // This version supports the specification as described in
 // https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
 //
-// TOML Parsing
+// Marshaling
 //
-// TOML data may be parsed in two ways: by file, or by string.
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
 //
-//   // load TOML data by filename
-//   tree, err := toml.LoadFile("filename.toml")
+// TOML document as a tree
 //
-//   // load TOML data stored in a string
-//   tree, err := toml.Load(stringContainingTomlData)
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
 //
-// Either way, the result is a TomlTree object that can be used to navigate the
-// structure and data within the original document.
+// JSONPath-like queries
 //
-//
-// Getting data from the TomlTree
-//
-// After parsing TOML data with Load() or LoadFile(), use the Has() and Get()
-// methods on the returned TomlTree, to find your way through the document data.
-//
-//   if tree.Has('foo') {
-//     fmt.Prinln("foo is: %v", tree.Get('foo'))
-//   }
-//
-// Working with Paths
-//
-// Go-toml has support for basic dot-separated key paths on the Has(), Get(), Set()
-// and GetDefault() methods.  These are the same kind of key paths used within the
-// TOML specification for struct tames.
-//
-//   // looks for a key named 'baz', within struct 'bar', within struct 'foo'
-//   tree.Has("foo.bar.baz")
-//
-//   // returns the key at this path, if it is there
-//   tree.Get("foo.bar.baz")
-//
-// TOML allows keys to contain '.', which can cause this syntax to be problematic
-// for some documents.  In such cases, use the GetPath(), HasPath(), and SetPath(),
-// methods to explicitly define the path.  This form is also faster, since
-// it avoids having to parse the passed key for '.' delimiters.
-//
-//   // looks for a key named 'baz', within struct 'bar', within struct 'foo'
-//   tree.HasPath(string{}{"foo","bar","baz"})
-//
-//   // returns the key at this path, if it is there
-//   tree.GetPath(string{}{"foo","bar","baz"})
-//
-// Note that this is distinct from the heavyweight query syntax supported by
-// TomlTree.Query() and the Query() struct (see below).
-//
-// Position Support
-//
-// Each element within the TomlTree is stored with position metadata, which is
-// invaluable for providing semantic feedback to a user.  This helps in
-// situations where the TOML file parses correctly, but contains data that is
-// not correct for the application.  In such cases, an error message can be
-// generated that indicates the problem line and column number in the source
-// TOML document.
-//
-//   // load TOML data
-//   tree, _ := toml.Load("filename.toml")
-//
-//   // get an entry and report an error if it's the wrong type
-//   element := tree.Get("foo")
-//   if value, ok := element.(int64); !ok {
-//       return fmt.Errorf("%v: Element 'foo' must be an integer", tree.GetPosition("foo"))
-//   }
-//
-//   // report an error if an expected element is missing
-//   if !tree.Has("bar") {
-//      return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition(""))
-//   }
-//
-// Query Support
-//
-// The TOML query path implementation is based loosely on the JSONPath specification:
-// http://goessner.net/articles/JsonPath/
-//
-// The idea behind a query path is to allow quick access to any element, or set
-// of elements within TOML document, with a single expression.
-//
-//   result, err := tree.Query("$.foo.bar.baz")
-//
-// This is roughly equivalent to:
-//
-//   next := tree.Get("foo")
-//   if next != nil {
-//     next = next.Get("bar")
-//     if next != nil {
-//       next = next.Get("baz")
-//     }
-//   }
-//   result := next
-//
-// err is nil if any parsing exception occurs.
-//
-// If no node in the tree matches the query, result will simply contain an empty list of
-// items.
-//
-// As illustrated above, the query path is much more efficient, especially since
-// the structure of the TOML file can vary.  Rather than making assumptions about
-// a document's structure, a query allows the programmer to make structured
-// requests into the document, and get zero or more values as a result.
-//
-// The syntax of a query begins with a root token, followed by any number
-// sub-expressions:
-//
-//   $
-//                    Root of the TOML tree.  This must always come first.
-//   .name
-//                    Selects child of this node, where 'name' is a TOML key
-//                    name.
-//   ['name']
-//                    Selects child of this node, where 'name' is a string
-//                    containing a TOML key name.
-//   [index]
-//                    Selcts child array element at 'index'.
-//   ..expr
-//                    Recursively selects all children, filtered by an a union,
-//                    index, or slice expression.
-//   ..*
-//                    Recursive selection of all nodes at this point in the
-//                    tree.
-//   .*
-//                    Selects all children of the current node.
-//   [expr,expr]
-//                    Union operator - a logical 'or' grouping of two or more
-//                    sub-expressions: index, key name, or filter.
-//   [start:end:step]
-//                    Slice operator - selects array elements from start to
-//                    end-1, at the given step.  All three arguments are
-//                    optional.
-//   [?(filter)]
-//                    Named filter expression - the function 'filter' is
-//                    used to filter children at this node.
-//
-// Query Indexes And Slices
-//
-// Index expressions perform no bounds checking, and will contribute no
-// values to the result set if the provided index or index range is invalid.
-// Negative indexes represent values from the end of the array, counting backwards.
-//
-//   // select the last index of the array named 'foo'
-//   tree.Query("$.foo[-1]")
-//
-// Slice expressions are supported, by using ':' to separate a start/end index pair.
-//
-//   // select up to the first five elements in the array
-//   tree.Query("$.foo[0:5]")
-//
-// Slice expressions also allow negative indexes for the start and stop
-// arguments.
-//
-//   // select all array elements.
-//   tree.Query("$.foo[0:-1]")
-//
-// Slice expressions may have an optional stride/step parameter:
-//
-//   // select every other element
-//   tree.Query("$.foo[0:-1:2]")
-//
-// Slice start and end parameters are also optional:
-//
-//   // these are all equivalent and select all the values in the array
-//   tree.Query("$.foo[:]")
-//   tree.Query("$.foo[0:]")
-//   tree.Query("$.foo[:-1]")
-//   tree.Query("$.foo[0:-1:]")
-//   tree.Query("$.foo[::1]")
-//   tree.Query("$.foo[0::1]")
-//   tree.Query("$.foo[:-1:1]")
-//   tree.Query("$.foo[0:-1:1]")
-//
-// Query Filters
-//
-// Query filters are used within a Union [,] or single Filter [] expression.
-// A filter only allows nodes that qualify through to the next expression,
-// and/or into the result set.
-//
-//   // returns children of foo that are permitted by the 'bar' filter.
-//   tree.Query("$.foo[?(bar)]")
-//
-// There are several filters provided with the library:
-//
-//   tree
-//          Allows nodes of type TomlTree.
-//   int
-//          Allows nodes of type int64.
-//   float
-//          Allows nodes of type float64.
-//   string
-//          Allows nodes of type string.
-//   time
-//          Allows nodes of type time.Time.
-//   bool
-//          Allows nodes of type bool.
-//
-// Query Results
-//
-// An executed query returns a QueryResult object.  This contains the nodes
-// in the TOML tree that qualify the query expression.  Position information
-// is also available for each value in the set.
-//
-//   // display the results of a query
-//   results := tree.Query("$.foo.bar.baz")
-//   for idx, value := results.Values() {
-//       fmt.Println("%v: %v", results.Positions()[idx], value)
-//   }
-//
-// Compiled Queries
-//
-// Queries may be executed directly on a TomlTree object, or compiled ahead
-// of time and executed discretely.  The former is more convienent, but has the
-// penalty of having to recompile the query expression each time.
-//
-//   // basic query
-//   results := tree.Query("$.foo.bar.baz")
-//
-//   // compiled query
-//   query := toml.CompileQuery("$.foo.bar.baz")
-//   results := query.Execute(tree)
-//
-//   // run the compiled query again on a different tree
-//   moreResults := query.Execute(anotherTree)
-//
-// User Defined Query Filters
-//
-// Filter expressions may also be user defined by using the SetFilter()
-// function on the Query object.  The function must return true/false, which
-// signifies if the passed node is kept or discarded, respectively.
-//
-//   // create a query that references a user-defined filter
-//   query, _ := CompileQuery("$[?(bazOnly)]")
-//
-//   // define the filter, and assign it to the query
-//   query.SetFilter("bazOnly", func(node interface{}) bool{
-//       if tree, ok := node.(*TomlTree); ok {
-//           return tree.Has("baz")
-//       }
-//       return false  // reject all other node types
-//   })
-//
-//   // run the query
-//   query.Execute(tree)
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
 //
 package toml
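
For quick reference, the tree-based flow the rewritten doc comment describes, condensed from the README example earlier in this change (function names taken from that README):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	config, err := toml.LoadString(`
[postgres]
user = "pelletier"
password = "mypassword"`)
	if err != nil {
		panic(err)
	}
	// Navigate the parsed document as a Tree.
	fmt.Println(config.Get("postgres.user").(string)) // prints pelletier
}
```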
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
index b67664f..d62ca5f 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -4,6 +4,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"unicode"
 )
@@ -47,7 +48,7 @@
 			} else {
 				if !wasInQuotes {
 					if buffer.Len() == 0 {
-						return nil, fmt.Errorf("empty key group")
+						return nil, errors.New("empty table key")
 					}
 					groups = append(groups, buffer.String())
 					buffer.Reset()
@@ -67,23 +68,23 @@
 				return nil, fmt.Errorf("invalid bare character: %c", char)
 			}
 			if !inQuotes && expectDot {
-				return nil, fmt.Errorf("what?")
+				return nil, errors.New("what?")
 			}
 			buffer.WriteRune(char)
 			expectDot = false
 		}
 	}
 	if inQuotes {
-		return nil, fmt.Errorf("mismatched quotes")
+		return nil, errors.New("mismatched quotes")
 	}
 	if escapeNext {
-		return nil, fmt.Errorf("unfinished escape sequence")
+		return nil, errors.New("unfinished escape sequence")
 	}
 	if buffer.Len() > 0 {
 		groups = append(groups, buffer.String())
 	}
 	if len(groups) == 0 {
-		return nil, fmt.Errorf("empty key")
+		return nil, errors.New("empty key")
 	}
 	return groups, nil
 }
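
The substitutions above follow the usual Go convention of errors.New for fixed messages and fmt.Errorf only when formatting is needed; a tiny generic sketch of that split (hypothetical helper, not from this package):

```go
package main

import (
	"errors"
	"fmt"
)

var errEmptyKey = errors.New("empty key") // fixed message: errors.New

func checkKey(s string) error {
	if s == "" {
		return errEmptyKey
	}
	if s[0] == '.' {
		// message needs formatting: fmt.Errorf
		return fmt.Errorf("invalid bare character: %c", s[0])
	}
	return nil
}

func main() {
	fmt.Println(checkKey(""))   // empty key
	fmt.Println(checkKey(".x")) // invalid bare character: .
}
```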
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index eb4d999..1b6647d 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -6,14 +6,12 @@
 package toml
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
-	"io"
 	"regexp"
 	"strconv"
 	"strings"
-
-	"github.com/pelletier/go-buffruneio"
 )
 
 var dateRegexp *regexp.Regexp
@@ -23,29 +21,29 @@
 
 // Define lexer
 type tomlLexer struct {
-	input         *buffruneio.Reader // Textual source
-	buffer        []rune             // Runes composing the current token
-	tokens        chan token
-	depth         int
-	line          int
-	col           int
-	endbufferLine int
-	endbufferCol  int
+	inputIdx          int
+	input             []rune // Textual source
+	currentTokenStart int
+	currentTokenStop  int
+	tokens            []token
+	depth             int
+	line              int
+	col               int
+	endbufferLine     int
+	endbufferCol      int
 }
 
 // Basic read operations on input
 
 func (l *tomlLexer) read() rune {
-	r, err := l.input.ReadRune()
-	if err != nil {
-		panic(err)
-	}
+	r := l.peek()
 	if r == '\n' {
 		l.endbufferLine++
 		l.endbufferCol = 1
 	} else {
 		l.endbufferCol++
 	}
+	l.inputIdx++
 	return r
 }
 
@@ -53,13 +51,13 @@
 	r := l.read()
 
 	if r != eof {
-		l.buffer = append(l.buffer, r)
+		l.currentTokenStop++
 	}
 	return r
 }
 
 func (l *tomlLexer) ignore() {
-	l.buffer = make([]rune, 0)
+	l.currentTokenStart = l.currentTokenStop
 	l.line = l.endbufferLine
 	l.col = l.endbufferCol
 }
@@ -76,49 +74,46 @@
 }
 
 func (l *tomlLexer) emitWithValue(t tokenType, value string) {
-	l.tokens <- token{
+	l.tokens = append(l.tokens, token{
 		Position: Position{l.line, l.col},
 		typ:      t,
 		val:      value,
-	}
+	})
 	l.ignore()
 }
 
 func (l *tomlLexer) emit(t tokenType) {
-	l.emitWithValue(t, string(l.buffer))
+	l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
 }
 
 func (l *tomlLexer) peek() rune {
-	r, err := l.input.ReadRune()
-	if err != nil {
-		panic(err)
+	if l.inputIdx >= len(l.input) {
+		return eof
 	}
-	l.input.UnreadRune()
-	return r
+	return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+	maxIdx := len(l.input)
+	upperIdx := l.inputIdx + size // FIXME: potential overflow
+	if upperIdx > maxIdx {
+		upperIdx = maxIdx
+	}
+	return string(l.input[l.inputIdx:upperIdx])
 }
 
 func (l *tomlLexer) follow(next string) bool {
-	for _, expectedRune := range next {
-		r, err := l.input.ReadRune()
-		defer l.input.UnreadRune()
-		if err != nil {
-			panic(err)
-		}
-		if expectedRune != r {
-			return false
-		}
-	}
-	return true
+	return next == l.peekString(len(next))
 }
 
 // Error management
 
 func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
-	l.tokens <- token{
+	l.tokens = append(l.tokens, token{
 		Position: Position{l.line, l.col},
 		typ:      tokenError,
 		val:      fmt.Sprintf(format, args...),
-	}
+	})
 	return nil
 }
 
@@ -129,9 +124,9 @@
 		next := l.peek()
 		switch next {
 		case '[':
-			return l.lexKeyGroup
+			return l.lexTableKey
 		case '#':
-			return l.lexComment
+			return l.lexComment(l.lexVoid)
 		case '=':
 			return l.lexEqual
 		case '\r':
@@ -182,7 +177,7 @@
 		case '}':
 			return l.lexRightCurlyBrace
 		case '#':
-			return l.lexComment
+			return l.lexComment(l.lexRvalue)
 		case '"':
 			return l.lexString
 		case '\'':
@@ -219,7 +214,7 @@
 			break
 		}
 
-		possibleDate := string(l.input.Peek(35))
+		possibleDate := l.peekString(35)
 		dateMatch := dateRegexp.FindString(possibleDate)
 		if dateMatch != "" {
 			l.fastForward(len(dateMatch))
@@ -309,15 +304,17 @@
 	return l.lexVoid
 }
 
-func (l *tomlLexer) lexComment() tomlLexStateFn {
-	for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
-		if next == '\r' && l.follow("\r\n") {
-			break
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+	return func() tomlLexStateFn {
+		for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+			if next == '\r' && l.follow("\r\n") {
+				break
+			}
+			l.next()
 		}
-		l.next()
+		l.ignore()
+		return previousState
 	}
-	l.ignore()
-	return l.lexVoid
 }
 
 func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
@@ -516,25 +513,25 @@
 	return l.lexRvalue
 }
 
-func (l *tomlLexer) lexKeyGroup() tomlLexStateFn {
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
 	l.next()
 
 	if l.peek() == '[' {
-		// token '[[' signifies an array of anonymous key groups
+		// token '[[' signifies an array of tables
 		l.next()
 		l.emit(tokenDoubleLeftBracket)
-		return l.lexInsideKeyGroupArray
+		return l.lexInsideTableArrayKey
 	}
-	// vanilla key group
+	// vanilla table key
 	l.emit(tokenLeftBracket)
-	return l.lexInsideKeyGroup
+	return l.lexInsideTableKey
 }
 
-func (l *tomlLexer) lexInsideKeyGroupArray() tomlLexStateFn {
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
 	for r := l.peek(); r != eof; r = l.peek() {
 		switch r {
 		case ']':
-			if len(l.buffer) > 0 {
+			if l.currentTokenStop > l.currentTokenStart {
 				l.emit(tokenKeyGroupArray)
 			}
 			l.next()
@@ -545,31 +542,31 @@
 			l.emit(tokenDoubleRightBracket)
 			return l.lexVoid
 		case '[':
-			return l.errorf("group name cannot contain ']'")
+			return l.errorf("table array key cannot contain ']'")
 		default:
 			l.next()
 		}
 	}
-	return l.errorf("unclosed key group array")
+	return l.errorf("unclosed table array key")
 }
 
-func (l *tomlLexer) lexInsideKeyGroup() tomlLexStateFn {
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
 	for r := l.peek(); r != eof; r = l.peek() {
 		switch r {
 		case ']':
-			if len(l.buffer) > 0 {
+			if l.currentTokenStop > l.currentTokenStart {
 				l.emit(tokenKeyGroup)
 			}
 			l.next()
 			l.emit(tokenRightBracket)
 			return l.lexVoid
 		case '[':
-			return l.errorf("group name cannot contain ']'")
+			return l.errorf("table key cannot contain ']'")
 		default:
 			l.next()
 		}
 	}
-	return l.errorf("unclosed key group")
+	return l.errorf("unclosed table key")
 }
 
 func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
@@ -632,7 +629,6 @@
 	for state := l.lexVoid; state != nil; {
 		state = state()
 	}
-	close(l.tokens)
 }
 
 func init() {
@@ -640,16 +636,16 @@
 }
 
 // Entry point
-func lexToml(input io.Reader) chan token {
-	bufferedInput := buffruneio.NewReader(input)
+func lexToml(inputBytes []byte) []token {
+	runes := bytes.Runes(inputBytes)
 	l := &tomlLexer{
-		input:         bufferedInput,
-		tokens:        make(chan token),
+		input:         runes,
+		tokens:        make([]token, 0, 256),
 		line:          1,
 		col:           1,
 		endbufferLine: 1,
 		endbufferCol:  1,
 	}
-	go l.run()
+	l.run()
 	return l.tokens
 }
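
A note on the lexer refactor above: lexToml no longer wraps an io.Reader and streams tokens over a channel from a goroutine; it converts the input to runes, lexes everything eagerly, and returns a []token that the parser later walks by index. A minimal in-package sketch of the resulting pipeline, which is essentially what the new LoadBytes helper (later in this diff) does apart from its recover-based error handling; loadSketch is a hypothetical name:

    // Sketch only: lexToml and parseToml are unexported, so this would
    // live inside package toml. Parse errors surface as panics that the
    // real LoadBytes recovers from.
    func loadSketch(input []byte) *Tree {
            tokens := lexToml(input) // lexes the whole input; no goroutine, no channel close
            return parseToml(tokens) // parser advances flowIdx over the slice
    }
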
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 0000000..1a3176f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,489 @@
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+type tomlOpts struct {
+	name      string
+	include   bool
+	omitempty bool
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isPrimitive(mtype.Elem())
+	case reflect.Bool:
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Struct:
+		return mtype == timeType || isCustomMarshaler(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree slice
+func isTreeSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Slice:
+		return !isOtherSlice(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a non-Tree slice
+func isOtherSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isOtherSlice(mtype.Elem())
+	case reflect.Slice:
+		return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Map:
+		return true
+	case reflect.Struct:
+		return !isPrimitive(mtype)
+	default:
+		return false
+	}
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+	return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+	return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+/*
+Marshal returns the TOML encoding of v.  Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                            (*)struct, (*)map[string]interface{}
+  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
+  interface{}                      (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64     uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+  float64    float32, float64, pointers to same
+  string     string, pointers to same
+  bool       bool, pointers to same
+  time.Time  time.Time{}, pointers to same
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Struct {
+		return []byte{}, errors.New("Only a struct can be marshaled to TOML")
+	}
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	t, err := valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+	s, err := t.ToTomlString()
+	return []byte(s), err
+}
+
+// Convert given marshal struct or map value to toml tree
+func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := newTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef, mvalf := mtype.Field(i), mval.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include && (!opts.omitempty || !isZero(mvalf)) {
+				val, err := valueToToml(mtypef.Type, mvalf)
+				if err != nil {
+					return nil, err
+				}
+				tval.Set(opts.name, val)
+			}
+		}
+	case reflect.Map:
+		for _, key := range mval.MapKeys() {
+			mvalf := mval.MapIndex(key)
+			val, err := valueToToml(mtype.Elem(), mvalf)
+			if err != nil {
+				return nil, err
+			}
+			tval.Set(key.String(), val)
+		}
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+	tval := make([]*Tree, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := valueToTree(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	tval := make([]interface{}, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := valueToToml(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal value to toml value
+func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return valueToToml(mtype.Elem(), mval.Elem())
+	}
+	switch {
+	case isCustomMarshaler(mtype):
+		return callCustomMarshaler(mval)
+	case isTree(mtype):
+		return valueToTree(mtype, mval)
+	case isTreeSlice(mtype):
+		return valueToTreeSlice(mtype, mval)
+	case isOtherSlice(mtype):
+		return valueToOtherSlice(mtype, mval)
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			return mval.Bool(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return mval.Int(), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return mval.Uint(), nil
+		case reflect.Float32, reflect.Float64:
+			return mval.Float(), nil
+		case reflect.String:
+			return mval.String(), nil
+		case reflect.Struct:
+			return mval.Interface().(time.Time), nil
+		default:
+			return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+		return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+	}
+
+	sval, err := valueFromTree(mtype.Elem(), t)
+	if err != nil {
+		return err
+	}
+	reflect.ValueOf(v).Elem().Set(sval)
+	return nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json decoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// See Marshal() documentation for types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Convert toml tree to marshal struct or map, using marshal type
+func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return unwrapPointer(mtype, tval)
+	}
+	var mval reflect.Value
+	switch mtype.Kind() {
+	case reflect.Struct:
+		mval = reflect.New(mtype).Elem()
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef := mtype.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include {
+				baseKey := opts.name
+				keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
+				for _, key := range keysToTry {
+					exists := tval.Has(key)
+					if !exists {
+						continue
+					}
+					val := tval.Get(key)
+					mvalf, err := valueFromToml(mtypef.Type, val)
+					if err != nil {
+						return mval, formatError(err, tval.GetPosition(key))
+					}
+					mval.Field(i).Set(mvalf)
+					break
+				}
+			}
+		}
+	case reflect.Map:
+		mval = reflect.MakeMap(mtype)
+		for _, key := range tval.Keys() {
+			val := tval.Get(key)
+			mvalf, err := valueFromToml(mtype.Elem(), val)
+			if err != nil {
+				return mval, formatError(err, tval.GetPosition(key))
+			}
+			mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+		}
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := valueFromTree(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := valueFromToml(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type
+func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return unwrapPointer(mtype, tval)
+	}
+	switch {
+	case isTree(mtype):
+		return valueFromTree(mtype, tval.(*Tree))
+	case isTreeSlice(mtype):
+		return valueFromTreeSlice(mtype, tval.([]*Tree))
+	case isOtherSlice(mtype):
+		return valueFromOtherSlice(mtype, tval.([]interface{}))
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			val, ok := tval.(bool)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Int:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int(val)), nil
+		case reflect.Int8:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int8(val)), nil
+		case reflect.Int16:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int16(val)), nil
+		case reflect.Int32:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int32(val)), nil
+		case reflect.Int64:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Uint:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint(val)), nil
+		case reflect.Uint8:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint8(val)), nil
+		case reflect.Uint16:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint16(val)), nil
+		case reflect.Uint32:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint32(val)), nil
+		case reflect.Uint64:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint64(val)), nil
+		case reflect.Float32:
+			val, ok := tval.(float64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+			}
+			return reflect.ValueOf(float32(val)), nil
+		case reflect.Float64:
+			val, ok := tval.(float64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.String:
+			val, ok := tval.(string)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Struct:
+			val, ok := tval.(time.Time)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		default:
+			return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	val, err := valueFromToml(mtype.Elem(), tval)
+	if err != nil {
+		return reflect.ValueOf(nil), err
+	}
+	mval := reflect.New(mtype.Elem())
+	mval.Elem().Set(val)
+	return mval, nil
+}
+
+func tomlOptions(vf reflect.StructField) tomlOpts {
+	tag := vf.Tag.Get("toml")
+	parse := strings.Split(tag, ",")
+	result := tomlOpts{vf.Name, true, false}
+	if parse[0] != "" {
+		if parse[0] == "-" && len(parse) == 1 {
+			result.include = false
+		} else {
+			result.name = strings.Trim(parse[0], " ")
+		}
+	}
+	if vf.PkgPath != "" {
+		result.include = false
+	}
+	if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+		result.omitempty = true
+	}
+	if vf.Type.Kind() == reflect.Ptr {
+		result.omitempty = true
+	}
+	return result
+}
+
+func isZero(val reflect.Value) bool {
+	switch val.Type().Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Array:
+		fallthrough
+	case reflect.Slice:
+		return val.Len() == 0
+	default:
+		return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+	}
+}
+
+func formatError(err error, pos Position) error {
+	if err.Error()[0] == '(' { // Error already contains position information
+		return err
+	}
+	return fmt.Errorf("%s: %s", pos, err)
+}
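
The Marshal and Unmarshal doc comments above describe the new struct (de)serialization entry points: Marshal wants a struct value, Unmarshal a pointer to struct, and `toml` struct tags control key names plus the omitempty option. A small usage sketch; the Config type and its fields are illustrative only and not part of the library:

    package main

    import (
            "fmt"

            "github.com/pelletier/go-toml"
    )

    // Config is a hypothetical type used only to exercise the new API.
    type Config struct {
            User     string `toml:"user"`
            Password string `toml:"password,omitempty"` // zero value is dropped on Marshal
            Port     int64  `toml:"port"`
    }

    func main() {
            // Marshal takes a struct value; a pointer or any other kind is rejected.
            out, err := toml.Marshal(Config{User: "gopher", Port: 5432})
            if err != nil {
                    panic(err)
            }
            fmt.Printf("%s", out)

            // Unmarshal requires a pointer to a struct.
            var c Config
            if err := toml.Unmarshal([]byte("user = \"toml\"\nport = 5433\n"), &c); err != nil {
                    panic(err)
            }
            fmt.Println(c.User, c.Port)
    }
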
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 0000000..1c5f98e
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,38 @@
+title = "TOML Marshal Testing"
+
+[basic]
+  bool = true
+  date = 1979-05-27T07:32:00Z
+  float = 123.4
+  int = 5000
+  string = "Bite me"
+  uint = 5001
+
+[basic_lists]
+  bools = [true,false,true]
+  dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+  floats = [12.3,45.6,78.9]
+  ints = [8001,8001,8002]
+  strings = ["One","Two","Three"]
+  uints = [5002,5003]
+
+[basic_map]
+  one = "one"
+  two = "two"
+
+[subdoc]
+
+  [subdoc.first]
+    name = "First"
+
+  [subdoc.second]
+    name = "Second"
+
+[[subdoclist]]
+  name = "List.First"
+
+[[subdoclist]]
+  name = "List.Second"
+
+[[subdocptrs]]
+  name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/match.go b/vendor/github.com/pelletier/go-toml/match.go
deleted file mode 100644
index 48b0f2a..0000000
--- a/vendor/github.com/pelletier/go-toml/match.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package toml
-
-import (
-	"fmt"
-)
-
-// support function to set positions for tomlValues
-// NOTE: this is done to allow ctx.lastPosition to indicate the start of any
-// values returned by the query engines
-func tomlValueCheck(node interface{}, ctx *queryContext) interface{} {
-	switch castNode := node.(type) {
-	case *tomlValue:
-		ctx.lastPosition = castNode.position
-		return castNode.value
-	case []*TomlTree:
-		if len(castNode) > 0 {
-			ctx.lastPosition = castNode[0].position
-		}
-		return node
-	default:
-		return node
-	}
-}
-
-// base match
-type matchBase struct {
-	next pathFn
-}
-
-func (f *matchBase) setNext(next pathFn) {
-	f.next = next
-}
-
-// terminating functor - gathers results
-type terminatingFn struct {
-	// empty
-}
-
-func newTerminatingFn() *terminatingFn {
-	return &terminatingFn{}
-}
-
-func (f *terminatingFn) setNext(next pathFn) {
-	// do nothing
-}
-
-func (f *terminatingFn) call(node interface{}, ctx *queryContext) {
-	switch castNode := node.(type) {
-	case *TomlTree:
-		ctx.result.appendResult(node, castNode.position)
-	case *tomlValue:
-		ctx.result.appendResult(node, castNode.position)
-	default:
-		// use last position for scalars
-		ctx.result.appendResult(node, ctx.lastPosition)
-	}
-}
-
-// match single key
-type matchKeyFn struct {
-	matchBase
-	Name string
-}
-
-func newMatchKeyFn(name string) *matchKeyFn {
-	return &matchKeyFn{Name: name}
-}
-
-func (f *matchKeyFn) call(node interface{}, ctx *queryContext) {
-	if array, ok := node.([]*TomlTree); ok {
-		for _, tree := range array {
-			item := tree.values[f.Name]
-			if item != nil {
-				f.next.call(item, ctx)
-			}
-		}
-	} else if tree, ok := node.(*TomlTree); ok {
-		item := tree.values[f.Name]
-		if item != nil {
-			f.next.call(item, ctx)
-		}
-	}
-}
-
-// match single index
-type matchIndexFn struct {
-	matchBase
-	Idx int
-}
-
-func newMatchIndexFn(idx int) *matchIndexFn {
-	return &matchIndexFn{Idx: idx}
-}
-
-func (f *matchIndexFn) call(node interface{}, ctx *queryContext) {
-	if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
-		if f.Idx < len(arr) && f.Idx >= 0 {
-			f.next.call(arr[f.Idx], ctx)
-		}
-	}
-}
-
-// filter by slicing
-type matchSliceFn struct {
-	matchBase
-	Start, End, Step int
-}
-
-func newMatchSliceFn(start, end, step int) *matchSliceFn {
-	return &matchSliceFn{Start: start, End: end, Step: step}
-}
-
-func (f *matchSliceFn) call(node interface{}, ctx *queryContext) {
-	if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
-		// adjust indexes for negative values, reverse ordering
-		realStart, realEnd := f.Start, f.End
-		if realStart < 0 {
-			realStart = len(arr) + realStart
-		}
-		if realEnd < 0 {
-			realEnd = len(arr) + realEnd
-		}
-		if realEnd < realStart {
-			realEnd, realStart = realStart, realEnd // swap
-		}
-		// loop and gather
-		for idx := realStart; idx < realEnd; idx += f.Step {
-			f.next.call(arr[idx], ctx)
-		}
-	}
-}
-
-// match anything
-type matchAnyFn struct {
-	matchBase
-}
-
-func newMatchAnyFn() *matchAnyFn {
-	return &matchAnyFn{}
-}
-
-func (f *matchAnyFn) call(node interface{}, ctx *queryContext) {
-	if tree, ok := node.(*TomlTree); ok {
-		for _, v := range tree.values {
-			f.next.call(v, ctx)
-		}
-	}
-}
-
-// filter through union
-type matchUnionFn struct {
-	Union []pathFn
-}
-
-func (f *matchUnionFn) setNext(next pathFn) {
-	for _, fn := range f.Union {
-		fn.setNext(next)
-	}
-}
-
-func (f *matchUnionFn) call(node interface{}, ctx *queryContext) {
-	for _, fn := range f.Union {
-		fn.call(node, ctx)
-	}
-}
-
-// match every single last node in the tree
-type matchRecursiveFn struct {
-	matchBase
-}
-
-func newMatchRecursiveFn() *matchRecursiveFn {
-	return &matchRecursiveFn{}
-}
-
-func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) {
-	if tree, ok := node.(*TomlTree); ok {
-		var visit func(tree *TomlTree)
-		visit = func(tree *TomlTree) {
-			for _, v := range tree.values {
-				f.next.call(v, ctx)
-				switch node := v.(type) {
-				case *TomlTree:
-					visit(node)
-				case []*TomlTree:
-					for _, subtree := range node {
-						visit(subtree)
-					}
-				}
-			}
-		}
-		f.next.call(tree, ctx)
-		visit(tree)
-	}
-}
-
-// match based on an externally provided functional filter
-type matchFilterFn struct {
-	matchBase
-	Pos  Position
-	Name string
-}
-
-func newMatchFilterFn(name string, pos Position) *matchFilterFn {
-	return &matchFilterFn{Name: name, Pos: pos}
-}
-
-func (f *matchFilterFn) call(node interface{}, ctx *queryContext) {
-	fn, ok := (*ctx.filters)[f.Name]
-	if !ok {
-		panic(fmt.Sprintf("%s: query context does not have filter '%s'",
-			f.Pos.String(), f.Name))
-	}
-	switch castNode := tomlValueCheck(node, ctx).(type) {
-	case *TomlTree:
-		for _, v := range castNode.values {
-			if tv, ok := v.(*tomlValue); ok {
-				if fn(tv.value) {
-					f.next.call(v, ctx)
-				}
-			} else {
-				if fn(v) {
-					f.next.call(v, ctx)
-				}
-			}
-		}
-	case []interface{}:
-		for _, v := range castNode {
-			if fn(v) {
-				f.next.call(v, ctx)
-			}
-		}
-	}
-}
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index 25932d7..8ee49cb 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -3,6 +3,7 @@
 package toml
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"regexp"
@@ -12,11 +13,11 @@
 )
 
 type tomlParser struct {
-	flow          chan token
-	tree          *TomlTree
-	tokensBuffer  []token
-	currentGroup  []string
-	seenGroupKeys []string
+	flowIdx       int
+	flow          []token
+	tree          *Tree
+	currentTable  []string
+	seenTableKeys []string
 }
 
 type tomlParserStateFn func() tomlParserStateFn
@@ -33,16 +34,10 @@
 }
 
 func (p *tomlParser) peek() *token {
-	if len(p.tokensBuffer) != 0 {
-		return &(p.tokensBuffer[0])
-	}
-
-	tok, ok := <-p.flow
-	if !ok {
+	if p.flowIdx >= len(p.flow) {
 		return nil
 	}
-	p.tokensBuffer = append(p.tokensBuffer, tok)
-	return &tok
+	return &p.flow[p.flowIdx]
 }
 
 func (p *tomlParser) assume(typ tokenType) {
@@ -56,16 +51,12 @@
 }
 
 func (p *tomlParser) getToken() *token {
-	if len(p.tokensBuffer) != 0 {
-		tok := p.tokensBuffer[0]
-		p.tokensBuffer = p.tokensBuffer[1:]
-		return &tok
-	}
-	tok, ok := <-p.flow
-	if !ok {
+	tok := p.peek()
+	if tok == nil {
 		return nil
 	}
-	return &tok
+	p.flowIdx++
+	return tok
 }
 
 func (p *tomlParser) parseStart() tomlParserStateFn {
@@ -95,48 +86,48 @@
 	startToken := p.getToken() // discard the [[
 	key := p.getToken()
 	if key.typ != tokenKeyGroupArray {
-		p.raiseError(key, "unexpected token %s, was expecting a key group array", key)
+		p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
 	}
 
-	// get or create group array element at the indicated part in the path
+	// get or create table array element at the indicated part in the path
 	keys, err := parseKey(key.val)
 	if err != nil {
-		p.raiseError(key, "invalid group array key: %s", err)
+		p.raiseError(key, "invalid table array key: %s", err)
 	}
 	p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
 	destTree := p.tree.GetPath(keys)
-	var array []*TomlTree
+	var array []*Tree
 	if destTree == nil {
-		array = make([]*TomlTree, 0)
-	} else if target, ok := destTree.([]*TomlTree); ok && target != nil {
-		array = destTree.([]*TomlTree)
+		array = make([]*Tree, 0)
+	} else if target, ok := destTree.([]*Tree); ok && target != nil {
+		array = destTree.([]*Tree)
 	} else {
-		p.raiseError(key, "key %s is already assigned and not of type group array", key)
+		p.raiseError(key, "key %s is already assigned and not of type table array", key)
 	}
-	p.currentGroup = keys
+	p.currentTable = keys
 
-	// add a new tree to the end of the group array
-	newTree := newTomlTree()
+	// add a new tree to the end of the table array
+	newTree := newTree()
 	newTree.position = startToken.Position
 	array = append(array, newTree)
-	p.tree.SetPath(p.currentGroup, array)
+	p.tree.SetPath(p.currentTable, array)
 
-	// remove all keys that were children of this group array
+	// remove all keys that were children of this table array
 	prefix := key.val + "."
 	found := false
-	for ii := 0; ii < len(p.seenGroupKeys); {
-		groupKey := p.seenGroupKeys[ii]
-		if strings.HasPrefix(groupKey, prefix) {
-			p.seenGroupKeys = append(p.seenGroupKeys[:ii], p.seenGroupKeys[ii+1:]...)
+	for ii := 0; ii < len(p.seenTableKeys); {
+		tableKey := p.seenTableKeys[ii]
+		if strings.HasPrefix(tableKey, prefix) {
+			p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
 		} else {
-			found = (groupKey == key.val)
+			found = (tableKey == key.val)
 			ii++
 		}
 	}
 
 	// keep this key name from use by other kinds of assignments
 	if !found {
-		p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+		p.seenTableKeys = append(p.seenTableKeys, key.val)
 	}
 
 	// move to next parser state
@@ -148,24 +139,24 @@
 	startToken := p.getToken() // discard the [
 	key := p.getToken()
 	if key.typ != tokenKeyGroup {
-		p.raiseError(key, "unexpected token %s, was expecting a key group", key)
+		p.raiseError(key, "unexpected token %s, was expecting a table key", key)
 	}
-	for _, item := range p.seenGroupKeys {
+	for _, item := range p.seenTableKeys {
 		if item == key.val {
 			p.raiseError(key, "duplicated tables")
 		}
 	}
 
-	p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+	p.seenTableKeys = append(p.seenTableKeys, key.val)
 	keys, err := parseKey(key.val)
 	if err != nil {
-		p.raiseError(key, "invalid group array key: %s", err)
+		p.raiseError(key, "invalid table array key: %s", err)
 	}
 	if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
 		p.raiseError(key, "%s", err)
 	}
 	p.assume(tokenRightBracket)
-	p.currentGroup = keys
+	p.currentTable = keys
 	return p.parseStart
 }
 
@@ -174,26 +165,26 @@
 	p.assume(tokenEqual)
 
 	value := p.parseRvalue()
-	var groupKey []string
-	if len(p.currentGroup) > 0 {
-		groupKey = p.currentGroup
+	var tableKey []string
+	if len(p.currentTable) > 0 {
+		tableKey = p.currentTable
 	} else {
-		groupKey = []string{}
+		tableKey = []string{}
 	}
 
-	// find the group to assign, looking out for arrays of groups
-	var targetNode *TomlTree
-	switch node := p.tree.GetPath(groupKey).(type) {
-	case []*TomlTree:
+	// find the table to assign, looking out for arrays of tables
+	var targetNode *Tree
+	switch node := p.tree.GetPath(tableKey).(type) {
+	case []*Tree:
 		targetNode = node[len(node)-1]
-	case *TomlTree:
+	case *Tree:
 		targetNode = node
 	default:
-		p.raiseError(key, "Unknown group type for path: %s",
-			strings.Join(groupKey, "."))
+		p.raiseError(key, "Unknown table type for path: %s",
+			strings.Join(tableKey, "."))
 	}
 
-	// assign value to the found group
+	// assign value to the found table
 	keyVals, err := parseKey(key.val)
 	if err != nil {
 		p.raiseError(key, "%s", err)
@@ -203,7 +194,7 @@
 	}
 	keyVal := keyVals[0]
 	localKey := []string{keyVal}
-	finalKey := append(groupKey, keyVal)
+	finalKey := append(tableKey, keyVal)
 	if targetNode.GetPath(localKey) != nil {
 		p.raiseError(key, "The following key was defined twice: %s",
 			strings.Join(finalKey, "."))
@@ -211,7 +202,7 @@
 	var toInsert interface{}
 
 	switch value.(type) {
-	case *TomlTree:
+	case *Tree, []*Tree:
 		toInsert = value
 	default:
 		toInsert = &tomlValue{value, key.Position}
@@ -224,7 +215,7 @@
 
 func cleanupNumberToken(value string) (string, error) {
 	if numberUnderscoreInvalidRegexp.MatchString(value) {
-		return "", fmt.Errorf("invalid use of _ in number")
+		return "", errors.New("invalid use of _ in number")
 	}
 	cleanedVal := strings.Replace(value, "_", "", -1)
 	return cleanedVal, nil
@@ -288,8 +279,8 @@
 	return t != nil && t.typ == tokenComma
 }
 
-func (p *tomlParser) parseInlineTable() *TomlTree {
-	tree := newTomlTree()
+func (p *tomlParser) parseInlineTable() *Tree {
+	tree := newTree()
 	var previous *token
 Loop:
 	for {
@@ -359,29 +350,29 @@
 			p.getToken()
 		}
 	}
-	// An array of TomlTrees is actually an array of inline
+	// An array of Trees is actually an array of inline
 	// tables, which is a shorthand for a table array. If the
-	// array was not converted from []interface{} to []*TomlTree,
+	// array was not converted from []interface{} to []*Tree,
 	// the two notations would not be equivalent.
-	if arrayType == reflect.TypeOf(newTomlTree()) {
-		tomlArray := make([]*TomlTree, len(array))
+	if arrayType == reflect.TypeOf(newTree()) {
+		tomlArray := make([]*Tree, len(array))
 		for i, v := range array {
-			tomlArray[i] = v.(*TomlTree)
+			tomlArray[i] = v.(*Tree)
 		}
 		return tomlArray
 	}
 	return array
 }
 
-func parseToml(flow chan token) *TomlTree {
-	result := newTomlTree()
+func parseToml(flow []token) *Tree {
+	result := newTree()
 	result.position = Position{1, 1}
 	parser := &tomlParser{
+		flowIdx:       0,
 		flow:          flow,
 		tree:          result,
-		tokensBuffer:  make([]token, 0),
-		currentGroup:  make([]string, 0),
-		seenGroupKeys: make([]string, 0),
+		currentTable:  make([]string, 0),
+		seenTableKeys: make([]string, 0),
 	}
 	parser.run()
 	return result
diff --git a/vendor/github.com/pelletier/go-toml/query.go b/vendor/github.com/pelletier/go-toml/query.go
deleted file mode 100644
index 307a1ec..0000000
--- a/vendor/github.com/pelletier/go-toml/query.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package toml
-
-import (
-	"time"
-)
-
-// NodeFilterFn represents a user-defined filter function, for use with
-// Query.SetFilter().
-//
-// The return value of the function must indicate if 'node' is to be included
-// at this stage of the TOML path.  Returning true will include the node, and
-// returning false will exclude it.
-//
-// NOTE: Care should be taken to write script callbacks such that they are safe
-// to use from multiple goroutines.
-type NodeFilterFn func(node interface{}) bool
-
-// QueryResult is the result of Executing a Query.
-type QueryResult struct {
-	items     []interface{}
-	positions []Position
-}
-
-// appends a value/position pair to the result set.
-func (r *QueryResult) appendResult(node interface{}, pos Position) {
-	r.items = append(r.items, node)
-	r.positions = append(r.positions, pos)
-}
-
-// Values is a set of values within a QueryResult.  The order of values is not
-// guaranteed to be in document order, and may be different each time a query is
-// executed.
-func (r QueryResult) Values() []interface{} {
-	values := make([]interface{}, len(r.items))
-	for i, v := range r.items {
-		o, ok := v.(*tomlValue)
-		if ok {
-			values[i] = o.value
-		} else {
-			values[i] = v
-		}
-	}
-	return values
-}
-
-// Positions is a set of positions for values within a QueryResult.  Each index
-// in Positions() corresponds to the entry in Value() of the same index.
-func (r QueryResult) Positions() []Position {
-	return r.positions
-}
-
-// runtime context for executing query paths
-type queryContext struct {
-	result       *QueryResult
-	filters      *map[string]NodeFilterFn
-	lastPosition Position
-}
-
-// generic path functor interface
-type pathFn interface {
-	setNext(next pathFn)
-	call(node interface{}, ctx *queryContext)
-}
-
-// A Query is the representation of a compiled TOML path.  A Query is safe
-// for concurrent use by multiple goroutines.
-type Query struct {
-	root    pathFn
-	tail    pathFn
-	filters *map[string]NodeFilterFn
-}
-
-func newQuery() *Query {
-	return &Query{
-		root:    nil,
-		tail:    nil,
-		filters: &defaultFilterFunctions,
-	}
-}
-
-func (q *Query) appendPath(next pathFn) {
-	if q.root == nil {
-		q.root = next
-	} else {
-		q.tail.setNext(next)
-	}
-	q.tail = next
-	next.setNext(newTerminatingFn()) // init the next functor
-}
-
-// CompileQuery compiles a TOML path expression.  The returned Query can be used
-// to match elements within a TomlTree and its descendants.
-func CompileQuery(path string) (*Query, error) {
-	return parseQuery(lexQuery(path))
-}
-
-// Execute executes a query against a TomlTree, and returns the result of the query.
-func (q *Query) Execute(tree *TomlTree) *QueryResult {
-	result := &QueryResult{
-		items:     []interface{}{},
-		positions: []Position{},
-	}
-	if q.root == nil {
-		result.appendResult(tree, tree.GetPosition(""))
-	} else {
-		ctx := &queryContext{
-			result:  result,
-			filters: q.filters,
-		}
-		q.root.call(tree, ctx)
-	}
-	return result
-}
-
-// SetFilter sets a user-defined filter function.  These may be used inside
-// "?(..)" query expressions to filter TOML document elements within a query.
-func (q *Query) SetFilter(name string, fn NodeFilterFn) {
-	if q.filters == &defaultFilterFunctions {
-		// clone the static table
-		q.filters = &map[string]NodeFilterFn{}
-		for k, v := range defaultFilterFunctions {
-			(*q.filters)[k] = v
-		}
-	}
-	(*q.filters)[name] = fn
-}
-
-var defaultFilterFunctions = map[string]NodeFilterFn{
-	"tree": func(node interface{}) bool {
-		_, ok := node.(*TomlTree)
-		return ok
-	},
-	"int": func(node interface{}) bool {
-		_, ok := node.(int64)
-		return ok
-	},
-	"float": func(node interface{}) bool {
-		_, ok := node.(float64)
-		return ok
-	},
-	"string": func(node interface{}) bool {
-		_, ok := node.(string)
-		return ok
-	},
-	"time": func(node interface{}) bool {
-		_, ok := node.(time.Time)
-		return ok
-	},
-	"bool": func(node interface{}) bool {
-		_, ok := node.(bool)
-		return ok
-	},
-}
diff --git a/vendor/github.com/pelletier/go-toml/querylexer.go b/vendor/github.com/pelletier/go-toml/querylexer.go
deleted file mode 100644
index 960681d..0000000
--- a/vendor/github.com/pelletier/go-toml/querylexer.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// TOML JSONPath lexer.
-//
-// Written using the principles developed by Rob Pike in
-// http://www.youtube.com/watch?v=HxaD_trXwRE
-
-package toml
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-// Lexer state function
-type queryLexStateFn func() queryLexStateFn
-
-// Lexer definition
-type queryLexer struct {
-	input      string
-	start      int
-	pos        int
-	width      int
-	tokens     chan token
-	depth      int
-	line       int
-	col        int
-	stringTerm string
-}
-
-func (l *queryLexer) run() {
-	for state := l.lexVoid; state != nil; {
-		state = state()
-	}
-	close(l.tokens)
-}
-
-func (l *queryLexer) nextStart() {
-	// iterate by runes (utf8 characters)
-	// search for newlines and advance line/col counts
-	for i := l.start; i < l.pos; {
-		r, width := utf8.DecodeRuneInString(l.input[i:])
-		if r == '\n' {
-			l.line++
-			l.col = 1
-		} else {
-			l.col++
-		}
-		i += width
-	}
-	// advance start position to next token
-	l.start = l.pos
-}
-
-func (l *queryLexer) emit(t tokenType) {
-	l.tokens <- token{
-		Position: Position{l.line, l.col},
-		typ:      t,
-		val:      l.input[l.start:l.pos],
-	}
-	l.nextStart()
-}
-
-func (l *queryLexer) emitWithValue(t tokenType, value string) {
-	l.tokens <- token{
-		Position: Position{l.line, l.col},
-		typ:      t,
-		val:      value,
-	}
-	l.nextStart()
-}
-
-func (l *queryLexer) next() rune {
-	if l.pos >= len(l.input) {
-		l.width = 0
-		return eof
-	}
-	var r rune
-	r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
-	l.pos += l.width
-	return r
-}
-
-func (l *queryLexer) ignore() {
-	l.nextStart()
-}
-
-func (l *queryLexer) backup() {
-	l.pos -= l.width
-}
-
-func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn {
-	l.tokens <- token{
-		Position: Position{l.line, l.col},
-		typ:      tokenError,
-		val:      fmt.Sprintf(format, args...),
-	}
-	return nil
-}
-
-func (l *queryLexer) peek() rune {
-	r := l.next()
-	l.backup()
-	return r
-}
-
-func (l *queryLexer) accept(valid string) bool {
-	if strings.ContainsRune(valid, l.next()) {
-		return true
-	}
-	l.backup()
-	return false
-}
-
-func (l *queryLexer) follow(next string) bool {
-	return strings.HasPrefix(l.input[l.pos:], next)
-}
-
-func (l *queryLexer) lexVoid() queryLexStateFn {
-	for {
-		next := l.peek()
-		switch next {
-		case '$':
-			l.pos++
-			l.emit(tokenDollar)
-			continue
-		case '.':
-			if l.follow("..") {
-				l.pos += 2
-				l.emit(tokenDotDot)
-			} else {
-				l.pos++
-				l.emit(tokenDot)
-			}
-			continue
-		case '[':
-			l.pos++
-			l.emit(tokenLeftBracket)
-			continue
-		case ']':
-			l.pos++
-			l.emit(tokenRightBracket)
-			continue
-		case ',':
-			l.pos++
-			l.emit(tokenComma)
-			continue
-		case '*':
-			l.pos++
-			l.emit(tokenStar)
-			continue
-		case '(':
-			l.pos++
-			l.emit(tokenLeftParen)
-			continue
-		case ')':
-			l.pos++
-			l.emit(tokenRightParen)
-			continue
-		case '?':
-			l.pos++
-			l.emit(tokenQuestion)
-			continue
-		case ':':
-			l.pos++
-			l.emit(tokenColon)
-			continue
-		case '\'':
-			l.ignore()
-			l.stringTerm = string(next)
-			return l.lexString
-		case '"':
-			l.ignore()
-			l.stringTerm = string(next)
-			return l.lexString
-		}
-
-		if isSpace(next) {
-			l.next()
-			l.ignore()
-			continue
-		}
-
-		if isAlphanumeric(next) {
-			return l.lexKey
-		}
-
-		if next == '+' || next == '-' || isDigit(next) {
-			return l.lexNumber
-		}
-
-		if l.next() == eof {
-			break
-		}
-
-		return l.errorf("unexpected char: '%v'", next)
-	}
-	l.emit(tokenEOF)
-	return nil
-}
-
-func (l *queryLexer) lexKey() queryLexStateFn {
-	for {
-		next := l.peek()
-		if !isAlphanumeric(next) {
-			l.emit(tokenKey)
-			return l.lexVoid
-		}
-
-		if l.next() == eof {
-			break
-		}
-	}
-	l.emit(tokenEOF)
-	return nil
-}
-
-func (l *queryLexer) lexString() queryLexStateFn {
-	l.pos++
-	l.ignore()
-	growingString := ""
-
-	for {
-		if l.follow(l.stringTerm) {
-			l.emitWithValue(tokenString, growingString)
-			l.pos++
-			l.ignore()
-			return l.lexVoid
-		}
-
-		if l.follow("\\\"") {
-			l.pos++
-			growingString += "\""
-		} else if l.follow("\\'") {
-			l.pos++
-			growingString += "'"
-		} else if l.follow("\\n") {
-			l.pos++
-			growingString += "\n"
-		} else if l.follow("\\b") {
-			l.pos++
-			growingString += "\b"
-		} else if l.follow("\\f") {
-			l.pos++
-			growingString += "\f"
-		} else if l.follow("\\/") {
-			l.pos++
-			growingString += "/"
-		} else if l.follow("\\t") {
-			l.pos++
-			growingString += "\t"
-		} else if l.follow("\\r") {
-			l.pos++
-			growingString += "\r"
-		} else if l.follow("\\\\") {
-			l.pos++
-			growingString += "\\"
-		} else if l.follow("\\u") {
-			l.pos += 2
-			code := ""
-			for i := 0; i < 4; i++ {
-				c := l.peek()
-				l.pos++
-				if !isHexDigit(c) {
-					return l.errorf("unfinished unicode escape")
-				}
-				code = code + string(c)
-			}
-			l.pos--
-			intcode, err := strconv.ParseInt(code, 16, 32)
-			if err != nil {
-				return l.errorf("invalid unicode escape: \\u" + code)
-			}
-			growingString += string(rune(intcode))
-		} else if l.follow("\\U") {
-			l.pos += 2
-			code := ""
-			for i := 0; i < 8; i++ {
-				c := l.peek()
-				l.pos++
-				if !isHexDigit(c) {
-					return l.errorf("unfinished unicode escape")
-				}
-				code = code + string(c)
-			}
-			l.pos--
-			intcode, err := strconv.ParseInt(code, 16, 32)
-			if err != nil {
-				return l.errorf("invalid unicode escape: \\u" + code)
-			}
-			growingString += string(rune(intcode))
-		} else if l.follow("\\") {
-			l.pos++
-			return l.errorf("invalid escape sequence: \\" + string(l.peek()))
-		} else {
-			growingString += string(l.peek())
-		}
-
-		if l.next() == eof {
-			break
-		}
-	}
-
-	return l.errorf("unclosed string")
-}
-
-func (l *queryLexer) lexNumber() queryLexStateFn {
-	l.ignore()
-	if !l.accept("+") {
-		l.accept("-")
-	}
-	pointSeen := false
-	digitSeen := false
-	for {
-		next := l.next()
-		if next == '.' {
-			if pointSeen {
-				return l.errorf("cannot have two dots in one float")
-			}
-			if !isDigit(l.peek()) {
-				return l.errorf("float cannot end with a dot")
-			}
-			pointSeen = true
-		} else if isDigit(next) {
-			digitSeen = true
-		} else {
-			l.backup()
-			break
-		}
-		if pointSeen && !digitSeen {
-			return l.errorf("cannot start float with a dot")
-		}
-	}
-
-	if !digitSeen {
-		return l.errorf("no digit in that number")
-	}
-	if pointSeen {
-		l.emit(tokenFloat)
-	} else {
-		l.emit(tokenInteger)
-	}
-	return l.lexVoid
-}
-
-// Entry point
-func lexQuery(input string) chan token {
-	l := &queryLexer{
-		input:  input,
-		tokens: make(chan token),
-		line:   1,
-		col:    1,
-	}
-	go l.run()
-	return l.tokens
-}
diff --git a/vendor/github.com/pelletier/go-toml/queryparser.go b/vendor/github.com/pelletier/go-toml/queryparser.go
deleted file mode 100644
index 1cbfc83..0000000
--- a/vendor/github.com/pelletier/go-toml/queryparser.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
-  Based on the "jsonpath" spec/concept.
-
-  http://goessner.net/articles/JsonPath/
-  https://code.google.com/p/json-path/
-*/
-
-package toml
-
-import (
-	"fmt"
-)
-
-const maxInt = int(^uint(0) >> 1)
-
-type queryParser struct {
-	flow         chan token
-	tokensBuffer []token
-	query        *Query
-	union        []pathFn
-	err          error
-}
-
-type queryParserStateFn func() queryParserStateFn
-
-// Formats and panics an error message based on a token
-func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn {
-	p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...)
-	return nil // trigger parse to end
-}
-
-func (p *queryParser) run() {
-	for state := p.parseStart; state != nil; {
-		state = state()
-	}
-}
-
-func (p *queryParser) backup(tok *token) {
-	p.tokensBuffer = append(p.tokensBuffer, *tok)
-}
-
-func (p *queryParser) peek() *token {
-	if len(p.tokensBuffer) != 0 {
-		return &(p.tokensBuffer[0])
-	}
-
-	tok, ok := <-p.flow
-	if !ok {
-		return nil
-	}
-	p.backup(&tok)
-	return &tok
-}
-
-func (p *queryParser) lookahead(types ...tokenType) bool {
-	result := true
-	buffer := []token{}
-
-	for _, typ := range types {
-		tok := p.getToken()
-		if tok == nil {
-			result = false
-			break
-		}
-		buffer = append(buffer, *tok)
-		if tok.typ != typ {
-			result = false
-			break
-		}
-	}
-	// add the tokens back to the buffer, and return
-	p.tokensBuffer = append(p.tokensBuffer, buffer...)
-	return result
-}
-
-func (p *queryParser) getToken() *token {
-	if len(p.tokensBuffer) != 0 {
-		tok := p.tokensBuffer[0]
-		p.tokensBuffer = p.tokensBuffer[1:]
-		return &tok
-	}
-	tok, ok := <-p.flow
-	if !ok {
-		return nil
-	}
-	return &tok
-}
-
-func (p *queryParser) parseStart() queryParserStateFn {
-	tok := p.getToken()
-
-	if tok == nil || tok.typ == tokenEOF {
-		return nil
-	}
-
-	if tok.typ != tokenDollar {
-		return p.parseError(tok, "Expected '$' at start of expression")
-	}
-
-	return p.parseMatchExpr
-}
-
-// handle '.' prefix, '[]', and '..'
-func (p *queryParser) parseMatchExpr() queryParserStateFn {
-	tok := p.getToken()
-	switch tok.typ {
-	case tokenDotDot:
-		p.query.appendPath(&matchRecursiveFn{})
-		// nested parse for '..'
-		tok := p.getToken()
-		switch tok.typ {
-		case tokenKey:
-			p.query.appendPath(newMatchKeyFn(tok.val))
-			return p.parseMatchExpr
-		case tokenLeftBracket:
-			return p.parseBracketExpr
-		case tokenStar:
-			// do nothing - the recursive predicate is enough
-			return p.parseMatchExpr
-		}
-
-	case tokenDot:
-		// nested parse for '.'
-		tok := p.getToken()
-		switch tok.typ {
-		case tokenKey:
-			p.query.appendPath(newMatchKeyFn(tok.val))
-			return p.parseMatchExpr
-		case tokenStar:
-			p.query.appendPath(&matchAnyFn{})
-			return p.parseMatchExpr
-		}
-
-	case tokenLeftBracket:
-		return p.parseBracketExpr
-
-	case tokenEOF:
-		return nil // allow EOF at this stage
-	}
-	return p.parseError(tok, "expected match expression")
-}
-
-func (p *queryParser) parseBracketExpr() queryParserStateFn {
-	if p.lookahead(tokenInteger, tokenColon) {
-		return p.parseSliceExpr
-	}
-	if p.peek().typ == tokenColon {
-		return p.parseSliceExpr
-	}
-	return p.parseUnionExpr
-}
-
-func (p *queryParser) parseUnionExpr() queryParserStateFn {
-	var tok *token
-
-	// this state can be traversed after some sub-expressions
-	// so be careful when setting up state in the parser
-	if p.union == nil {
-		p.union = []pathFn{}
-	}
-
-loop: // labeled loop for easy breaking
-	for {
-		if len(p.union) > 0 {
-			// parse delimiter or terminator
-			tok = p.getToken()
-			switch tok.typ {
-			case tokenComma:
-				// do nothing
-			case tokenRightBracket:
-				break loop
-			default:
-				return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val)
-			}
-		}
-
-		// parse sub expression
-		tok = p.getToken()
-		switch tok.typ {
-		case tokenInteger:
-			p.union = append(p.union, newMatchIndexFn(tok.Int()))
-		case tokenKey:
-			p.union = append(p.union, newMatchKeyFn(tok.val))
-		case tokenString:
-			p.union = append(p.union, newMatchKeyFn(tok.val))
-		case tokenQuestion:
-			return p.parseFilterExpr
-		default:
-			return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union))
-		}
-	}
-
-	// if there is only one sub-expression, use that instead
-	if len(p.union) == 1 {
-		p.query.appendPath(p.union[0])
-	} else {
-		p.query.appendPath(&matchUnionFn{p.union})
-	}
-
-	p.union = nil // clear out state
-	return p.parseMatchExpr
-}
-
-func (p *queryParser) parseSliceExpr() queryParserStateFn {
-	// init slice to grab all elements
-	start, end, step := 0, maxInt, 1
-
-	// parse optional start
-	tok := p.getToken()
-	if tok.typ == tokenInteger {
-		start = tok.Int()
-		tok = p.getToken()
-	}
-	if tok.typ != tokenColon {
-		return p.parseError(tok, "expected ':'")
-	}
-
-	// parse optional end
-	tok = p.getToken()
-	if tok.typ == tokenInteger {
-		end = tok.Int()
-		tok = p.getToken()
-	}
-	if tok.typ == tokenRightBracket {
-		p.query.appendPath(newMatchSliceFn(start, end, step))
-		return p.parseMatchExpr
-	}
-	if tok.typ != tokenColon {
-		return p.parseError(tok, "expected ']' or ':'")
-	}
-
-	// parse optional step
-	tok = p.getToken()
-	if tok.typ == tokenInteger {
-		step = tok.Int()
-		if step < 0 {
-			return p.parseError(tok, "step must be a positive value")
-		}
-		tok = p.getToken()
-	}
-	if tok.typ != tokenRightBracket {
-		return p.parseError(tok, "expected ']'")
-	}
-
-	p.query.appendPath(newMatchSliceFn(start, end, step))
-	return p.parseMatchExpr
-}
-
-func (p *queryParser) parseFilterExpr() queryParserStateFn {
-	tok := p.getToken()
-	if tok.typ != tokenLeftParen {
-		return p.parseError(tok, "expected left-parenthesis for filter expression")
-	}
-	tok = p.getToken()
-	if tok.typ != tokenKey && tok.typ != tokenString {
-		return p.parseError(tok, "expected key or string for filter funciton name")
-	}
-	name := tok.val
-	tok = p.getToken()
-	if tok.typ != tokenRightParen {
-		return p.parseError(tok, "expected right-parenthesis for filter expression")
-	}
-	p.union = append(p.union, newMatchFilterFn(name, tok.Position))
-	return p.parseUnionExpr
-}
-
-func parseQuery(flow chan token) (*Query, error) {
-	parser := &queryParser{
-		flow:         flow,
-		tokensBuffer: []token{},
-		query:        newQuery(),
-	}
-	parser.run()
-	return parser.query, parser.err
-}
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
index 15ac1e1..91a8896 100755
--- a/vendor/github.com/pelletier/go-toml/test.sh
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -19,8 +19,16 @@
   popd
 }
 
+# Remove potential previous runs
+rm -rf src test_program_bin toml-test
+
+# Run go vet
+go vet ./...
+
 go get github.com/pelletier/go-buffruneio
 go get github.com/davecgh/go-spew/spew
+go get gopkg.in/yaml.v2
+go get github.com/BurntSushi/toml
 
 # get code for BurntSushi TOML validation
 # pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
@@ -33,13 +41,16 @@
 # vendorize the current lib for testing
 # NOTE: this basically mocks an install without having to go back out to github for code
 mkdir -p src/github.com/pelletier/go-toml/cmd
+mkdir -p src/github.com/pelletier/go-toml/query
 cp *.go *.toml src/github.com/pelletier/go-toml
 cp -R cmd/* src/github.com/pelletier/go-toml/cmd
+cp -R query/* src/github.com/pelletier/go-toml/query
 go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
 
 # Run basic unit tests
-go test github.com/pelletier/go-toml \
-        github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out
+go test github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml/query
 
 # run the entire BurntSushi test suite
 if [[ $# -eq 0 ]] ; then
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
index e598cf9..5581fe0 100644
--- a/vendor/github.com/pelletier/go-toml/token.go
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -135,5 +135,6 @@
 
 func isHexDigit(r rune) bool {
 	return isDigit(r) ||
-		r == 'A' || r == 'B' || r == 'C' || r == 'D' || r == 'E' || r == 'F'
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
 }
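
The isHexDigit fix above now accepts lower-case a-f in addition to upper-case A-F, which matters for \u and \U escapes inside strings. A tiny test sketch of the corrected predicate; it is hypothetical and would have to live in a _test.go file inside package toml, since isHexDigit is unexported:

    package toml

    import "testing"

    func TestIsHexDigitSketch(t *testing.T) {
            for _, r := range "0123456789abcdefABCDEF" {
                    if !isHexDigit(r) {
                            t.Errorf("isHexDigit(%q) = false, want true", r)
                    }
            }
            for _, r := range "gGxZ" {
                    if isHexDigit(r) {
                            t.Errorf("isHexDigit(%q) = true, want false", r)
                    }
            }
    }
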
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index ad23fe8..64f19ed 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -4,38 +4,46 @@
 	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"runtime"
 	"strings"
 )
 
 type tomlValue struct {
-	value    interface{}
+	value    interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
 	position Position
 }
 
-// TomlTree is the result of the parsing of a TOML file.
-type TomlTree struct {
-	values   map[string]interface{}
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+	values   map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
 	position Position
 }
 
-func newTomlTree() *TomlTree {
-	return &TomlTree{
+func newTree() *Tree {
+	return &Tree{
 		values:   make(map[string]interface{}),
 		position: Position{},
 	}
 }
 
-// TreeFromMap initializes a new TomlTree object using the given map.
-func TreeFromMap(m map[string]interface{}) *TomlTree {
-	return &TomlTree{
-		values: m,
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+	result, err := toTree(m)
+	if err != nil {
+		return nil, err
 	}
+	return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+	return t.position
 }
 
 // Has returns a boolean indicating if the given key exists.
-func (t *TomlTree) Has(key string) bool {
+func (t *Tree) Has(key string) bool {
 	if key == "" {
 		return false
 	}
@@ -43,25 +51,26 @@
 }
 
 // HasPath returns true if the given path of keys exists, false otherwise.
-func (t *TomlTree) HasPath(keys []string) bool {
+func (t *Tree) HasPath(keys []string) bool {
 	return t.GetPath(keys) != nil
 }
 
-// Keys returns the keys of the toplevel tree.
-// Warning: this is a costly operation.
-func (t *TomlTree) Keys() []string {
-	var keys []string
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+	keys := make([]string, len(t.values))
+	i := 0
 	for k := range t.values {
-		keys = append(keys, k)
+		keys[i] = k
+		i++
 	}
 	return keys
 }
 
-// Get the value at key in the TomlTree.
+// Get the value at key in the Tree.
 // Key is a dot-separated path (e.g. a.b.c).
 // Returns nil if the path does not exist in the tree.
 // If keys is of length zero, the current tree is returned.
-func (t *TomlTree) Get(key string) interface{} {
+func (t *Tree) Get(key string) interface{} {
 	if key == "" {
 		return t
 	}
@@ -74,7 +83,7 @@
 
 // GetPath returns the element in the tree indicated by 'keys'.
 // If keys is of length zero, the current tree is returned.
-func (t *TomlTree) GetPath(keys []string) interface{} {
+func (t *Tree) GetPath(keys []string) interface{} {
 	if len(keys) == 0 {
 		return t
 	}
@@ -85,9 +94,9 @@
 			return nil
 		}
 		switch node := value.(type) {
-		case *TomlTree:
+		case *Tree:
 			subtree = node
-		case []*TomlTree:
+		case []*Tree:
 			// go to most recent element
 			if len(node) == 0 {
 				return nil
@@ -107,7 +116,7 @@
 }
 
 // GetPosition returns the position of the given key.
-func (t *TomlTree) GetPosition(key string) Position {
+func (t *Tree) GetPosition(key string) Position {
 	if key == "" {
 		return t.position
 	}
@@ -116,7 +125,7 @@
 
 // GetPositionPath returns the element in the tree indicated by 'keys'.
 // If keys is of length zero, the current tree is returned.
-func (t *TomlTree) GetPositionPath(keys []string) Position {
+func (t *Tree) GetPositionPath(keys []string) Position {
 	if len(keys) == 0 {
 		return t.position
 	}
@@ -127,9 +136,9 @@
 			return Position{0, 0}
 		}
 		switch node := value.(type) {
-		case *TomlTree:
+		case *Tree:
 			subtree = node
-		case []*TomlTree:
+		case []*Tree:
 			// go to most recent element
 			if len(node) == 0 {
 				return Position{0, 0}
@@ -143,9 +152,9 @@
 	switch node := subtree.values[keys[len(keys)-1]].(type) {
 	case *tomlValue:
 		return node.position
-	case *TomlTree:
+	case *Tree:
 		return node.position
-	case []*TomlTree:
+	case []*Tree:
 		// go to most recent element
 		if len(node) == 0 {
 			return Position{0, 0}
@@ -157,7 +166,7 @@
 }
 
 // GetDefault works like Get but with a default value
-func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
 	val := t.Get(key)
 	if val == nil {
 		return def
@@ -167,30 +176,30 @@
 
 // Set an element in the tree.
 // Key is a dot-separated path (e.g. a.b.c).
-// Creates all necessary intermediates trees, if needed.
-func (t *TomlTree) Set(key string, value interface{}) {
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, value interface{}) {
 	t.SetPath(strings.Split(key, "."), value)
 }
 
 // SetPath sets an element in the tree.
 // Keys is an array of path elements (e.g. {"a","b","c"}).
-// Creates all necessary intermediates trees, if needed.
-func (t *TomlTree) SetPath(keys []string, value interface{}) {
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
 	subtree := t
 	for _, intermediateKey := range keys[:len(keys)-1] {
 		nextTree, exists := subtree.values[intermediateKey]
 		if !exists {
-			nextTree = newTomlTree()
+			nextTree = newTree()
 			subtree.values[intermediateKey] = nextTree // add new element here
 		}
 		switch node := nextTree.(type) {
-		case *TomlTree:
+		case *Tree:
 			subtree = node
-		case []*TomlTree:
+		case []*Tree:
 			// go to most recent element
 			if len(node) == 0 {
 				// create element if it does not exist
-				subtree.values[intermediateKey] = append(node, newTomlTree())
+				subtree.values[intermediateKey] = append(node, newTree())
 			}
 			subtree = node[len(node)-1]
 		}
@@ -199,9 +208,9 @@
 	var toInsert interface{}
 
 	switch value.(type) {
-	case *TomlTree:
+	case *Tree:
 		toInsert = value
-	case []*TomlTree:
+	case []*Tree:
 		toInsert = value
 	case *tomlValue:
 		toInsert = value
@@ -219,21 +228,21 @@
 // and tree[a][b][c]
 //
 // Returns nil on success, error object on failure
-func (t *TomlTree) createSubTree(keys []string, pos Position) error {
+func (t *Tree) createSubTree(keys []string, pos Position) error {
 	subtree := t
 	for _, intermediateKey := range keys {
 		nextTree, exists := subtree.values[intermediateKey]
 		if !exists {
-			tree := newTomlTree()
+			tree := newTree()
 			tree.position = pos
 			subtree.values[intermediateKey] = tree
 			nextTree = tree
 		}
 
 		switch node := nextTree.(type) {
-		case []*TomlTree:
+		case []*Tree:
 			subtree = node[len(node)-1]
-		case *TomlTree:
+		case *Tree:
 			subtree = node
 		default:
 			return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
@@ -243,17 +252,8 @@
 	return nil
 }
 
-// Query compiles and executes a query on a tree and returns the query result.
-func (t *TomlTree) Query(query string) (*QueryResult, error) {
-	q, err := CompileQuery(query)
-	if err != nil {
-		return nil, err
-	}
-	return q.Execute(t), nil
-}
-
-// LoadReader creates a TomlTree from any io.Reader.
-func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
 	defer func() {
 		if r := recover(); r != nil {
 			if _, ok := r.(runtime.Error); ok {
@@ -262,17 +262,27 @@
 			err = errors.New(r.(string))
 		}
 	}()
-	tree = parseToml(lexToml(reader))
+	tree = parseToml(lexToml(b))
 	return
 }
 
-// Load creates a TomlTree from a string.
-func Load(content string) (tree *TomlTree, err error) {
-	return LoadReader(strings.NewReader(content))
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+	inputBytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return
+	}
+	tree, err = LoadBytes(inputBytes)
+	return
 }
 
-// LoadFile creates a TomlTree from a file.
-func LoadFile(path string) (tree *TomlTree, err error) {
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+	return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
 	file, err := os.Open(path)
 	if err != nil {
 		return nil, err
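(Illustrative aside, not part of the patch: a minimal sketch of the go-toml API after this revendor. The old `TomlTree` type is now `Tree`, and `Load`/`LoadBytes`/`LoadReader`/`LoadFile` all return `*Tree`, as the hunks above show. The table, keys, and values below are made-up examples.)

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Load parses TOML from a string and now returns *toml.Tree
	// (formerly *toml.TomlTree); LoadBytes/LoadReader/LoadFile are analogous.
	tree, err := toml.Load("[server]\nhost = \"localhost\"\nport = 8080\n")
	if err != nil {
		log.Fatal(err)
	}

	// Dot-separated keys address nested tables.
	fmt.Println(tree.Get("server.host"))               // localhost
	fmt.Println(tree.GetDefault("server.timeout", 30)) // 30 (fallback for a missing key)

	// Set creates intermediate tables as needed (see SetPath above).
	tree.Set("server.tls", true)
}
```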
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
deleted file mode 100644
index bf9321b..0000000
--- a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package toml
-
-// Tools to convert a TomlTree to different representations
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// encodes a string to a TOML-compliant string value
-func encodeTomlString(value string) string {
-	result := ""
-	for _, rr := range value {
-		intRr := uint16(rr)
-		switch rr {
-		case '\b':
-			result += "\\b"
-		case '\t':
-			result += "\\t"
-		case '\n':
-			result += "\\n"
-		case '\f':
-			result += "\\f"
-		case '\r':
-			result += "\\r"
-		case '"':
-			result += "\\\""
-		case '\\':
-			result += "\\\\"
-		default:
-			if intRr < 0x001F {
-				result += fmt.Sprintf("\\u%0.4X", intRr)
-			} else {
-				result += string(rr)
-			}
-		}
-	}
-	return result
-}
-
-// Value print support function for ToString()
-// Outputs the TOML compliant string representation of a value
-func toTomlValue(item interface{}, indent int) string {
-	tab := strings.Repeat(" ", indent)
-	switch value := item.(type) {
-	case int:
-		return tab + strconv.FormatInt(int64(value), 10)
-	case int8:
-		return tab + strconv.FormatInt(int64(value), 10)
-	case int16:
-		return tab + strconv.FormatInt(int64(value), 10)
-	case int32:
-		return tab + strconv.FormatInt(int64(value), 10)
-	case int64:
-		return tab + strconv.FormatInt(value, 10)
-	case uint:
-		return tab + strconv.FormatUint(uint64(value), 10)
-	case uint8:
-		return tab + strconv.FormatUint(uint64(value), 10)
-	case uint16:
-		return tab + strconv.FormatUint(uint64(value), 10)
-	case uint32:
-		return tab + strconv.FormatUint(uint64(value), 10)
-	case uint64:
-		return tab + strconv.FormatUint(value, 10)
-	case float32:
-		return tab + strconv.FormatFloat(float64(value), 'f', -1, 32)
-	case float64:
-		return tab + strconv.FormatFloat(value, 'f', -1, 64)
-	case string:
-		return tab + "\"" + encodeTomlString(value) + "\""
-	case bool:
-		if value {
-			return "true"
-		}
-		return "false"
-	case time.Time:
-		return tab + value.Format(time.RFC3339)
-	case []interface{}:
-		result := tab + "[\n"
-		for _, item := range value {
-			result += toTomlValue(item, indent+2) + ",\n"
-		}
-		return result + tab + "]"
-	case nil:
-		return ""
-	default:
-		panic(fmt.Sprintf("unsupported value type %T: %v", value, value))
-	}
-}
-
-// Recursive support function for ToString()
-// Outputs a tree, using the provided keyspace to prefix group names
-func (t *TomlTree) toToml(indent, keyspace string) string {
-	result := ""
-	for k, v := range t.values {
-		// figure out the keyspace
-		combinedKey := k
-		if keyspace != "" {
-			combinedKey = keyspace + "." + combinedKey
-		}
-		// output based on type
-		switch node := v.(type) {
-		case []*TomlTree:
-			for _, item := range node {
-				if len(item.Keys()) > 0 {
-					result += fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
-				}
-				result += item.toToml(indent+"  ", combinedKey)
-			}
-		case *TomlTree:
-			if len(node.Keys()) > 0 {
-				result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
-			}
-			result += node.toToml(indent+"  ", combinedKey)
-		case map[string]interface{}:
-			sub := TreeFromMap(node)
-
-			if len(sub.Keys()) > 0 {
-				result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
-			}
-			result += sub.toToml(indent+"  ", combinedKey)
-		case map[string]string:
-			sub := TreeFromMap(convertMapStringString(node))
-
-			if len(sub.Keys()) > 0 {
-				result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
-			}
-			result += sub.toToml(indent+"  ", combinedKey)
-		case map[interface{}]interface{}:
-			sub := TreeFromMap(convertMapInterfaceInterface(node))
-
-			if len(sub.Keys()) > 0 {
-				result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
-			}
-			result += sub.toToml(indent+"  ", combinedKey)
-		case *tomlValue:
-			result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0))
-		default:
-			result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0))
-		}
-	}
-	return result
-}
-
-func convertMapStringString(in map[string]string) map[string]interface{} {
-	result := make(map[string]interface{}, len(in))
-	for k, v := range in {
-		result[k] = v
-	}
-	return result
-}
-
-func convertMapInterfaceInterface(in map[interface{}]interface{}) map[string]interface{} {
-	result := make(map[string]interface{}, len(in))
-	for k, v := range in {
-		result[k.(string)] = v
-	}
-	return result
-}
-
-// ToString is an alias for String
-func (t *TomlTree) ToString() string {
-	return t.String()
-}
-
-// String generates a human-readable representation of the current tree.
-// Output spans multiple lines, and is suitable for ingest by a TOML parser
-func (t *TomlTree) String() string {
-	return t.toToml("", "")
-}
-
-// ToMap recursively generates a representation of the current tree using map[string]interface{}.
-func (t *TomlTree) ToMap() map[string]interface{} {
-	result := map[string]interface{}{}
-
-	for k, v := range t.values {
-		switch node := v.(type) {
-		case []*TomlTree:
-			var array []interface{}
-			for _, item := range node {
-				array = append(array, item.ToMap())
-			}
-			result[k] = array
-		case *TomlTree:
-			result[k] = node.ToMap()
-		case map[string]interface{}:
-			sub := TreeFromMap(node)
-			result[k] = sub.ToMap()
-		case *tomlValue:
-			result[k] = node.value
-		}
-	}
-
-	return result
-}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 0000000..19d1c0d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,142 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+	reflect.Bool:    reflect.TypeOf(true),
+	reflect.String:  reflect.TypeOf(""),
+	reflect.Float32: reflect.TypeOf(float64(1)),
+	reflect.Float64: reflect.TypeOf(float64(1)),
+	reflect.Int:     reflect.TypeOf(int64(1)),
+	reflect.Int8:    reflect.TypeOf(int64(1)),
+	reflect.Int16:   reflect.TypeOf(int64(1)),
+	reflect.Int32:   reflect.TypeOf(int64(1)),
+	reflect.Int64:   reflect.TypeOf(int64(1)),
+	reflect.Uint:    reflect.TypeOf(uint64(1)),
+	reflect.Uint8:   reflect.TypeOf(uint64(1)),
+	reflect.Uint16:  reflect.TypeOf(uint64(1)),
+	reflect.Uint32:  reflect.TypeOf(uint64(1)),
+	reflect.Uint64:  reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+	if k > 0 && int(k) < len(kindToType) {
+		return kindToType[k]
+	}
+	return nil
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+	switch original := object.(type) {
+	case string, bool, int64, uint64, float64, time.Time:
+		return original, nil
+	case int:
+		return int64(original), nil
+	case int8:
+		return int64(original), nil
+	case int16:
+		return int64(original), nil
+	case int32:
+		return int64(original), nil
+	case uint:
+		return uint64(original), nil
+	case uint8:
+		return uint64(original), nil
+	case uint16:
+		return uint64(original), nil
+	case uint32:
+		return uint64(original), nil
+	case float32:
+		return float64(original), nil
+	case fmt.Stringer:
+		return original.String(), nil
+	default:
+		return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+	}
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+	// arrays are a bit tricky, since they can represent either a
+	// collection of simple values, which is represented by one
+	// *tomlValue, or an array of tables, which is represented by an
+	// array of *Tree.
+
+	// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
+	value := reflect.ValueOf(object)
+	insideType := value.Type().Elem()
+	length := value.Len()
+	if length > 0 {
+		insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+	}
+	if insideType.Kind() == reflect.Map {
+		// this is considered as an array of tables
+		tablesArray := make([]*Tree, 0, length)
+		for i := 0; i < length; i++ {
+			table := value.Index(i)
+			tree, err := toTree(table.Interface())
+			if err != nil {
+				return nil, err
+			}
+			tablesArray = append(tablesArray, tree.(*Tree))
+		}
+		return tablesArray, nil
+	}
+
+	sliceType := typeFor(insideType.Kind())
+	if sliceType == nil {
+		sliceType = insideType
+	}
+
+	arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+	for i := 0; i < length; i++ {
+		val := value.Index(i).Interface()
+		simpleValue, err := simpleValueCoercion(val)
+		if err != nil {
+			return nil, err
+		}
+		arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+	}
+	return &tomlValue{arrayValue.Interface(), Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+	value := reflect.ValueOf(object)
+
+	if value.Kind() == reflect.Map {
+		values := map[string]interface{}{}
+		keys := value.MapKeys()
+		for _, key := range keys {
+			if key.Kind() != reflect.String {
+				if _, ok := key.Interface().(string); !ok {
+					return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+				}
+			}
+
+			v := value.MapIndex(key)
+			newValue, err := toTree(v.Interface())
+			if err != nil {
+				return nil, err
+			}
+			values[key.String()] = newValue
+		}
+		return &Tree{values, Position{}}, nil
+	}
+
+	if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+		return sliceToTree(object)
+	}
+
+	simpleValue, err := simpleValueCoercion(object)
+	if err != nil {
+		return nil, err
+	}
+	return &tomlValue{simpleValue, Position{}}, nil
+}
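(Illustrative only: the unexported `toTree`/`sliceToTree`/`simpleValueCoercion` helpers added above back the package's map-to-tree constructor. Assuming `TreeFromMap` still exists after the rename, and that it now returns `(*Tree, error)` to match `toTree`'s signature, a rough usage sketch with made-up data:)

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Assumed behaviour: TreeFromMap delegates to toTree above, coercing
	// ints/uints/floats to int64/uint64/float64 as simpleValueCoercion does,
	// and reporting an error for unsupported value types.
	tree, err := toml.TreeFromMap(map[string]interface{}{
		"name":  "demo",
		"port":  8080,
		"hosts": []interface{}{"a.example", "b.example"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tree.Get("port")) // 8080, stored as int64
}
```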
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 0000000..ca763ed
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,233 @@
+package toml
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+	var b bytes.Buffer
+
+	for _, rr := range value {
+		switch rr {
+		case '\b':
+			b.WriteString(`\b`)
+		case '\t':
+			b.WriteString(`\t`)
+		case '\n':
+			b.WriteString(`\n`)
+		case '\f':
+			b.WriteString(`\f`)
+		case '\r':
+			b.WriteString(`\r`)
+		case '"':
+			b.WriteString(`\"`)
+		case '\\':
+			b.WriteString(`\\`)
+		default:
+			intRr := uint16(rr)
+			if intRr < 0x001F {
+				b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+			} else {
+				b.WriteRune(rr)
+			}
+		}
+	}
+	return b.String()
+}
+
+func tomlValueStringRepresentation(v interface{}) (string, error) {
+	switch value := v.(type) {
+	case uint64:
+		return strconv.FormatUint(value, 10), nil
+	case int64:
+		return strconv.FormatInt(value, 10), nil
+	case float64:
+		// Ensure a round float does contain a decimal point. Otherwise feeding
+		// the output back to the parser would convert to an integer.
+		if math.Trunc(value) == value {
+			return strconv.FormatFloat(value, 'f', 1, 32), nil
+		}
+		return strconv.FormatFloat(value, 'f', -1, 32), nil
+	case string:
+		return "\"" + encodeTomlString(value) + "\"", nil
+	case []byte:
+		b, _ := v.([]byte)
+		return tomlValueStringRepresentation(string(b))
+	case bool:
+		if value {
+			return "true", nil
+		}
+		return "false", nil
+	case time.Time:
+		return value.Format(time.RFC3339), nil
+	case nil:
+		return "", nil
+	}
+
+	rv := reflect.ValueOf(v)
+
+	if rv.Kind() == reflect.Slice {
+		values := []string{}
+		for i := 0; i < rv.Len(); i++ {
+			item := rv.Index(i).Interface()
+			itemRepr, err := tomlValueStringRepresentation(item)
+			if err != nil {
+				return "", err
+			}
+			values = append(values, itemRepr)
+		}
+		return "[" + strings.Join(values, ",") + "]", nil
+	}
+	return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
+	simpleValuesKeys := make([]string, 0)
+	complexValuesKeys := make([]string, 0)
+
+	for k := range t.values {
+		v := t.values[k]
+		switch v.(type) {
+		case *Tree, []*Tree:
+			complexValuesKeys = append(complexValuesKeys, k)
+		default:
+			simpleValuesKeys = append(simpleValuesKeys, k)
+		}
+	}
+
+	sort.Strings(simpleValuesKeys)
+	sort.Strings(complexValuesKeys)
+
+	for _, k := range simpleValuesKeys {
+		v, ok := t.values[k].(*tomlValue)
+		if !ok {
+			return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+		}
+
+		repr, err := tomlValueStringRepresentation(v.value)
+		if err != nil {
+			return bytesCount, err
+		}
+
+		writtenBytesCount, err := writeStrings(w, indent, k, " = ", repr, "\n")
+		bytesCount += int64(writtenBytesCount)
+		if err != nil {
+			return bytesCount, err
+		}
+	}
+
+	for _, k := range complexValuesKeys {
+		v := t.values[k]
+
+		combinedKey := k
+		if keyspace != "" {
+			combinedKey = keyspace + "." + combinedKey
+		}
+
+		switch node := v.(type) {
+		// node has to be of those two types given how keys are sorted above
+		case *Tree:
+			writtenBytesCount, err := writeStrings(w, "\n", indent, "[", combinedKey, "]\n")
+			bytesCount += int64(writtenBytesCount)
+			if err != nil {
+				return bytesCount, err
+			}
+			bytesCount, err = node.writeTo(w, indent+"  ", combinedKey, bytesCount)
+			if err != nil {
+				return bytesCount, err
+			}
+		case []*Tree:
+			for _, subTree := range node {
+				writtenBytesCount, err := writeStrings(w, "\n", indent, "[[", combinedKey, "]]\n")
+				bytesCount += int64(writtenBytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+
+				bytesCount, err = subTree.writeTo(w, indent+"  ", combinedKey, bytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+			}
+		}
+	}
+
+	return bytesCount, nil
+}
+
+func writeStrings(w io.Writer, s ...string) (int, error) {
+	var n int
+	for i := range s {
+		b, err := io.WriteString(w, s[i])
+		n += b
+		if err != nil {
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+// WriteTo encodes the Tree as TOML and writes it to the writer w.
+// It returns the number of bytes written on success, or an error if the write fails.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+	return t.writeTo(w, "", "", 0)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+	result, _ := t.ToTomlString()
+	return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+//	* bool
+//	* float64
+//	* int64
+//	* string
+//	* uint64
+//	* time.Time
+//	* map[string]interface{} (where interface{} is any of this list)
+//	* []interface{} (where interface{} is any of this list)
+func (t *Tree) ToMap() map[string]interface{} {
+	result := map[string]interface{}{}
+
+	for k, v := range t.values {
+		switch node := v.(type) {
+		case []*Tree:
+			var array []interface{}
+			for _, item := range node {
+				array = append(array, item.ToMap())
+			}
+			result[k] = array
+		case *Tree:
+			result[k] = node.ToMap()
+		case *tomlValue:
+			result[k] = node.value
+		}
+	}
+	return result
+}
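(Illustrative aside, not part of the patch: the new writer above replaces the deleted `ToString`/`toToml` path. A short sketch of round-tripping a document through `ToTomlString` and `WriteTo`; the input TOML is made up.)

```go
package main

import (
	"fmt"
	"log"
	"os"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("title = \"example\"\n\n[owner]\nname = \"someone\"\n")
	if err != nil {
		log.Fatal(err)
	}

	// ToTomlString renders the tree back to TOML text: simple keys first,
	// sorted, then [tables] and [[table arrays]], as writeTo does above.
	s, err := tree.ToTomlString()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(s)

	// WriteTo streams the same output to any io.Writer and reports bytes written.
	if _, err := tree.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```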
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
index c24eebf..6c296d2 100644
--- a/vendor/github.com/spf13/afero/.travis.yml
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -2,9 +2,8 @@
 language: go
 
 go:
-  - 1.5.4
-  - 1.6.3
-  - 1.7
+  - 1.7.5
+  - 1.8
   - tip
 
 os:
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
index 6ec6ca9..5e4fc2e 100644
--- a/vendor/github.com/spf13/afero/basepath.go
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -52,7 +52,7 @@
 	// On Windows a common mistake would be to provide an absolute OS path
 	// We could strip out the base part, but that would not be very portable.
 	if filepath.IsAbs(name) {
-		return &os.PathError{"realPath", name, errors.New("got a real OS path instead of a virtual")}
+		return &os.PathError{Op: "realPath", Path: name, Err: errors.New("got a real OS path instead of a virtual")}
 	}
 
 	return nil
@@ -60,14 +60,14 @@
 
 func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"chtimes", name, err}
+		return &os.PathError{Op: "chtimes", Path: name, Err: err}
 	}
 	return b.source.Chtimes(name, atime, mtime)
 }
 
 func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"chmod", name, err}
+		return &os.PathError{Op: "chmod", Path: name, Err: err}
 	}
 	return b.source.Chmod(name, mode)
 }
@@ -78,66 +78,66 @@
 
 func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return nil, &os.PathError{"stat", name, err}
+		return nil, &os.PathError{Op: "stat", Path: name, Err: err}
 	}
 	return b.source.Stat(name)
 }
 
 func (b *BasePathFs) Rename(oldname, newname string) (err error) {
 	if oldname, err = b.RealPath(oldname); err != nil {
-		return &os.PathError{"rename", oldname, err}
+		return &os.PathError{Op: "rename", Path: oldname, Err: err}
 	}
 	if newname, err = b.RealPath(newname); err != nil {
-		return &os.PathError{"rename", newname, err}
+		return &os.PathError{Op: "rename", Path: newname, Err: err}
 	}
 	return b.source.Rename(oldname, newname)
 }
 
 func (b *BasePathFs) RemoveAll(name string) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"remove_all", name, err}
+		return &os.PathError{Op: "remove_all", Path: name, Err: err}
 	}
 	return b.source.RemoveAll(name)
 }
 
 func (b *BasePathFs) Remove(name string) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"remove", name, err}
+		return &os.PathError{Op: "remove", Path: name, Err: err}
 	}
 	return b.source.Remove(name)
 }
 
 func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return nil, &os.PathError{"openfile", name, err}
+		return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
 	}
 	return b.source.OpenFile(name, flag, mode)
 }
 
 func (b *BasePathFs) Open(name string) (f File, err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return nil, &os.PathError{"open", name, err}
+		return nil, &os.PathError{Op: "open", Path: name, Err: err}
 	}
 	return b.source.Open(name)
 }
 
 func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"mkdir", name, err}
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
 	}
 	return b.source.Mkdir(name, mode)
 }
 
 func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return &os.PathError{"mkdir", name, err}
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
 	}
 	return b.source.MkdirAll(name, mode)
 }
 
 func (b *BasePathFs) Create(name string) (f File, err error) {
 	if name, err = b.RealPath(name); err != nil {
-		return nil, &os.PathError{"create", name, err}
+		return nil, &os.PathError{Op: "create", Path: name, Err: err}
 	}
 	return b.source.Create(name)
 }
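(Aside, not part of the patch: the afero changes above switch every `&os.PathError{...}` literal from positional to keyed fields, which is what `go vet`'s composite-literal check expects for struct types imported from other packages. A minimal before/after sketch using arbitrary values:)

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	// Old, positional form (flagged by `go vet`):
	//   &os.PathError{"chmod", "/tmp/example", errors.New("read only")}
	// Keyed form: equivalent, and robust to field reordering.
	err := &os.PathError{Op: "chmod", Path: "/tmp/example", Err: errors.New("read only")}
	fmt.Println(err) // chmod /tmp/example: read only
}
```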
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
index d742425..e54a4f8 100644
--- a/vendor/github.com/spf13/afero/cacheOnReadFs.go
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -32,9 +32,8 @@
 type cacheState int
 
 const (
-	cacheUnknown cacheState = iota
 	// not present in the overlay, unknown if it exists in the base:
-	cacheMiss
+	cacheMiss cacheState = iota
 	// present in the overlay and in base, base file is newer:
 	cacheStale
 	// present in the overlay - with cache time == 0 it may exist in the base,
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
index 3c1e09a..e41e012 100644
--- a/vendor/github.com/spf13/afero/mem/file.go
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -186,7 +186,7 @@
 		return ErrFileClosed
 	}
 	if f.readOnly {
-		return &os.PathError{"truncate", f.fileData.name, errors.New("file handle is read only")}
+		return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
 	}
 	if size < 0 {
 		return ErrOutOfRange
@@ -218,7 +218,7 @@
 
 func (f *File) Write(b []byte) (n int, err error) {
 	if f.readOnly {
-		return 0, &os.PathError{"write", f.fileData.name, errors.New("file handle is read only")}
+		return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
 	}
 	n = len(b)
 	cur := atomic.LoadInt64(&f.at)
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 2e259b8..767ac1d 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -35,8 +35,6 @@
 	return &MemMapFs{}
 }
 
-var memfsInit sync.Once
-
 func (m *MemMapFs) getData() map[string]*mem.FileData {
 	m.init.Do(func() {
 		m.data = make(map[string]*mem.FileData)
@@ -47,7 +45,7 @@
 	return m.data
 }
 
-func (MemMapFs) Name() string { return "MemMapFS" }
+func (*MemMapFs) Name() string { return "MemMapFS" }
 
 func (m *MemMapFs) Create(name string) (File, error) {
 	name = normalizePath(name)
@@ -110,7 +108,7 @@
 	x, ok := m.getData()[name]
 	if ok {
 		// Only return ErrFileExists if it's a file, not a directory.
-		i := mem.FileInfo{x}
+		i := mem.FileInfo{FileData: x}
 		if !i.IsDir() {
 			return ErrFileExists
 		}
@@ -129,14 +127,17 @@
 	_, ok := m.getData()[name]
 	m.mu.RUnlock()
 	if ok {
-		return &os.PathError{"mkdir", name, ErrFileExists}
-	} else {
-		m.mu.Lock()
-		item := mem.CreateDir(name)
-		m.getData()[name] = item
-		m.registerWithParent(item)
-		m.mu.Unlock()
+		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
 	}
+
+	m.mu.Lock()
+	item := mem.CreateDir(name)
+	m.getData()[name] = item
+	m.registerWithParent(item)
+	m.mu.Unlock()
+
+	m.Chmod(name, perm)
+
 	return nil
 }
 
@@ -189,7 +190,7 @@
 	f, ok := m.getData()[name]
 	m.mu.RUnlock()
 	if !ok {
-		return nil, &os.PathError{"open", name, ErrFileNotFound}
+		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
 	}
 	return f, nil
 }
@@ -205,9 +206,11 @@
 }
 
 func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	chmod := false
 	file, err := m.openWrite(name)
 	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
 		file, err = m.Create(name)
+		chmod = true
 	}
 	if err != nil {
 		return nil, err
@@ -229,6 +232,9 @@
 			return nil, err
 		}
 	}
+	if chmod {
+		m.Chmod(name, perm)
+	}
 	return file, nil
 }
 
@@ -241,11 +247,11 @@
 	if _, ok := m.getData()[name]; ok {
 		err := m.unRegisterWithParent(name)
 		if err != nil {
-			return &os.PathError{"remove", name, err}
+			return &os.PathError{Op: "remove", Path: name, Err: err}
 		}
 		delete(m.getData(), name)
 	} else {
-		return &os.PathError{"remove", name, os.ErrNotExist}
+		return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
 	}
 	return nil
 }
@@ -293,7 +299,7 @@
 		m.mu.Unlock()
 		m.mu.RLock()
 	} else {
-		return &os.PathError{"rename", oldname, ErrFileNotFound}
+		return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
 	}
 	return nil
 }
@@ -309,9 +315,12 @@
 
 func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
 	name = normalizePath(name)
+
+	m.mu.RLock()
 	f, ok := m.getData()[name]
+	m.mu.RUnlock()
 	if !ok {
-		return &os.PathError{"chmod", name, ErrFileNotFound}
+		return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
 	}
 
 	m.mu.Lock()
@@ -323,9 +332,12 @@
 
 func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
 	name = normalizePath(name)
+
+	m.mu.RLock()
 	f, ok := m.getData()[name]
+	m.mu.RUnlock()
 	if !ok {
-		return &os.PathError{"chtimes", name, ErrFileNotFound}
+		return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
 	}
 
 	m.mu.Lock()
@@ -337,13 +349,13 @@
 
 func (m *MemMapFs) List() {
 	for _, x := range m.data {
-		y := mem.FileInfo{x}
+		y := mem.FileInfo{FileData: x}
 		fmt.Println(x.Name(), y.Size())
 	}
 }
 
-func debugMemMapList(fs Fs) {
-	if x, ok := fs.(*MemMapFs); ok {
-		x.List()
-	}
-}
+// func debugMemMapList(fs Fs) {
+// 	if x, ok := fs.(*MemMapFs); ok {
+// 		x.List()
+// 	}
+// }
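(Illustrative only: with the memmap.go changes above, Mkdir and OpenFile now apply the requested permission bits via Chmod when they create an entry. A small sketch against the in-memory filesystem; the path and mode are arbitrary.)

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// O_CREATE now also records the permission bits on the new in-memory file.
	f, err := fs.OpenFile("/notes.txt", os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		log.Fatal(err)
	}
	f.WriteString("hello")
	f.Close()

	if info, err := fs.Stat("/notes.txt"); err == nil {
		fmt.Println(info.Mode()) // expected to reflect 0600 after the Chmod call
	}
}
```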
diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml
new file mode 100644
index 0000000..4da9766
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+sudo: required
+go:
+  - 1.7.5
+  - 1.8
+  - tip
+os:
+  - linux
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+script:
+  - make check
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 0000000..7ccf893
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+	go test ./...
+
+test-race: ## Run tests with race detector
+	go test -race ./...
+
+fmt: ## Run gofmt linter
+	@for d in `go list` ; do \
+		if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+			echo "^ improperly formatted go files" && echo && exit 1; \
+		fi \
+	done
+
+lint: ## Run golint linter
+	@for d in `go list` ; do \
+		if [ "`golint $$d | tee /dev/stderr`" ]; then \
+			echo "^ golint errors!" && echo && exit 1; \
+		fi \
+	done
+
+vet: ## Run go vet linter
+	@if [ "`go vet | tee /dev/stderr`" ]; then \
+		echo "^ go vet errors!" && echo && exit 1; \
+	fi
+
+test-cover-html: ## Generate test coverage report
+	go test -coverprofile=coverage.out -covermode=count
+	go tool cover -func=coverage.out
+
+help:
+	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
index af7a1fd..e693939 100644
--- a/vendor/github.com/spf13/cast/README.md
+++ b/vendor/github.com/spf13/cast/README.md
@@ -1,5 +1,8 @@
 cast
 ====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
 
 Easy and safe casting from one type to another in Go
 
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
index 6ca3e0e..8b8c208 100644
--- a/vendor/github.com/spf13/cast/cast.go
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -3,81 +3,157 @@
 // Use of this source code is governed by an MIT-style
 // license that can be found in the LICENSE file.
 
+// Package cast provides easy and safe casting in Go.
 package cast
 
 import "time"
 
+// ToBool casts an interface to a bool type.
 func ToBool(i interface{}) bool {
 	v, _ := ToBoolE(i)
 	return v
 }
 
+// ToTime casts an interface to a time.Time type.
 func ToTime(i interface{}) time.Time {
 	v, _ := ToTimeE(i)
 	return v
 }
 
+// ToDuration casts an interface to a time.Duration type.
 func ToDuration(i interface{}) time.Duration {
 	v, _ := ToDurationE(i)
 	return v
 }
 
+// ToFloat64 casts an interface to a float64 type.
 func ToFloat64(i interface{}) float64 {
 	v, _ := ToFloat64E(i)
 	return v
 }
 
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
 func ToInt64(i interface{}) int64 {
 	v, _ := ToInt64E(i)
 	return v
 }
 
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+	v, _ := ToInt32E(i)
+	return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+	v, _ := ToInt16E(i)
+	return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+	v, _ := ToInt8E(i)
+	return v
+}
+
+// ToInt casts an interface to an int type.
 func ToInt(i interface{}) int {
 	v, _ := ToIntE(i)
 	return v
 }
 
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+	v, _ := ToUintE(i)
+	return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+	v, _ := ToUint64E(i)
+	return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+	v, _ := ToUint32E(i)
+	return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+	v, _ := ToUint16E(i)
+	return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+	v, _ := ToUint8E(i)
+	return v
+}
+
+// ToString casts an interface to a string type.
 func ToString(i interface{}) string {
 	v, _ := ToStringE(i)
 	return v
 }
 
+// ToStringMapString casts an interface to a map[string]string type.
 func ToStringMapString(i interface{}) map[string]string {
 	v, _ := ToStringMapStringE(i)
 	return v
 }
 
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
 func ToStringMapStringSlice(i interface{}) map[string][]string {
 	v, _ := ToStringMapStringSliceE(i)
 	return v
 }
 
+// ToStringMapBool casts an interface to a map[string]bool type.
 func ToStringMapBool(i interface{}) map[string]bool {
 	v, _ := ToStringMapBoolE(i)
 	return v
 }
 
+// ToStringMap casts an interface to a map[string]interface{} type.
 func ToStringMap(i interface{}) map[string]interface{} {
 	v, _ := ToStringMapE(i)
 	return v
 }
 
+// ToSlice casts an interface to a []interface{} type.
 func ToSlice(i interface{}) []interface{} {
 	v, _ := ToSliceE(i)
 	return v
 }
 
+// ToBoolSlice casts an interface to a []bool type.
 func ToBoolSlice(i interface{}) []bool {
 	v, _ := ToBoolSliceE(i)
 	return v
 }
 
+// ToStringSlice casts an interface to a []string type.
 func ToStringSlice(i interface{}) []string {
 	v, _ := ToStringSliceE(i)
 	return v
 }
 
+// ToIntSlice casts an interface to a []int type.
 func ToIntSlice(i interface{}) []int {
 	v, _ := ToIntSliceE(i)
 	return v
 }
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+	v, _ := ToDurationSliceE(i)
+	return v
+}
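(Illustrative aside, not part of the patch: the expanded cast API above adds sized and unsigned helpers. The non-E forms swallow the conversion error and return the zero value; the *E forms in caste.go below surface it. Values here are arbitrary.)

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToInt("42"))      // 42
	fmt.Println(cast.ToFloat32("3.5")) // 3.5
	fmt.Println(cast.ToUint64(-1))     // 0: negative input is rejected, error dropped

	// The *E variant reports why the cast failed.
	if _, err := cast.ToUint64E(-1); err != nil {
		fmt.Println(err) // unable to cast negative value
	}
}
```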
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index 23f0fe8..81511fe 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -6,6 +6,7 @@
 package cast
 
 import (
+	"errors"
 	"fmt"
 	"html/template"
 	"reflect"
@@ -14,32 +15,42 @@
 	"time"
 )
 
-// ToTimeE casts an empty interface to time.Time.
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
 func ToTimeE(i interface{}) (tim time.Time, err error) {
 	i = indirect(i)
 
-	switch s := i.(type) {
+	switch v := i.(type) {
 	case time.Time:
-		return s, nil
+		return v, nil
 	case string:
-		d, e := StringToDate(s)
-		if e == nil {
-			return d, nil
-		}
-		return time.Time{}, fmt.Errorf("Could not parse Date/Time format: %v\n", e)
+		return StringToDate(v)
+	case int:
+		return time.Unix(int64(v), 0), nil
+	case int64:
+		return time.Unix(v, 0), nil
+	case int32:
+		return time.Unix(int64(v), 0), nil
+	case uint:
+		return time.Unix(int64(v), 0), nil
+	case uint64:
+		return time.Unix(int64(v), 0), nil
+	case uint32:
+		return time.Unix(int64(v), 0), nil
 	default:
-		return time.Time{}, fmt.Errorf("Unable to Cast %#v to Time\n", i)
+		return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
 	}
 }
 
-// ToDurationE casts an empty interface to time.Duration.
+// ToDurationE casts an interface to a time.Duration type.
 func ToDurationE(i interface{}) (d time.Duration, err error) {
 	i = indirect(i)
 
 	switch s := i.(type) {
 	case time.Duration:
 		return s, nil
-	case int64, int32, int16, int8, int:
+	case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
 		d = time.Duration(ToInt64(s))
 		return
 	case float32, float64:
@@ -53,14 +64,13 @@
 		}
 		return
 	default:
-		err = fmt.Errorf("Unable to Cast %#v to Duration\n", i)
+		err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
 		return
 	}
 }
 
-// ToBoolE casts an empty interface to a bool.
+// ToBoolE casts an interface to a bool type.
 func ToBoolE(i interface{}) (bool, error) {
-
 	i = indirect(i)
 
 	switch b := i.(type) {
@@ -76,11 +86,11 @@
 	case string:
 		return strconv.ParseBool(i.(string))
 	default:
-		return false, fmt.Errorf("Unable to Cast %#v to bool", i)
+		return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
 	}
 }
 
-// ToFloat64E casts an empty interface to a float64.
+// ToFloat64E casts an interface to a float64 type.
 func ToFloat64E(i interface{}) (float64, error) {
 	i = indirect(i)
 
@@ -89,6 +99,8 @@
 		return s, nil
 	case float32:
 		return float64(s), nil
+	case int:
+		return float64(s), nil
 	case int64:
 		return float64(s), nil
 	case int32:
@@ -97,55 +109,266 @@
 		return float64(s), nil
 	case int8:
 		return float64(s), nil
-	case int:
+	case uint:
+		return float64(s), nil
+	case uint64:
+		return float64(s), nil
+	case uint32:
+		return float64(s), nil
+	case uint16:
+		return float64(s), nil
+	case uint8:
 		return float64(s), nil
 	case string:
 		v, err := strconv.ParseFloat(s, 64)
 		if err == nil {
-			return float64(v), nil
+			return v, nil
 		}
-		return 0.0, fmt.Errorf("Unable to Cast %#v to float", i)
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
 	default:
-		return 0.0, fmt.Errorf("Unable to Cast %#v to float", i)
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
 	}
 }
 
-// ToInt64E casts an empty interface to an int64.
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case float64:
+		return float32(s), nil
+	case float32:
+		return s, nil
+	case int:
+		return float32(s), nil
+	case int64:
+		return float32(s), nil
+	case int32:
+		return float32(s), nil
+	case int16:
+		return float32(s), nil
+	case int8:
+		return float32(s), nil
+	case uint:
+		return float32(s), nil
+	case uint64:
+		return float32(s), nil
+	case uint32:
+		return float32(s), nil
+	case uint16:
+		return float32(s), nil
+	case uint8:
+		return float32(s), nil
+	case string:
+		v, err := strconv.ParseFloat(s, 32)
+		if err == nil {
+			return float32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	}
+}
+
+// ToInt64E casts an interface to an int64 type.
 func ToInt64E(i interface{}) (int64, error) {
 	i = indirect(i)
 
 	switch s := i.(type) {
-	case int64:
-		return s, nil
 	case int:
 		return int64(s), nil
+	case int64:
+		return s, nil
 	case int32:
 		return int64(s), nil
 	case int16:
 		return int64(s), nil
 	case int8:
 		return int64(s), nil
+	case uint:
+		return int64(s), nil
+	case uint64:
+		return int64(s), nil
+	case uint32:
+		return int64(s), nil
+	case uint16:
+		return int64(s), nil
+	case uint8:
+		return int64(s), nil
+	case float64:
+		return int64(s), nil
+	case float32:
+		return int64(s), nil
 	case string:
 		v, err := strconv.ParseInt(s, 0, 0)
 		if err == nil {
 			return v, nil
 		}
-		return 0, fmt.Errorf("Unable to Cast %#v to int64", i)
-	case float64:
-		return int64(s), nil
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
 	case bool:
-		if bool(s) {
-			return int64(1), nil
+		if s {
+			return 1, nil
 		}
-		return int64(0), nil
+		return 0, nil
 	case nil:
-		return int64(0), nil
+		return 0, nil
 	default:
-		return int64(0), fmt.Errorf("Unable to Cast %#v to int64", i)
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
 	}
 }
 
-// ToIntE casts an empty interface to an int.
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int32(s), nil
+	case int64:
+		return int32(s), nil
+	case int32:
+		return s, nil
+	case int16:
+		return int32(s), nil
+	case int8:
+		return int32(s), nil
+	case uint:
+		return int32(s), nil
+	case uint64:
+		return int32(s), nil
+	case uint32:
+		return int32(s), nil
+	case uint16:
+		return int32(s), nil
+	case uint8:
+		return int32(s), nil
+	case float64:
+		return int32(s), nil
+	case float32:
+		return int32(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	}
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int16(s), nil
+	case int64:
+		return int16(s), nil
+	case int32:
+		return int16(s), nil
+	case int16:
+		return s, nil
+	case int8:
+		return int16(s), nil
+	case uint:
+		return int16(s), nil
+	case uint64:
+		return int16(s), nil
+	case uint32:
+		return int16(s), nil
+	case uint16:
+		return int16(s), nil
+	case uint8:
+		return int16(s), nil
+	case float64:
+		return int16(s), nil
+	case float32:
+		return int16(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	}
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int8(s), nil
+	case int64:
+		return int8(s), nil
+	case int32:
+		return int8(s), nil
+	case int16:
+		return int8(s), nil
+	case int8:
+		return s, nil
+	case uint:
+		return int8(s), nil
+	case uint64:
+		return int8(s), nil
+	case uint32:
+		return int8(s), nil
+	case uint16:
+		return int8(s), nil
+	case uint8:
+		return int8(s), nil
+	case float64:
+		return int8(s), nil
+	case float32:
+		return int8(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	}
+}
+
+// ToIntE casts an interface to an int type.
 func ToIntE(i interface{}) (int, error) {
 	i = indirect(i)
 
@@ -160,23 +383,375 @@
 		return int(s), nil
 	case int8:
 		return int(s), nil
+	case uint:
+		return int(s), nil
+	case uint64:
+		return int(s), nil
+	case uint32:
+		return int(s), nil
+	case uint16:
+		return int(s), nil
+	case uint8:
+		return int(s), nil
+	case float64:
+		return int(s), nil
+	case float32:
+		return int(s), nil
 	case string:
 		v, err := strconv.ParseInt(s, 0, 0)
 		if err == nil {
 			return int(v), nil
 		}
-		return 0, fmt.Errorf("Unable to Cast %#v to int", i)
-	case float64:
-		return int(s), nil
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
 	case bool:
-		if bool(s) {
+		if s {
 			return 1, nil
 		}
 		return 0, nil
 	case nil:
 		return 0, nil
 	default:
-		return 0, fmt.Errorf("Unable to Cast %#v to int", i)
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+	}
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 0)
+		if err == nil {
+			return uint(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case uint:
+		return s, nil
+	case uint64:
+		return uint(s), nil
+	case uint32:
+		return uint(s), nil
+	case uint16:
+		return uint(s), nil
+	case uint8:
+		return uint(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+	}
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 64)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case uint:
+		return uint64(s), nil
+	case uint64:
+		return s, nil
+	case uint32:
+		return uint64(s), nil
+	case uint16:
+		return uint64(s), nil
+	case uint8:
+		return uint64(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+	}
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 32)
+		if err == nil {
+			return uint32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case uint:
+		return uint32(s), nil
+	case uint64:
+		return uint32(s), nil
+	case uint32:
+		return s, nil
+	case uint16:
+		return uint32(s), nil
+	case uint8:
+		return uint32(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	}
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 16)
+		if err == nil {
+			return uint16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case uint:
+		return uint16(s), nil
+	case uint64:
+		return uint16(s), nil
+	case uint32:
+		return uint16(s), nil
+	case uint16:
+		return s, nil
+	case uint8:
+		return uint16(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	}
+}
+
+// ToUint8E casts an interface to a uint type.
+func ToUint8E(i interface{}) (uint8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 8)
+		if err == nil {
+			return uint8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case uint:
+		return uint8(s), nil
+	case uint64:
+		return uint8(s), nil
+	case uint32:
+		return uint8(s), nil
+	case uint16:
+		return uint8(s), nil
+	case uint8:
+		return s, nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
 	}
 }
 
@@ -219,7 +794,7 @@
 	return v.Interface()
 }
 
-// ToStringE casts an empty interface to a string.
+// ToStringE casts an interface to a string type.
 func ToStringE(i interface{}) (string, error) {
 	i = indirectToStringerOrError(i)
 
@@ -229,11 +804,29 @@
 	case bool:
 		return strconv.FormatBool(s), nil
 	case float64:
-		return strconv.FormatFloat(i.(float64), 'f', -1, 64), nil
-	case int64:
-		return strconv.FormatInt(i.(int64), 10), nil
+		return strconv.FormatFloat(s, 'f', -1, 64), nil
+	case float32:
+		return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
 	case int:
-		return strconv.FormatInt(int64(i.(int)), 10), nil
+		return strconv.Itoa(s), nil
+	case int64:
+		return strconv.FormatInt(s, 10), nil
+	case int32:
+		return strconv.Itoa(int(s)), nil
+	case int16:
+		return strconv.FormatInt(int64(s), 10), nil
+	case int8:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint64:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint32:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint16:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint8:
+		return strconv.FormatInt(int64(s), 10), nil
 	case []byte:
 		return string(s), nil
 	case template.HTML:
@@ -253,13 +846,12 @@
 	case error:
 		return s.Error(), nil
 	default:
-		return "", fmt.Errorf("Unable to Cast %#v to string", i)
+		return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
 	}
 }
 
-// ToStringMapStringE casts an empty interface to a map[string]string.
+// ToStringMapStringE casts an interface to a map[string]string type.
 func ToStringMapStringE(i interface{}) (map[string]string, error) {
-
 	var m = map[string]string{}
 
 	switch v := i.(type) {
@@ -281,13 +873,12 @@
 		}
 		return m, nil
 	default:
-		return m, fmt.Errorf("Unable to Cast %#v to map[string]string", i)
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
 	}
 }
 
-// ToStringMapStringSliceE casts an empty interface to a map[string][]string.
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
 func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
-
 	var m = map[string][]string{}
 
 	switch v := i.(type) {
@@ -333,23 +924,22 @@
 		for k, val := range v {
 			key, err := ToStringE(k)
 			if err != nil {
-				return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
 			}
 			value, err := ToStringSliceE(val)
 			if err != nil {
-				return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
 			}
 			m[key] = value
 		}
 	default:
-		return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
 	}
 	return m, nil
 }
 
-// ToStringMapBoolE casts an empty interface to a map[string]bool.
+// ToStringMapBoolE casts an interface to a map[string]bool type.
 func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
-
 	var m = map[string]bool{}
 
 	switch v := i.(type) {
@@ -366,13 +956,12 @@
 	case map[string]bool:
 		return v, nil
 	default:
-		return m, fmt.Errorf("Unable to Cast %#v to map[string]bool", i)
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
 	}
 }
 
-// ToStringMapE casts an empty interface to a map[string]interface{}.
+// ToStringMapE casts an interface to a map[string]interface{} type.
 func ToStringMapE(i interface{}) (map[string]interface{}, error) {
-
 	var m = map[string]interface{}{}
 
 	switch v := i.(type) {
@@ -384,36 +973,31 @@
 	case map[string]interface{}:
 		return v, nil
 	default:
-		return m, fmt.Errorf("Unable to Cast %#v to map[string]interface{}", i)
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
 	}
 }
 
-// ToSliceE casts an empty interface to a []interface{}.
+// ToSliceE casts an interface to a []interface{} type.
 func ToSliceE(i interface{}) ([]interface{}, error) {
-
 	var s []interface{}
 
 	switch v := i.(type) {
 	case []interface{}:
-		for _, u := range v {
-			s = append(s, u)
-		}
-		return s, nil
+		return append(s, v...), nil
 	case []map[string]interface{}:
 		for _, u := range v {
 			s = append(s, u)
 		}
 		return s, nil
 	default:
-		return s, fmt.Errorf("Unable to Cast %#v of type %v to []interface{}", i, reflect.TypeOf(i))
+		return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
 	}
 }
 
-// ToBoolSliceE casts an empty interface to a []bool.
+// ToBoolSliceE casts an interface to a []bool type.
 func ToBoolSliceE(i interface{}) ([]bool, error) {
-
 	if i == nil {
-		return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
 	}
 
 	switch v := i.(type) {
@@ -429,19 +1013,18 @@
 		for j := 0; j < s.Len(); j++ {
 			val, err := ToBoolE(s.Index(j).Interface())
 			if err != nil {
-				return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+				return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
 			}
 			a[j] = val
 		}
 		return a, nil
 	default:
-		return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
 	}
 }
 
-// ToStringSliceE casts an empty interface to a []string.
+// ToStringSliceE casts an interface to a []string type.
 func ToStringSliceE(i interface{}) ([]string, error) {
-
 	var a []string
 
 	switch v := i.(type) {
@@ -457,19 +1040,18 @@
 	case interface{}:
 		str, err := ToStringE(v)
 		if err != nil {
-			return a, fmt.Errorf("Unable to Cast %#v to []string", i)
+			return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
 		}
 		return []string{str}, nil
 	default:
-		return a, fmt.Errorf("Unable to Cast %#v to []string", i)
+		return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
 	}
 }
 
-// ToIntSliceE casts an empty interface to a []int.
+// ToIntSliceE casts an interface to a []int type.
 func ToIntSliceE(i interface{}) ([]int, error) {
-
 	if i == nil {
-		return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
 	}
 
 	switch v := i.(type) {
@@ -485,17 +1067,48 @@
 		for j := 0; j < s.Len(); j++ {
 			val, err := ToIntE(s.Index(j).Interface())
 			if err != nil {
-				return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+				return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
 			}
 			a[j] = val
 		}
 		return a, nil
 	default:
-		return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
 	}
 }
 
-// StringToDate casts an empty interface to a time.Time.
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+	if i == nil {
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+
+	switch v := i.(type) {
+	case []time.Duration:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]time.Duration, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToDurationE(s.Index(j).Interface())
+			if err != nil {
+				return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats.  If no suitable format is found, an error is
+// returned.
 func StringToDate(s string) (time.Time, error) {
 	return parseDateWith(s, []string{
 		time.RFC3339,
@@ -504,16 +1117,22 @@
 		time.RFC1123,
 		time.RFC822Z,
 		time.RFC822,
+		time.RFC850,
 		time.ANSIC,
 		time.UnixDate,
 		time.RubyDate,
-		"2006-01-02 15:04:05Z07:00",
-		"02 Jan 06 15:04 MST",
+		"2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
 		"2006-01-02",
 		"02 Jan 2006",
 		"2006-01-02 15:04:05 -07:00",
 		"2006-01-02 15:04:05 -0700",
+		"2006-01-02 15:04:05Z07:00", // RFC3339 without T
 		"2006-01-02 15:04:05",
+		time.Kitchen,
+		time.Stamp,
+		time.StampMilli,
+		time.StampMicro,
+		time.StampNano,
 	})
 }
 
@@ -523,5 +1142,5 @@
 			return
 		}
 	}
-	return d, fmt.Errorf("Unable to parse date: %s", s)
+	return d, fmt.Errorf("unable to parse date: %s", s)
 }
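For illustration, a minimal sketch of how the newly added `ToDurationSliceE` and the expanded `StringToDate` layout list might be exercised. It assumes this file belongs to the `github.com/spf13/cast` package (as the vendored path suggests); the expected outputs in the comments follow from the code above.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Each element is converted with ToDurationE, so strings like "1m" work.
	durations, err := cast.ToDurationSliceE([]string{"1s", "1m", "2h"})
	fmt.Println(durations, err) // [1s 1m0s 2h0m0s] <nil>

	// StringToDate walks the predefined layout list until one parses;
	// this string matches the "2006-01-02 15:04:05 -0700" layout.
	t, err := cast.StringToDate("2017-08-28 10:30:00 -0700")
	fmt.Println(t.UTC().Format(time.RFC3339), err) // 2017-08-28T17:30:00Z <nil>
}
```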
diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md
index c6f327c..350a968 100644
--- a/vendor/github.com/spf13/jwalterweatherman/README.md
+++ b/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -44,7 +44,7 @@
  * FATAL
 
 These each are loggers based on the log standard library and follow the
-standard usage. Eg..
+standard usage. Eg.
 
 ```go
     import (
@@ -79,6 +79,13 @@
 
 ```
 
+NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
+
+```go
+notepad := jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+notepad.WARN.Println("Some warning")
+```
+
 _Why 7 levels?_
 
 Maybe you think that 7 levels are too much for any application... and you
@@ -118,35 +125,15 @@
 Note that JWW's own internal output uses log levels as well, so set the log
 level before making any other calls if you want to see what it's up to.
 
-### Using a temp log file
-
-JWW conveniently creates a temporary file and sets the log Handle to
-a io.Writer created for it. You should call this early in your application
-initialization routine as it will only log calls made after it is executed. 
-When this option is used, the library will fmt.Println where to find the
-log file.
-
-```go
-    import (
-        jww "github.com/spf13/jwalterweatherman"
-    )
-
-    jww.UseTempLogFile("YourAppName") 
-
-```
 
 ### Setting a log file
 
-JWW can log to any file you provide a path to (provided it’s writable).
-Will only append to this file.
+JWW can log to any `io.Writer`:
 
 
 ```go
-    import (
-        jww "github.com/spf13/jwalterweatherman"
-    )
 
-    jww.SetLogFile("/path/to/logfile") 
+    jww.SetLogOutput(customWriter) 
 
 ```
 
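As a rough usage sketch of the API this README describes, combining the global wrappers with an independent Notepad. It only uses symbols that appear in the vendored files below (import path `github.com/spf13/jwalterweatherman`, as in the diff):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	// Global notepad: send log output to a writer and lower the stdout threshold.
	jww.SetLogOutput(ioutil.Discard)
	jww.SetStdoutThreshold(jww.LevelInfo)
	jww.INFO.Println("printed to stdout")
	jww.TRACE.Println("below both thresholds, only counted")

	// Independent notepad with its own thresholds and prefix.
	notepad := jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "demo", log.Ldate|log.Ltime)
	notepad.WARN.Println("some warning")
}
```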
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 0000000..bcb7634
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,113 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+var (
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+	TRACE = defaultNotepad.TRACE
+	DEBUG = defaultNotepad.DEBUG
+	INFO = defaultNotepad.INFO
+	WARN = defaultNotepad.WARN
+	ERROR = defaultNotepad.ERROR
+	CRITICAL = defaultNotepad.CRITICAL
+	FATAL = defaultNotepad.FATAL
+
+	LOG = defaultNotepad.LOG
+	FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+	defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+	reloadDefaultNotepad()
+}
+
+// SetLogThreshold sets the log threshold for the default notepad. Warn by default.
+func SetLogThreshold(threshold Threshold) {
+	defaultNotepad.SetLogThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetLogOutput sets the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+	defaultNotepad.SetLogOutput(handle)
+	reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold sets the standard output threshold for the default notepad.
+// Error by default.
+func SetStdoutThreshold(threshold Threshold) {
+	defaultNotepad.SetStdoutThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetPrefix sets the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+	defaultNotepad.SetPrefix(prefix)
+	reloadDefaultNotepad()
+}
+
+// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+	defaultNotepad.SetFlags(flags)
+	reloadDefaultNotepad()
+}
+
+// LogThreshold returns the current global log threshold.
+func LogThreshold() Threshold {
+	return defaultNotepad.logThreshold
+}
+
+// StdoutThreshold returns the current global stdout threshold.
+func StdoutThreshold() Threshold {
+	return defaultNotepad.stdoutThreshold
+}
+
+// GetLogThreshold returns the threshold for the log logger.
+func GetLogThreshold() Threshold {
+	return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the threshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+	return defaultNotepad.GetStdoutThreshold()
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func LogCountForLevel(l Threshold) uint64 {
+	return defaultNotepad.LogCountForLevel(l)
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// at levels greater than or equal to the given threshold.
+func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func ResetLogCounters() {
+	defaultNotepad.ResetLogCounters()
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 0000000..11423ac
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,55 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"sync/atomic"
+)
+
+type logCounter struct {
+	counter uint64
+}
+
+func (c *logCounter) incr() {
+	atomic.AddUint64(&c.counter, 1)
+}
+
+func (c *logCounter) resetCounter() {
+	atomic.StoreUint64(&c.counter, 0)
+}
+
+func (c *logCounter) getCount() uint64 {
+	return atomic.LoadUint64(&c.counter)
+}
+
+func (c *logCounter) Write(p []byte) (n int, err error) {
+	c.incr()
+	return len(p), nil
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
+	return n.logCounters[l].getCount()
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// at levels greater than or equal to the given threshold.
+func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	var cnt uint64
+
+	for i := int(threshold); i < len(n.logCounters); i++ {
+		cnt += n.LogCountForLevel(Threshold(i))
+	}
+
+	return cnt
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func (n *Notepad) ResetLogCounters() {
+	for _, np := range n.logCounters {
+		np.resetCounter()
+	}
+}
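A small sketch of the counter helpers defined above, driven through the default notepad's global wrappers; the expected counts in the comments follow directly from the code in this file.

```go
package main

import (
	"fmt"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	jww.ERROR.Println("first problem")
	jww.ERROR.Println("second problem")

	// Every write through a level's logger bumps that level's counter.
	fmt.Println(jww.LogCountForLevel(jww.LevelError))                     // 2
	fmt.Println(jww.LogCountForLevelsGreaterThanorEqualTo(jww.LevelWarn)) // 2

	jww.ResetLogCounters()
	fmt.Println(jww.LogCountForLevel(jww.LevelError)) // 0
}
```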
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 0000000..edeff3a
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,194 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"fmt"
+	"io"
+	"log"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+	return prefixes[t]
+}
+
+const (
+	LevelTrace Threshold = iota
+	LevelDebug
+	LevelInfo
+	LevelWarn
+	LevelError
+	LevelCritical
+	LevelFatal
+)
+
+var prefixes map[Threshold]string = map[Threshold]string{
+	LevelTrace:    "TRACE",
+	LevelDebug:    "DEBUG",
+	LevelInfo:     "INFO",
+	LevelWarn:     "WARN",
+	LevelError:    "ERROR",
+	LevelCritical: "CRITICAL",
+	LevelFatal:    "FATAL",
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	loggers         [7]**log.Logger
+	logHandle       io.Writer
+	outHandle       io.Writer
+	logThreshold    Threshold
+	stdoutThreshold Threshold
+	prefix          string
+	flags           int
+
+	// One per Threshold
+	logCounters [7]*logCounter
+}
+
+// NewNotepad creates a new Notepad.
+func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
+	n := &Notepad{}
+
+	n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
+	n.outHandle = outHandle
+	n.logHandle = logHandle
+	n.stdoutThreshold = outThreshold
+	n.logThreshold = logThreshold
+
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+
+	n.flags = flags
+
+	n.LOG = log.New(n.logHandle,
+		"LOG:   ",
+		n.flags)
+	n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
+
+	n.init()
+	return n
+}
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+	logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
+
+	for t, logger := range n.loggers {
+		threshold := Threshold(t)
+		counter := &logCounter{}
+		n.logCounters[t] = counter
+		prefix := n.prefix + threshold.String() + " "
+
+		switch {
+		case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
+
+		case threshold >= n.logThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
+
+		case threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
+
+		default:
+			// counter doesn't care about prefix and flags, so don't use them
+			// for performance.
+			*logger = log.New(counter, "", 0)
+		}
+	}
+}
+
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+	n.logThreshold = threshold
+	n.init()
+}
+
+// SetLogOutput changes the file where log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+	n.logHandle = handle
+	n.init()
+}
+
+// GetLogThreshold returns the threshold for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+	return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+	n.stdoutThreshold = threshold
+	n.init()
+}
+
+// GetStdoutThreshold returns the threshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+	return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+	n.init()
+}
+
+// SetFlags chooses which flags the logger will display (after prefix and message
+// level). See the standard library's log package for more information.
+func (n *Notepad) SetFlags(flags int) {
+	n.flags = flags
+	n.init()
+}
+
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+	out *log.Logger
+	log *log.Logger
+}
+
+func (fb *Feedback) Println(v ...interface{}) {
+	fb.output(fmt.Sprintln(v...))
+}
+
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+	fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+	fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+	if fb.out != nil {
+		fb.out.Output(2, s)
+	}
+	if fb.log != nil {
+		fb.log.Output(2, s)
+	}
+}
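To make the threshold routing in `init()` concrete, a minimal sketch of a custom Notepad whose log output is captured in a buffer; the comments restate how the thresholds above split each level between the two writers.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	var logBuf bytes.Buffer

	// Stdout threshold Info, log threshold Trace: INFO goes to both writers,
	// while DEBUG and TRACE (>= Trace but < Info) go only to logBuf.
	n := jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, &logBuf, "myapp", log.Ldate|log.Ltime)

	n.INFO.Println("shown on stdout and captured")
	n.DEBUG.Println("captured only")

	// FEEDBACK prints plainly to stdout and with full detail to the log writer.
	n.FEEDBACK.Println("done")

	fmt.Print(logBuf.String())
}
```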
diff --git a/vendor/github.com/spf13/jwalterweatherman/thatswhyyoualwaysleaveanote.go b/vendor/github.com/spf13/jwalterweatherman/thatswhyyoualwaysleaveanote.go
deleted file mode 100644
index b64ed46..0000000
--- a/vendor/github.com/spf13/jwalterweatherman/thatswhyyoualwaysleaveanote.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package jwalterweatherman
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"sync/atomic"
-)
-
-// Level describes the chosen log level between
-// debug and critical.
-type Level int
-
-type NotePad struct {
-	Handle  io.Writer
-	Level   Level
-	Prefix  string
-	Logger  **log.Logger
-	counter uint64
-}
-
-func (n *NotePad) incr() {
-	atomic.AddUint64(&n.counter, 1)
-}
-
-func (n *NotePad) resetCounter() {
-	atomic.StoreUint64(&n.counter, 0)
-}
-
-func (n *NotePad) getCount() uint64 {
-	return atomic.LoadUint64(&n.counter)
-}
-
-type countingWriter struct {
-	incrFunc func()
-}
-
-func (cw *countingWriter) Write(p []byte) (n int, err error) {
-	cw.incrFunc()
-
-	return 0, nil
-}
-
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
-type Feedback struct{}
-
-const (
-	LevelTrace Level = iota
-	LevelDebug
-	LevelInfo
-	LevelWarn
-	LevelError
-	LevelCritical
-	LevelFatal
-	DefaultLogThreshold    = LevelWarn
-	DefaultStdoutThreshold = LevelError
-)
-
-var (
-	TRACE      *log.Logger
-	DEBUG      *log.Logger
-	INFO       *log.Logger
-	WARN       *log.Logger
-	ERROR      *log.Logger
-	CRITICAL   *log.Logger
-	FATAL      *log.Logger
-	LOG        *log.Logger
-	FEEDBACK   Feedback
-	LogHandle  io.Writer  = ioutil.Discard
-	OutHandle  io.Writer  = os.Stdout
-	BothHandle io.Writer  = io.MultiWriter(LogHandle, OutHandle)
-	NotePads   []*NotePad = []*NotePad{trace, debug, info, warn, err, critical, fatal}
-
-	trace           *NotePad = &NotePad{Level: LevelTrace, Handle: os.Stdout, Logger: &TRACE, Prefix: "TRACE: "}
-	debug           *NotePad = &NotePad{Level: LevelDebug, Handle: os.Stdout, Logger: &DEBUG, Prefix: "DEBUG: "}
-	info            *NotePad = &NotePad{Level: LevelInfo, Handle: os.Stdout, Logger: &INFO, Prefix: "INFO: "}
-	warn            *NotePad = &NotePad{Level: LevelWarn, Handle: os.Stdout, Logger: &WARN, Prefix: "WARN: "}
-	err             *NotePad = &NotePad{Level: LevelError, Handle: os.Stdout, Logger: &ERROR, Prefix: "ERROR: "}
-	critical        *NotePad = &NotePad{Level: LevelCritical, Handle: os.Stdout, Logger: &CRITICAL, Prefix: "CRITICAL: "}
-	fatal           *NotePad = &NotePad{Level: LevelFatal, Handle: os.Stdout, Logger: &FATAL, Prefix: "FATAL: "}
-	logThreshold    Level    = DefaultLogThreshold
-	outputThreshold Level    = DefaultStdoutThreshold
-)
-
-const (
-	DATE  = log.Ldate
-	TIME  = log.Ltime
-	SFILE = log.Lshortfile
-	LFILE = log.Llongfile
-	MSEC  = log.Lmicroseconds
-)
-
-var logFlags = DATE | TIME | SFILE
-
-func init() {
-	SetStdoutThreshold(DefaultStdoutThreshold)
-}
-
-// initialize will setup the jWalterWeatherman standard approach of providing the user
-// some feedback and logging a potentially different amount based on independent log and output thresholds.
-// By default the output has a lower threshold than logged
-// Don't use if you have manually set the Handles of the different levels as it will overwrite them.
-func initialize() {
-	BothHandle = io.MultiWriter(LogHandle, OutHandle)
-
-	for _, n := range NotePads {
-		if n.Level < outputThreshold && n.Level < logThreshold {
-			n.Handle = ioutil.Discard
-		} else if n.Level >= outputThreshold && n.Level >= logThreshold {
-			n.Handle = BothHandle
-		} else if n.Level >= outputThreshold && n.Level < logThreshold {
-			n.Handle = OutHandle
-		} else {
-			n.Handle = LogHandle
-		}
-	}
-
-	for _, n := range NotePads {
-		n.Handle = io.MultiWriter(n.Handle, &countingWriter{n.incr})
-		*n.Logger = log.New(n.Handle, n.Prefix, logFlags)
-	}
-
-	LOG = log.New(LogHandle,
-		"LOG:   ",
-		logFlags)
-}
-
-// Set the log Flags (Available flag: DATE, TIME, SFILE, LFILE and MSEC)
-func SetLogFlag(flags int) {
-	logFlags = flags
-	initialize()
-}
-
-// Level returns the current global log threshold.
-func LogThreshold() Level {
-	return logThreshold
-}
-
-// Level returns the current global output threshold.
-func StdoutThreshold() Level {
-	return outputThreshold
-}
-
-// Ensures that the level provided is within the bounds of available levels
-func levelCheck(level Level) Level {
-	switch {
-	case level <= LevelTrace:
-		return LevelTrace
-	case level >= LevelFatal:
-		return LevelFatal
-	default:
-		return level
-	}
-}
-
-// Establishes a threshold where anything matching or above will be logged
-func SetLogThreshold(level Level) {
-	logThreshold = levelCheck(level)
-	initialize()
-}
-
-// Establishes a threshold where anything matching or above will be output
-func SetStdoutThreshold(level Level) {
-	outputThreshold = levelCheck(level)
-	initialize()
-}
-
-// Conveniently Sets the Log Handle to a io.writer created for the file behind the given filepath
-// Will only append to this file
-func SetLogFile(path string) {
-	file, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
-	if err != nil {
-		CRITICAL.Println("Failed to open log file:", path, err)
-		os.Exit(-1)
-	}
-
-	INFO.Println("Logging to", file.Name())
-
-	LogHandle = file
-	initialize()
-}
-
-// Conveniently Creates a temporary file and sets the Log Handle to a io.writer created for it
-func UseTempLogFile(prefix string) {
-	file, err := ioutil.TempFile(os.TempDir(), prefix)
-	if err != nil {
-		CRITICAL.Println(err)
-	}
-
-	INFO.Println("Logging to", file.Name())
-
-	LogHandle = file
-	initialize()
-}
-
-// LogCountForLevel returns the number of log invocations for a given level.
-func LogCountForLevel(l Level) uint64 {
-	for _, np := range NotePads {
-		if np.Level == l {
-			return np.getCount()
-		}
-	}
-	return 0
-}
-
-// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
-// greater than or equal to a given level threshold.
-func LogCountForLevelsGreaterThanorEqualTo(threshold Level) uint64 {
-	var cnt uint64
-	for _, np := range NotePads {
-		if np.Level >= threshold {
-			cnt += np.getCount()
-		}
-	}
-	return cnt
-}
-
-// ResetLogCounters resets the invocation counters for all levels.
-func ResetLogCounters() {
-	for _, np := range NotePads {
-		np.resetCounter()
-	}
-}
-
-// Disables logging for the entire JWW system
-func DiscardLogging() {
-	LogHandle = ioutil.Discard
-	initialize()
-}
-
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
-func (fb *Feedback) Println(v ...interface{}) {
-	s := fmt.Sprintln(v...)
-	fmt.Print(s)
-	LOG.Output(2, s)
-}
-
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
-func (fb *Feedback) Printf(format string, v ...interface{}) {
-	s := fmt.Sprintf(format, v...)
-	fmt.Print(s)
-	LOG.Output(2, s)
-}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
index 707bdc3..f8a63b3 100644
--- a/vendor/github.com/spf13/pflag/.travis.yml
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -3,18 +3,19 @@
 language: go
 
 go:
-        - 1.6.3
-        - 1.7.3
-        - tip
+  - 1.7.3
+  - 1.8.1
+  - tip
 
 matrix:
-        allow_failures:
-                  - go: tip
+  allow_failures:
+    - go: tip
+
 install:
-        - go get github.com/golang/lint/golint
-        - export PATH=$GOPATH/bin:$PATH
-        - go install ./...
+  - go get github.com/golang/lint/golint
+  - export PATH=$GOPATH/bin:$PATH
+  - go install ./...
 
 script:
-        - verify/all.sh -v
-        - go test ./...
+  - verify/all.sh -v
+  - go test ./...
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
index 08ad945..b052414 100644
--- a/vendor/github.com/spf13/pflag/README.md
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -1,4 +1,6 @@
 [![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
 
 ## Description
 
@@ -106,9 +108,9 @@
 var ip = flag.IntP("flagname", "f", 1234, "help message")
 var flagvar bool
 func init() {
-    flag.BoolVarP("boolname", "b", true, "help message")
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
 }
-flag.VarP(&flagVar, "varname", "v", 1234, "help message")
+flag.VarP(&flagVal, "varname", "v", "help message")
 ```
 
 Shorthand letters can be used with single dashes on the command line.
@@ -244,6 +246,25 @@
 flags.MarkHidden("secretFlag")
 ```
 
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+  -v, --verbose           verbose output
+      --coolflag string   it's really cool flag (default "yeaah")
+      --usefulflag int    sometimes it's very useful (default 777)
+```
+
+
 ## Supporting Go flags when using pflag
 In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
 to support flags defined by third-party dependencies (e.g. `golang/glog`).
@@ -268,8 +289,8 @@
 You can see the full reference documentation of the pflag package
 [at godoc.org][3], or through go's standard documentation system by
 running `godoc -http=:6060` and browsing to
-[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
 installation.
 
-[2]: http://localhost:6060/pkg/github.com/ogier/pflag
-[3]: http://godoc.org/github.com/ogier/pflag
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
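The "Supporting Go flags when using pflag" section above refers to merging stdlib flags into the pflag set. That helper (`AddGoFlagSet`) is not shown in this hunk, so the following is only a sketch assuming the usual pflag API for this:

```go
package main

import (
	goflag "flag"
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// A flag defined through the standard library's flag package.
	goflag.Bool("stdlib-flag", false, "defined via the stdlib flag package")

	// Merge stdlib-defined flags into pflag's command-line set, then parse once.
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	flag.Parse()

	fmt.Println(flag.Lookup("stdlib-flag").Value.String())
}
```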
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..5af02f1
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+	"io"
+	"strconv"
+	"strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+	value   *[]bool
+	changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+	bsv := new(boolSliceValue)
+	bsv.value = p
+	*bsv.value = val
+	return bsv
+}
+
+// Set converts the given comma-separated string of booleans and assigns the result as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse boolean values into slice
+	out := make([]bool, 0, len(boolStrSlice))
+	for _, boolStr := range boolStrSlice {
+		b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+		if err != nil {
+			return err
+		}
+		out = append(out, b)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+	return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+	boolStrSlice := make([]string, len(*s.value))
+	for i, b := range *s.value {
+		boolStrSlice[i] = strconv.FormatBool(b)
+	}
+
+	out, _ := writeAsCSV(boolStrSlice)
+
+	return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []bool{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]bool, len(ss))
+	for i, t := range ss {
+		var err error
+		out[i], err = strconv.ParseBool(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+	val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+	if err != nil {
+		return []bool{}, err
+	}
+	return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
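A quick sketch of the new boolSlice flag type in use, relying only on the constructors and accessors defined in this file plus pflag's standard `NewFlagSet`/`Parse`:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	enabled := fs.BoolSliceP("enabled", "e", []bool{}, "comma-separated booleans")

	// Comma-separated values and repeated flags both accumulate into the slice.
	if err := fs.Parse([]string{"--enabled=true,false", "-e", "true"}); err != nil {
		panic(err)
	}

	fmt.Println(*enabled) // [true false true]

	got, _ := fs.GetBoolSlice("enabled")
	fmt.Println(got)
}
```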
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
index d22be41..250a438 100644
--- a/vendor/github.com/spf13/pflag/count.go
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -83,7 +83,9 @@
 	return p
 }
 
-// Count like Count only the flag is placed on the CommandLine isntead of a given flag set
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
 func Count(name string, usage string) *int {
 	return CommandLine.CountP(name, "", usage)
 }
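The corrected comment above describes count flags; a minimal sketch follows, assuming the usual count-flag behavior where each occurrence (including stacked shorthands like `-vvv`) increments the value by one:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	verbosity := fs.CountP("verbose", "v", "increase verbosity (repeatable)")

	// Each occurrence adds 1, so -vvv parses to 3.
	if err := fs.Parse([]string{"-vvv"}); err != nil {
		panic(err)
	}
	fmt.Println(*verbosity) // 3
}
```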
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index fa81564..6f1fc30 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -16,9 +16,9 @@
 pflag under the name "flag" then all code should continue to function
 with no changes.
 
-	import flag "github.com/ogier/pflag"
+	import flag "github.com/spf13/pflag"
 
-	There is one exception to this: if you directly instantiate the Flag struct
+There is one exception to this: if you directly instantiate the Flag struct
 there is one more field "Shorthand" that you will need to set.
 Most code never instantiates this struct directly, and instead uses
 functions such as String(), BoolVar(), and Var(), and is therefore
@@ -134,14 +134,21 @@
 	// a custom error handler.
 	Usage func()
 
+	// SortFlags indicates whether flags should be sorted in
+	// help/usage messages.
+	SortFlags bool
+
 	name              string
 	parsed            bool
 	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
 	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
 	shorthands        map[byte]*Flag
 	args              []string // arguments after flags
 	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
-	exitOnError       bool     // does the program exit if there's an error?
 	errorHandling     ErrorHandling
 	output            io.Writer // nil means stderr; use out() accessor
 	interspersed      bool      // allow interspersed option/non-option args
@@ -156,7 +163,7 @@
 	Value               Value               // value as set
 	DefValue            string              // default value (as text); for usage message
 	Changed             bool                // If the user set the value (or if left to default)
-	NoOptDefVal         string              //default value (as text); if the flag is on the command line without any options
+	NoOptDefVal         string              // default value (as text); if the flag is on the command line without any options
 	Deprecated          string              // If this flag is deprecated, this string is the new or now thing to use
 	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
 	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the new or now thing to use
@@ -194,11 +201,13 @@
 // "--getUrl" which may also be translated to "geturl" and everything will work.
 func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
 	f.normalizeNameFunc = n
-	for k, v := range f.formal {
-		delete(f.formal, k)
-		nname := f.normalizeFlagName(string(k))
-		f.formal[nname] = v
+	f.sortedFormal = f.sortedFormal[:0]
+	for k, v := range f.orderedFormal {
+		delete(f.formal, NormalizedName(v.Name))
+		nname := f.normalizeFlagName(v.Name)
 		v.Name = string(nname)
+		f.formal[nname] = v
+		f.orderedFormal[k] = v
 	}
 }
 
@@ -229,10 +238,25 @@
 	f.output = output
 }
 
-// VisitAll visits the flags in lexicographical order, calling fn for each.
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
 // It visits all flags, even those not set.
 func (f *FlagSet) VisitAll(fn func(*Flag)) {
-	for _, flag := range sortFlags(f.formal) {
+	if len(f.formal) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.formal) != len(f.sortedFormal) {
+			f.sortedFormal = sortFlags(f.formal)
+		}
+		flags = f.sortedFormal
+	} else {
+		flags = f.orderedFormal
+	}
+
+	for _, flag := range flags {
 		fn(flag)
 	}
 }
@@ -253,22 +277,39 @@
 	return false
 }
 
-// VisitAll visits the command-line flags in lexicographical order, calling
-// fn for each.  It visits all flags, even those not set.
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
 func VisitAll(fn func(*Flag)) {
 	CommandLine.VisitAll(fn)
 }
 
-// Visit visits the flags in lexicographical order, calling fn for each.
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
 // It visits only those flags that have been set.
 func (f *FlagSet) Visit(fn func(*Flag)) {
-	for _, flag := range sortFlags(f.actual) {
+	if len(f.actual) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.actual) != len(f.sortedActual) {
+			f.sortedActual = sortFlags(f.actual)
+		}
+		flags = f.sortedActual
+	} else {
+		flags = f.orderedActual
+	}
+
+	for _, flag := range flags {
 		fn(flag)
 	}
 }
 
-// Visit visits the command-line flags in lexicographical order, calling fn
-// for each.  It visits only those flags that have been set.
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
 func Visit(fn func(*Flag)) {
 	CommandLine.Visit(fn)
 }
@@ -278,6 +319,22 @@
 	return f.lookup(f.normalizeFlagName(name))
 }
 
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprintf(f.out(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
 // lookup returns the Flag structure of the named flag, returning nil if none exists.
 func (f *FlagSet) lookup(name NormalizedName) *Flag {
 	return f.formal[name]
@@ -319,7 +376,7 @@
 	if flag == nil {
 		return fmt.Errorf("flag %q does not exist", name)
 	}
-	if len(usageMessage) == 0 {
+	if usageMessage == "" {
 		return fmt.Errorf("deprecated message for flag %q must be set", name)
 	}
 	flag.Deprecated = usageMessage
@@ -334,7 +391,7 @@
 	if flag == nil {
 		return fmt.Errorf("flag %q does not exist", name)
 	}
-	if len(usageMessage) == 0 {
+	if usageMessage == "" {
 		return fmt.Errorf("deprecated message for flag %q must be set", name)
 	}
 	flag.ShorthandDeprecated = usageMessage
@@ -358,6 +415,12 @@
 	return CommandLine.Lookup(name)
 }
 
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
 // Set sets the value of the named flag.
 func (f *FlagSet) Set(name, value string) error {
 	normalName := f.normalizeFlagName(name)
@@ -365,17 +428,28 @@
 	if !ok {
 		return fmt.Errorf("no such flag -%v", name)
 	}
+
 	err := flag.Value.Set(value)
 	if err != nil {
-		return err
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
 	}
+
 	if f.actual == nil {
 		f.actual = make(map[NormalizedName]*Flag)
 	}
 	f.actual[normalName] = flag
+	f.orderedActual = append(f.orderedActual, flag)
+
 	flag.Changed = true
-	if len(flag.Deprecated) > 0 {
-		fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
 	}
 	return nil
 }
@@ -487,31 +561,98 @@
 	return
 }
 
-// FlagUsages Returns a string containing the usage information for all flags in
-// the FlagSet
-func (f *FlagSet) FlagUsages() string {
-	x := new(bytes.Buffer)
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+	if i+slop > len(s) {
+		return s, ""
+	}
+
+	w := strings.LastIndexAny(s[:i], " \t")
+	if w <= 0 {
+		return s, ""
+	}
+
+	return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+	if w == 0 {
+		return s
+	}
+
+	// space between indent i and end of line width w into which
+	// we should wrap the text.
+	wrap := w - i
+
+	var r, l string
+
+	// Not enough space for sensible wrapping. Wrap as a block on
+	// the next line instead.
+	if wrap < 24 {
+		i = 16
+		wrap = w - i
+		r += "\n" + strings.Repeat(" ", i)
+	}
+	// If still not enough space then don't even try to wrap.
+	if wrap < 24 {
+		return s
+	}
+
+	// Try to avoid short orphan words on the final line, by
+	// allowing wrapN to go a bit over if that would fit in the
+	// remainder of the line.
+	slop := 5
+	wrap = wrap - slop
+
+	// Handle first line, which is indented by the caller (or the
+	// special case above)
+	l, s = wrapN(wrap, slop, s)
+	r = r + l
+
+	// Now wrap the rest
+	for s != "" {
+		var t string
+
+		t, s = wrapN(wrap, slop, s)
+		r = r + "\n" + strings.Repeat(" ", i) + t
+	}
+
+	return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+	buf := new(bytes.Buffer)
 
 	lines := make([]string, 0, len(f.formal))
 
 	maxlen := 0
 	f.VisitAll(func(flag *Flag) {
-		if len(flag.Deprecated) > 0 || flag.Hidden {
+		if flag.Deprecated != "" || flag.Hidden {
 			return
 		}
 
 		line := ""
-		if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
 			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
 		} else {
 			line = fmt.Sprintf("      --%s", flag.Name)
 		}
 
 		varname, usage := UnquoteUsage(flag)
-		if len(varname) > 0 {
+		if varname != "" {
 			line += " " + varname
 		}
-		if len(flag.NoOptDefVal) > 0 {
+		if flag.NoOptDefVal != "" {
 			switch flag.Value.Type() {
 			case "string":
 				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
@@ -534,7 +675,7 @@
 		line += usage
 		if !flag.defaultIsZeroValue() {
 			if flag.Value.Type() == "string" {
-				line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
+				line += fmt.Sprintf(" (default %q)", flag.DefValue)
 			} else {
 				line += fmt.Sprintf(" (default %s)", flag.DefValue)
 			}
@@ -546,10 +687,17 @@
 	for _, line := range lines {
 		sidx := strings.Index(line, "\x00")
 		spacing := strings.Repeat(" ", maxlen-sidx)
-		fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:])
+		// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+		fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
 	}
 
-	return x.String()
+	return buf.Str