cloud.google.com/go/pubsub added to vendor

Fixes #553
I've added `cloud.google.com/go/pubsub` to the vendor directory to avoid a problem with duplicate HTTP handler registrations.
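
For context, the failure mode looks roughly like the following sketch (the
handler path is made up): when two copies of the same dependency, one vendored
and one not, each register the same HTTP pattern in an init func, net/http
panics at startup.

    package main

    import "net/http"

    func main() {
    	// The first registration succeeds.
    	http.HandleFunc("/debug/example", func(w http.ResponseWriter, r *http.Request) {})

    	// Registering the same pattern again, which is effectively what happens
    	// when two copies of a package each register a handler in init(), panics
    	// with "http: multiple registrations for /debug/example".
    	http.HandleFunc("/debug/example", func(w http.ResponseWriter, r *http.Request) {})
    }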

Change-Id: I0ea6cebdc8547b53ae4dd8801a4bd89547e4f2b7
GitHub-Last-Rev: 0cb2eab2e3a401a9fa3b128d0480c5b5964a9b67
GitHub-Pull-Request: golang/gddo#557
Reviewed-on: https://go-review.googlesource.com/116417
Reviewed-by: Tuo Shan <shantuo@google.com>
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index e532843..8af16a1 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,7 +1,7 @@
 {
 	"ImportPath": "github.com/golang/gddo",
-	"GoVersion": "go1.9",
-	"GodepVersion": "v79",
+	"GoVersion": "go1.10",
+	"GodepVersion": "v80",
 	"Packages": [
 		"./..."
 	],
@@ -12,6 +12,11 @@
 			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
 		},
 		{
+			"ImportPath": "cloud.google.com/go/iam",
+			"Comment": "v0.16.0",
+			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+		},
+		{
 			"ImportPath": "cloud.google.com/go/internal/tracecontext",
 			"Comment": "v0.16.0",
 			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
@@ -37,6 +42,16 @@
 			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
 		},
 		{
+			"ImportPath": "cloud.google.com/go/pubsub",
+			"Comment": "v0.16.0",
+			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+		},
+		{
+			"ImportPath": "cloud.google.com/go/pubsub/apiv1",
+			"Comment": "v0.16.0",
+			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+		},
+		{
 			"ImportPath": "cloud.google.com/go/trace",
 			"Comment": "v0.16.0",
 			"Rev": "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
@@ -289,6 +304,10 @@
 			"Rev": "13449ad91cb26cb47661c1b080790392170385fd"
 		},
 		{
+			"ImportPath": "golang.org/x/sync/errgroup",
+			"Rev": "f52d1811a62927559de87708c8913c1650ce4f26"
+		},
+		{
 			"ImportPath": "golang.org/x/sync/semaphore",
 			"Rev": "f52d1811a62927559de87708c8913c1650ce4f26"
 		},
@@ -298,18 +317,22 @@
 		},
 		{
 			"ImportPath": "golang.org/x/text/secure/bidirule",
+			"Comment": "v0.1.0-6-g1cbadb4",
 			"Rev": "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
 		},
 		{
 			"ImportPath": "golang.org/x/text/transform",
+			"Comment": "v0.1.0-6-g1cbadb4",
 			"Rev": "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/bidi",
+			"Comment": "v0.1.0-6-g1cbadb4",
 			"Rev": "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/norm",
+			"Comment": "v0.1.0-6-g1cbadb4",
 			"Rev": "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
 		},
 		{
@@ -318,15 +341,15 @@
 		},
 		{
 			"ImportPath": "golang.org/x/tools/go/gcexportdata",
-			"Rev": "e531a2a1c15f94033f6fa87666caeb19a688175f"
+			"Rev": "a5b4c53f6e8bdcafa95a94671bf2d1203365858b"
 		},
 		{
-			"ImportPath": "golang.org/x/tools/go/gcimporter15",
-			"Rev": "e531a2a1c15f94033f6fa87666caeb19a688175f"
+			"ImportPath": "golang.org/x/tools/go/internal/gcimporter",
+			"Rev": "a5b4c53f6e8bdcafa95a94671bf2d1203365858b"
 		},
 		{
 			"ImportPath": "golang.org/x/tools/present",
-			"Rev": "e531a2a1c15f94033f6fa87666caeb19a688175f"
+			"Rev": "a5b4c53f6e8bdcafa95a94671bf2d1203365858b"
 		},
 		{
 			"ImportPath": "google.golang.org/api/cloudtrace/v1",
@@ -497,7 +520,7 @@
 			"Rev": "1e559d0a00eef8a9a43151db4665280bd8dd5886"
 		},
 		{
-			"ImportPath": "google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1",
+			"ImportPath": "google.golang.org/genproto/googleapis/iam/v1",
 			"Rev": "1e559d0a00eef8a9a43151db4665280bd8dd5886"
 		},
 		{
@@ -509,7 +532,7 @@
 			"Rev": "1e559d0a00eef8a9a43151db4665280bd8dd5886"
 		},
 		{
-			"ImportPath": "google.golang.org/genproto/googleapis/longrunning",
+			"ImportPath": "google.golang.org/genproto/googleapis/pubsub/v1",
 			"Rev": "1e559d0a00eef8a9a43151db4665280bd8dd5886"
 		},
 		{
diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go
new file mode 100644
index 0000000..8722ee8
--- /dev/null
+++ b/vendor/cloud.google.com/go/iam/iam.go
@@ -0,0 +1,256 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package iam supports the resource-specific operations of Google Cloud
+// IAM (Identity and Access Management) for the Google Cloud Libraries.
+// See https://cloud.google.com/iam for more about IAM.
+//
+// Users of the Google Cloud Libraries will typically not use this package
+// directly. Instead they will begin with some resource that supports IAM, like
+// a pubsub topic, and call its IAM method to get a Handle for that resource.
+package iam
+
+import (
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/iam/v1"
+	"google.golang.org/grpc"
+)
+
+// client abstracts the IAMPolicy API to allow multiple implementations.
+type client interface {
+	Get(ctx context.Context, resource string) (*pb.Policy, error)
+	Set(ctx context.Context, resource string, p *pb.Policy) error
+	Test(ctx context.Context, resource string, perms []string) ([]string, error)
+}
+
+// grpcClient implements client for the standard gRPC-based IAMPolicy service.
+type grpcClient struct {
+	c pb.IAMPolicyClient
+}
+
+func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
+	proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
+	if err != nil {
+		return nil, err
+	}
+	return proto, nil
+}
+func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+	_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
+		Resource: resource,
+		Policy:   p,
+	})
+	return err
+}
+
+func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
+	res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
+		Resource:    resource,
+		Permissions: perms,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return res.Permissions, nil
+}
+
+// A Handle provides IAM operations for a resource.
+type Handle struct {
+	c        client
+	resource string
+}
+
+// InternalNewHandle is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandle returns a Handle for resource.
+// The conn parameter refers to a server that must support the IAMPolicy service.
+func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
+	return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
+}
+
+// InternalNewHandleClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleClient returns a Handle for resource using the given
+// client implementation.
+func InternalNewHandleClient(c client, resource string) *Handle {
+	return &Handle{
+		c:        c,
+		resource: resource,
+	}
+}
+
+// Policy retrieves the IAM policy for the resource.
+func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
+	proto, err := h.c.Get(ctx, h.resource)
+	if err != nil {
+		return nil, err
+	}
+	return &Policy{InternalProto: proto}, nil
+}
+
+// SetPolicy replaces the resource's current policy with the supplied Policy.
+//
+// If policy was created from a prior call to Get, then the modification will
+// only succeed if the policy has not changed since the Get.
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
+	return h.c.Set(ctx, h.resource, policy.InternalProto)
+}
+
+// TestPermissions returns the subset of permissions that the caller has on the resource.
+func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
+	return h.c.Test(ctx, h.resource, permissions)
+}
+
+// A RoleName is a name representing a collection of permissions.
+type RoleName string
+
+// Common role names.
+const (
+	Owner  RoleName = "roles/owner"
+	Editor RoleName = "roles/editor"
+	Viewer RoleName = "roles/viewer"
+)
+
+const (
+	// AllUsers is a special member that denotes all users, even unauthenticated ones.
+	AllUsers = "allUsers"
+
+	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
+	AllAuthenticatedUsers = "allAuthenticatedUsers"
+)
+
+// A Policy is a list of Bindings representing roles
+// granted to members.
+//
+// The zero Policy is a valid policy with no bindings.
+type Policy struct {
+	// TODO(jba): when type aliases are available, put Policy into an internal package
+	// and provide an exported alias here.
+
+	// This field is exported for use by the Google Cloud Libraries only.
+	// It may become unexported in a future release.
+	InternalProto *pb.Policy
+}
+
+// Members returns the list of members with the supplied role.
+// The return value should not be modified. Use Add and Remove
+// to modify the members of a role.
+func (p *Policy) Members(r RoleName) []string {
+	b := p.binding(r)
+	if b == nil {
+		return nil
+	}
+	return b.Members
+}
+
+// HasRole reports whether member has role r.
+func (p *Policy) HasRole(member string, r RoleName) bool {
+	return memberIndex(member, p.binding(r)) >= 0
+}
+
+// Add adds member member to role r if it is not already present.
+// A new binding is created if there is no binding for the role.
+func (p *Policy) Add(member string, r RoleName) {
+	b := p.binding(r)
+	if b == nil {
+		if p.InternalProto == nil {
+			p.InternalProto = &pb.Policy{}
+		}
+		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
+			Role:    string(r),
+			Members: []string{member},
+		})
+		return
+	}
+	if memberIndex(member, b) < 0 {
+		b.Members = append(b.Members, member)
+		return
+	}
+}
+
+// Remove removes member from role r if it is present.
+func (p *Policy) Remove(member string, r RoleName) {
+	bi := p.bindingIndex(r)
+	if bi < 0 {
+		return
+	}
+	bindings := p.InternalProto.Bindings
+	b := bindings[bi]
+	mi := memberIndex(member, b)
+	if mi < 0 {
+		return
+	}
+	// Order doesn't matter for bindings or members, so to remove, move the last item
+	// into the removed spot and shrink the slice.
+	if len(b.Members) == 1 {
+		// Remove binding.
+		last := len(bindings) - 1
+		bindings[bi] = bindings[last]
+		bindings[last] = nil
+		p.InternalProto.Bindings = bindings[:last]
+		return
+	}
+	// Remove member.
+	// TODO(jba): worry about multiple copies of m?
+	last := len(b.Members) - 1
+	b.Members[mi] = b.Members[last]
+	b.Members[last] = ""
+	b.Members = b.Members[:last]
+}
+
+// Roles returns the names of all the roles that appear in the Policy.
+func (p *Policy) Roles() []RoleName {
+	if p.InternalProto == nil {
+		return nil
+	}
+	var rns []RoleName
+	for _, b := range p.InternalProto.Bindings {
+		rns = append(rns, RoleName(b.Role))
+	}
+	return rns
+}
+
+// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding {
+	i := p.bindingIndex(r)
+	if i < 0 {
+		return nil
+	}
+	return p.InternalProto.Bindings[i]
+}
+
+func (p *Policy) bindingIndex(r RoleName) int {
+	if p.InternalProto == nil {
+		return -1
+	}
+	for i, b := range p.InternalProto.Bindings {
+		if b.Role == string(r) {
+			return i
+		}
+	}
+	return -1
+}
+
+// memberIndex returns the index of m in b's Members, or -1 if not found.
+func memberIndex(m string, b *pb.Binding) int {
+	if b == nil {
+		return -1
+	}
+	for i, mm := range b.Members {
+		if mm == m {
+			return i
+		}
+	}
+	return -1
+}
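
As a rough usage sketch (the project, topic, and member values below are
placeholders): a Handle is normally obtained from a resource client rather
than constructed directly. With the generated publisher client vendored below,
that might look like this:

    package main

    import (
    	"context"
    	"log"

    	"cloud.google.com/go/iam"
    	pubsubv1 "cloud.google.com/go/pubsub/apiv1"
    	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
    )

    func main() {
    	ctx := context.Background()
    	client, err := pubsubv1.NewPublisherClient(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer client.Close()

    	// "my-project", "my-topic", and the member string are placeholders.
    	topic := &pubsubpb.Topic{Name: pubsubv1.PublisherTopicPath("my-project", "my-topic")}
    	handle := client.TopicIAM(topic)

    	// Read the policy, grant the viewer role to a member, and write it back.
    	policy, err := handle.Policy(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	policy.Add("user:someone@example.com", iam.Viewer)
    	if err := handle.SetPolicy(ctx, policy); err != nil {
    		log.Fatal(err)
    	}
    }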
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
new file mode 100644
index 0000000..b5967ab
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
@@ -0,0 +1,9 @@
+Auto-generated pubsub v1 clients
+=================================
+
+This package includes auto-generated clients for the pubsub v1 API.
+
+Use the handwritten client (in the parent directory,
+cloud.google.com/go/pubsub) in preference to this.
+
+This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
new file mode 100644
index 0000000..8945cfa
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
@@ -0,0 +1,50 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package pubsub is an auto-generated package for the
+// Google Cloud Pub/Sub API.
+//
+//   NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
+//
+// Provides reliable, many-to-many, asynchronous messaging between
+// applications.
+//
+// Use the client at cloud.google.com/go/pubsub in preference to this.
+package pubsub
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+		"https://www.googleapis.com/auth/pubsub",
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
new file mode 100644
index 0000000..6088dac
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
@@ -0,0 +1,416 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package pubsub
+
+import (
+	"math"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"cloud.google.com/go/internal/version"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// PublisherCallOptions contains the retry settings for each method of PublisherClient.
+type PublisherCallOptions struct {
+	CreateTopic            []gax.CallOption
+	UpdateTopic            []gax.CallOption
+	Publish                []gax.CallOption
+	GetTopic               []gax.CallOption
+	ListTopics             []gax.CallOption
+	ListTopicSubscriptions []gax.CallOption
+	DeleteTopic            []gax.CallOption
+}
+
+func defaultPublisherClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("pubsub.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultPublisherCallOptions() *PublisherCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+		{"messaging", "one_plus_delivery"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.Aborted,
+					codes.Canceled,
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.ResourceExhausted,
+					codes.Unavailable,
+					codes.Unknown,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &PublisherCallOptions{
+		CreateTopic:            retry[[2]string{"default", "idempotent"}],
+		UpdateTopic:            retry[[2]string{"default", "idempotent"}],
+		Publish:                retry[[2]string{"messaging", "one_plus_delivery"}],
+		GetTopic:               retry[[2]string{"default", "idempotent"}],
+		ListTopics:             retry[[2]string{"default", "idempotent"}],
+		ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}],
+		DeleteTopic:            retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// PublisherClient is a client for interacting with Google Cloud Pub/Sub API.
+type PublisherClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	publisherClient pubsubpb.PublisherClient
+
+	// The call options for this service.
+	CallOptions *PublisherCallOptions
+
+	// The metadata to be sent with each request.
+	Metadata metadata.MD
+}
+
+// NewPublisherClient creates a new publisher client.
+//
+// The service that an application uses to manipulate topics, and to send
+// messages to a topic.
+func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &PublisherClient{
+		conn:        conn,
+		CallOptions: defaultPublisherCallOptions(),
+
+		publisherClient: pubsubpb.NewPublisherClient(conn),
+	}
+	c.SetGoogleClientInfo()
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *PublisherClient) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *PublisherClient) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// PublisherProjectPath returns the path for the project resource.
+func PublisherProjectPath(project string) string {
+	return "" +
+		"projects/" +
+		project +
+		""
+}
+
+// PublisherTopicPath returns the path for the topic resource.
+func PublisherTopicPath(project, topic string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/topics/" +
+		topic +
+		""
+}
+
+func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), subscription.Name)
+}
+
+func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), topic.Name)
+}
+
+// CreateTopic creates the given topic with the given name.
+func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...)
+	var resp *pubsubpb.Topic
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateTopic updates an existing topic. Note that certain properties of a topic are not
+// modifiable.  Options settings follow the style guide:
+// NOTE:  The style guide requires body: "topic" instead of body: "*".
+// Keeping the latter for internal consistency in V1, however it should be
+// corrected in V2.  See
+// https://cloud.google.com/apis/design/standard_methods#update for details.
+func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...)
+	var resp *pubsubpb.Topic
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic
+// does not exist. The message payload must not be empty; it must contain
+// either a non-empty data field, or at least one attribute.
+func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
+	var resp *pubsubpb.PublishResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetTopic gets the configuration of a topic.
+func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...)
+	var resp *pubsubpb.Topic
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListTopics lists matching topics.
+func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
+	it := &TopicIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {
+		var resp *pubsubpb.ListTopicsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Topics, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// ListTopicSubscriptions lists the name of the subscriptions for this topic.
+func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
+	it := &StringIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
+		var resp *pubsubpb.ListTopicSubscriptionsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Subscriptions, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic
+// does not exist. After a topic is deleted, a new topic may be created with
+// the same name; this is an entirely new topic with none of the old
+// configuration or subscriptions. Existing subscriptions to this topic are
+// not deleted, but their topic field is set to _deleted-topic_.
+func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// StringIterator manages a stream of string.
+type StringIterator struct {
+	items    []string
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *StringIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *StringIterator) Next() (string, error) {
+	var item string
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *StringIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *StringIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TopicIterator manages a stream of *pubsubpb.Topic.
+type TopicIterator struct {
+	items    []*pubsubpb.Topic
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TopicIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TopicIterator) Next() (*pubsubpb.Topic, error) {
+	var item *pubsubpb.Topic
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *TopicIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *TopicIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
new file mode 100644
index 0000000..574c07d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
@@ -0,0 +1,631 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package pubsub
+
+import (
+	"math"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"cloud.google.com/go/internal/version"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
+type SubscriberCallOptions struct {
+	CreateSubscription []gax.CallOption
+	GetSubscription    []gax.CallOption
+	UpdateSubscription []gax.CallOption
+	ListSubscriptions  []gax.CallOption
+	DeleteSubscription []gax.CallOption
+	ModifyAckDeadline  []gax.CallOption
+	Acknowledge        []gax.CallOption
+	Pull               []gax.CallOption
+	StreamingPull      []gax.CallOption
+	ModifyPushConfig   []gax.CallOption
+	ListSnapshots      []gax.CallOption
+	CreateSnapshot     []gax.CallOption
+	UpdateSnapshot     []gax.CallOption
+	DeleteSnapshot     []gax.CallOption
+	Seek               []gax.CallOption
+}
+
+func defaultSubscriberClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("pubsub.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultSubscriberCallOptions() *SubscriberCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+		{"messaging", "pull"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.Canceled,
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.ResourceExhausted,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+		{"streaming_messaging", "pull"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.Canceled,
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.ResourceExhausted,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &SubscriberCallOptions{
+		CreateSubscription: retry[[2]string{"default", "idempotent"}],
+		GetSubscription:    retry[[2]string{"default", "idempotent"}],
+		UpdateSubscription: retry[[2]string{"default", "idempotent"}],
+		ListSubscriptions:  retry[[2]string{"default", "idempotent"}],
+		DeleteSubscription: retry[[2]string{"default", "idempotent"}],
+		ModifyAckDeadline:  retry[[2]string{"default", "non_idempotent"}],
+		Acknowledge:        retry[[2]string{"messaging", "non_idempotent"}],
+		Pull:               retry[[2]string{"messaging", "pull"}],
+		StreamingPull:      retry[[2]string{"streaming_messaging", "pull"}],
+		ModifyPushConfig:   retry[[2]string{"default", "non_idempotent"}],
+		ListSnapshots:      retry[[2]string{"default", "idempotent"}],
+		CreateSnapshot:     retry[[2]string{"default", "idempotent"}],
+		UpdateSnapshot:     retry[[2]string{"default", "idempotent"}],
+		DeleteSnapshot:     retry[[2]string{"default", "idempotent"}],
+		Seek:               retry[[2]string{"default", "non_idempotent"}],
+	}
+}
+
+// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API.
+type SubscriberClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	subscriberClient pubsubpb.SubscriberClient
+
+	// The call options for this service.
+	CallOptions *SubscriberCallOptions
+
+	// The metadata to be sent with each request.
+	Metadata metadata.MD
+}
+
+// NewSubscriberClient creates a new subscriber client.
+//
+// The service that an application uses to manipulate subscriptions and to
+// consume messages from a subscription via the Pull method.
+func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &SubscriberClient{
+		conn:        conn,
+		CallOptions: defaultSubscriberCallOptions(),
+
+		subscriberClient: pubsubpb.NewSubscriberClient(conn),
+	}
+	c.SetGoogleClientInfo()
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *SubscriberClient) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *SubscriberClient) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", version.Go()}, keyval...)
+	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// SubscriberProjectPath returns the path for the project resource.
+func SubscriberProjectPath(project string) string {
+	return "" +
+		"projects/" +
+		project +
+		""
+}
+
+// SubscriberSnapshotPath returns the path for the snapshot resource.
+func SubscriberSnapshotPath(project, snapshot string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/snapshots/" +
+		snapshot +
+		""
+}
+
+// SubscriberSubscriptionPath returns the path for the subscription resource.
+func SubscriberSubscriptionPath(project, subscription string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/subscriptions/" +
+		subscription +
+		""
+}
+
+// SubscriberTopicPath returns the path for the topic resource.
+func SubscriberTopicPath(project, topic string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/topics/" +
+		topic +
+		""
+}
+
+func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), subscription.Name)
+}
+
+func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
+	return iam.InternalNewHandle(c.Connection(), topic.Name)
+}
+
+// CreateSubscription creates a subscription to a given topic.
+// If the subscription already exists, returns ALREADY_EXISTS.
+// If the corresponding topic doesn't exist, returns NOT_FOUND.
+//
+// If the name is not provided in the request, the server will assign a random
+// name for this subscription on the same project as the topic, conforming
+// to the
+// resource name format (at https://cloud.google.com/pubsub/docs/overview#names).
+// The generated name is populated in the returned Subscription object.
+// Note that for REST API requests, you must specify a name in the request.
+func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...)
+	var resp *pubsubpb.Subscription
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetSubscription gets the configuration details of a subscription.
+func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...)
+	var resp *pubsubpb.Subscription
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateSubscription updates an existing subscription. Note that certain properties of a
+// subscription, such as its topic, are not modifiable.
+// NOTE:  The style guide requires body: "subscription" instead of body: "*".
+// Keeping the latter for internal consistency in V1, however it should be
+// corrected in V2.  See
+// https://cloud.google.com/apis/design/standard_methods#update for details.
+func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
+	var resp *pubsubpb.Subscription
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListSubscriptions lists matching subscriptions.
+func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
+	it := &SubscriptionIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {
+		var resp *pubsubpb.ListSubscriptionsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Subscriptions, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// DeleteSubscription deletes an existing subscription. All messages retained in the subscription
+// are immediately dropped. Calls to Pull after deletion will return
+// NOT_FOUND. After a subscription is deleted, a new one may be created with
+// the same name, but the new one has no association with the old
+// subscription or its topic unless the same topic is specified.
+func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful
+// to indicate that more time is needed to process a message by the
+// subscriber, or to make the message available for redelivery if the
+// processing was interrupted. Note that this does not modify the
+// subscription-level ackDeadlineSeconds used for subsequent messages.
+func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// Acknowledge acknowledges the messages associated with the ack_ids in the
+// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages
+// from the subscription.
+//
+// Acknowledging a message whose ack deadline has expired may succeed,
+// but such a message may be redelivered later. Acknowledging a message more
+// than once will not result in an error.
+func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// Pull pulls messages from the server. Returns an empty list if there are no
+// messages available in the backlog. The server may return UNAVAILABLE if
+// there are too many concurrent pull requests pending for the given
+// subscription.
+func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...)
+	var resp *pubsubpb.PullResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
+// respond with UNIMPLEMENTED errors unless you have been invited to test
+// this feature. Contact cloud-pubsub@google.com with any questions.
+//
+// Establishes a stream with the server, which sends messages down to the
+// client. The client streams acknowledgements and ack deadline modifications
+// back to the server. The server will close the stream and return the status
+// on any error. The server may close the stream with status OK to reassign
+// server-side resources, in which case, the client should re-establish the
+// stream. UNAVAILABLE may also be returned in the case of a transient error
+// (e.g., a server restart). These should also be retried by the client. Flow
+// control can be achieved by configuring the underlying RPC channel.
+func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...)
+	var resp pubsubpb.Subscriber_StreamingPullClient
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ModifyPushConfig modifies the PushConfig for a specified subscription.
+//
+// This may be used to change a push subscription to a pull one (signified by
+// an empty PushConfig) or vice versa, or change the endpoint URL and other
+// attributes of a push subscription. Messages will accumulate for delivery
+// continuously through the call regardless of changes to the PushConfig.
+func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// ListSnapshots lists the existing snapshots.
+func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...)
+	it := &SnapshotIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) {
+		var resp *pubsubpb.ListSnapshotsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Snapshots, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// CreateSnapshot creates a snapshot from the requested subscription.
+// If the snapshot already exists, returns ALREADY_EXISTS.
+// If the requested subscription doesn't exist, returns NOT_FOUND.
+//
+// If the name is not provided in the request, the server will assign a random
+// name for this snapshot on the same project as the subscription, conforming
+// to the
+// resource name format (at https://cloud.google.com/pubsub/docs/overview#names).
+// The generated name is populated in the returned Snapshot object.
+// Note that for REST API requests, you must specify a name in the request.
+func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...)
+	var resp *pubsubpb.Snapshot
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateSnapshot updates an existing snapshot. Note that certain properties of a snapshot
+// are not modifiable.
+// NOTE:  The style guide requires body: "snapshot" instead of body: "*".
+// Keeping the latter for internal consistency in V1, however it should be
+// corrected in V2.  See
+// https://cloud.google.com/apis/design/standard_methods#update for details.
+func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...)
+	var resp *pubsubpb.Snapshot
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot
+// are immediately dropped. After a snapshot is deleted, a new one may be
+// created with the same name, but the new one has no association with the old
+// snapshot or its subscription, unless the same subscription is specified.
+func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// Seek seeks an existing subscription to a point in time or to a given snapshot,
+// whichever is provided in the request.
+func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) {
+	ctx = insertMetadata(ctx, c.Metadata)
+	opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...)
+	var resp *pubsubpb.SeekResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SnapshotIterator manages a stream of *pubsubpb.Snapshot.
+type SnapshotIterator struct {
+	items    []*pubsubpb.Snapshot
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) {
+	var item *pubsubpb.Snapshot
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *SnapshotIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *SnapshotIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// SubscriptionIterator manages a stream of *pubsubpb.Subscription.
+type SubscriptionIterator struct {
+	items    []*pubsubpb.Subscription
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) {
+	var item *pubsubpb.Subscription
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *SubscriptionIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *SubscriptionIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go
new file mode 100644
index 0000000..3349f07
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/doc.go
@@ -0,0 +1,120 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
+messages, hiding the details of the underlying server RPCs.  Google Cloud
+Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
+and receivers.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+More information about Google Cloud Pub/Sub is available at
+https://cloud.google.com/pubsub/docs
+
+Publishing
+
+Google Cloud Pub/Sub messages are published to topics. Topics may be created
+using the pubsub package like so:
+
+ topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")
+
+Messages may then be published to a topic:
+
+ res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
+
+Publish queues the message for publishing and returns immediately. When enough
+messages have accumulated, or enough time has elapsed, the batch of messages is
+sent to the Pub/Sub service.
+
+Publish returns a PublishResult, which behaves like a future: its Get method
+blocks until the message has been sent to the service.
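+
+For example, the server-assigned ID of a published message can be retrieved
+like so (a brief sketch; error handling abbreviated):
+
+ id, err := res.Get(ctx)
+ if err != nil {
+	// Handle error.
+ }
+ log.Printf("published a message with ID %s", id)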
+
+The first time you call Publish on a topic, goroutines are started in the
+background. To clean up these goroutines, call Stop:
+
+  topic.Stop()
+
+Receiving
+
+To receive messages published to a topic, clients create subscriptions
+to the topic. There may be more than one subscription per topic; each message
+that is published to the topic will be delivered to all of its subscriptions.
+
+Subscriptions may be created like so:
+
+ sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
+	pubsub.SubscriptionConfig{Topic: topic})
+
+Messages are then consumed from a subscription via callback.
+
+ err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
+ 	log.Printf("Got message: %s", m.Data)
+ 	m.Ack()
+ })
+ if err != nil {
+	// Handle error.
+ }
+
+The callback is invoked concurrently by multiple goroutines, maximizing
+throughput. To terminate a call to Receive, cancel its context.
+
+Once client code has processed the message, it must call Message.Ack, otherwise
+the message will eventually be redelivered. As an optimization, if the client
+cannot or doesn't want to process the message, it can call Message.Nack to
+speed redelivery. For more information and configuration options, see
+"Deadlines" below.
+
+Note: It is possible for Messages to be redelivered, even if Message.Ack has
+been called. Client code must be robust to multiple deliveries of messages.
+
+Deadlines
+
+The default pubsub deadlines are suitable for most use cases, but may be
+overridden.  This section describes the tradeoffs that should be considered
+when overriding the defaults.
+
+Behind the scenes, each message returned by the Pub/Sub server has an
+associated lease, known as an "ACK deadline".
+Unless a message is acknowledged within the ACK deadline, or the client requests that
+the ACK deadline be extended, the message will become eligible for redelivery.
+As a convenience, the pubsub package will automatically extend deadlines until
+either:
+ * Message.Ack or Message.Nack is called, or
+ * the "MaxExtension" period elapses from the time the message is fetched from the server.
+
+The initial ACK deadline given to each message defaults to 10 seconds, but may
+be overridden during subscription creation.  Selecting an ACK deadline is a
+tradeoff between message redelivery latency and RPC volume. If the pubsub
+package fails to acknowledge or extend a message (e.g. due to unexpected
+termination of the process), a shorter ACK deadline will generally result in
+faster message redelivery by the Pub/Sub system. However, a short ACK deadline
+may also increase the number of deadline extension RPCs that the pubsub package
+sends to the server.
+
+The default max extension period is DefaultReceiveSettings.MaxExtension, and can
+be overridden by setting Subscription.ReceiveSettings.MaxExtension. Selecting a
+max extension period is a tradeoff between the speed at which client code must
+process messages, and the redelivery delay if messages fail to be acknowledged
+(e.g. because client code neglects to do so). Using a large MaxExtension
+increases the available time for client code to process messages. However, if
+the client code neglects to call Message.Ack/Nack, a large MaxExtension will
+increase the delay before the message is redelivered.
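+
+For example, the defaults might be overridden like so (an illustrative sketch;
+the values shown are not recommendations):
+
+ sub, err := pubsubClient.CreateSubscription(ctx, "sub-name", pubsub.SubscriptionConfig{
+ 	Topic:       topic,
+ 	AckDeadline: 20 * time.Second,
+ })
+ if err != nil {
+	// Handle error.
+ }
+ sub.ReceiveSettings.MaxExtension = 20 * time.Minute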
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package pubsub
diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go
new file mode 100644
index 0000000..0fd7bd6
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go
@@ -0,0 +1,106 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"golang.org/x/net/context"
+	"golang.org/x/sync/semaphore"
+)
+
+// flowController implements flow control for Subscription.Receive.
+type flowController struct {
+	maxSize           int                 // max total size of messages
+	semCount, semSize *semaphore.Weighted // enforces max number and size of messages
+}
+
+// newFlowController creates a new flowController that ensures no more than
+// maxCount messages or maxSize bytes are outstanding at once. If maxCount or
+// maxSize is < 1, then an unlimited number of messages or bytes is permitted,
+// respectively.
+func newFlowController(maxCount, maxSize int) *flowController {
+	fc := &flowController{
+		maxSize:  maxSize,
+		semCount: nil,
+		semSize:  nil,
+	}
+	if maxCount > 0 {
+		fc.semCount = semaphore.NewWeighted(int64(maxCount))
+	}
+	if maxSize > 0 {
+		fc.semSize = semaphore.NewWeighted(int64(maxSize))
+	}
+	return fc
+}
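+
+// As an illustrative sketch, callers bracket work on a message with acquire and
+// release (ctx and msgSize are placeholders):
+//
+//	fc := newFlowController(1000, 1e9)
+//	if err := fc.acquire(ctx, msgSize); err != nil {
+//		return err
+//	}
+//	defer fc.release(msgSize)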
+
+// acquire blocks until one message of size bytes can proceed or ctx is done.
+// It returns nil in the first case, or ctx.Err() in the second.
+//
+// acquire allows large messages to proceed by treating a size greater than maxSize
+// as if it were equal to maxSize.
+func (f *flowController) acquire(ctx context.Context, size int) error {
+	if f.semCount != nil {
+		if err := f.semCount.Acquire(ctx, 1); err != nil {
+			return err
+		}
+	}
+	if f.semSize != nil {
+		if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil {
+			if f.semCount != nil {
+				f.semCount.Release(1)
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// tryAcquire returns false if acquire would block. Otherwise, it behaves like
+// acquire and returns true.
+//
+// tryAcquire allows large messages to proceed by treating a size greater than
+// maxSize as if it were equal to maxSize.
+func (f *flowController) tryAcquire(size int) bool {
+	if f.semCount != nil {
+		if !f.semCount.TryAcquire(1) {
+			return false
+		}
+	}
+	if f.semSize != nil {
+		if !f.semSize.TryAcquire(f.bound(size)) {
+			if f.semCount != nil {
+				f.semCount.Release(1)
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// release notes that one message of size bytes is no longer outstanding.
+func (f *flowController) release(size int) {
+	if f.semCount != nil {
+		f.semCount.Release(1)
+	}
+	if f.semSize != nil {
+		f.semSize.Release(f.bound(size))
+	}
+}
+
+func (f *flowController) bound(size int) int64 {
+	if size > f.maxSize {
+		return int64(f.maxSize)
+	}
+	return int64(size)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go
new file mode 100644
index 0000000..12d1f07
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/iterator.go
@@ -0,0 +1,271 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+// newMessageIterator starts a new streamingMessageIterator. stop must be called
+// on the iterator when it is no longer needed.
+// subName is the full name of the subscription to pull messages from.
+// ctx is the context to use for acking messages and extending message deadlines.
+func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *streamingMessageIterator {
+	sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds()))
+	_ = sp.open() // error stored in sp
+	return newStreamingMessageIterator(ctx, sp, po)
+}
+
+type streamingMessageIterator struct {
+	ctx        context.Context
+	po         *pullOptions
+	sp         *streamingPuller
+	kaTicker   *time.Ticker  // keep-alive (deadline extensions)
+	ackTicker  *time.Ticker  // message acks
+	nackTicker *time.Ticker  // message nacks (more frequent than acks)
+	failed     chan struct{} // closed on stream error
+	stopped    chan struct{} // closed when Stop is called
+	drained    chan struct{} // closed when stopped && no more pending messages
+	wg         sync.WaitGroup
+
+	mu                 sync.Mutex
+	keepAliveDeadlines map[string]time.Time
+	pendingReq         *pb.StreamingPullRequest
+	err                error // error from stream failure
+}
+
+func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator {
+	// TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a
+	// reasonable default for now, because the minimum ack period is 10s. This
+	// gives us 5s grace.
+	keepAlivePeriod := po.ackDeadline - 5*time.Second
+	kaTicker := time.NewTicker(keepAlivePeriod)
+
+	// Ack promptly so users don't lose work if client crashes.
+	ackTicker := time.NewTicker(100 * time.Millisecond)
+	nackTicker := time.NewTicker(100 * time.Millisecond)
+	it := &streamingMessageIterator{
+		ctx:                ctx,
+		sp:                 sp,
+		po:                 po,
+		kaTicker:           kaTicker,
+		ackTicker:          ackTicker,
+		nackTicker:         nackTicker,
+		failed:             make(chan struct{}),
+		stopped:            make(chan struct{}),
+		drained:            make(chan struct{}),
+		keepAliveDeadlines: map[string]time.Time{},
+		pendingReq:         &pb.StreamingPullRequest{},
+	}
+	it.wg.Add(1)
+	go it.sender()
+	return it
+}
+
+// Subscription.receive will call stop on its streamingMessageIterator when finished with it.
+// stop will block until all pending messages have been acked, nacked, or expired,
+// or until the context with which the iterator was created is cancelled or
+// exceeds its deadline.
+func (it *streamingMessageIterator) stop() {
+	it.mu.Lock()
+	select {
+	case <-it.stopped:
+	default:
+		close(it.stopped)
+	}
+	it.checkDrained()
+	it.mu.Unlock()
+	it.wg.Wait()
+}
+
+// checkDrained closes the drained channel if the iterator has been stopped and all
+// pending messages have either been n/acked or expired.
+//
+// Called with the lock held.
+func (it *streamingMessageIterator) checkDrained() {
+	select {
+	case <-it.drained:
+		return
+	default:
+	}
+	select {
+	case <-it.stopped:
+		if len(it.keepAliveDeadlines) == 0 {
+			close(it.drained)
+		}
+	default:
+	}
+}
+
+// Called when a message is acked/nacked.
+func (it *streamingMessageIterator) done(ackID string, ack bool) {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+	delete(it.keepAliveDeadlines, ackID)
+	if ack {
+		it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID)
+	} else {
+		it.addDeadlineMod(ackID, 0) // Nack indicated by modifying the deadline to zero.
+	}
+	it.checkDrained()
+}
+
+// addDeadlineMod adds the ack ID to the pending request with the given deadline.
+//
+// Called with the lock held.
+func (it *streamingMessageIterator) addDeadlineMod(ackID string, deadlineSecs int32) {
+	pr := it.pendingReq
+	pr.ModifyDeadlineAckIds = append(pr.ModifyDeadlineAckIds, ackID)
+	pr.ModifyDeadlineSeconds = append(pr.ModifyDeadlineSeconds, deadlineSecs)
+}
+
+// fail is called when a stream method returns a permanent error.
+func (it *streamingMessageIterator) fail(err error) {
+	it.mu.Lock()
+	if it.err == nil {
+		it.err = err
+		close(it.failed)
+	}
+	it.mu.Unlock()
+}
+
+// receive makes a call to the stream's Recv method and returns
+// its messages.
+func (it *streamingMessageIterator) receive() ([]*Message, error) {
+	// Stop retrieving messages if the context is done, the stream
+	// failed, or the iterator's Stop method was called.
+	select {
+	case <-it.ctx.Done():
+		return nil, it.ctx.Err()
+	default:
+	}
+	it.mu.Lock()
+	err := it.err
+	it.mu.Unlock()
+	if err != nil {
+		return nil, err
+	}
+	// Receive messages from stream. This may block indefinitely.
+	msgs, err := it.sp.fetchMessages()
+	// The streamingPuller handles retries, so any error here
+	// is fatal.
+	if err != nil {
+		it.fail(err)
+		return nil, err
+	}
+	// We received some messages. Remember them so we can
+	// keep them alive.
+	deadline := time.Now().Add(it.po.maxExtension)
+	it.mu.Lock()
+	for _, m := range msgs {
+		m.doneFunc = it.done
+		it.keepAliveDeadlines[m.ackID] = deadline
+	}
+	it.mu.Unlock()
+	return msgs, nil
+}
+
+// sender runs in a goroutine and handles all sends to the stream.
+func (it *streamingMessageIterator) sender() {
+	defer it.wg.Done()
+	defer it.kaTicker.Stop()
+	defer it.ackTicker.Stop()
+	defer it.nackTicker.Stop()
+	defer it.sp.closeSend()
+
+	done := false
+	for !done {
+		send := false
+		select {
+		case <-it.ctx.Done():
+			// Context canceled or timed out: stop immediately, without
+			// another RPC.
+			return
+
+		case <-it.failed:
+			// Stream failed: nothing to do, so stop immediately.
+			return
+
+		case <-it.drained:
+			// All outstanding messages have been marked done:
+			// nothing left to do except send the final request.
+			it.mu.Lock()
+			send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingReq.ModifyDeadlineAckIds) > 0)
+			done = true
+
+		case <-it.kaTicker.C:
+			it.mu.Lock()
+			send = it.handleKeepAlives()
+
+		case <-it.nackTicker.C:
+			it.mu.Lock()
+			send = (len(it.pendingReq.ModifyDeadlineAckIds) > 0)
+
+		case <-it.ackTicker.C:
+			it.mu.Lock()
+			send = (len(it.pendingReq.AckIds) > 0)
+
+		}
+		// Lock is held here.
+		if send {
+			req := it.pendingReq
+			it.pendingReq = &pb.StreamingPullRequest{}
+			it.mu.Unlock()
+			err := it.sp.send(req)
+			if err != nil {
+				// The streamingPuller handles retries, so any error here
+				// is fatal to the iterator.
+				it.fail(err)
+				return
+			}
+		} else {
+			it.mu.Unlock()
+		}
+	}
+}
+
+// handleKeepAlives modifies the pending request to include deadline extensions
+// for live messages. It also purges expired messages. It reports whether
+// there were any live messages.
+//
+// Called with the lock held.
+func (it *streamingMessageIterator) handleKeepAlives() bool {
+	live, expired := getKeepAliveAckIDs(it.keepAliveDeadlines)
+	for _, e := range expired {
+		delete(it.keepAliveDeadlines, e)
+	}
+	dl := trunc32(int64(it.po.ackDeadline.Seconds()))
+	for _, m := range live {
+		it.addDeadlineMod(m, dl)
+	}
+	it.checkDrained()
+	return len(live) > 0
+}
+
+func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) {
+	now := time.Now()
+	for id, expiry := range items {
+		if expiry.Before(now) {
+			expired = append(expired, id)
+		} else {
+			live = append(live, id)
+		}
+	}
+	return live, expired
+}
diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go
new file mode 100644
index 0000000..f6bb5e0
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/message.go
@@ -0,0 +1,97 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+// Message represents a Pub/Sub message.
+type Message struct {
+	// ID identifies this message.
+	// This ID is assigned by the server and is populated for Messages obtained from a subscription.
+	// This field is read-only.
+	ID string
+
+	// Data is the actual data in the message.
+	Data []byte
+
+	// Attributes represents the key-value pairs the current message
+	// is labelled with.
+	Attributes map[string]string
+
+	// ackID is the identifier to acknowledge this message.
+	ackID string
+
+	// The time at which the message was published.
+	// This is populated by the server for Messages obtained from a subscription.
+	// This field is read-only.
+	PublishTime time.Time
+
+	// size is the approximate size of the message's data and attributes.
+	size int
+
+	calledDone bool
+
+	// The done method of the iterator that created this Message.
+	doneFunc func(string, bool)
+}
+
+func toMessage(resp *pb.ReceivedMessage) (*Message, error) {
+	if resp.Message == nil {
+		return &Message{ackID: resp.AckId}, nil
+	}
+
+	pubTime, err := ptypes.Timestamp(resp.Message.PublishTime)
+	if err != nil {
+		return nil, err
+	}
+	return &Message{
+		ackID:       resp.AckId,
+		Data:        resp.Message.Data,
+		Attributes:  resp.Message.Attributes,
+		ID:          resp.Message.MessageId,
+		PublishTime: pubTime,
+	}, nil
+}
+
+// Ack indicates successful processing of a Message passed to the Subscription.Receive callback.
+// It should not be called on any other Message value.
+// If message acknowledgement fails, the Message will be redelivered.
+// Client code must call Ack or Nack when finished for each received Message.
+// Calls to Ack or Nack have no effect after the first call.
+func (m *Message) Ack() {
+	m.done(true)
+}
+
+// Nack indicates that the client will not or cannot process a Message passed to the Subscription.Receive callback.
+// It should not be called on any other Message value.
+// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
+// Client code must call Ack or Nack when finished for each received Message.
+// Calls to Ack or Nack have no effect after the first call.
+func (m *Message) Nack() {
+	m.done(false)
+}
+
+func (m *Message) done(ack bool) {
+	if m.calledDone {
+		return
+	}
+	m.calledDone = true
+	m.doneFunc(m.ackID, ack)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go
new file mode 100644
index 0000000..6f88b25
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/pubsub.go
@@ -0,0 +1,149 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"time"
+
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	// ScopePubSub grants permissions to view and manage Pub/Sub
+	// topics and subscriptions.
+	ScopePubSub = "https://www.googleapis.com/auth/pubsub"
+
+	// ScopeCloudPlatform grants permissions to view and manage your data
+	// across Google Cloud Platform services.
+	ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
+)
+
+const prodAddr = "https://pubsub.googleapis.com/"
+
+// Client is a Google Pub/Sub client scoped to a single project.
+//
+// Clients should be reused rather than being created as needed.
+// A Client may be shared by multiple goroutines.
+type Client struct {
+	projectID string
+	s         service
+}
+
+// NewClient creates a new PubSub client.
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+	var o []option.ClientOption
+	// Environment variables for gcloud emulator:
+	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
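+	// For example, setting PUBSUB_EMULATOR_HOST=localhost:8085 (an illustrative
+	// address) directs the client at a local emulator rather than the production API.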
+	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
+		conn, err := grpc.Dial(addr, grpc.WithInsecure())
+		if err != nil {
+			return nil, fmt.Errorf("grpc.Dial: %v", err)
+		}
+		o = []option.ClientOption{option.WithGRPCConn(conn)}
+	} else {
+		o = []option.ClientOption{
+			// Create multiple connections to increase throughput.
+			option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
+
+			// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
+			// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
+			option.WithGRPCDialOption(grpc.WithBlock()),
+
+			option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{
+				Time: 5 * time.Minute,
+			})),
+		}
+	}
+	o = append(o, opts...)
+	s, err := newPubSubService(ctx, o)
+	if err != nil {
+		return nil, fmt.Errorf("constructing pubsub client: %v", err)
+	}
+
+	c := &Client{
+		projectID: projectID,
+		s:         s,
+	}
+
+	return c, nil
+}
+
+// Close closes any resources held by the client.
+//
+// Close need not be called at program exit.
+func (c *Client) Close() error {
+	return c.s.close()
+}
+
+func (c *Client) fullyQualifiedProjectName() string {
+	return fmt.Sprintf("projects/%s", c.projectID)
+}
+
+// pageToken stores the next page token for a server response which is split over multiple pages.
+type pageToken struct {
+	tok      string
+	explicit bool
+}
+
+func (pt *pageToken) set(tok string) {
+	pt.tok = tok
+	pt.explicit = true
+}
+
+func (pt *pageToken) get() string {
+	return pt.tok
+}
+
+// more returns whether further pages should be fetched from the server.
+func (pt *pageToken) more() bool {
+	return pt.tok != "" || !pt.explicit
+}
+
+// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings.
+type stringsIterator struct {
+	ctx     context.Context
+	strings []string
+	token   pageToken
+	fetch   func(ctx context.Context, tok string) (*stringsPage, error)
+}
+
+// Next returns the next string. If there are no more strings, iterator.Done will be returned.
+func (si *stringsIterator) Next() (string, error) {
+	for len(si.strings) == 0 && si.token.more() {
+		page, err := si.fetch(si.ctx, si.token.get())
+		if err != nil {
+			return "", err
+		}
+		si.token.set(page.tok)
+		si.strings = page.strings
+	}
+
+	if len(si.strings) == 0 {
+		return "", iterator.Done
+	}
+
+	s := si.strings[0]
+	si.strings = si.strings[1:]
+
+	return s, nil
+}
diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go
new file mode 100644
index 0000000..4d57d1b
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/service.go
@@ -0,0 +1,598 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+
+	"cloud.google.com/go/iam"
+	"cloud.google.com/go/internal/version"
+	vkit "cloud.google.com/go/pubsub/apiv1"
+	durpb "github.com/golang/protobuf/ptypes/duration"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type nextStringFunc func() (string, error)
+type nextSnapshotFunc func() (*snapshotConfig, error)
+
+// service provides an internal abstraction to isolate the generated
+// PubSub API; most of this package uses this interface instead.
+// The single implementation, *apiService, contains all the knowledge
+// of the generated PubSub API (except for that present in legacy code).
+type service interface {
+	createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error
+	getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error)
+	listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc
+	deleteSubscription(ctx context.Context, name string) error
+	subscriptionExists(ctx context.Context, name string) (bool, error)
+	modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error
+
+	createTopic(ctx context.Context, name string) error
+	deleteTopic(ctx context.Context, name string) error
+	topicExists(ctx context.Context, name string) (bool, error)
+	listProjectTopics(ctx context.Context, projName string) nextStringFunc
+	listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc
+
+	modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error
+	fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error)
+	publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error)
+
+	// splitAckIDs divides ackIDs into
+	//  * a batch of a size which is suitable for passing to acknowledge or
+	//    modifyAckDeadline, and
+	//  * the rest.
+	splitAckIDs(ackIDs []string) ([]string, []string)
+
+	// acknowledge ACKs the IDs in ackIDs.
+	acknowledge(ctx context.Context, subName string, ackIDs []string) error
+
+	iamHandle(resourceName string) *iam.Handle
+
+	newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller
+
+	createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error)
+	deleteSnapshot(ctx context.Context, snapName string) error
+	listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc
+
+	// TODO(pongad): Raw proto returns an empty SeekResponse; figure out if we want to return it before GA.
+	seekToTime(ctx context.Context, subName string, t time.Time) error
+	seekToSnapshot(ctx context.Context, subName, snapName string) error
+
+	close() error
+}
+
+type apiService struct {
+	pubc *vkit.PublisherClient
+	subc *vkit.SubscriberClient
+}
+
+func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) {
+	pubc, err := vkit.NewPublisherClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection()))
+	if err != nil {
+		_ = pubc.Close() // ignore error
+		return nil, err
+	}
+	pubc.SetGoogleClientInfo("gccl", version.Repo)
+	subc.SetGoogleClientInfo("gccl", version.Repo)
+	return &apiService{pubc: pubc, subc: subc}, nil
+}
+
+func (s *apiService) close() error {
+	// Return the first error, because the first call closes the connection.
+	err := s.pubc.Close()
+	_ = s.subc.Close()
+	return err
+}
+
+func (s *apiService) createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error {
+	var rawPushConfig *pb.PushConfig
+	if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 {
+		rawPushConfig = &pb.PushConfig{
+			Attributes:   cfg.PushConfig.Attributes,
+			PushEndpoint: cfg.PushConfig.Endpoint,
+		}
+	}
+	var retentionDuration *durpb.Duration
+	if cfg.retentionDuration != 0 {
+		retentionDuration = ptypes.DurationProto(cfg.retentionDuration)
+	}
+
+	_, err := s.subc.CreateSubscription(ctx, &pb.Subscription{
+		Name:                     subName,
+		Topic:                    cfg.Topic.name,
+		PushConfig:               rawPushConfig,
+		AckDeadlineSeconds:       trunc32(int64(cfg.AckDeadline.Seconds())),
+		RetainAckedMessages:      cfg.retainAckedMessages,
+		MessageRetentionDuration: retentionDuration,
+	})
+	return err
+}
+
+func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) {
+	rawSub, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: subName})
+	if err != nil {
+		return SubscriptionConfig{}, "", err
+	}
+	var rd time.Duration
+	// TODO(pongad): Remove nil-check after white list is removed.
+	if rawSub.MessageRetentionDuration != nil {
+		if rd, err = ptypes.Duration(rawSub.MessageRetentionDuration); err != nil {
+			return SubscriptionConfig{}, "", err
+		}
+	}
+	sub := SubscriptionConfig{
+		AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds),
+		PushConfig: PushConfig{
+			Endpoint:   rawSub.PushConfig.PushEndpoint,
+			Attributes: rawSub.PushConfig.Attributes,
+		},
+		retainAckedMessages: rawSub.RetainAckedMessages,
+		retentionDuration:   rd,
+	}
+	return sub, rawSub.Topic, nil
+}
+
+// stringsPage contains a list of strings and a token for fetching the next page.
+type stringsPage struct {
+	strings []string
+	tok     string
+}
+
+func (s *apiService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc {
+	it := s.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
+		Project: projName,
+	})
+	return func() (string, error) {
+		sub, err := it.Next()
+		if err != nil {
+			return "", err
+		}
+		return sub.Name, nil
+	}
+}
+
+func (s *apiService) deleteSubscription(ctx context.Context, name string) error {
+	return s.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: name})
+}
+
+func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) {
+	_, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: name})
+	if err == nil {
+		return true, nil
+	}
+	if grpc.Code(err) == codes.NotFound {
+		return false, nil
+	}
+	return false, err
+}
+
+func (s *apiService) createTopic(ctx context.Context, name string) error {
+	_, err := s.pubc.CreateTopic(ctx, &pb.Topic{Name: name})
+	return err
+}
+
+func (s *apiService) listProjectTopics(ctx context.Context, projName string) nextStringFunc {
+	it := s.pubc.ListTopics(ctx, &pb.ListTopicsRequest{
+		Project: projName,
+	})
+	return func() (string, error) {
+		topic, err := it.Next()
+		if err != nil {
+			return "", err
+		}
+		return topic.Name, nil
+	}
+}
+
+func (s *apiService) deleteTopic(ctx context.Context, name string) error {
+	return s.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: name})
+}
+
+func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) {
+	_, err := s.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: name})
+	if err == nil {
+		return true, nil
+	}
+	if grpc.Code(err) == codes.NotFound {
+		return false, nil
+	}
+	return false, err
+}
+
+func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc {
+	it := s.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
+		Topic: topicName,
+	})
+	return it.Next
+}
+
+func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+	return s.subc.ModifyAckDeadline(ctx, &pb.ModifyAckDeadlineRequest{
+		Subscription:       subName,
+		AckIds:             ackIDs,
+		AckDeadlineSeconds: trunc32(int64(deadline.Seconds())),
+	})
+}
+
+// maxPayload is the maximum number of bytes to devote to actual ids in
+// acknowledgement or modifyAckDeadline requests. A serialized
+// AcknowledgeRequest proto has a small constant overhead, plus the size of the
+// subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A
+// ModifyAckDeadlineRequest has an additional few bytes for the deadline. We
+// don't know the subscription name here, so we just assume the size exclusive
+// of ids is 100 bytes.
+//
+// With gRPC there is no way for the client to know the server's max message size (it is
+// configurable on the server). We know from experience that it
+// is 512K.
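+//
+// As an illustrative estimate, with 200-byte ack IDs a single request can hold
+// roughly (512*1024 - 100) / (200 + 3), i.e. about 2,580 IDs.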
+const (
+	maxPayload       = 512 * 1024
+	reqFixedOverhead = 100
+	overheadPerID    = 3
+	maxSendRecvBytes = 20 * 1024 * 1024 // 20M
+)
+
+// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data.
+func (s *apiService) splitAckIDs(ids []string) ([]string, []string) {
+	total := reqFixedOverhead
+	for i, id := range ids {
+		total += len(id) + overheadPerID
+		if total > maxPayload {
+			return ids[:i], ids[i:]
+		}
+	}
+	return ids, nil
+}
+
+func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
+	return s.subc.Acknowledge(ctx, &pb.AcknowledgeRequest{
+		Subscription: subName,
+		AckIds:       ackIDs,
+	})
+}
+
+func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
+	resp, err := s.subc.Pull(ctx, &pb.PullRequest{
+		Subscription: subName,
+		MaxMessages:  maxMessages,
+	}, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
+	if err != nil {
+		return nil, err
+	}
+	return convertMessages(resp.ReceivedMessages)
+}
+
+func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) {
+	msgs := make([]*Message, 0, len(rms))
+	for i, m := range rms {
+		msg, err := toMessage(m)
+		if err != nil {
+			return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
+		}
+		msgs = append(msgs, msg)
+	}
+	return msgs, nil
+}
+
+func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) {
+	rawMsgs := make([]*pb.PubsubMessage, len(msgs))
+	for i, msg := range msgs {
+		rawMsgs[i] = &pb.PubsubMessage{
+			Data:       msg.Data,
+			Attributes: msg.Attributes,
+		}
+	}
+	resp, err := s.pubc.Publish(ctx, &pb.PublishRequest{
+		Topic:    topicName,
+		Messages: rawMsgs,
+	}, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)))
+	if err != nil {
+		return nil, err
+	}
+	return resp.MessageIds, nil
+}
+
+func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error {
+	return s.subc.ModifyPushConfig(ctx, &pb.ModifyPushConfigRequest{
+		Subscription: subName,
+		PushConfig: &pb.PushConfig{
+			Attributes:   conf.Attributes,
+			PushEndpoint: conf.Endpoint,
+		},
+	})
+}
+
+func (s *apiService) iamHandle(resourceName string) *iam.Handle {
+	return iam.InternalNewHandle(s.pubc.Connection(), resourceName)
+}
+
+func trunc32(i int64) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller {
+	p := &streamingPuller{
+		ctx:             ctx,
+		subName:         subName,
+		ackDeadlineSecs: ackDeadlineSecs,
+		subc:            s.subc,
+	}
+	p.c = sync.NewCond(&p.mu)
+	return p
+}
+
+type streamingPuller struct {
+	ctx             context.Context
+	subName         string
+	ackDeadlineSecs int32
+	subc            *vkit.SubscriberClient
+
+	mu       sync.Mutex
+	c        *sync.Cond
+	inFlight bool
+	closed   bool // set after CloseSend called
+	spc      pb.Subscriber_StreamingPullClient
+	err      error
+}
+
+// open establishes (or re-establishes) a stream for pulling messages.
+// It takes care that only one RPC is in flight at a time.
+func (p *streamingPuller) open() error {
+	p.c.L.Lock()
+	defer p.c.L.Unlock()
+	p.openLocked()
+	return p.err
+}
+
+func (p *streamingPuller) openLocked() {
+	if p.inFlight {
+		// Another goroutine is opening; wait for it.
+		for p.inFlight {
+			p.c.Wait()
+		}
+		return
+	}
+	// No opens in flight; start one.
+	// Keep the lock held, to avoid a race where we
+	// close the old stream while opening a new one.
+	p.inFlight = true
+	spc, err := p.subc.StreamingPull(p.ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
+	if err == nil {
+		err = spc.Send(&pb.StreamingPullRequest{
+			Subscription:             p.subName,
+			StreamAckDeadlineSeconds: p.ackDeadlineSecs,
+		})
+	}
+	p.spc = spc
+	p.err = err
+	p.inFlight = false
+	p.c.Broadcast()
+}
+
+func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error {
+	p.c.L.Lock()
+	defer p.c.L.Unlock()
+	// Wait for an open in flight.
+	for p.inFlight {
+		p.c.Wait()
+	}
+	var err error
+	var bo gax.Backoff
+	for {
+		select {
+		case <-p.ctx.Done():
+			p.err = p.ctx.Err()
+		default:
+		}
+		if p.err != nil {
+			return p.err
+		}
+		spc := p.spc
+		// Do not call f with the lock held. Only one goroutine calls Send
+		// (streamingMessageIterator.sender) and only one calls Recv
+		// (streamingMessageIterator.receiver). If we locked, then a
+		// blocked Recv would prevent a Send from happening.
+		p.c.L.Unlock()
+		err = f(spc)
+		p.c.L.Lock()
+		if !p.closed && err != nil && isRetryable(err) {
+			// Sleep with exponential backoff. Normally we wouldn't hold the lock while sleeping,
+			// but here it can't do any harm, since the stream is broken anyway.
+			gax.Sleep(p.ctx, bo.Pause())
+			p.openLocked()
+			continue
+		}
+		// Not an error, or not a retryable error; stop retrying.
+		p.err = err
+		return err
+	}
+}
+
+// Logic from https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java.
+func isRetryable(err error) bool {
+	s, ok := status.FromError(err)
+	if !ok { // includes io.EOF, normal stream close, which causes us to reopen
+		return true
+	}
+	switch s.Code() {
+	case codes.DeadlineExceeded, codes.Internal, codes.Canceled, codes.ResourceExhausted:
+		return true
+	case codes.Unavailable:
+		return !strings.Contains(s.Message(), "Server shutdownNow invoked")
+	default:
+		return false
+	}
+}
+
+func (p *streamingPuller) fetchMessages() ([]*Message, error) {
+	var res *pb.StreamingPullResponse
+	err := p.call(func(spc pb.Subscriber_StreamingPullClient) error {
+		var err error
+		res, err = spc.Recv()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return convertMessages(res.ReceivedMessages)
+}
+
+func (p *streamingPuller) send(req *pb.StreamingPullRequest) error {
+	// Note: len(req.ModifyDeadlineAckIds) == len(req.ModifyDeadlineSeconds)
+	var rest *pb.StreamingPullRequest
+	for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 {
+		req, rest = splitRequest(req, maxPayload)
+		err := p.call(func(spc pb.Subscriber_StreamingPullClient) error {
+			x := spc.Send(req)
+			return x
+		})
+		if err != nil {
+			return err
+		}
+		req = rest
+	}
+	return nil
+}
+
+func (p *streamingPuller) closeSend() {
+	p.mu.Lock()
+	p.closed = true
+	p.spc.CloseSend()
+	p.mu.Unlock()
+}
+
+// Split req into a prefix that is smaller than maxSize, and a remainder.
+func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) {
+	const int32Bytes = 4
+
+	// Copy all fields before splitting the variable-sized ones.
+	remainder = &pb.StreamingPullRequest{}
+	*remainder = *req
+	// Split message so it isn't too big.
+	size := reqFixedOverhead
+	i := 0
+	for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) {
+		if i < len(req.AckIds) {
+			size += overheadPerID + len(req.AckIds[i])
+		}
+		if i < len(req.ModifyDeadlineAckIds) {
+			size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes
+		}
+		i++
+	}
+
+	min := func(a, b int) int {
+		if a < b {
+			return a
+		}
+		return b
+	}
+
+	j := i
+	if size > maxSize {
+		j--
+	}
+	k := min(j, len(req.AckIds))
+	remainder.AckIds = req.AckIds[k:]
+	req.AckIds = req.AckIds[:k]
+	k = min(j, len(req.ModifyDeadlineAckIds))
+	remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:]
+	remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:]
+	req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k]
+	req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k]
+	return req, remainder
+}
+
+func (s *apiService) createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) {
+	snap, err := s.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{
+		Name:         snapName,
+		Subscription: subName,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return s.toSnapshotConfig(snap)
+}
+
+func (s *apiService) deleteSnapshot(ctx context.Context, snapName string) error {
+	return s.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: snapName})
+}
+
+func (s *apiService) listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc {
+	it := s.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{
+		Project: projName,
+	})
+	return func() (*snapshotConfig, error) {
+		snap, err := it.Next()
+		if err != nil {
+			return nil, err
+		}
+		return s.toSnapshotConfig(snap)
+	}
+}
+
+func (s *apiService) toSnapshotConfig(snap *pb.Snapshot) (*snapshotConfig, error) {
+	exp, err := ptypes.Timestamp(snap.ExpireTime)
+	if err != nil {
+		return nil, err
+	}
+	return &snapshotConfig{
+		snapshot: &snapshot{
+			s:    s,
+			name: snap.Name,
+		},
+		Topic:      newTopic(s, snap.Topic),
+		Expiration: exp,
+	}, nil
+}
+
+func (s *apiService) seekToTime(ctx context.Context, subName string, t time.Time) error {
+	ts, err := ptypes.TimestampProto(t)
+	if err != nil {
+		return err
+	}
+	_, err = s.subc.Seek(ctx, &pb.SeekRequest{
+		Subscription: subName,
+		Target:       &pb.SeekRequest_Time{ts},
+	})
+	return err
+}
+
+func (s *apiService) seekToSnapshot(ctx context.Context, subName, snapName string) error {
+	_, err := s.subc.Seek(ctx, &pb.SeekRequest{
+		Subscription: subName,
+		Target:       &pb.SeekRequest_Snapshot{snapName},
+	})
+	return err
+}
diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go
new file mode 100644
index 0000000..77115b5
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/snapshot.go
@@ -0,0 +1,119 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"strings"
+	"time"
+
+	vkit "cloud.google.com/go/pubsub/apiv1"
+	"golang.org/x/net/context"
+)
+
+// snapshot is a reference to a PubSub snapshot.
+type snapshot struct {
+	s service
+
+	// The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
+	name string
+}
+
+// ID returns the unique identifier of the snapshot within its project.
+func (s *snapshot) ID() string {
+	slash := strings.LastIndex(s.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad snapshot name")
+	}
+	return s.name[slash+1:]
+}
+
+// snapshotConfig contains the details of a snapshot.
+type snapshotConfig struct {
+	*snapshot
+	Topic      *Topic
+	Expiration time.Time
+}
+
+// snapshot creates a reference to a snapshot.
+func (c *Client) snapshot(id string) *snapshot {
+	return &snapshot{
+		s:    c.s,
+		name: vkit.SubscriberSnapshotPath(c.projectID, id),
+	}
+}
+
+// snapshots returns an iterator which returns snapshots for this project.
+func (c *Client) snapshots(ctx context.Context) *snapshotConfigIterator {
+	return &snapshotConfigIterator{
+		next: c.s.listProjectSnapshots(ctx, c.fullyQualifiedProjectName()),
+	}
+}
+
+// snapshotConfigIterator is an iterator that returns a series of snapshots.
+type snapshotConfigIterator struct {
+	next nextSnapshotFunc
+}
+
+// Next returns the next snapshotConfig. Its second return value is iterator.Done if there are no more results.
+// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
+func (snaps *snapshotConfigIterator) Next() (*snapshotConfig, error) {
+	return snaps.next()
+}
+
+// delete deletes the snapshot.
+func (snap *snapshot) delete(ctx context.Context) error {
+	return snap.s.deleteSnapshot(ctx, snap.name)
+}
+
+// seekToTime seeks the subscription to a point in time.
+//
+// Messages retained in the subscription that were published before this
+// time are marked as acknowledged, and messages retained in the
+// subscription that were published after this time are marked as
+// unacknowledged. Note that this operation affects only those messages
+// retained in the subscription (configured by SnapshotConfig). For example,
+// if `time` corresponds to a point before the message retention
+// window (or to a point before the system's notion of the subscription
+// creation time), only retained messages will be marked as unacknowledged,
+// and already-expunged messages will not be restored.
+func (s *Subscription) seekToTime(ctx context.Context, t time.Time) error {
+	return s.s.seekToTime(ctx, s.name, t)
+}
+
+// createSnapshot creates a new snapshot from this subscription.
+// The snapshot will be for the topic this subscription is subscribed to.
+// If the name is an empty string, a unique name is assigned.
+//
+// The created snapshot is guaranteed to retain:
+//  (a) The existing backlog on the subscription. More precisely, this is
+//      defined as the messages in the subscription's backlog that are
+//      unacknowledged when Snapshot returns without error.
+//  (b) Any messages published to the subscription's topic following
+//      Snapshot returning without error.
+func (s *Subscription) createSnapshot(ctx context.Context, name string) (*snapshotConfig, error) {
+	if name != "" {
+		name = vkit.SubscriberSnapshotPath(strings.Split(s.name, "/")[1], name)
+	}
+	return s.s.createSnapshot(ctx, name, s.name)
+}
+
+// seekToSnapshot seeks the subscription to a snapshot.
+//
+// The snapshot need not be created from this subscription,
+// but the snapshot must be for the topic this subscription is subscribed to.
+func (s *Subscription) seekToSnapshot(ctx context.Context, snap *snapshot) error {
+	return s.s.seekToSnapshot(ctx, s.name, snap.name)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go
new file mode 100644
index 0000000..2144125
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/subscription.go
@@ -0,0 +1,405 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"golang.org/x/net/context"
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// Subscription is a reference to a PubSub subscription.
+type Subscription struct {
+	s service
+
+	// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
+	name string
+
+	// Settings for pulling messages. Configure these before calling Receive.
+	ReceiveSettings ReceiveSettings
+
+	mu            sync.Mutex
+	receiveActive bool
+}
+
+// Subscription creates a reference to a subscription.
+func (c *Client) Subscription(id string) *Subscription {
+	return newSubscription(c.s, fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id))
+}
+
+func newSubscription(s service, name string) *Subscription {
+	return &Subscription{
+		s:    s,
+		name: name,
+	}
+}
+
+// String returns the globally unique printable name of the subscription.
+func (s *Subscription) String() string {
+	return s.name
+}
+
+// ID returns the unique identifier of the subscription within its project.
+func (s *Subscription) ID() string {
+	slash := strings.LastIndex(s.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad subscription name")
+	}
+	return s.name[slash+1:]
+}
+
+// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
+func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	return &SubscriptionIterator{
+		s:    c.s,
+		next: c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName()),
+	}
+}
+
+// SubscriptionIterator is an iterator that returns a series of subscriptions.
+type SubscriptionIterator struct {
+	s    service
+	next nextStringFunc
+}
+
+// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
+func (subs *SubscriptionIterator) Next() (*Subscription, error) {
+	subName, err := subs.next()
+	if err != nil {
+		return nil, err
+	}
+	return newSubscription(subs.s, subName), nil
+}
+
+// PushConfig contains configuration for subscriptions that operate in push mode.
+type PushConfig struct {
+	// A URL locating the endpoint to which messages should be pushed.
+	Endpoint string
+
+	// Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
+	Attributes map[string]string
+}
+
+// SubscriptionConfig contains the configuration of a subscription.
+type SubscriptionConfig struct {
+	Topic      *Topic
+	PushConfig PushConfig
+
+	// The default maximum time after a subscriber receives a message before
+	// the subscriber should acknowledge the message. Note: messages which are
+	// obtained via Subscription.Receive need not be acknowledged within this
+	// deadline, as the deadline will be automatically extended.
+	AckDeadline time.Duration
+
+	// Whether to retain acknowledged messages. If true, acknowledged messages
+	// will not be expunged until they fall out of the RetentionDuration window.
+	retainAckedMessages bool
+
+	// How long to retain messages in backlog, from the time of publish. If RetainAckedMessages is true,
+	// this duration affects the retention of acknowledged messages,
+	// otherwise only unacknowledged messages are retained.
+	// Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
+	retentionDuration time.Duration
+}
+
+// ReceiveSettings configure the Receive method.
+// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings.
+type ReceiveSettings struct {
+	// MaxExtension is the maximum period for which the Subscription should
+	// automatically extend the ack deadline for each message.
+	//
+	// The Subscription will automatically extend the ack deadline of all
+	// fetched Messages for the duration specified. Automatic deadline
+	// extension may be disabled by specifying a negative duration.
+	MaxExtension time.Duration
+
+	// MaxOutstandingMessages is the maximum number of unprocessed messages
+	// (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it
+	// will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages.
+	// If the value is negative, then there will be no limit on the number of
+	// unprocessed messages.
+	MaxOutstandingMessages int
+
+	// MaxOutstandingBytes is the maximum size of unprocessed messages
+	// (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will
+	// be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If
+	// the value is negative, then there will be no limit on the number of bytes
+	// for unprocessed messages.
+	MaxOutstandingBytes int
+
+	// NumGoroutines is the number of goroutines Receive will spawn to pull
+	// messages concurrently. If NumGoroutines is less than 1, it will be treated
+	// as if it were DefaultReceiveSettings.NumGoroutines.
+	//
+	// NumGoroutines does not limit the number of messages that can be processed
+	// concurrently. Even with one goroutine, many messages might be processed at
+	// once, because that goroutine may continually receive messages and invoke the
+	// function passed to Receive on them. To limit the number of messages being
+	// processed concurrently, set MaxOutstandingMessages.
+	NumGoroutines int
+}
+
+// DefaultReceiveSettings holds the default values for ReceiveSettings.
+var DefaultReceiveSettings = ReceiveSettings{
+	MaxExtension:           10 * time.Minute,
+	MaxOutstandingMessages: 1000,
+	MaxOutstandingBytes:    1e9, // 1G
+	NumGoroutines:          1,
+}
+
+// Delete deletes the subscription.
+func (s *Subscription) Delete(ctx context.Context) error {
+	return s.s.deleteSubscription(ctx, s.name)
+}
+
+// Exists reports whether the subscription exists on the server.
+func (s *Subscription) Exists(ctx context.Context) (bool, error) {
+	return s.s.subscriptionExists(ctx, s.name)
+}
+
+// Config fetches the current configuration for the subscription.
+func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) {
+	conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name)
+	if err != nil {
+		return SubscriptionConfig{}, err
+	}
+	conf.Topic = &Topic{
+		s:    s.s,
+		name: topicName,
+	}
+	return conf, nil
+}
+
+// SubscriptionConfigToUpdate describes how to update a subscription.
+type SubscriptionConfigToUpdate struct {
+	// If non-nil, the push config is changed.
+	PushConfig *PushConfig
+}
+
+// Update changes an existing subscription according to the fields set in cfg.
+// It returns the new SubscriptionConfig.
+//
+// Update returns an error if no fields were modified.
+func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) {
+	if cfg.PushConfig == nil {
+		return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update")
+	}
+	if err := s.s.modifyPushConfig(ctx, s.name, *cfg.PushConfig); err != nil {
+		return SubscriptionConfig{}, err
+	}
+	return s.Config(ctx)
+}
+
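+// IAM returns the subscription's IAM handle.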
+func (s *Subscription) IAM() *iam.Handle {
+	return s.s.iamHandle(s.name)
+}
+
+// CreateSubscription creates a new subscription on a topic.
+//
+// id is the name of the subscription to create. It must start with a letter,
+// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-),
+// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It
+// must be between 3 and 255 characters in length, and must not start with
+// "goog".
+//
+// cfg.Topic is the topic from which the subscription should receive messages. It
+// need not belong to the same project as the subscription. This field is required.
+//
+// cfg.AckDeadline is the maximum time after a subscriber receives a message before
+// the subscriber should acknowledge the message. It must be between 10 and 600
+// seconds (inclusive), and is rounded down to the nearest second. If the
+// provided ackDeadline is 0, then the default value of 10 seconds is used.
+// Note: messages which are obtained via Subscription.Receive need not be
+// acknowledged within this deadline, as the deadline will be automatically
+// extended.
+//
+// cfg.PushConfig may be set to configure this subscription for push delivery.
+//
+// If the subscription already exists an error will be returned.
+func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) {
+	if cfg.Topic == nil {
+		return nil, errors.New("pubsub: require non-nil Topic")
+	}
+	if cfg.AckDeadline == 0 {
+		cfg.AckDeadline = 10 * time.Second
+	}
+	if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second {
+		return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d)
+	}
+
+	sub := c.Subscription(id)
+	err := c.s.createSubscription(ctx, sub.name, cfg)
+	return sub, err
+}
+
+var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription")
+
+// Receive calls f with the outstanding messages from the subscription.
+// It blocks until ctx is done, or the service returns a non-retryable error.
+//
+// The standard way to terminate a Receive is to cancel its context:
+//
+//   cctx, cancel := context.WithCancel(ctx)
+//   err := sub.Receive(cctx, callback)
+//   // Call cancel from callback, or another goroutine.
+//
+// If the service returns a non-retryable error, Receive returns that error after
+// all of the outstanding calls to f have returned. If ctx is done, Receive
+// returns nil after all of the outstanding calls to f have returned and
+// all messages have been acknowledged or have expired.
+//
+// Receive calls f concurrently from multiple goroutines. It is encouraged to
+// process messages synchronously in f, even if that processing is relatively
+// time-consuming; Receive will spawn new goroutines for incoming messages,
+// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings.
+//
+// The context passed to f will be canceled when ctx is Done or there is a
+// fatal service error.
+//
+// Receive will automatically extend the ack deadline of all fetched Messages for the
+// period specified by s.ReceiveSettings.MaxExtension.
+//
+// Each Subscription may have only one invocation of Receive active at a time.
+func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error {
+	s.mu.Lock()
+	if s.receiveActive {
+		s.mu.Unlock()
+		return errReceiveInProgress
+	}
+	s.receiveActive = true
+	s.mu.Unlock()
+	defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }()
+
+	config, err := s.Config(ctx)
+	if err != nil {
+		if grpc.Code(err) == codes.Canceled {
+			return nil
+		}
+		return err
+	}
+	maxCount := s.ReceiveSettings.MaxOutstandingMessages
+	if maxCount == 0 {
+		maxCount = DefaultReceiveSettings.MaxOutstandingMessages
+	}
+	maxBytes := s.ReceiveSettings.MaxOutstandingBytes
+	if maxBytes == 0 {
+		maxBytes = DefaultReceiveSettings.MaxOutstandingBytes
+	}
+	maxExt := s.ReceiveSettings.MaxExtension
+	if maxExt == 0 {
+		maxExt = DefaultReceiveSettings.MaxExtension
+	} else if maxExt < 0 {
+		// If MaxExtension is negative, disable automatic extension.
+		maxExt = 0
+	}
+	numGoroutines := s.ReceiveSettings.NumGoroutines
+	if numGoroutines < 1 {
+		numGoroutines = DefaultReceiveSettings.NumGoroutines
+	}
+	// TODO(jba): add tests that verify that ReceiveSettings are correctly processed.
+	po := &pullOptions{
+		maxExtension: maxExt,
+		maxPrefetch:  trunc32(int64(maxCount)),
+		ackDeadline:  config.AckDeadline,
+	}
+	fc := newFlowController(maxCount, maxBytes)
+
+	// Wait for all goroutines started by Receive to return, so instead of an
+	// obscure goroutine leak we have an obvious blocked call to Receive.
+	group, gctx := errgroup.WithContext(ctx)
+	for i := 0; i < numGoroutines; i++ {
+		group.Go(func() error {
+			return s.receive(gctx, po, fc, f)
+		})
+	}
+	return group.Wait()
+}
+
+func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error {
+	// Cancel a sub-context when we return, to kick the context-aware callbacks
+	// and the goroutine below.
+	ctx2, cancel := context.WithCancel(ctx)
+	// Call stop when Receive's context is done.
+	// Stop will block until all outstanding messages have been acknowledged
+	// or there was a fatal service error.
+	// The iterator does not use the context passed to Receive. If it did, canceling
+	// that context would immediately stop the iterator without waiting for unacked
+	// messages.
+	iter := newMessageIterator(context.Background(), s.s, s.name, po)
+
+	// We cannot use errgroup from Receive here. Receive might already be calling group.Wait,
+	// and group.Wait cannot be called concurrently with group.Go. We give each receive() its
+	// own WaitGroup instead.
+	// Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed
+	// to be called after all Adds.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		<-ctx2.Done()
+		iter.stop()
+		wg.Done()
+	}()
+	defer wg.Wait()
+
+	defer cancel()
+	for {
+		msgs, err := iter.receive()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		for i, msg := range msgs {
+			msg := msg
+			// TODO(jba): call acquire closer to when the message is allocated.
+			if err := fc.acquire(ctx, len(msg.Data)); err != nil {
+				// TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done.
+				for _, m := range msgs[i:] {
+					m.Nack()
+				}
+				return nil
+			}
+			wg.Add(1)
+			go func() {
+				// TODO(jba): call release when the message is available for GC.
+				// This considers the message to be released when
+				// f is finished, but f may ack early or not at all.
+				defer wg.Done()
+				defer fc.release(len(msg.Data))
+				f(ctx2, msg)
+			}()
+		}
+	}
+}
+
+// TODO(jba): remove when we delete messageIterator.
+type pullOptions struct {
+	maxExtension time.Duration
+	maxPrefetch  int32
+	// ackDeadline is the default ack deadline for the subscription. Not
+	// configurable.
+	ackDeadline time.Duration
+}
diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go
new file mode 100644
index 0000000..274f54d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/topic.go
@@ -0,0 +1,368 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/iam"
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/api/support/bundler"
+	pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+const (
+	// The maximum number of messages that can be in a single publish request, as
+	// determined by the PubSub service.
+	MaxPublishRequestCount = 1000
+
+	// The maximum size of a single publish request in bytes, as determined by the PubSub service.
+	MaxPublishRequestBytes = 1e7
+
+	maxInt = int(^uint(0) >> 1)
+)
+
+// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
+var ErrOversizedMessage = bundler.ErrOversizedItem
+
+// Topic is a reference to a PubSub topic.
+//
+// The methods of Topic are safe for use by multiple goroutines.
+type Topic struct {
+	s service
+	// The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
+	name string
+
+	// Settings for publishing messages. All changes must be made before the
+	// first call to Publish. The default is DefaultPublishSettings.
+	PublishSettings PublishSettings
+
+	mu      sync.RWMutex
+	stopped bool
+	bundler *bundler.Bundler
+
+	wg sync.WaitGroup
+
+	// Channel for message bundles to be published. Close to indicate that Stop was called.
+	bundlec chan []*bundledMessage
+}
+
+// PublishSettings control the bundling of published messages.
+type PublishSettings struct {
+
+	// Publish a non-empty batch after this delay has passed.
+	DelayThreshold time.Duration
+
+	// Publish a batch when it has this many messages. The maximum is
+	// MaxPublishRequestCount.
+	CountThreshold int
+
+	// Publish a batch when its size in bytes reaches this value.
+	ByteThreshold int
+
+	// The number of goroutines that invoke the Publish RPC concurrently.
+	// Defaults to a multiple of GOMAXPROCS.
+	NumGoroutines int
+
+	// The maximum time that the client will attempt to publish a bundle of messages.
+	Timeout time.Duration
+}
+
+// DefaultPublishSettings holds the default values for topics' PublishSettings.
+var DefaultPublishSettings = PublishSettings{
+	DelayThreshold: 1 * time.Millisecond,
+	CountThreshold: 100,
+	ByteThreshold:  1e6,
+	Timeout:        60 * time.Second,
+}
+
+// CreateTopic creates a new topic.
+// The specified topic ID must start with a letter, and contain only letters
+// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
+// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
+// characters in length, and must not start with "goog".
+// If the topic already exists an error will be returned.
+func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) {
+	t := c.Topic(id)
+	err := c.s.createTopic(ctx, t.name)
+	return t, err
+}
+
+// Topic creates a reference to a topic in the client's project.
+//
+// If a Topic's Publish method is called, it has background goroutines
+// associated with it. Clean them up by calling Topic.Stop.
+//
+// Avoid creating many Topic instances if you use them to publish.
+func (c *Client) Topic(id string) *Topic {
+	return c.TopicInProject(id, c.projectID)
+}
+
+// TopicInProject creates a reference to a topic in the given project.
+//
+// If a Topic's Publish method is called, it has background goroutines
+// associated with it. Clean them up by calling Topic.Stop.
+//
+// Avoid creating many Topic instances if you use them to publish.
+func (c *Client) TopicInProject(id, projectID string) *Topic {
+	return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", projectID, id))
+}
+
+func newTopic(s service, name string) *Topic {
+	// bundlec is unbuffered. A buffer would occupy memory not
+	// accounted for by the bundler, so BufferedByteLimit would be a lie:
+	// the actual memory consumed would be higher.
+	return &Topic{
+		s:               s,
+		name:            name,
+		PublishSettings: DefaultPublishSettings,
+		bundlec:         make(chan []*bundledMessage),
+	}
+}
+
+// Topics returns an iterator which returns all of the topics for the client's project.
+func (c *Client) Topics(ctx context.Context) *TopicIterator {
+	return &TopicIterator{
+		s:    c.s,
+		next: c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName()),
+	}
+}
+
+// TopicIterator is an iterator that returns a series of topics.
+type TopicIterator struct {
+	s    service
+	next nextStringFunc
+}
+
+// Next returns the next topic. If there are no more topics, iterator.Done will be returned.
+func (tps *TopicIterator) Next() (*Topic, error) {
+	topicName, err := tps.next()
+	if err != nil {
+		return nil, err
+	}
+	return newTopic(tps.s, topicName), nil
+}
+
+// ID returns the unique identifier of the topic within its project.
+func (t *Topic) ID() string {
+	slash := strings.LastIndex(t.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad topic name")
+	}
+	return t.name[slash+1:]
+}
+
+// String returns the printable globally unique name for the topic.
+func (t *Topic) String() string {
+	return t.name
+}
+
+// Delete deletes the topic.
+func (t *Topic) Delete(ctx context.Context) error {
+	return t.s.deleteTopic(ctx, t.name)
+}
+
+// Exists reports whether the topic exists on the server.
+func (t *Topic) Exists(ctx context.Context) (bool, error) {
+	if t.name == "_deleted-topic_" {
+		return false, nil
+	}
+
+	return t.s.topicExists(ctx, t.name)
+}
+
+func (t *Topic) IAM() *iam.Handle {
+	return t.s.iamHandle(t.name)
+}
+
+// Subscriptions returns an iterator which returns the subscriptions for this topic.
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	// NOTE: zero or more Subscriptions that are ultimately returned by this
+	// Subscriptions iterator may belong to a different project to t.
+	return &SubscriptionIterator{
+		s:    t.s,
+		next: t.s.listTopicSubscriptions(ctx, t.name),
+	}
+}
+
+var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
+
+// Publish publishes msg to the topic asynchronously. Messages are batched and
+// sent according to the topic's PublishSettings. Publish never blocks.
+//
+// Publish returns a non-nil PublishResult which will be ready when the
+// message has been sent (or has failed to be sent) to the server.
+//
+// Publish creates goroutines for batching and sending messages. These goroutines
+// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
+// will immediately return a PublishResult with an error.
+func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
+	// TODO(jba): if this turns out to take significant time, try to approximate it.
+	// Or, convert the messages to protos in Publish, instead of in the service.
+	msg.size = proto.Size(&pb.PubsubMessage{
+		Data:       msg.Data,
+		Attributes: msg.Attributes,
+	})
+	r := &PublishResult{ready: make(chan struct{})}
+	t.initBundler()
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	// TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
+	if t.stopped {
+		r.set("", errTopicStopped)
+		return r
+	}
+
+	// TODO(jba) [from bcmills] consider using a shared channel per bundle
+	// (requires Bundler API changes; would reduce allocations)
+	// The call to Add should never return an error because the bundler's
+	// BufferedByteLimit is set to maxInt; we do not perform any flow
+	// control in the client.
+	err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
+	if err != nil {
+		r.set("", err)
+	}
+	return r
+}
+
+// Stop sends all remaining published messages and stops goroutines created for
+// handling publishing. It returns once all outstanding messages have been sent
+// or have failed to be sent.
+func (t *Topic) Stop() {
+	t.mu.Lock()
+	noop := t.stopped || t.bundler == nil
+	t.stopped = true
+	t.mu.Unlock()
+	if noop {
+		return
+	}
+	t.bundler.Flush()
+	// At this point, all pending bundles have been published and the bundler's
+	// goroutines have exited, so it is OK for this goroutine to close bundlec.
+	close(t.bundlec)
+	t.wg.Wait()
+}
+
+// A PublishResult holds the result from a call to Publish.
+type PublishResult struct {
+	ready    chan struct{}
+	serverID string
+	err      error
+}
+
+// Ready returns a channel that is closed when the result is ready.
+// When the Ready channel is closed, Get is guaranteed not to block.
+func (r *PublishResult) Ready() <-chan struct{} { return r.ready }
+
+// Get returns the server-generated message ID and/or error result of a Publish call.
+// Get blocks until the Publish call completes or the context is done.
+func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) {
+	// If the result is already ready, return it even if the context is done.
+	select {
+	case <-r.Ready():
+		return r.serverID, r.err
+	default:
+	}
+	select {
+	case <-ctx.Done():
+		return "", ctx.Err()
+	case <-r.Ready():
+		return r.serverID, r.err
+	}
+}
+
+func (r *PublishResult) set(sid string, err error) {
+	r.serverID = sid
+	r.err = err
+	close(r.ready)
+}
+
+type bundledMessage struct {
+	msg *Message
+	res *PublishResult
+}
+
+func (t *Topic) initBundler() {
+	t.mu.RLock()
+	noop := t.stopped || t.bundler != nil
+	t.mu.RUnlock()
+	if noop {
+		return
+	}
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	// Must re-check, since we released the lock.
+	if t.stopped || t.bundler != nil {
+		return
+	}
+
+	// TODO(jba): use a context detached from the one passed to NewClient.
+	ctx := context.TODO()
+	// Unless overridden, run several goroutines per CPU to call the Publish RPC.
+	n := t.PublishSettings.NumGoroutines
+	if n <= 0 {
+		n = 25 * runtime.GOMAXPROCS(0)
+	}
+	timeout := t.PublishSettings.Timeout
+	t.wg.Add(n)
+	for i := 0; i < n; i++ {
+		go func() {
+			defer t.wg.Done()
+			for b := range t.bundlec {
+				bctx := ctx
+				cancel := func() {}
+				if timeout != 0 {
+					bctx, cancel = context.WithTimeout(ctx, timeout)
+				}
+				t.publishMessageBundle(bctx, b)
+				cancel()
+			}
+		}()
+	}
+	t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) {
+		t.bundlec <- items.([]*bundledMessage)
+	})
+	t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold
+	t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold
+	if t.bundler.BundleCountThreshold > MaxPublishRequestCount {
+		t.bundler.BundleCountThreshold = MaxPublishRequestCount
+	}
+	t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold
+	t.bundler.BufferedByteLimit = maxInt
+	t.bundler.BundleByteLimit = MaxPublishRequestBytes
+}
+
+func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) {
+	msgs := make([]*Message, len(bms))
+	for i, bm := range bms {
+		msgs[i], bm.msg = bm.msg, nil // release bm.msg for GC
+	}
+	ids, err := t.s.publishMessages(ctx, t.name, msgs)
+	for i, bm := range bms {
+		if err != nil {
+			bm.res.set("", err)
+		} else {
+			bm.res.set(ids[i], nil)
+		}
+	}
+}
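A short sketch of the publish path defined above (topic ID and payload are placeholders): Publish batches messages via the bundler, PublishResult.Get blocks until the batch is sent, and Stop flushes anything pending.

    // Sketch only: publish one message and wait for its server-assigned ID.
    func publishOne(ctx context.Context, client *pubsub.Client) (string, error) {
        t := client.Topic("my-topic") // placeholder topic ID
        defer t.Stop()                // flush bundles, stop publish goroutines

        res := t.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
        return res.Get(ctx) // blocks until the bundled request completes
    }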
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
new file mode 100644
index 0000000..533438d
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errgroup provides synchronization, error propagation, and Context
+// cancelation for groups of goroutines working on subtasks of a common task.
+package errgroup
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+)
+
+// A Group is a collection of goroutines working on subtasks that are part of
+// the same overall task.
+//
+// A zero Group is valid and does not cancel on error.
+type Group struct {
+	cancel func()
+
+	wg sync.WaitGroup
+
+	errOnce sync.Once
+	err     error
+}
+
+// WithContext returns a new Group and an associated Context derived from ctx.
+//
+// The derived Context is canceled the first time a function passed to Go
+// returns a non-nil error or the first time Wait returns, whichever occurs
+// first.
+func WithContext(ctx context.Context) (*Group, context.Context) {
+	ctx, cancel := context.WithCancel(ctx)
+	return &Group{cancel: cancel}, ctx
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
+func (g *Group) Wait() error {
+	g.wg.Wait()
+	if g.cancel != nil {
+		g.cancel()
+	}
+	return g.err
+}
+
+// Go calls the given function in a new goroutine.
+//
+// The first call to return a non-nil error cancels the group; its error will be
+// returned by Wait.
+func (g *Group) Go(f func() error) {
+	g.wg.Add(1)
+
+	go func() {
+		defer g.wg.Done()
+
+		if err := f(); err != nil {
+			g.errOnce.Do(func() {
+				g.err = err
+				if g.cancel != nil {
+					g.cancel()
+				}
+			})
+		}
+	}()
+}
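The errgroup package above is what Receive uses to fan out its NumGoroutines workers; a generic sketch of the same pattern (the task slice and names are illustrative):

    // Sketch only: run tasks concurrently; the first failure cancels gctx
    // and is the error returned by Wait.
    func doAll(ctx context.Context, tasks []func(context.Context) error) error {
        g, gctx := errgroup.WithContext(ctx)
        for _, t := range tasks {
            t := t // capture loop variable
            g.Go(func() error { return t(gctx) })
        }
        return g.Wait()
    }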
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index fc3c4d7..b1c0727 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -7,9 +7,6 @@
 // gc compiler.  This package supports go1.7 export data format and all
 // later versions.
 //
-// This package replaces the deprecated golang.org/x/tools/go/gcimporter15
-// package, which will be deleted in October 2017.
-//
 // Although it might seem convenient for this package to live alongside
 // go/types in the standard library, this would cause version skew
 // problems for developer tools that use it, since they must be able to
@@ -32,7 +29,7 @@
 	"io"
 	"io/ioutil"
 
-	gcimporter "golang.org/x/tools/go/gcimporter15"
+	"golang.org/x/tools/go/internal/gcimporter"
 )
 
 // Find returns the name of an object (.o) or archive (.a) file
@@ -95,6 +92,10 @@
 // Write writes encoded type information for the specified package to out.
 // The FileSet provides file position information for named objects.
 func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
-	_, err := out.Write(gcimporter.BExportData(fset, pkg))
+	b, err := gcimporter.BExportData(fset, pkg)
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(b)
 	return err
 }
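With BExportData now returning an error, Write propagates export failures to the caller instead of aborting the process; a sketch of a caller (the output path is a placeholder):

    // Sketch only: write export data for a type-checked package to a file.
    func writeExport(fset *token.FileSet, pkg *types.Package) error {
        f, err := os.Create("pkg.export") // placeholder output path
        if err != nil {
            return err
        }
        defer f.Close()
        return gcexportdata.Write(f, fset, pkg) // surfaces BExportData errors
    }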
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
similarity index 93%
rename from vendor/golang.org/x/tools/go/gcimporter15/bexport.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
index cbf8bc0..b106172 100644
--- a/vendor/golang.org/x/tools/go/gcimporter15/bexport.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
@@ -16,7 +16,6 @@
 	"go/constant"
 	"go/token"
 	"go/types"
-	"log"
 	"math"
 	"math/big"
 	"sort"
@@ -76,9 +75,29 @@
 	indent  int // for trace
 }
 
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
+
 // BExportData returns binary export data for pkg.
 // If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			if ierr, ok := e.(internalError); ok {
+				err = ierr
+				return
+			}
+			// Not an internal error; panic again.
+			panic(e)
+		}
+	}()
+
 	p := exporter{
 		fset:          fset,
 		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
@@ -107,7 +126,7 @@
 		p.typIndex[typ] = index
 	}
 	if len(p.typIndex) != len(predeclared) {
-		log.Fatalf("gcimporter: duplicate entries in type map?")
+		return nil, internalError("duplicate entries in type map?")
 	}
 
 	// write package data
@@ -145,12 +164,12 @@
 
 	// --- end of export data ---
 
-	return p.out.Bytes()
+	return p.out.Bytes(), nil
 }
 
 func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
 	if pkg == nil {
-		log.Fatalf("gcimporter: unexpected nil pkg")
+		panic(internalError("unexpected nil pkg"))
 	}
 
 	// if we saw the package before, write its index (>= 0)
@@ -209,7 +228,7 @@
 		p.paramList(sig.Results(), false)
 
 	default:
-		log.Fatalf("gcimporter: unexpected object %v (%T)", obj, obj)
+		panic(internalErrorf("unexpected object %v (%T)", obj, obj))
 	}
 }
 
@@ -273,7 +292,7 @@
 
 func (p *exporter) typ(t types.Type) {
 	if t == nil {
-		log.Fatalf("gcimporter: nil type")
+		panic(internalError("nil type"))
 	}
 
 	// Possible optimization: Anonymous pointer types *T where
@@ -356,7 +375,7 @@
 		p.typ(t.Elem())
 
 	default:
-		log.Fatalf("gcimporter: unexpected type %T: %s", t, t)
+		panic(internalErrorf("unexpected type %T: %s", t, t))
 	}
 }
 
@@ -422,7 +441,7 @@
 
 func (p *exporter) field(f *types.Var) {
 	if !f.IsField() {
-		log.Fatalf("gcimporter: field expected")
+		panic(internalError("field expected"))
 	}
 
 	p.pos(f)
@@ -452,7 +471,7 @@
 func (p *exporter) method(m *types.Func) {
 	sig := m.Type().(*types.Signature)
 	if sig.Recv() == nil {
-		log.Fatalf("gcimporter: method expected")
+		panic(internalError("method expected"))
 	}
 
 	p.pos(m)
@@ -575,13 +594,13 @@
 		p.tag(unknownTag)
 
 	default:
-		log.Fatalf("gcimporter: unexpected value %v (%T)", x, x)
+		panic(internalErrorf("unexpected value %v (%T)", x, x))
 	}
 }
 
 func (p *exporter) float(x constant.Value) {
 	if x.Kind() != constant.Float {
-		log.Fatalf("gcimporter: unexpected constant %v, want float", x)
+		panic(internalErrorf("unexpected constant %v, want float", x))
 	}
 	// extract sign (there is no -0)
 	sign := constant.Sign(x)
@@ -616,7 +635,7 @@
 	m.SetMantExp(&m, int(m.MinPrec()))
 	mant, acc := m.Int(nil)
 	if acc != big.Exact {
-		log.Fatalf("gcimporter: internal error")
+		panic(internalError("internal error"))
 	}
 
 	p.int(sign)
@@ -653,7 +672,7 @@
 
 func (p *exporter) index(marker byte, index int) {
 	if index < 0 {
-		log.Fatalf("gcimporter: invalid index < 0")
+		panic(internalError("invalid index < 0"))
 	}
 	if debugFormat {
 		p.marker('t')
@@ -666,7 +685,7 @@
 
 func (p *exporter) tag(tag int) {
 	if tag >= 0 {
-		log.Fatalf("gcimporter: invalid tag >= 0")
+		panic(internalError("invalid tag >= 0"))
 	}
 	if debugFormat {
 		p.marker('t')
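The change above replaces log.Fatalf calls with panics of a package-private error type that BExportData recovers at its API boundary; restated generically (all names below are illustrative, not the vendored identifiers):

    // Sketch of the pattern: deep helpers panic with a private error type,
    // and the exported entry point converts that panic back into an error.
    type exportError string

    func (e exportError) Error() string { return "export: " + string(e) }

    func Export() (data []byte, err error) {
        defer func() {
            if r := recover(); r != nil {
                if e, ok := r.(exportError); ok {
                    err = e // expected failure: report it as an error
                    return
                }
                panic(r) // unrelated panic: re-raise
            }
        }()
        // ... helpers may call panic(exportError("...")) at any depth ...
        return nil, nil
    }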
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
similarity index 88%
rename from vendor/golang.org/x/tools/go/gcimporter15/bimport.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
index 1936a7f..3e845ea 100644
--- a/vendor/golang.org/x/tools/go/gcimporter15/bimport.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
@@ -39,8 +39,7 @@
 	posInfoFormat bool
 	prevFile      string
 	prevLine      int
-	fset          *token.FileSet
-	files         map[string]*token.File
+	fake          fakeFileSet
 
 	// debugging support
 	debugFormat bool
@@ -62,6 +61,10 @@
 		}
 	}()
 
+	if len(data) > 0 && data[0] == 'i' {
+		return iImportData(fset, imports, data[1:], path)
+	}
+
 	p := importer{
 		imports:    imports,
 		data:       data,
@@ -69,8 +72,10 @@
 		version:    -1,           // unknown version
 		strList:    []string{""}, // empty string is mapped to 0
 		pathList:   []string{""}, // empty string is mapped to 0
-		fset:       fset,
-		files:      make(map[string]*token.File),
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*token.File),
+		},
 	}
 
 	// read version info
@@ -104,10 +109,10 @@
 
 	// read version specific flags - extend as necessary
 	switch p.version {
-	// case 6:
+	// case 7:
 	// 	...
 	//	fallthrough
-	case 5, 4, 3, 2, 1:
+	case 6, 5, 4, 3, 2, 1:
 		p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
 		p.trackAllTypes = p.int() != 0
 		p.posInfoFormat = p.int() != 0
@@ -125,7 +130,7 @@
 	// read package data
 	pkg = p.pkg()
 
-	// read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
+	// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
 	objcount := 0
 	for {
 		tag := p.tagOrIndex()
@@ -184,6 +189,9 @@
 	} else {
 		path = p.string()
 	}
+	if p.version >= 6 {
+		p.int() // package height; unused by go/types
+	}
 
 	// we should never see an empty package name
 	if name == "" {
@@ -259,7 +267,7 @@
 	case constTag:
 		pos := p.pos()
 		pkg, name := p.qualifiedName()
-		typ := p.typ(nil)
+		typ := p.typ(nil, nil)
 		val := p.value()
 		p.declare(types.NewConst(pos, pkg, name, typ, val))
 
@@ -267,16 +275,16 @@
 		// TODO(gri) verify type alias hookup is correct
 		pos := p.pos()
 		pkg, name := p.qualifiedName()
-		typ := p.typ(nil)
+		typ := p.typ(nil, nil)
 		p.declare(types.NewTypeName(pos, pkg, name, typ))
 
 	case typeTag:
-		p.typ(nil)
+		p.typ(nil, nil)
 
 	case varTag:
 		pos := p.pos()
 		pkg, name := p.qualifiedName()
-		typ := p.typ(nil)
+		typ := p.typ(nil, nil)
 		p.declare(types.NewVar(pos, pkg, name, typ))
 
 	case funcTag:
@@ -323,15 +331,23 @@
 	p.prevFile = file
 	p.prevLine = line
 
-	// Synthesize a token.Pos
+	return p.fake.pos(file, line)
+}
 
+// Synthesize a token.Pos
+type fakeFileSet struct {
+	fset  *token.FileSet
+	files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line int) token.Pos {
 	// Since we don't know the set of needed file positions, we
 	// reserve maxlines positions per file.
 	const maxlines = 64 * 1024
-	f := p.files[file]
+	f := s.files[file]
 	if f == nil {
-		f = p.fset.AddFile(file, -1, maxlines)
-		p.files[file] = f
+		f = s.fset.AddFile(file, -1, maxlines)
+		s.files[file] = f
 		// Allocate the fake linebreak indices on first use.
 		// TODO(adonovan): opt: save ~512KB using a more complex scheme?
 		fakeLinesOnce.Do(func() {
@@ -381,7 +397,11 @@
 // the package currently imported. The parent package is needed for
 // exported struct fields and interface methods which don't contain
 // explicit package information in the export data.
-func (p *importer) typ(parent *types.Package) types.Type {
+//
+// A non-nil tname is used as the "owner" of the result type; i.e.,
+// the result type is the underlying type of tname. tname is used
+// to give interface methods a named receiver type where possible.
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
 	// if the type was seen before, i is its index (>= 0)
 	i := p.tagOrIndex()
 	if i >= 0 {
@@ -411,15 +431,15 @@
 		t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
 
 		// but record the existing type, if any
-		t := obj.Type().(*types.Named)
-		p.record(t)
+		tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
+		p.record(tname)
 
 		// read underlying type
-		t0.SetUnderlying(p.typ(parent))
+		t0.SetUnderlying(p.typ(parent, t0))
 
 		// interfaces don't have associated methods
 		if types.IsInterface(t0) {
-			return t
+			return tname
 		}
 
 		// read associated methods
@@ -440,7 +460,7 @@
 			t0.AddMethod(types.NewFunc(pos, parent, name, sig))
 		}
 
-		return t
+		return tname
 
 	case arrayTag:
 		t := new(types.Array)
@@ -449,7 +469,7 @@
 		}
 
 		n := p.int64()
-		*t = *types.NewArray(p.typ(parent), n)
+		*t = *types.NewArray(p.typ(parent, nil), n)
 		return t
 
 	case sliceTag:
@@ -458,7 +478,7 @@
 			p.record(t)
 		}
 
-		*t = *types.NewSlice(p.typ(parent))
+		*t = *types.NewSlice(p.typ(parent, nil))
 		return t
 
 	case dddTag:
@@ -467,7 +487,7 @@
 			p.record(t)
 		}
 
-		t.elem = p.typ(parent)
+		t.elem = p.typ(parent, nil)
 		return t
 
 	case structTag:
@@ -485,7 +505,7 @@
 			p.record(t)
 		}
 
-		*t = *types.NewPointer(p.typ(parent))
+		*t = *types.NewPointer(p.typ(parent, nil))
 		return t
 
 	case signatureTag:
@@ -504,6 +524,8 @@
 		// cannot expect the interface type to appear in a cycle, as any
 		// such cycle must contain a named type which would have been
 		// first defined earlier.
+		// TODO(gri) Is this still true now that we have type aliases?
+		// See issue #23225.
 		n := len(p.typList)
 		if p.trackAllTypes {
 			p.record(nil)
@@ -512,10 +534,10 @@
 		var embeddeds []*types.Named
 		for n := p.int(); n > 0; n-- {
 			p.pos()
-			embeddeds = append(embeddeds, p.typ(parent).(*types.Named))
+			embeddeds = append(embeddeds, p.typ(parent, nil).(*types.Named))
 		}
 
-		t := types.NewInterface(p.methodList(parent), embeddeds)
+		t := types.NewInterface(p.methodList(parent, tname), embeddeds)
 		p.interfaceList = append(p.interfaceList, t)
 		if p.trackAllTypes {
 			p.typList[n] = t
@@ -528,8 +550,8 @@
 			p.record(t)
 		}
 
-		key := p.typ(parent)
-		val := p.typ(parent)
+		key := p.typ(parent, nil)
+		val := p.typ(parent, nil)
 		*t = *types.NewMap(key, val)
 		return t
 
@@ -539,19 +561,8 @@
 			p.record(t)
 		}
 
-		var dir types.ChanDir
-		// tag values must match the constants in cmd/compile/internal/gc/go.go
-		switch d := p.int(); d {
-		case 1 /* Crecv */ :
-			dir = types.RecvOnly
-		case 2 /* Csend */ :
-			dir = types.SendOnly
-		case 3 /* Cboth */ :
-			dir = types.SendRecv
-		default:
-			errorf("unexpected channel dir %d", d)
-		}
-		val := p.typ(parent)
+		dir := chanDir(p.int())
+		val := p.typ(parent, nil)
 		*t = *types.NewChan(dir, val)
 		return t
 
@@ -561,6 +572,21 @@
 	}
 }
 
+func chanDir(d int) types.ChanDir {
+	// tag values must match the constants in cmd/compile/internal/gc/go.go
+	switch d {
+	case 1 /* Crecv */ :
+		return types.RecvOnly
+	case 2 /* Csend */ :
+		return types.SendOnly
+	case 3 /* Cboth */ :
+		return types.SendRecv
+	default:
+		errorf("unexpected channel dir %d", d)
+		return 0
+	}
+}
+
 func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
 	if n := p.int(); n > 0 {
 		fields = make([]*types.Var, n)
@@ -575,7 +601,7 @@
 func (p *importer) field(parent *types.Package) (*types.Var, string) {
 	pos := p.pos()
 	pkg, name, alias := p.fieldName(parent)
-	typ := p.typ(parent)
+	typ := p.typ(parent, nil)
 	tag := p.string()
 
 	anonymous := false
@@ -599,22 +625,30 @@
 	return types.NewField(pos, pkg, name, typ, anonymous), tag
 }
 
-func (p *importer) methodList(parent *types.Package) (methods []*types.Func) {
+func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
 	if n := p.int(); n > 0 {
 		methods = make([]*types.Func, n)
 		for i := range methods {
-			methods[i] = p.method(parent)
+			methods[i] = p.method(parent, baseType)
 		}
 	}
 	return
 }
 
-func (p *importer) method(parent *types.Package) *types.Func {
+func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
 	pos := p.pos()
 	pkg, name, _ := p.fieldName(parent)
+	// If we don't have a baseType, use a nil receiver.
+	// A receiver using the actual interface type (which
+	// we don't know yet) will be filled in when we call
+	// types.Interface.Complete.
+	var recv *types.Var
+	if baseType != nil {
+		recv = types.NewVar(token.NoPos, parent, "", baseType)
+	}
 	params, isddd := p.paramList()
 	result, _ := p.paramList()
-	sig := types.NewSignature(nil, params, result, isddd)
+	sig := types.NewSignature(recv, params, result, isddd)
 	return types.NewFunc(pos, pkg, name, sig)
 }
 
@@ -670,7 +704,7 @@
 }
 
 func (p *importer) param(named bool) (*types.Var, bool) {
-	t := p.typ(nil)
+	t := p.typ(nil, nil)
 	td, isddd := t.(*dddSlice)
 	if isddd {
 		t = types.NewSlice(td.elem)
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
similarity index 100%
rename from vendor/golang.org/x/tools/go/gcimporter15/exportdata.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
similarity index 98%
rename from vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
index 370203b..4edd6a8 100644
--- a/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
@@ -6,13 +6,9 @@
 // but it also contains the original source-based importer code for Go1.6.
 // Once we stop supporting 1.6, we can remove that code.
 
-// Package gcimporter15 provides various functions for reading
+// Package gcimporter provides various functions for reading
 // gc-generated object files that can be used to implement the
 // Importer interface defined by the Go 1.5 standard library package.
-//
-// Deprecated: this package will be deleted in October 2017.
-// New code should use golang.org/x/tools/go/gcexportdata.
-//
 package gcimporter
 
 import (
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
new file mode 100644
index 0000000..dfc00a3
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
@@ -0,0 +1,585 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"sort"
+)
+
+type intReader struct {
+	*bytes.Reader
+	path string
+}
+
+func (r *intReader) int64() int64 {
+	i, err := binary.ReadVarint(r.Reader)
+	if err != nil {
+		errorf("import %q: read varint error: %v", r.path, err)
+	}
+	return i
+}
+
+func (r *intReader) uint64() uint64 {
+	i, err := binary.ReadUvarint(r.Reader)
+	if err != nil {
+		errorf("import %q: read varint error: %v", r.path, err)
+	}
+	return i
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+	// Types
+	definedType itag = iota
+	pointerType
+	sliceType
+	arrayType
+	chanType
+	mapType
+	signatureType
+	structType
+	interfaceType
+)
+
+// iImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func iImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	r := &intReader{bytes.NewReader(data), path}
+
+	version := r.uint64()
+	switch version {
+	case 0:
+	default:
+		errorf("cannot import %q: unknown iexport format version %d", path, version)
+	}
+
+	sLen := int64(r.uint64())
+	dLen := int64(r.uint64())
+
+	whence, _ := r.Seek(0, io.SeekCurrent)
+	stringData := data[whence : whence+sLen]
+	declData := data[whence+sLen : whence+sLen+dLen]
+	r.Seek(sLen+dLen, io.SeekCurrent)
+
+	p := iimporter{
+		ipath: path,
+
+		stringData:  stringData,
+		stringCache: make(map[uint64]string),
+		pkgCache:    make(map[uint64]*types.Package),
+
+		declData: declData,
+		pkgIndex: make(map[*types.Package]map[string]uint64),
+		typCache: make(map[uint64]types.Type),
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*token.File),
+		},
+	}
+
+	for i, pt := range predeclared {
+		p.typCache[uint64(i)] = pt
+	}
+
+	pkgList := make([]*types.Package, r.uint64())
+	for i := range pkgList {
+		pkgPathOff := r.uint64()
+		pkgPath := p.stringAt(pkgPathOff)
+		pkgName := p.stringAt(r.uint64())
+		_ = r.uint64() // package height; unused by go/types
+
+		if pkgPath == "" {
+			pkgPath = path
+		}
+		pkg := imports[pkgPath]
+		if pkg == nil {
+			pkg = types.NewPackage(pkgPath, pkgName)
+			imports[pkgPath] = pkg
+		} else if pkg.Name() != pkgName {
+			errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+		}
+
+		p.pkgCache[pkgPathOff] = pkg
+
+		nameIndex := make(map[string]uint64)
+		for nSyms := r.uint64(); nSyms > 0; nSyms-- {
+			name := p.stringAt(r.uint64())
+			nameIndex[name] = r.uint64()
+		}
+
+		p.pkgIndex[pkg] = nameIndex
+		pkgList[i] = pkg
+	}
+
+	localpkg := pkgList[0]
+
+	names := make([]string, 0, len(p.pkgIndex[localpkg]))
+	for name := range p.pkgIndex[localpkg] {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	for _, name := range names {
+		p.doDecl(localpkg, name)
+	}
+
+	for _, typ := range p.interfaceList {
+		typ.Complete()
+	}
+
+	// record all referenced packages as imports
+	list := append(([]*types.Package)(nil), pkgList[1:]...)
+	sort.Sort(byPath(list))
+	localpkg.SetImports(list)
+
+	// package was imported completely and without errors
+	localpkg.MarkComplete()
+
+	consumed, _ := r.Seek(0, io.SeekCurrent)
+	return int(consumed), localpkg, nil
+}
+
+type iimporter struct {
+	ipath string
+
+	stringData  []byte
+	stringCache map[uint64]string
+	pkgCache    map[uint64]*types.Package
+
+	declData []byte
+	pkgIndex map[*types.Package]map[string]uint64
+	typCache map[uint64]types.Type
+
+	fake          fakeFileSet
+	interfaceList []*types.Interface
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+	// See if we've already imported this declaration.
+	if obj := pkg.Scope().Lookup(name); obj != nil {
+		return
+	}
+
+	off, ok := p.pkgIndex[pkg][name]
+	if !ok {
+		errorf("%v.%v not in index", pkg, name)
+	}
+
+	r := &importReader{p: p, currPkg: pkg}
+	r.declReader.Reset(p.declData[off:])
+
+	r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+	if s, ok := p.stringCache[off]; ok {
+		return s
+	}
+
+	slen, n := binary.Uvarint(p.stringData[off:])
+	if n <= 0 {
+		errorf("varint failed")
+	}
+	spos := off + uint64(n)
+	s := string(p.stringData[spos : spos+slen])
+	p.stringCache[off] = s
+	return s
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+	if pkg, ok := p.pkgCache[off]; ok {
+		return pkg
+	}
+	path := p.stringAt(off)
+	errorf("missing package %q in %q", path, p.ipath)
+	return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+	if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
+		return t
+	}
+
+	if off < predeclReserved {
+		errorf("predeclared type missing from cache: %v", off)
+	}
+
+	r := &importReader{p: p}
+	r.declReader.Reset(p.declData[off-predeclReserved:])
+	t := r.doType(base)
+
+	if base == nil || !isInterface(t) {
+		p.typCache[off] = t
+	}
+	return t
+}
+
+type importReader struct {
+	p          *iimporter
+	declReader bytes.Reader
+	currPkg    *types.Package
+	prevFile   string
+	prevLine   int64
+}
+
+func (r *importReader) obj(name string) {
+	tag := r.byte()
+	pos := r.pos()
+
+	switch tag {
+	case 'A':
+		typ := r.typ()
+
+		r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+	case 'C':
+		typ, val := r.value()
+
+		r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+	case 'F':
+		sig := r.signature(nil)
+
+		r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+	case 'T':
+		// Types can be recursive. We need to setup a stub
+		// declaration before recursing.
+		obj := types.NewTypeName(pos, r.currPkg, name, nil)
+		named := types.NewNamed(obj, nil, nil)
+		r.declare(obj)
+
+		underlying := r.p.typAt(r.uint64(), named).Underlying()
+		named.SetUnderlying(underlying)
+
+		if !isInterface(underlying) {
+			for n := r.uint64(); n > 0; n-- {
+				mpos := r.pos()
+				mname := r.ident()
+				recv := r.param()
+				msig := r.signature(recv)
+
+				named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+			}
+		}
+
+	case 'V':
+		typ := r.typ()
+
+		r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+	default:
+		errorf("unexpected tag: %v", tag)
+	}
+}
+
+func (r *importReader) declare(obj types.Object) {
+	obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+	typ = r.typ()
+
+	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+	case types.IsBoolean:
+		val = constant.MakeBool(r.bool())
+
+	case types.IsString:
+		val = constant.MakeString(r.string())
+
+	case types.IsInteger:
+		val = r.mpint(b)
+
+	case types.IsFloat:
+		val = r.mpfloat(b)
+
+	case types.IsComplex:
+		re := r.mpfloat(b)
+		im := r.mpfloat(b)
+		val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+	default:
+		errorf("unexpected type %v", typ) // panics
+		panic("unreachable")
+	}
+
+	return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+	if (b.Info() & types.IsUntyped) != 0 {
+		return true, 64
+	}
+
+	switch b.Kind() {
+	case types.Float32, types.Complex64:
+		return true, 3
+	case types.Float64, types.Complex128:
+		return true, 7
+	}
+
+	signed = (b.Info() & types.IsUnsigned) == 0
+	switch b.Kind() {
+	case types.Int8, types.Uint8:
+		maxBytes = 1
+	case types.Int16, types.Uint16:
+		maxBytes = 2
+	case types.Int32, types.Uint32:
+		maxBytes = 4
+	default:
+		maxBytes = 8
+	}
+
+	return
+}
+
+func (r *importReader) mpint(b *types.Basic) constant.Value {
+	signed, maxBytes := intSize(b)
+
+	maxSmall := 256 - maxBytes
+	if signed {
+		maxSmall = 256 - 2*maxBytes
+	}
+	if maxBytes == 1 {
+		maxSmall = 256
+	}
+
+	n, _ := r.declReader.ReadByte()
+	if uint(n) < maxSmall {
+		v := int64(n)
+		if signed {
+			v >>= 1
+			if n&1 != 0 {
+				v = ^v
+			}
+		}
+		return constant.MakeInt64(v)
+	}
+
+	v := -n
+	if signed {
+		v = -(n &^ 1) >> 1
+	}
+	if v < 1 || uint(v) > maxBytes {
+		errorf("weird decoding: %v, %v => %v", n, signed, v)
+	}
+
+	buf := make([]byte, v)
+	io.ReadFull(&r.declReader, buf)
+
+	// convert to little endian
+	// TODO(gri) go/constant should have a more direct conversion function
+	//           (e.g., once it supports a big.Float based implementation)
+	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
+		buf[i], buf[j] = buf[j], buf[i]
+	}
+
+	x := constant.MakeFromBytes(buf)
+	if signed && n&1 != 0 {
+		x = constant.UnaryOp(token.SUB, x, 0)
+	}
+	return x
+}
+
+func (r *importReader) mpfloat(b *types.Basic) constant.Value {
+	x := r.mpint(b)
+	if constant.Sign(x) == 0 {
+		return x
+	}
+
+	exp := r.int64()
+	switch {
+	case exp > 0:
+		x = constant.Shift(x, token.SHL, uint(exp))
+	case exp < 0:
+		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+		x = constant.BinaryOp(x, token.QUO, d)
+	}
+	return x
+}
+
+func (r *importReader) ident() string {
+	return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+	name := r.string()
+	pkg := r.pkg()
+	return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+	delta := r.int64()
+	if delta != deltaNewFile {
+		r.prevLine += delta
+	} else if l := r.int64(); l == -1 {
+		r.prevLine += deltaNewFile
+	} else {
+		r.prevFile = r.string()
+		r.prevLine = l
+	}
+
+	if r.prevFile == "" && r.prevLine == 0 {
+		return token.NoPos
+	}
+
+	return r.p.fake.pos(r.prevFile, int(r.prevLine))
+}
+
+func (r *importReader) typ() types.Type {
+	return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+	_, ok := t.(*types.Interface)
+	return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string      { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) types.Type {
+	switch k := r.kind(); k {
+	default:
+		errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+		return nil
+
+	case definedType:
+		pkg, name := r.qualifiedIdent()
+		r.p.doDecl(pkg, name)
+		return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+	case pointerType:
+		return types.NewPointer(r.typ())
+	case sliceType:
+		return types.NewSlice(r.typ())
+	case arrayType:
+		n := r.uint64()
+		return types.NewArray(r.typ(), int64(n))
+	case chanType:
+		dir := chanDir(int(r.uint64()))
+		return types.NewChan(dir, r.typ())
+	case mapType:
+		return types.NewMap(r.typ(), r.typ())
+	case signatureType:
+		r.currPkg = r.pkg()
+		return r.signature(nil)
+
+	case structType:
+		r.currPkg = r.pkg()
+
+		fields := make([]*types.Var, r.uint64())
+		tags := make([]string, len(fields))
+		for i := range fields {
+			fpos := r.pos()
+			fname := r.ident()
+			ftyp := r.typ()
+			emb := r.bool()
+			tag := r.string()
+
+			fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+			tags[i] = tag
+		}
+		return types.NewStruct(fields, tags)
+
+	case interfaceType:
+		r.currPkg = r.pkg()
+
+		embeddeds := make([]*types.Named, r.uint64())
+		for i := range embeddeds {
+			_ = r.pos()
+			embeddeds[i] = r.typ().(*types.Named)
+		}
+
+		methods := make([]*types.Func, r.uint64())
+		for i := range methods {
+			mpos := r.pos()
+			mname := r.ident()
+
+			// TODO(mdempsky): Matches bimport.go, but I
+			// don't agree with this.
+			var recv *types.Var
+			if base != nil {
+				recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+			}
+
+			msig := r.signature(recv)
+			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
+		}
+
+		typ := types.NewInterface(methods, embeddeds)
+		r.p.interfaceList = append(r.p.interfaceList, typ)
+		return typ
+	}
+}
+
+func (r *importReader) kind() itag {
+	return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Var) *types.Signature {
+	params := r.paramList()
+	results := r.paramList()
+	variadic := params.Len() > 0 && r.bool()
+	return types.NewSignature(recv, params, results, variadic)
+}
+
+func (r *importReader) paramList() *types.Tuple {
+	xs := make([]*types.Var, r.uint64())
+	for i := range xs {
+		xs[i] = r.param()
+	}
+	return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+	pos := r.pos()
+	name := r.ident()
+	typ := r.typ()
+	return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+	return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+	n, err := binary.ReadVarint(&r.declReader)
+	if err != nil {
+		errorf("readVarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) uint64() uint64 {
+	n, err := binary.ReadUvarint(&r.declReader)
+	if err != nil {
+		errorf("readUvarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) byte() byte {
+	x, err := r.declReader.ReadByte()
+	if err != nil {
+		errorf("declReader.ReadByte: %v", err)
+	}
+	return x
+}
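The mpint reader above packs small signed values into a single zig-zag-encoded byte; a restated decoder for that branch, with worked values (illustrative only):

    // Sketch of the small-value branch of mpint for signed types: even bytes
    // are non-negative, odd bytes are the complement of the shifted value.
    func decodeSmallSigned(n byte) int64 {
        v := int64(n) >> 1
        if n&1 != 0 {
            v = ^v // odd byte => negative value
        }
        return v
    }
    // decodeSmallSigned(4) == 2, decodeSmallSigned(5) == -3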
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go b/vendor/golang.org/x/tools/go/internal/gcimporter/isAlias18.go
similarity index 100%
rename from vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/isAlias18.go
diff --git a/vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go b/vendor/golang.org/x/tools/go/internal/gcimporter/isAlias19.go
similarity index 100%
rename from vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go
rename to vendor/golang.org/x/tools/go/internal/gcimporter/isAlias19.go
diff --git a/vendor/golang.org/x/tools/present/doc.go b/vendor/golang.org/x/tools/present/doc.go
index e7f595e..3422294 100644
--- a/vendor/golang.org/x/tools/present/doc.go
+++ b/vendor/golang.org/x/tools/present/doc.go
@@ -117,7 +117,7 @@
 section of the file to display. The address syntax is similar in
 its simplest form to that of ed, but comes from sam and is more
 general. See
-	http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
+	https://plan9.io/sys/doc/sam/sam.html Table II
 for full details. The displayed block is always rounded out to a
 full line at both ends.
 
diff --git a/vendor/golang.org/x/tools/present/parse.go b/vendor/golang.org/x/tools/present/parse.go
index d7289db..dd0f00b 100644
--- a/vendor/golang.org/x/tools/present/parse.go
+++ b/vendor/golang.org/x/tools/present/parse.go
@@ -168,8 +168,17 @@
 	return execTemplate(t, e.TemplateName(), data)
 }
 
+// pageNum derives a page number from a section.
+func pageNum(s Section, offset int) int {
+	if len(s.Number) == 0 {
+		return offset
+	}
+	return s.Number[0] + offset
+}
+
 func init() {
 	funcs["elem"] = renderElem
+	funcs["pagenum"] = pageNum
 }
 
 // execTemplate is a helper to execute a template and return the output as a
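The new "pagenum" template function exposes pageNum to present templates; a behavior sketch with hypothetical section values (assumes the fmt import, within the present package):

    // Sketch only: pageNum offsets the first section number, or falls back
    // to the offset itself for an unnumbered (title) section.
    s := Section{Number: []int{3, 2}}
    fmt.Println(pageNum(s, 1)) // 4
    var title Section              // empty Number slice
    fmt.Println(pageNum(title, 1)) // 1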
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
new file mode 100644
index 0000000..2f481a3
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
@@ -0,0 +1,337 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/iam/v1/iam_policy.proto
+
+/*
+Package iam is a generated protocol buffer package.
+
+It is generated from these files:
+	google/iam/v1/iam_policy.proto
+	google/iam/v1/policy.proto
+
+It has these top-level messages:
+	SetIamPolicyRequest
+	GetIamPolicyRequest
+	TestIamPermissionsRequest
+	TestIamPermissionsResponse
+	Policy
+	Binding
+	PolicyDelta
+	BindingDelta
+*/
+package iam
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Request message for `SetIamPolicy` method.
+type SetIamPolicyRequest struct {
+	// REQUIRED: The resource for which the policy is being specified.
+	// `resource` is usually specified as a path. For example, a Project
+	// resource is specified as `projects/{project}`.
+	Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
+	// REQUIRED: The complete policy to be applied to the `resource`. The size of
+	// the policy is limited to a few 10s of KB. An empty policy is a
+	// valid policy but certain Cloud Platform services (such as Projects)
+	// might reject them.
+	Policy *Policy `protobuf:"bytes,2,opt,name=policy" json:"policy,omitempty"`
+}
+
+func (m *SetIamPolicyRequest) Reset()                    { *m = SetIamPolicyRequest{} }
+func (m *SetIamPolicyRequest) String() string            { return proto.CompactTextString(m) }
+func (*SetIamPolicyRequest) ProtoMessage()               {}
+func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *SetIamPolicyRequest) GetResource() string {
+	if m != nil {
+		return m.Resource
+	}
+	return ""
+}
+
+func (m *SetIamPolicyRequest) GetPolicy() *Policy {
+	if m != nil {
+		return m.Policy
+	}
+	return nil
+}
+
+// Request message for `GetIamPolicy` method.
+type GetIamPolicyRequest struct {
+	// REQUIRED: The resource for which the policy is being requested.
+	// `resource` is usually specified as a path. For example, a Project
+	// resource is specified as `projects/{project}`.
+	Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
+}
+
+func (m *GetIamPolicyRequest) Reset()                    { *m = GetIamPolicyRequest{} }
+func (m *GetIamPolicyRequest) String() string            { return proto.CompactTextString(m) }
+func (*GetIamPolicyRequest) ProtoMessage()               {}
+func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *GetIamPolicyRequest) GetResource() string {
+	if m != nil {
+		return m.Resource
+	}
+	return ""
+}
+
+// Request message for `TestIamPermissions` method.
+type TestIamPermissionsRequest struct {
+	// REQUIRED: The resource for which the policy detail is being requested.
+	// `resource` is usually specified as a path. For example, a Project
+	// resource is specified as `projects/{project}`.
+	Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
+	// The set of permissions to check for the `resource`. Permissions with
+	// wildcards (such as '*' or 'storage.*') are not allowed. For more
+	// information see
+	// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
+	Permissions []string `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"`
+}
+
+func (m *TestIamPermissionsRequest) Reset()                    { *m = TestIamPermissionsRequest{} }
+func (m *TestIamPermissionsRequest) String() string            { return proto.CompactTextString(m) }
+func (*TestIamPermissionsRequest) ProtoMessage()               {}
+func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *TestIamPermissionsRequest) GetResource() string {
+	if m != nil {
+		return m.Resource
+	}
+	return ""
+}
+
+func (m *TestIamPermissionsRequest) GetPermissions() []string {
+	if m != nil {
+		return m.Permissions
+	}
+	return nil
+}
+
+// Response message for `TestIamPermissions` method.
+type TestIamPermissionsResponse struct {
+	// A subset of `TestPermissionsRequest.permissions` that the caller is
+	// allowed.
+	Permissions []string `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"`
+}
+
+func (m *TestIamPermissionsResponse) Reset()                    { *m = TestIamPermissionsResponse{} }
+func (m *TestIamPermissionsResponse) String() string            { return proto.CompactTextString(m) }
+func (*TestIamPermissionsResponse) ProtoMessage()               {}
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *TestIamPermissionsResponse) GetPermissions() []string {
+	if m != nil {
+		return m.Permissions
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest")
+	proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest")
+	proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest")
+	proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for IAMPolicy service
+
+type IAMPolicyClient interface {
+	// Sets the access control policy on the specified resource. Replaces any
+	// existing policy.
+	SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
+	// Gets the access control policy for a resource.
+	// Returns an empty policy if the resource exists and does not have a policy
+	// set.
+	GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
+	// Returns permissions that a caller has on the specified resource.
+	// If the resource does not exist, this will return an empty set of
+	// permissions, not a NOT_FOUND error.
+	TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
+}
+
+type iAMPolicyClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient {
+	return &iAMPolicyClient{cc}
+}
+
+func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
+	out := new(Policy)
+	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
+	out := new(Policy)
+	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
+	out := new(TestIamPermissionsResponse)
+	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for IAMPolicy service
+
+type IAMPolicyServer interface {
+	// Sets the access control policy on the specified resource. Replaces any
+	// existing policy.
+	SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
+	// Gets the access control policy for a resource.
+	// Returns an empty policy if the resource exists and does not have a policy
+	// set.
+	GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
+	// Returns permissions that a caller has on the specified resource.
+	// If the resource does not exist, this will return an empty set of
+	// permissions, not a NOT_FOUND error.
+	TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
+}
+
+func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
+	s.RegisterService(&_IAMPolicy_serviceDesc, srv)
+}
+
+func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SetIamPolicyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetIamPolicyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TestIamPermissionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.iam.v1.IAMPolicy",
+	HandlerType: (*IAMPolicyServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "SetIamPolicy",
+			Handler:    _IAMPolicy_SetIamPolicy_Handler,
+		},
+		{
+			MethodName: "GetIamPolicy",
+			Handler:    _IAMPolicy_GetIamPolicy_Handler,
+		},
+		{
+			MethodName: "TestIamPermissions",
+			Handler:    _IAMPolicy_TestIamPermissions_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "google/iam/v1/iam_policy.proto",
+}
+
+func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 396 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcf, 0x4a, 0xe3, 0x40,
+	0x18, 0x67, 0x52, 0x28, 0xdb, 0xe9, 0xee, 0xc2, 0xa6, 0x2c, 0xd4, 0x20, 0x25, 0x8c, 0x1e, 0xd2,
+	0x80, 0x13, 0x53, 0x6f, 0x15, 0x05, 0xeb, 0x21, 0xf4, 0x20, 0x94, 0x2a, 0x82, 0x5e, 0x74, 0xac,
+	0x43, 0x18, 0x48, 0x32, 0x31, 0x33, 0x2d, 0x88, 0x78, 0xf1, 0x15, 0xf4, 0xe4, 0x23, 0xf8, 0x3a,
+	0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x92, 0x89, 0x35, 0x6d, 0xaa, 0x54, 0xf0, 0x54, 0x3a, 0xf3, 0xfb,
+	0xf7, 0xfd, 0xbe, 0x0c, 0x6c, 0xf9, 0x9c, 0xfb, 0x01, 0x75, 0x18, 0x09, 0x9d, 0x89, 0x9b, 0xfe,
+	0x9c, 0xc5, 0x3c, 0x60, 0xa3, 0x6b, 0x1c, 0x27, 0x5c, 0x72, 0xfd, 0x8f, 0xba, 0xc7, 0x8c, 0x84,
+	0x78, 0xe2, 0x1a, 0xab, 0x39, 0x9c, 0xc4, 0xcc, 0x21, 0x51, 0xc4, 0x25, 0x91, 0x8c, 0x47, 0x42,
+	0x81, 0x0d, 0x63, 0x56, 0xac, 0x28, 0x84, 0xce, 0x61, 0xe3, 0x90, 0xca, 0x3e, 0x09, 0x07, 0xd9,
+	0xe9, 0x90, 0x5e, 0x8d, 0xa9, 0x90, 0xba, 0x01, 0x7f, 0x25, 0x54, 0xf0, 0x71, 0x32, 0xa2, 0x4d,
+	0x60, 0x02, 0xab, 0x36, 0x9c, 0xfe, 0xd7, 0x37, 0x60, 0x55, 0x49, 0x34, 0x35, 0x13, 0x58, 0xf5,
+	0xce, 0x7f, 0x3c, 0x13, 0x06, 0xe7, 0x4a, 0x39, 0x08, 0xb9, 0xb0, 0xe1, 0x7d, 0xcf, 0x01, 0x9d,
+	0xc0, 0x95, 0x23, 0x2a, 0x32, 0x0e, 0x4d, 0x42, 0x26, 0x44, 0x3a, 0xcc, 0x32, 0xd1, 0x4c, 0x58,
+	0x8f, 0x3f, 0x18, 0x4d, 0xcd, 0xac, 0x58, 0xb5, 0x61, 0xf1, 0x08, 0xed, 0x42, 0x63, 0x91, 0xb4,
+	0x88, 0x79, 0x24, 0x4a, 0x7c, 0x50, 0xe2, 0x77, 0x1e, 0x2a, 0xb0, 0xd6, 0xdf, 0x3b, 0x50, 0xb3,
+	0xe8, 0x12, 0xfe, 0x2e, 0xb6, 0xa7, 0xa3, 0xb9, 0x2a, 0x16, 0x54, 0x6b, 0x2c, 0xae, 0x0b, 0xb5,
+	0xef, 0x9e, 0x5f, 0xee, 0xb5, 0x35, 0xd4, 0x4a, 0x57, 0x74, 0xf3, 0x3e, 0xd1, 0x8e, 0x6d, 0xdf,
+	0x76, 0x45, 0x41, 0xa5, 0x0b, 0xec, 0xd4, 0xd5, 0xfb, 0xca, 0xd5, 0xfb, 0x11, 0x57, 0x7f, 0xce,
+	0xf5, 0x11, 0x40, 0xbd, 0x5c, 0x9d, 0x6e, 0xcd, 0x09, 0x7f, 0xba, 0x38, 0xa3, 0xbd, 0x04, 0x52,
+	0xed, 0x01, 0x39, 0x59, 0xac, 0x36, 0x5a, 0x2f, 0xc7, 0x92, 0x25, 0x56, 0x17, 0xd8, 0xbd, 0x18,
+	0xfe, 0x1b, 0xf1, 0x70, 0xd6, 0xa0, 0xf7, 0x77, 0x9a, 0x7f, 0x90, 0x7e, 0xeb, 0x03, 0x70, 0xba,
+	0x99, 0x03, 0x7c, 0x1e, 0x90, 0xc8, 0xc7, 0x3c, 0xf1, 0x1d, 0x9f, 0x46, 0xd9, 0x4b, 0x70, 0xd4,
+	0x15, 0x89, 0x99, 0xc8, 0x1f, 0xca, 0x36, 0x23, 0xe1, 0x2b, 0x00, 0x4f, 0x5a, 0xc3, 0x53, 0xac,
+	0xfd, 0x80, 0x8f, 0x2f, 0x71, 0x9f, 0x84, 0xf8, 0xd8, 0xbd, 0xa8, 0x66, 0xac, 0xad, 0xb7, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0x6c, 0x3a, 0x2b, 0x4d, 0xaa, 0x03, 0x00, 0x00,
+}
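A minimal, hypothetical sketch of how the generated IAMPolicy client above might be called; the dial target, resource name, and permission string are assumptions, and real calls would also supply transport and per-RPC credentials, omitted here for brevity.

	package main

	import (
		"context"
		"fmt"
		"log"

		iam "google.golang.org/genproto/googleapis/iam/v1"
		"google.golang.org/grpc"
	)

	func main() {
		// Assumed endpoint; credentials are intentionally left out of this sketch.
		conn, err := grpc.Dial("pubsub.googleapis.com:443", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		client := iam.NewIAMPolicyClient(conn)
		// Ask which of the listed permissions the caller holds on a hypothetical topic.
		resp, err := client.TestIamPermissions(context.Background(), &iam.TestIamPermissionsRequest{
			Resource:    "projects/my-project/topics/my-topic",
			Permissions: []string{"pubsub.topics.publish"},
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("granted:", resp.Permissions)
	}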
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
new file mode 100644
index 0000000..a22ae91
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/iam/v1/policy.proto
+
+package iam
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The type of action performed on a Binding in a policy.
+type BindingDelta_Action int32
+
+const (
+	// Unspecified.
+	BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0
+	// Addition of a Binding.
+	BindingDelta_ADD BindingDelta_Action = 1
+	// Removal of a Binding.
+	BindingDelta_REMOVE BindingDelta_Action = 2
+)
+
+var BindingDelta_Action_name = map[int32]string{
+	0: "ACTION_UNSPECIFIED",
+	1: "ADD",
+	2: "REMOVE",
+}
+var BindingDelta_Action_value = map[string]int32{
+	"ACTION_UNSPECIFIED": 0,
+	"ADD":                1,
+	"REMOVE":             2,
+}
+
+func (x BindingDelta_Action) String() string {
+	return proto.EnumName(BindingDelta_Action_name, int32(x))
+}
+func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} }
+
+// Defines an Identity and Access Management (IAM) policy. It is used to
+// specify access control policies for Cloud Platform resources.
+//
+//
+// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of
+// `members` to a `role`, where the members can be user accounts, Google groups,
+// Google domains, and service accounts. A `role` is a named list of permissions
+// defined by IAM.
+//
+// **Example**
+//
+//     {
+//       "bindings": [
+//         {
+//           "role": "roles/owner",
+//           "members": [
+//             "user:mike@example.com",
+//             "group:admins@example.com",
+//             "domain:google.com",
+//             "serviceAccount:my-other-app@appspot.gserviceaccount.com",
+//           ]
+//         },
+//         {
+//           "role": "roles/viewer",
+//           "members": ["user:sean@example.com"]
+//         }
+//       ]
+//     }
+//
+// For a description of IAM and its features, see the
+// [IAM developer's guide](https://cloud.google.com/iam).
+type Policy struct {
+	// Version of the `Policy`. The default version is 0.
+	Version int32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+	// Associates a list of `members` to a `role`.
+	// Multiple `bindings` must not be specified for the same `role`.
+	// `bindings` with no members will result in an error.
+	Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings" json:"bindings,omitempty"`
+	// `etag` is used for optimistic concurrency control as a way to help
+	// prevent simultaneous updates of a policy from overwriting each other.
+	// It is strongly suggested that systems make use of the `etag` in the
+	// read-modify-write cycle to perform policy updates in order to avoid race
+	// conditions: An `etag` is returned in the response to `getIamPolicy`, and
+	// systems are expected to put that etag in the request to `setIamPolicy` to
+	// ensure that their change will be applied to the same version of the policy.
+	//
+	// If no `etag` is provided in the call to `setIamPolicy`, then the existing
+	// policy is overwritten blindly.
+	Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
+}
+
+func (m *Policy) Reset()                    { *m = Policy{} }
+func (m *Policy) String() string            { return proto.CompactTextString(m) }
+func (*Policy) ProtoMessage()               {}
+func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *Policy) GetVersion() int32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *Policy) GetBindings() []*Binding {
+	if m != nil {
+		return m.Bindings
+	}
+	return nil
+}
+
+func (m *Policy) GetEtag() []byte {
+	if m != nil {
+		return m.Etag
+	}
+	return nil
+}
+
+// Associates `members` with a `role`.
+type Binding struct {
+	// Role that is assigned to `members`.
+	// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+	// Required
+	Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"`
+	// Specifies the identities requesting access for a Cloud Platform resource.
+	// `members` can have the following values:
+	//
+	// * `allUsers`: A special identifier that represents anyone who is
+	//    on the internet; with or without a Google account.
+	//
+	// * `allAuthenticatedUsers`: A special identifier that represents anyone
+	//    who is authenticated with a Google account or a service account.
+	//
+	// * `user:{emailid}`: An email address that represents a specific Google
+	//    account. For example, `alice@gmail.com` or `joe@example.com`.
+	//
+	//
+	// * `serviceAccount:{emailid}`: An email address that represents a service
+	//    account. For example, `my-other-app@appspot.gserviceaccount.com`.
+	//
+	// * `group:{emailid}`: An email address that represents a Google group.
+	//    For example, `admins@example.com`.
+	//
+	// * `domain:{domain}`: A Google Apps domain name that represents all the
+	//    users of that domain. For example, `google.com` or `example.com`.
+	//
+	//
+	Members []string `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *Binding) Reset()                    { *m = Binding{} }
+func (m *Binding) String() string            { return proto.CompactTextString(m) }
+func (*Binding) ProtoMessage()               {}
+func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *Binding) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *Binding) GetMembers() []string {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+// The difference delta between two policies.
+type PolicyDelta struct {
+	// The delta for Bindings between two policies.
+	BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas" json:"binding_deltas,omitempty"`
+}
+
+func (m *PolicyDelta) Reset()                    { *m = PolicyDelta{} }
+func (m *PolicyDelta) String() string            { return proto.CompactTextString(m) }
+func (*PolicyDelta) ProtoMessage()               {}
+func (*PolicyDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
+	if m != nil {
+		return m.BindingDeltas
+	}
+	return nil
+}
+
+// One delta entry for Binding. Each individual change (only one member in each
+// entry) to a binding will be a separate entry.
+type BindingDelta struct {
+	// The action that was performed on a Binding.
+	// Required
+	Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
+	// Role that is assigned to `members`.
+	// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+	// Required
+	Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"`
+	// A single identity requesting access for a Cloud Platform resource.
+	// Follows the same format of Binding.members.
+	// Required
+	Member string `protobuf:"bytes,3,opt,name=member" json:"member,omitempty"`
+}
+
+func (m *BindingDelta) Reset()                    { *m = BindingDelta{} }
+func (m *BindingDelta) String() string            { return proto.CompactTextString(m) }
+func (*BindingDelta) ProtoMessage()               {}
+func (*BindingDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+func (m *BindingDelta) GetAction() BindingDelta_Action {
+	if m != nil {
+		return m.Action
+	}
+	return BindingDelta_ACTION_UNSPECIFIED
+}
+
+func (m *BindingDelta) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *BindingDelta) GetMember() string {
+	if m != nil {
+		return m.Member
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy")
+	proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding")
+	proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta")
+	proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta")
+	proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value)
+}
+
+func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor1) }
+
+var fileDescriptor1 = []byte{
+	// 387 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x8f, 0xd3, 0x30,
+	0x10, 0xc5, 0xed, 0x92, 0xd2, 0xd9, 0x0f, 0x15, 0x23, 0x55, 0xd1, 0xc2, 0xa1, 0xca, 0x29, 0x27,
+	0x87, 0x16, 0x21, 0x24, 0x38, 0x35, 0x4d, 0x40, 0x39, 0xb0, 0x1b, 0x0c, 0xec, 0x81, 0xcb, 0xca,
+	0x69, 0x2d, 0xcb, 0x28, 0xb6, 0xa3, 0x24, 0x54, 0xe2, 0x2f, 0x21, 0xf1, 0xff, 0x38, 0xa2, 0xd8,
+	0xee, 0xaa, 0x95, 0x10, 0xb7, 0x79, 0x79, 0xef, 0x65, 0xde, 0xcc, 0x18, 0xae, 0x85, 0x31, 0xa2,
+	0xe6, 0x89, 0x64, 0x2a, 0xd9, 0x2f, 0x93, 0xc6, 0xd4, 0x72, 0xfb, 0x93, 0x34, 0xad, 0xe9, 0x0d,
+	0xbe, 0x74, 0x1c, 0x91, 0x4c, 0x91, 0xfd, 0xf2, 0xfa, 0x85, 0x97, 0xb2, 0x46, 0x26, 0x4c, 0x6b,
+	0xd3, 0xb3, 0x5e, 0x1a, 0xdd, 0x39, 0x71, 0xf4, 0x1d, 0x82, 0xd2, 0x9a, 0x71, 0x08, 0x93, 0x3d,
+	0x6f, 0x3b, 0x69, 0x74, 0x88, 0x16, 0x28, 0x7e, 0x4c, 0x0f, 0x10, 0xaf, 0xe0, 0x49, 0x25, 0xf5,
+	0x4e, 0x6a, 0xd1, 0x85, 0x67, 0x8b, 0x71, 0x7c, 0xbe, 0x9a, 0x93, 0x93, 0x1e, 0x24, 0x75, 0x34,
+	0x7d, 0xd0, 0x61, 0x0c, 0x67, 0xbc, 0x67, 0x22, 0x1c, 0x2f, 0x50, 0x7c, 0x41, 0x6d, 0x1d, 0xbd,
+	0x81, 0x89, 0x17, 0x0e, 0x74, 0x6b, 0x6a, 0x6e, 0x3b, 0x4d, 0xa9, 0xad, 0x87, 0x00, 0x8a, 0xab,
+	0x8a, 0xb7, 0x5d, 0x38, 0x5a, 0x8c, 0xe3, 0x29, 0x3d, 0xc0, 0xe8, 0x13, 0x9c, 0xbb, 0x90, 0x19,
+	0xaf, 0x7b, 0x86, 0x53, 0xb8, 0xf2, 0x7d, 0xee, 0x77, 0xc3, 0x87, 0x2e, 0x44, 0x36, 0xd5, 0xf3,
+	0x7f, 0xa7, 0xb2, 0x26, 0x7a, 0x59, 0x1d, 0xa1, 0x2e, 0xfa, 0x8d, 0xe0, 0xe2, 0x98, 0xc7, 0x6f,
+	0x21, 0x60, 0xdb, 0xfe, 0x30, 0xfd, 0xd5, 0x2a, 0xfa, 0xcf, 0xcf, 0xc8, 0xda, 0x2a, 0xa9, 0x77,
+	0x3c, 0x4c, 0x33, 0x3a, 0x9a, 0x66, 0x0e, 0x81, 0x8b, 0x6f, 0x57, 0x30, 0xa5, 0x1e, 0x45, 0xaf,
+	0x21, 0x70, 0x6e, 0x3c, 0x07, 0xbc, 0xde, 0x7c, 0x29, 0x6e, 0x6f, 0xee, 0xbf, 0xde, 0x7c, 0x2e,
+	0xf3, 0x4d, 0xf1, 0xbe, 0xc8, 0xb3, 0xd9, 0x23, 0x3c, 0x81, 0xf1, 0x3a, 0xcb, 0x66, 0x08, 0x03,
+	0x04, 0x34, 0xff, 0x78, 0x7b, 0x97, 0xcf, 0x46, 0xa9, 0x82, 0xa7, 0x5b, 0xa3, 0x4e, 0x33, 0xa5,
+	0x7e, 0x2b, 0xe5, 0x70, 0xc9, 0x12, 0x7d, 0x7b, 0xe9, 0x59, 0x61, 0x6a, 0xa6, 0x05, 0x31, 0xad,
+	0x48, 0x04, 0xd7, 0xf6, 0xce, 0x89, 0xa3, 0x58, 0x23, 0x3b, 0xff, 0x66, 0xde, 0x49, 0xa6, 0xfe,
+	0x20, 0xf4, 0x6b, 0xf4, 0xec, 0x83, 0x73, 0x6d, 0x6a, 0xf3, 0x63, 0x47, 0x0a, 0xa6, 0xc8, 0xdd,
+	0xb2, 0x0a, 0xac, 0xeb, 0xd5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x4a, 0x85, 0x10, 0x68,
+	0x02, 0x00, 0x00,
+}
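A minimal sketch of building and encoding the Policy/Binding messages defined above; the role and member values are simply the examples from the doc comment, and proto.Marshal comes from the already-vendored github.com/golang/protobuf/proto package.

	package main

	import (
		"fmt"
		"log"

		"github.com/golang/protobuf/proto"
		iam "google.golang.org/genproto/googleapis/iam/v1"
	)

	func main() {
		// A policy granting the viewer role to a single user.
		p := &iam.Policy{
			Bindings: []*iam.Binding{{
				Role:    "roles/viewer",
				Members: []string{"user:sean@example.com"},
			}},
		}
		data, err := proto.Marshal(p) // wire-format encoding of the message
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("policy encodes to %d bytes\n", len(data))
	}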
diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go
new file mode 100644
index 0000000..ab4768d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go
@@ -0,0 +1,2523 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/pubsub/v1/pubsub.proto
+
+/*
+Package pubsub is a generated protocol buffer package.
+
+It is generated from these files:
+	google/pubsub/v1/pubsub.proto
+
+It has these top-level messages:
+	Topic
+	PubsubMessage
+	GetTopicRequest
+	UpdateTopicRequest
+	PublishRequest
+	PublishResponse
+	ListTopicsRequest
+	ListTopicsResponse
+	ListTopicSubscriptionsRequest
+	ListTopicSubscriptionsResponse
+	DeleteTopicRequest
+	Subscription
+	PushConfig
+	ReceivedMessage
+	GetSubscriptionRequest
+	UpdateSubscriptionRequest
+	ListSubscriptionsRequest
+	ListSubscriptionsResponse
+	DeleteSubscriptionRequest
+	ModifyPushConfigRequest
+	PullRequest
+	PullResponse
+	ModifyAckDeadlineRequest
+	AcknowledgeRequest
+	StreamingPullRequest
+	StreamingPullResponse
+	CreateSnapshotRequest
+	UpdateSnapshotRequest
+	Snapshot
+	ListSnapshotsRequest
+	ListSnapshotsResponse
+	DeleteSnapshotRequest
+	SeekRequest
+	SeekResponse
+*/
+package pubsub
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf3 "google.golang.org/genproto/protobuf/field_mask"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A topic resource.
+type Topic struct {
+	// The name of the topic. It must have the format
+	// `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`). It must be between 3 and 255 characters in length, and it
+	// must not start with `"goog"`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// User labels.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Topic) Reset()                    { *m = Topic{} }
+func (m *Topic) String() string            { return proto.CompactTextString(m) }
+func (*Topic) ProtoMessage()               {}
+func (*Topic) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Topic) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Topic) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// A message's data and its attributes. The message payload must not be empty;
+// it must contain either a non-empty data field, or at least one attribute.
+type PubsubMessage struct {
+	// The message payload.
+	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	// Optional attributes for this message.
+	Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// ID of this message, assigned by the server when the message is published.
+	// Guaranteed to be unique within the topic. This value may be read by a
+	// subscriber that receives a `PubsubMessage` via a `Pull` call or a push
+	// delivery. It must not be populated by the publisher in a `Publish` call.
+	MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId" json:"message_id,omitempty"`
+	// The time at which the message was published, populated by the server when
+	// it receives the `Publish` call. It must not be populated by the
+	// publisher in a `Publish` call.
+	PublishTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=publish_time,json=publishTime" json:"publish_time,omitempty"`
+}
+
+func (m *PubsubMessage) Reset()                    { *m = PubsubMessage{} }
+func (m *PubsubMessage) String() string            { return proto.CompactTextString(m) }
+func (*PubsubMessage) ProtoMessage()               {}
+func (*PubsubMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *PubsubMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *PubsubMessage) GetAttributes() map[string]string {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+func (m *PubsubMessage) GetMessageId() string {
+	if m != nil {
+		return m.MessageId
+	}
+	return ""
+}
+
+func (m *PubsubMessage) GetPublishTime() *google_protobuf4.Timestamp {
+	if m != nil {
+		return m.PublishTime
+	}
+	return nil
+}
+
+// Request for the GetTopic method.
+type GetTopicRequest struct {
+	// The name of the topic to get.
+	// Format is `projects/{project}/topics/{topic}`.
+	Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
+}
+
+func (m *GetTopicRequest) Reset()                    { *m = GetTopicRequest{} }
+func (m *GetTopicRequest) String() string            { return proto.CompactTextString(m) }
+func (*GetTopicRequest) ProtoMessage()               {}
+func (*GetTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *GetTopicRequest) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+// Request for the UpdateTopic method.
+type UpdateTopicRequest struct {
+	// The topic to update.
+	Topic *Topic `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
+	// Indicates which fields in the provided topic to update.
+	// Must be specified and non-empty.
+	UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateTopicRequest) Reset()                    { *m = UpdateTopicRequest{} }
+func (m *UpdateTopicRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateTopicRequest) ProtoMessage()               {}
+func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *UpdateTopicRequest) GetTopic() *Topic {
+	if m != nil {
+		return m.Topic
+	}
+	return nil
+}
+
+func (m *UpdateTopicRequest) GetUpdateMask() *google_protobuf3.FieldMask {
+	if m != nil {
+		return m.UpdateMask
+	}
+	return nil
+}
+
+// Request for the Publish method.
+type PublishRequest struct {
+	// The messages in the request will be published on this topic.
+	// Format is `projects/{project}/topics/{topic}`.
+	Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
+	// The messages to publish.
+	Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages" json:"messages,omitempty"`
+}
+
+func (m *PublishRequest) Reset()                    { *m = PublishRequest{} }
+func (m *PublishRequest) String() string            { return proto.CompactTextString(m) }
+func (*PublishRequest) ProtoMessage()               {}
+func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *PublishRequest) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+func (m *PublishRequest) GetMessages() []*PubsubMessage {
+	if m != nil {
+		return m.Messages
+	}
+	return nil
+}
+
+// Response for the `Publish` method.
+type PublishResponse struct {
+	// The server-assigned ID of each published message, in the same order as
+	// the messages in the request. IDs are guaranteed to be unique within
+	// the topic.
+	MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds" json:"message_ids,omitempty"`
+}
+
+func (m *PublishResponse) Reset()                    { *m = PublishResponse{} }
+func (m *PublishResponse) String() string            { return proto.CompactTextString(m) }
+func (*PublishResponse) ProtoMessage()               {}
+func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *PublishResponse) GetMessageIds() []string {
+	if m != nil {
+		return m.MessageIds
+	}
+	return nil
+}
+
+// Request for the `ListTopics` method.
+type ListTopicsRequest struct {
+	// The name of the cloud project that topics belong to.
+	// Format is `projects/{project}`.
+	Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
+	// Maximum number of topics to return.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// The value returned by the last `ListTopicsResponse`; indicates that this is
+	// a continuation of a prior `ListTopics` call, and that the system should
+	// return the next page of data.
+	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListTopicsRequest) Reset()                    { *m = ListTopicsRequest{} }
+func (m *ListTopicsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListTopicsRequest) ProtoMessage()               {}
+func (*ListTopicsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *ListTopicsRequest) GetProject() string {
+	if m != nil {
+		return m.Project
+	}
+	return ""
+}
+
+func (m *ListTopicsRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListTopicsRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+// Response for the `ListTopics` method.
+type ListTopicsResponse struct {
+	// The resulting topics.
+	Topics []*Topic `protobuf:"bytes,1,rep,name=topics" json:"topics,omitempty"`
+	// If not empty, indicates that there may be more topics that match the
+	// request; this value should be passed in a new `ListTopicsRequest`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListTopicsResponse) Reset()                    { *m = ListTopicsResponse{} }
+func (m *ListTopicsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListTopicsResponse) ProtoMessage()               {}
+func (*ListTopicsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ListTopicsResponse) GetTopics() []*Topic {
+	if m != nil {
+		return m.Topics
+	}
+	return nil
+}
+
+func (m *ListTopicsResponse) GetNextPageToken() string {
+	if m != nil {
+		return m.NextPageToken
+	}
+	return ""
+}
+
+// Request for the `ListTopicSubscriptions` method.
+type ListTopicSubscriptionsRequest struct {
+	// The name of the topic that subscriptions are attached to.
+	// Format is `projects/{project}/topics/{topic}`.
+	Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
+	// Maximum number of subscription names to return.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// The value returned by the last `ListTopicSubscriptionsResponse`; indicates
+	// that this is a continuation of a prior `ListTopicSubscriptions` call, and
+	// that the system should return the next page of data.
+	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListTopicSubscriptionsRequest) Reset()                    { *m = ListTopicSubscriptionsRequest{} }
+func (m *ListTopicSubscriptionsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListTopicSubscriptionsRequest) ProtoMessage()               {}
+func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *ListTopicSubscriptionsRequest) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+func (m *ListTopicSubscriptionsRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListTopicSubscriptionsRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+// Response for the `ListTopicSubscriptions` method.
+type ListTopicSubscriptionsResponse struct {
+	// The names of the subscriptions that match the request.
+	Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"`
+	// If not empty, indicates that there may be more subscriptions that match
+	// the request; this value should be passed in a new
+	// `ListTopicSubscriptionsRequest` to get more subscriptions.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListTopicSubscriptionsResponse) Reset()                    { *m = ListTopicSubscriptionsResponse{} }
+func (m *ListTopicSubscriptionsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListTopicSubscriptionsResponse) ProtoMessage()               {}
+func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *ListTopicSubscriptionsResponse) GetSubscriptions() []string {
+	if m != nil {
+		return m.Subscriptions
+	}
+	return nil
+}
+
+func (m *ListTopicSubscriptionsResponse) GetNextPageToken() string {
+	if m != nil {
+		return m.NextPageToken
+	}
+	return ""
+}
+
+// Request for the `DeleteTopic` method.
+type DeleteTopicRequest struct {
+	// Name of the topic to delete.
+	// Format is `projects/{project}/topics/{topic}`.
+	Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
+}
+
+func (m *DeleteTopicRequest) Reset()                    { *m = DeleteTopicRequest{} }
+func (m *DeleteTopicRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteTopicRequest) ProtoMessage()               {}
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *DeleteTopicRequest) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+// A subscription resource.
+type Subscription struct {
+	// The name of the subscription. It must have the format
+	// `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must
+	// start with a letter, and contain only letters (`[A-Za-z]`), numbers
+	// (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),
+	// plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters
+	// in length, and it must not start with `"goog"`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The name of the topic from which this subscription is receiving messages.
+	// Format is `projects/{project}/topics/{topic}`.
+	// The value of this field will be `_deleted-topic_` if the topic has been
+	// deleted.
+	Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+	// If push delivery is used with this subscription, this field is
+	// used to configure it. An empty `pushConfig` signifies that the subscriber
+	// will pull and ack messages using API methods.
+	PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"`
+	// This value is the maximum time after a subscriber receives a message
+	// before the subscriber should acknowledge the message. After message
+	// delivery but before the ack deadline expires and before the message is
+	// acknowledged, it is an outstanding message and will not be delivered
+	// again during that time (on a best-effort basis).
+	//
+	// For pull subscriptions, this value is used as the initial value for the ack
+	// deadline. To override this value for a given message, call
+	// `ModifyAckDeadline` with the corresponding `ack_id` if using
+	// pull.
+	// The minimum custom deadline you can specify is 10 seconds.
+	// The maximum custom deadline you can specify is 600 seconds (10 minutes).
+	// If this parameter is 0, a default value of 10 seconds is used.
+	//
+	// For push delivery, this value is also used to set the request timeout for
+	// the call to the push endpoint.
+	//
+	// If the subscriber never acknowledges the message, the Pub/Sub
+	// system will eventually redeliver the message.
+	AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"`
+	// Indicates whether to retain acknowledged messages. If true, then
+	// messages are not expunged from the subscription's backlog, even if they are
+	// acknowledged, until they fall out of the `message_retention_duration`
+	// window.
+	RetainAckedMessages bool `protobuf:"varint,7,opt,name=retain_acked_messages,json=retainAckedMessages" json:"retain_acked_messages,omitempty"`
+	// How long to retain unacknowledged messages in the subscription's backlog,
+	// from the moment a message is published.
+	// If `retain_acked_messages` is true, then this also configures the retention
+	// of acknowledged messages, and thus configures how far back in time a `Seek`
+	// can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10
+	// minutes.
+	MessageRetentionDuration *google_protobuf1.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration" json:"message_retention_duration,omitempty"`
+	// User labels.
+	Labels map[string]string `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Subscription) Reset()                    { *m = Subscription{} }
+func (m *Subscription) String() string            { return proto.CompactTextString(m) }
+func (*Subscription) ProtoMessage()               {}
+func (*Subscription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *Subscription) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Subscription) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+func (m *Subscription) GetPushConfig() *PushConfig {
+	if m != nil {
+		return m.PushConfig
+	}
+	return nil
+}
+
+func (m *Subscription) GetAckDeadlineSeconds() int32 {
+	if m != nil {
+		return m.AckDeadlineSeconds
+	}
+	return 0
+}
+
+func (m *Subscription) GetRetainAckedMessages() bool {
+	if m != nil {
+		return m.RetainAckedMessages
+	}
+	return false
+}
+
+func (m *Subscription) GetMessageRetentionDuration() *google_protobuf1.Duration {
+	if m != nil {
+		return m.MessageRetentionDuration
+	}
+	return nil
+}
+
+func (m *Subscription) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// Configuration for a push delivery endpoint.
+type PushConfig struct {
+	// A URL locating the endpoint to which messages should be pushed.
+	// For example, a Webhook endpoint might use "https://example.com/push".
+	PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint" json:"push_endpoint,omitempty"`
+	// Endpoint configuration attributes.
+	//
+	// Every endpoint has a set of API supported attributes that can be used to
+	// control different aspects of the message delivery.
+	//
+	// The currently supported attribute is `x-goog-version`, which you can
+	// use to change the format of the pushed message. This attribute
+	// indicates the version of the data expected by the endpoint. This
+	// controls the shape of the pushed message (i.e., its fields and metadata).
+	// The endpoint version is based on the version of the Pub/Sub API.
+	//
+	// If not present during the `CreateSubscription` call, it will default to
+	// the version of the API used to make such call. If not present during a
+	// `ModifyPushConfig` call, its value will not be changed. `GetSubscription`
+	// calls will always return a valid version, even if the subscription was
+	// created without this attribute.
+	//
+	// The possible values for this attribute are:
+	//
+	// * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.
+	// * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.
+	Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *PushConfig) Reset()                    { *m = PushConfig{} }
+func (m *PushConfig) String() string            { return proto.CompactTextString(m) }
+func (*PushConfig) ProtoMessage()               {}
+func (*PushConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *PushConfig) GetPushEndpoint() string {
+	if m != nil {
+		return m.PushEndpoint
+	}
+	return ""
+}
+
+func (m *PushConfig) GetAttributes() map[string]string {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+// A message and its corresponding acknowledgment ID.
+type ReceivedMessage struct {
+	// This ID can be used to acknowledge the received message.
+	AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId" json:"ack_id,omitempty"`
+	// The message.
+	Message *PubsubMessage `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
+}
+
+func (m *ReceivedMessage) Reset()                    { *m = ReceivedMessage{} }
+func (m *ReceivedMessage) String() string            { return proto.CompactTextString(m) }
+func (*ReceivedMessage) ProtoMessage()               {}
+func (*ReceivedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *ReceivedMessage) GetAckId() string {
+	if m != nil {
+		return m.AckId
+	}
+	return ""
+}
+
+func (m *ReceivedMessage) GetMessage() *PubsubMessage {
+	if m != nil {
+		return m.Message
+	}
+	return nil
+}
+
+// Request for the GetSubscription method.
+type GetSubscriptionRequest struct {
+	// The name of the subscription to get.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+}
+
+func (m *GetSubscriptionRequest) Reset()                    { *m = GetSubscriptionRequest{} }
+func (m *GetSubscriptionRequest) String() string            { return proto.CompactTextString(m) }
+func (*GetSubscriptionRequest) ProtoMessage()               {}
+func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *GetSubscriptionRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+// Request for the UpdateSubscription method.
+type UpdateSubscriptionRequest struct {
+	// The updated subscription object.
+	Subscription *Subscription `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// Indicates which fields in the provided subscription to update.
+	// Must be specified and non-empty.
+	UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateSubscriptionRequest) Reset()                    { *m = UpdateSubscriptionRequest{} }
+func (m *UpdateSubscriptionRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateSubscriptionRequest) ProtoMessage()               {}
+func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *UpdateSubscriptionRequest) GetSubscription() *Subscription {
+	if m != nil {
+		return m.Subscription
+	}
+	return nil
+}
+
+func (m *UpdateSubscriptionRequest) GetUpdateMask() *google_protobuf3.FieldMask {
+	if m != nil {
+		return m.UpdateMask
+	}
+	return nil
+}
+
+// Request for the `ListSubscriptions` method.
+type ListSubscriptionsRequest struct {
+	// The name of the cloud project that subscriptions belong to.
+	// Format is `projects/{project}`.
+	Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
+	// Maximum number of subscriptions to return.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// The value returned by the last `ListSubscriptionsResponse`; indicates that
+	// this is a continuation of a prior `ListSubscriptions` call, and that the
+	// system should return the next page of data.
+	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListSubscriptionsRequest) Reset()                    { *m = ListSubscriptionsRequest{} }
+func (m *ListSubscriptionsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListSubscriptionsRequest) ProtoMessage()               {}
+func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *ListSubscriptionsRequest) GetProject() string {
+	if m != nil {
+		return m.Project
+	}
+	return ""
+}
+
+func (m *ListSubscriptionsRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListSubscriptionsRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+// Response for the `ListSubscriptions` method.
+type ListSubscriptionsResponse struct {
+	// The subscriptions that match the request.
+	Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"`
+	// If not empty, indicates that there may be more subscriptions that match
+	// the request; this value should be passed in a new
+	// `ListSubscriptionsRequest` to get more subscriptions.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListSubscriptionsResponse) Reset()                    { *m = ListSubscriptionsResponse{} }
+func (m *ListSubscriptionsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListSubscriptionsResponse) ProtoMessage()               {}
+func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *ListSubscriptionsResponse) GetSubscriptions() []*Subscription {
+	if m != nil {
+		return m.Subscriptions
+	}
+	return nil
+}
+
+func (m *ListSubscriptionsResponse) GetNextPageToken() string {
+	if m != nil {
+		return m.NextPageToken
+	}
+	return ""
+}
+
+// Request for the DeleteSubscription method.
+type DeleteSubscriptionRequest struct {
+	// The subscription to delete.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+}
+
+func (m *DeleteSubscriptionRequest) Reset()                    { *m = DeleteSubscriptionRequest{} }
+func (m *DeleteSubscriptionRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSubscriptionRequest) ProtoMessage()               {}
+func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *DeleteSubscriptionRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+// Request for the ModifyPushConfig method.
+type ModifyPushConfigRequest struct {
+	// The name of the subscription.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// The push configuration for future deliveries.
+	//
+	// An empty `pushConfig` indicates that the Pub/Sub system should
+	// stop pushing messages from the given subscription and allow
+	// messages to be pulled and acknowledged - effectively pausing
+	// the subscription if `Pull` is not called.
+	PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"`
+}
+
+func (m *ModifyPushConfigRequest) Reset()                    { *m = ModifyPushConfigRequest{} }
+func (m *ModifyPushConfigRequest) String() string            { return proto.CompactTextString(m) }
+func (*ModifyPushConfigRequest) ProtoMessage()               {}
+func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *ModifyPushConfigRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *ModifyPushConfigRequest) GetPushConfig() *PushConfig {
+	if m != nil {
+		return m.PushConfig
+	}
+	return nil
+}
+
+// Request for the `Pull` method.
+type PullRequest struct {
+	// The subscription from which messages should be pulled.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// If this field is set to true, the system will respond immediately even if
+	// there are no messages available to return in the `Pull` response.
+	// Otherwise, the system may wait (for a bounded amount of time) until at
+	// least one message is available, rather than returning no messages. The
+	// client may cancel the request if it does not wish to wait any longer for
+	// the response.
+	ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately" json:"return_immediately,omitempty"`
+	// The maximum number of messages returned for this request. The Pub/Sub
+	// system may return fewer than the number specified.
+	MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages" json:"max_messages,omitempty"`
+}
+
+func (m *PullRequest) Reset()                    { *m = PullRequest{} }
+func (m *PullRequest) String() string            { return proto.CompactTextString(m) }
+func (*PullRequest) ProtoMessage()               {}
+func (*PullRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *PullRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *PullRequest) GetReturnImmediately() bool {
+	if m != nil {
+		return m.ReturnImmediately
+	}
+	return false
+}
+
+func (m *PullRequest) GetMaxMessages() int32 {
+	if m != nil {
+		return m.MaxMessages
+	}
+	return 0
+}
+
+// Response for the `Pull` method.
+type PullResponse struct {
+	// Received Pub/Sub messages. The Pub/Sub system will return zero messages if
+	// there are no more available in the backlog. The Pub/Sub system may return
+	// fewer than the `maxMessages` requested even if there are more messages
+	// available in the backlog.
+	ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"`
+}
+
+func (m *PullResponse) Reset()                    { *m = PullResponse{} }
+func (m *PullResponse) String() string            { return proto.CompactTextString(m) }
+func (*PullResponse) ProtoMessage()               {}
+func (*PullResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (m *PullResponse) GetReceivedMessages() []*ReceivedMessage {
+	if m != nil {
+		return m.ReceivedMessages
+	}
+	return nil
+}
+
+// Request for the ModifyAckDeadline method.
+type ModifyAckDeadlineRequest struct {
+	// The name of the subscription.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// List of acknowledgment IDs.
+	AckIds []string `protobuf:"bytes,4,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"`
+	// The new ack deadline with respect to the time this request was sent to
+	// the Pub/Sub system. For example, if the value is 10, the new
+	// ack deadline will expire 10 seconds after the `ModifyAckDeadline` call
+	// was made. Specifying zero may immediately make the message available for
+	// another pull request.
+	// The minimum deadline you can specify is 0 seconds.
+	// The maximum deadline you can specify is 600 seconds (10 minutes).
+	AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"`
+}
+
+func (m *ModifyAckDeadlineRequest) Reset()                    { *m = ModifyAckDeadlineRequest{} }
+func (m *ModifyAckDeadlineRequest) String() string            { return proto.CompactTextString(m) }
+func (*ModifyAckDeadlineRequest) ProtoMessage()               {}
+func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *ModifyAckDeadlineRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *ModifyAckDeadlineRequest) GetAckIds() []string {
+	if m != nil {
+		return m.AckIds
+	}
+	return nil
+}
+
+func (m *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 {
+	if m != nil {
+		return m.AckDeadlineSeconds
+	}
+	return 0
+}
+
+// Request for the Acknowledge method.
+type AcknowledgeRequest struct {
+	// The subscription whose message is being acknowledged.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// The acknowledgment ID for the messages being acknowledged that was returned
+	// by the Pub/Sub system in the `Pull` response. Must not be empty.
+	AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"`
+}
+
+func (m *AcknowledgeRequest) Reset()                    { *m = AcknowledgeRequest{} }
+func (m *AcknowledgeRequest) String() string            { return proto.CompactTextString(m) }
+func (*AcknowledgeRequest) ProtoMessage()               {}
+func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *AcknowledgeRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *AcknowledgeRequest) GetAckIds() []string {
+	if m != nil {
+		return m.AckIds
+	}
+	return nil
+}
+
+// Request for the `StreamingPull` streaming RPC method. This request is used to
+// establish the initial stream as well as to stream acknowledgements and ack
+// deadline modifications from the client to the server.
+type StreamingPullRequest struct {
+	// The subscription for which to initialize the new stream. This must be
+	// provided in the first request on the stream, and must not be set in
+	// subsequent requests from client to server.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// List of acknowledgement IDs for acknowledging previously received messages
+	// (received on this stream or a different stream). If an ack ID has expired,
+	// the corresponding message may be redelivered later. Acknowledging a message
+	// more than once will not result in an error. If the acknowledgement ID is
+	// malformed, the stream will be aborted with status `INVALID_ARGUMENT`.
+	AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"`
+	// The list of new ack deadlines for the IDs listed in
+	// `modify_deadline_ack_ids`. The size of this list must be the same as the
+	// size of `modify_deadline_ack_ids`. If it differs the stream will be aborted
+	// with `INVALID_ARGUMENT`. Each element in this list is applied to the
+	// element in the same position in `modify_deadline_ack_ids`. The new ack
+	// deadline is with respect to the time this request was sent to the Pub/Sub
+	// system. Must be >= 0. For example, if the value is 10, the new ack deadline
+	// will expire 10 seconds after this request is received. If the value is 0,
+	// the message is immediately made available for another streaming or
+	// non-streaming pull request. If the value is < 0 (an error), the stream will
+	// be aborted with status `INVALID_ARGUMENT`.
+	ModifyDeadlineSeconds []int32 `protobuf:"varint,3,rep,packed,name=modify_deadline_seconds,json=modifyDeadlineSeconds" json:"modify_deadline_seconds,omitempty"`
+	// List of acknowledgement IDs whose deadline will be modified based on the
+	// corresponding element in `modify_deadline_seconds`. This field can be used
+	// to indicate that more time is needed to process a message by the
+	// subscriber, or to make the message available for redelivery if the
+	// processing was interrupted.
+	ModifyDeadlineAckIds []string `protobuf:"bytes,4,rep,name=modify_deadline_ack_ids,json=modifyDeadlineAckIds" json:"modify_deadline_ack_ids,omitempty"`
+	// The ack deadline to use for the stream. This must be provided in the
+	// first request on the stream, but it can also be updated on subsequent
+	// requests from client to server. The minimum deadline you can specify is 10
+	// seconds. The maximum deadline you can specify is 600 seconds (10 minutes).
+	StreamAckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=stream_ack_deadline_seconds,json=streamAckDeadlineSeconds" json:"stream_ack_deadline_seconds,omitempty"`
+}
+
+func (m *StreamingPullRequest) Reset()                    { *m = StreamingPullRequest{} }
+func (m *StreamingPullRequest) String() string            { return proto.CompactTextString(m) }
+func (*StreamingPullRequest) ProtoMessage()               {}
+func (*StreamingPullRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *StreamingPullRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *StreamingPullRequest) GetAckIds() []string {
+	if m != nil {
+		return m.AckIds
+	}
+	return nil
+}
+
+func (m *StreamingPullRequest) GetModifyDeadlineSeconds() []int32 {
+	if m != nil {
+		return m.ModifyDeadlineSeconds
+	}
+	return nil
+}
+
+func (m *StreamingPullRequest) GetModifyDeadlineAckIds() []string {
+	if m != nil {
+		return m.ModifyDeadlineAckIds
+	}
+	return nil
+}
+
+func (m *StreamingPullRequest) GetStreamAckDeadlineSeconds() int32 {
+	if m != nil {
+		return m.StreamAckDeadlineSeconds
+	}
+	return 0
+}
+
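+// Illustrative sketch, not part of the generated API: how a client might
+// populate the first and subsequent StreamingPullRequest messages described
+// above. The subscription name is a placeholder.
+func exampleStreamingPullRequests() (first, followUp *StreamingPullRequest) {
+	// The first request on the stream must name the subscription and set the
+	// stream-wide ack deadline (between 10 and 600 seconds).
+	first = &StreamingPullRequest{
+		Subscription:             "projects/my-project/subscriptions/my-sub",
+		StreamAckDeadlineSeconds: 30,
+	}
+	// Later requests leave Subscription empty and carry only acknowledgements
+	// and per-message deadline modifications; the two modify_deadline lists
+	// must have the same length.
+	followUp = &StreamingPullRequest{
+		AckIds:                []string{"ack-id-1"},
+		ModifyDeadlineAckIds:  []string{"ack-id-2"},
+		ModifyDeadlineSeconds: []int32{60},
+	}
+	return first, followUp
+}
+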
+// Response for the `StreamingPull` method. This response is used to stream
+// messages from the server to the client.
+type StreamingPullResponse struct {
+	// Received Pub/Sub messages. This will not be empty.
+	ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"`
+}
+
+func (m *StreamingPullResponse) Reset()                    { *m = StreamingPullResponse{} }
+func (m *StreamingPullResponse) String() string            { return proto.CompactTextString(m) }
+func (*StreamingPullResponse) ProtoMessage()               {}
+func (*StreamingPullResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage {
+	if m != nil {
+		return m.ReceivedMessages
+	}
+	return nil
+}
+
+// Request for the `CreateSnapshot` method.
+type CreateSnapshotRequest struct {
+	// Optional user-provided name for this snapshot.
+	// If the name is not provided in the request, the server will assign a random
+	// name for this snapshot on the same project as the subscription.
+	// Note that for REST API requests, you must specify a name.
+	// Format is `projects/{project}/snapshots/{snap}`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The subscription whose backlog the snapshot retains.
+	// Specifically, the created snapshot is guaranteed to retain:
+	//  (a) The existing backlog on the subscription. More precisely, this is
+	//      defined as the messages in the subscription's backlog that are
+	//      unacknowledged upon the successful completion of the
+	//      `CreateSnapshot` request; as well as:
+	//  (b) Any messages published to the subscription's topic following the
+	//      successful completion of the CreateSnapshot request.
+	// Format is `projects/{project}/subscriptions/{sub}`.
+	Subscription string `protobuf:"bytes,2,opt,name=subscription" json:"subscription,omitempty"`
+}
+
+func (m *CreateSnapshotRequest) Reset()                    { *m = CreateSnapshotRequest{} }
+func (m *CreateSnapshotRequest) String() string            { return proto.CompactTextString(m) }
+func (*CreateSnapshotRequest) ProtoMessage()               {}
+func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *CreateSnapshotRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateSnapshotRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+// Request for the UpdateSnapshot method.
+type UpdateSnapshotRequest struct {
+	// The updated snapshot object.
+	Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"`
+	// Indicates which fields in the provided snapshot to update.
+	// Must be specified and non-empty.
+	UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateSnapshotRequest) Reset()                    { *m = UpdateSnapshotRequest{} }
+func (m *UpdateSnapshotRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateSnapshotRequest) ProtoMessage()               {}
+func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (m *UpdateSnapshotRequest) GetSnapshot() *Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+func (m *UpdateSnapshotRequest) GetUpdateMask() *google_protobuf3.FieldMask {
+	if m != nil {
+		return m.UpdateMask
+	}
+	return nil
+}
+
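+// Illustrative sketch, not part of the generated API: building an
+// UpdateSnapshotRequest whose field mask names the fields to change. The
+// snapshot name and expire time are placeholders.
+func exampleUpdateSnapshotRequest() *UpdateSnapshotRequest {
+	return &UpdateSnapshotRequest{
+		Snapshot: &Snapshot{
+			Name:       "projects/my-project/snapshots/my-snap",
+			ExpireTime: &google_protobuf4.Timestamp{Seconds: 1600000000},
+		},
+		// Only the fields listed in the mask are updated; the mask must be
+		// non-empty.
+		UpdateMask: &google_protobuf3.FieldMask{Paths: []string{"expire_time"}},
+	}
+}
+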
+// A snapshot resource.
+type Snapshot struct {
+	// The name of the snapshot.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The name of the topic from which this snapshot is retaining messages.
+	Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+	// The snapshot is guaranteed to exist up until this time.
+	// A newly-created snapshot expires no later than 7 days from the time of its
+	// creation. Its exact lifetime is determined at creation by the existing
+	// backlog in the source subscription. Specifically, the lifetime of the
+	// snapshot is `7 days - (age of oldest unacked message in the subscription)`.
+	// For example, consider a subscription whose oldest unacked message is 3 days
+	// old. If a snapshot is created from this subscription, the snapshot -- which
+	// will always capture this 3-day-old backlog as long as the snapshot
+	// exists -- will expire in 4 days.
+	ExpireTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"`
+	// User labels.
+	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Snapshot) Reset()                    { *m = Snapshot{} }
+func (m *Snapshot) String() string            { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()               {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *Snapshot) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Snapshot) GetTopic() string {
+	if m != nil {
+		return m.Topic
+	}
+	return ""
+}
+
+func (m *Snapshot) GetExpireTime() *google_protobuf4.Timestamp {
+	if m != nil {
+		return m.ExpireTime
+	}
+	return nil
+}
+
+func (m *Snapshot) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// Request for the `ListSnapshots` method.
+type ListSnapshotsRequest struct {
+	// The name of the cloud project that snapshots belong to.
+	// Format is `projects/{project}`.
+	Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
+	// Maximum number of snapshots to return.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// The value returned by the last `ListSnapshotsResponse`; indicates that this
+	// is a continuation of a prior `ListSnapshots` call, and that the system
+	// should return the next page of data.
+	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListSnapshotsRequest) Reset()                    { *m = ListSnapshotsRequest{} }
+func (m *ListSnapshotsRequest) String() string            { return proto.CompactTextString(m) }
+func (*ListSnapshotsRequest) ProtoMessage()               {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+func (m *ListSnapshotsRequest) GetProject() string {
+	if m != nil {
+		return m.Project
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListSnapshotsRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+// Response for the `ListSnapshots` method.
+type ListSnapshotsResponse struct {
+	// The resulting snapshots.
+	Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots" json:"snapshots,omitempty"`
+	// If not empty, indicates that there may be more snapshots that match the
+	// request; this value should be passed in a new `ListSnapshotsRequest`.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListSnapshotsResponse) Reset()                    { *m = ListSnapshotsResponse{} }
+func (m *ListSnapshotsResponse) String() string            { return proto.CompactTextString(m) }
+func (*ListSnapshotsResponse) ProtoMessage()               {}
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+func (m *ListSnapshotsResponse) GetSnapshots() []*Snapshot {
+	if m != nil {
+		return m.Snapshots
+	}
+	return nil
+}
+
+func (m *ListSnapshotsResponse) GetNextPageToken() string {
+	if m != nil {
+		return m.NextPageToken
+	}
+	return ""
+}
+
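+// Illustrative sketch, not part of the generated API: paging through all
+// snapshots with a SubscriberClient by following next_page_token until it is
+// empty. The project name is a placeholder.
+func exampleListAllSnapshots(ctx context.Context, c SubscriberClient) ([]*Snapshot, error) {
+	var all []*Snapshot
+	req := &ListSnapshotsRequest{Project: "projects/my-project", PageSize: 100}
+	for {
+		resp, err := c.ListSnapshots(ctx, req)
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, resp.GetSnapshots()...)
+		// An empty next_page_token means there are no further results.
+		if resp.GetNextPageToken() == "" {
+			return all, nil
+		}
+		req.PageToken = resp.GetNextPageToken()
+	}
+}
+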
+// Request for the `DeleteSnapshot` method.
+type DeleteSnapshotRequest struct {
+	// The name of the snapshot to delete.
+	// Format is `projects/{project}/snapshots/{snap}`.
+	Snapshot string `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"`
+}
+
+func (m *DeleteSnapshotRequest) Reset()                    { *m = DeleteSnapshotRequest{} }
+func (m *DeleteSnapshotRequest) String() string            { return proto.CompactTextString(m) }
+func (*DeleteSnapshotRequest) ProtoMessage()               {}
+func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+
+func (m *DeleteSnapshotRequest) GetSnapshot() string {
+	if m != nil {
+		return m.Snapshot
+	}
+	return ""
+}
+
+// Request for the `Seek` method.
+type SeekRequest struct {
+	// The subscription to affect.
+	Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
+	// Types that are valid to be assigned to Target:
+	//	*SeekRequest_Time
+	//	*SeekRequest_Snapshot
+	Target isSeekRequest_Target `protobuf_oneof:"target"`
+}
+
+func (m *SeekRequest) Reset()                    { *m = SeekRequest{} }
+func (m *SeekRequest) String() string            { return proto.CompactTextString(m) }
+func (*SeekRequest) ProtoMessage()               {}
+func (*SeekRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+type isSeekRequest_Target interface {
+	isSeekRequest_Target()
+}
+
+type SeekRequest_Time struct {
+	Time *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=time,oneof"`
+}
+type SeekRequest_Snapshot struct {
+	Snapshot string `protobuf:"bytes,3,opt,name=snapshot,oneof"`
+}
+
+func (*SeekRequest_Time) isSeekRequest_Target()     {}
+func (*SeekRequest_Snapshot) isSeekRequest_Target() {}
+
+func (m *SeekRequest) GetTarget() isSeekRequest_Target {
+	if m != nil {
+		return m.Target
+	}
+	return nil
+}
+
+func (m *SeekRequest) GetSubscription() string {
+	if m != nil {
+		return m.Subscription
+	}
+	return ""
+}
+
+func (m *SeekRequest) GetTime() *google_protobuf4.Timestamp {
+	if x, ok := m.GetTarget().(*SeekRequest_Time); ok {
+		return x.Time
+	}
+	return nil
+}
+
+func (m *SeekRequest) GetSnapshot() string {
+	if x, ok := m.GetTarget().(*SeekRequest_Snapshot); ok {
+		return x.Snapshot
+	}
+	return ""
+}
+
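+// Illustrative sketch, not part of the generated API: the Seek target is a
+// oneof, so exactly one of the wrapper types is assigned. The names and
+// timestamp below are placeholders.
+func exampleSeekRequests() (byTime, bySnapshot *SeekRequest) {
+	byTime = &SeekRequest{
+		Subscription: "projects/my-project/subscriptions/my-sub",
+		Target:       &SeekRequest_Time{Time: &google_protobuf4.Timestamp{Seconds: 1500000000}},
+	}
+	bySnapshot = &SeekRequest{
+		Subscription: "projects/my-project/subscriptions/my-sub",
+		Target:       &SeekRequest_Snapshot{Snapshot: "projects/my-project/snapshots/my-snap"},
+	}
+	return byTime, bySnapshot
+}
+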
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*SeekRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _SeekRequest_OneofMarshaler, _SeekRequest_OneofUnmarshaler, _SeekRequest_OneofSizer, []interface{}{
+		(*SeekRequest_Time)(nil),
+		(*SeekRequest_Snapshot)(nil),
+	}
+}
+
+func _SeekRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*SeekRequest)
+	// target
+	switch x := m.Target.(type) {
+	case *SeekRequest_Time:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Time); err != nil {
+			return err
+		}
+	case *SeekRequest_Snapshot:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.Snapshot)
+	case nil:
+	default:
+		return fmt.Errorf("SeekRequest.Target has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _SeekRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*SeekRequest)
+	switch tag {
+	case 2: // target.time
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(google_protobuf4.Timestamp)
+		err := b.DecodeMessage(msg)
+		m.Target = &SeekRequest_Time{msg}
+		return true, err
+	case 3: // target.snapshot
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Target = &SeekRequest_Snapshot{x}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _SeekRequest_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*SeekRequest)
+	// target
+	switch x := m.Target.(type) {
+	case *SeekRequest_Time:
+		s := proto.Size(x.Time)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SeekRequest_Snapshot:
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Snapshot)))
+		n += len(x.Snapshot)
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type SeekResponse struct {
+}
+
+func (m *SeekResponse) Reset()                    { *m = SeekResponse{} }
+func (m *SeekResponse) String() string            { return proto.CompactTextString(m) }
+func (*SeekResponse) ProtoMessage()               {}
+func (*SeekResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func init() {
+	proto.RegisterType((*Topic)(nil), "google.pubsub.v1.Topic")
+	proto.RegisterType((*PubsubMessage)(nil), "google.pubsub.v1.PubsubMessage")
+	proto.RegisterType((*GetTopicRequest)(nil), "google.pubsub.v1.GetTopicRequest")
+	proto.RegisterType((*UpdateTopicRequest)(nil), "google.pubsub.v1.UpdateTopicRequest")
+	proto.RegisterType((*PublishRequest)(nil), "google.pubsub.v1.PublishRequest")
+	proto.RegisterType((*PublishResponse)(nil), "google.pubsub.v1.PublishResponse")
+	proto.RegisterType((*ListTopicsRequest)(nil), "google.pubsub.v1.ListTopicsRequest")
+	proto.RegisterType((*ListTopicsResponse)(nil), "google.pubsub.v1.ListTopicsResponse")
+	proto.RegisterType((*ListTopicSubscriptionsRequest)(nil), "google.pubsub.v1.ListTopicSubscriptionsRequest")
+	proto.RegisterType((*ListTopicSubscriptionsResponse)(nil), "google.pubsub.v1.ListTopicSubscriptionsResponse")
+	proto.RegisterType((*DeleteTopicRequest)(nil), "google.pubsub.v1.DeleteTopicRequest")
+	proto.RegisterType((*Subscription)(nil), "google.pubsub.v1.Subscription")
+	proto.RegisterType((*PushConfig)(nil), "google.pubsub.v1.PushConfig")
+	proto.RegisterType((*ReceivedMessage)(nil), "google.pubsub.v1.ReceivedMessage")
+	proto.RegisterType((*GetSubscriptionRequest)(nil), "google.pubsub.v1.GetSubscriptionRequest")
+	proto.RegisterType((*UpdateSubscriptionRequest)(nil), "google.pubsub.v1.UpdateSubscriptionRequest")
+	proto.RegisterType((*ListSubscriptionsRequest)(nil), "google.pubsub.v1.ListSubscriptionsRequest")
+	proto.RegisterType((*ListSubscriptionsResponse)(nil), "google.pubsub.v1.ListSubscriptionsResponse")
+	proto.RegisterType((*DeleteSubscriptionRequest)(nil), "google.pubsub.v1.DeleteSubscriptionRequest")
+	proto.RegisterType((*ModifyPushConfigRequest)(nil), "google.pubsub.v1.ModifyPushConfigRequest")
+	proto.RegisterType((*PullRequest)(nil), "google.pubsub.v1.PullRequest")
+	proto.RegisterType((*PullResponse)(nil), "google.pubsub.v1.PullResponse")
+	proto.RegisterType((*ModifyAckDeadlineRequest)(nil), "google.pubsub.v1.ModifyAckDeadlineRequest")
+	proto.RegisterType((*AcknowledgeRequest)(nil), "google.pubsub.v1.AcknowledgeRequest")
+	proto.RegisterType((*StreamingPullRequest)(nil), "google.pubsub.v1.StreamingPullRequest")
+	proto.RegisterType((*StreamingPullResponse)(nil), "google.pubsub.v1.StreamingPullResponse")
+	proto.RegisterType((*CreateSnapshotRequest)(nil), "google.pubsub.v1.CreateSnapshotRequest")
+	proto.RegisterType((*UpdateSnapshotRequest)(nil), "google.pubsub.v1.UpdateSnapshotRequest")
+	proto.RegisterType((*Snapshot)(nil), "google.pubsub.v1.Snapshot")
+	proto.RegisterType((*ListSnapshotsRequest)(nil), "google.pubsub.v1.ListSnapshotsRequest")
+	proto.RegisterType((*ListSnapshotsResponse)(nil), "google.pubsub.v1.ListSnapshotsResponse")
+	proto.RegisterType((*DeleteSnapshotRequest)(nil), "google.pubsub.v1.DeleteSnapshotRequest")
+	proto.RegisterType((*SeekRequest)(nil), "google.pubsub.v1.SeekRequest")
+	proto.RegisterType((*SeekResponse)(nil), "google.pubsub.v1.SeekResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Subscriber service
+
+type SubscriberClient interface {
+	// Creates a subscription to a given topic.
+	// If the subscription already exists, returns `ALREADY_EXISTS`.
+	// If the corresponding topic doesn't exist, returns `NOT_FOUND`.
+	//
+	// If the name is not provided in the request, the server will assign a random
+	// name for this subscription on the same project as the topic, conforming
+	// to the
+	// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+	// The generated name is populated in the returned Subscription object.
+	// Note that for REST API requests, you must specify a name in the request.
+	CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error)
+	// Gets the configuration details of a subscription.
+	GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error)
+	// Updates an existing subscription. Note that certain properties of a
+	// subscription, such as its topic, are not modifiable.
+	// NOTE:  The style guide requires body: "subscription" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error)
+	// Lists matching subscriptions.
+	ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error)
+	// Deletes an existing subscription. All messages retained in the subscription
+	// are immediately dropped. Calls to `Pull` after deletion will return
+	// `NOT_FOUND`. After a subscription is deleted, a new one may be created with
+	// the same name, but the new one has no association with the old
+	// subscription or its topic unless the same topic is specified.
+	DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Modifies the ack deadline for a specific message. This method is useful
+	// to indicate that more time is needed to process a message by the
+	// subscriber, or to make the message available for redelivery if the
+	// processing was interrupted. Note that this does not modify the
+	// subscription-level `ackDeadlineSeconds` used for subsequent messages.
+	ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Acknowledges the messages associated with the `ack_ids` in the
+	// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages
+	// from the subscription.
+	//
+	// Acknowledging a message whose ack deadline has expired may succeed,
+	// but such a message may be redelivered later. Acknowledging a message more
+	// than once will not result in an error.
+	Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Pulls messages from the server. Returns an empty list if there are no
+	// messages available in the backlog. The server may return `UNAVAILABLE` if
+	// there are too many concurrent pull requests pending for the given
+	// subscription.
+	Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error)
+	// (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
+	// respond with UNIMPLEMENTED errors unless you have been invited to test
+	// this feature. Contact cloud-pubsub@google.com with any questions.
+	//
+	// Establishes a stream with the server, which sends messages down to the
+	// client. The client streams acknowledgements and ack deadline modifications
+	// back to the server. The server will close the stream and return the status
+	// on any error. The server may close the stream with status `OK` to reassign
+	// server-side resources, in which case, the client should re-establish the
+	// stream. `UNAVAILABLE` may also be returned in the case of a transient error
+	// (e.g., a server restart). These should also be retried by the client. Flow
+	// control can be achieved by configuring the underlying RPC channel.
+	StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error)
+	// Modifies the `PushConfig` for a specified subscription.
+	//
+	// This may be used to change a push subscription to a pull one (signified by
+	// an empty `PushConfig`) or vice versa, or change the endpoint URL and other
+	// attributes of a push subscription. Messages will accumulate for delivery
+	// continuously through the call regardless of changes to the `PushConfig`.
+	ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Lists the existing snapshots.
+	ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error)
+	// Creates a snapshot from the requested subscription.
+	// If the snapshot already exists, returns `ALREADY_EXISTS`.
+	// If the requested subscription doesn't exist, returns `NOT_FOUND`.
+	//
+	// If the name is not provided in the request, the server will assign a random
+	// name for this snapshot on the same project as the subscription, conforming
+	// to the
+	// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+	// The generated name is populated in the returned Snapshot object.
+	// Note that for REST API requests, you must specify a name in the request.
+	CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error)
+	// Updates an existing snapshot. Note that certain properties of a snapshot
+	// are not modifiable.
+	// NOTE:  The style guide requires body: "snapshot" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error)
+	// Removes an existing snapshot. All messages retained in the snapshot
+	// are immediately dropped. After a snapshot is deleted, a new one may be
+	// created with the same name, but the new one has no association with the old
+	// snapshot or its subscription, unless the same subscription is specified.
+	DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Seeks an existing subscription to a point in time or to a given snapshot,
+	// whichever is provided in the request.
+	Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error)
+}
+
+type subscriberClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewSubscriberClient(cc *grpc.ClientConn) SubscriberClient {
+	return &subscriberClient{cc}
+}
+
+func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) {
+	out := new(Subscription)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSubscription", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) {
+	out := new(Subscription)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSubscription", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) {
+	out := new(Subscription)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSubscription", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) {
+	out := new(ListSubscriptionsResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSubscriptions", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSubscription", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyAckDeadline", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Acknowledge", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) {
+	out := new(PullResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Pull", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
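+// Illustrative sketch, not part of the generated API: a single synchronous
+// Pull followed by an Acknowledge for the returned messages. The subscription
+// name is a placeholder and errors are simply returned.
+func examplePullAndAck(ctx context.Context, c SubscriberClient) error {
+	const sub = "projects/my-project/subscriptions/my-sub"
+	resp, err := c.Pull(ctx, &PullRequest{Subscription: sub, MaxMessages: 10})
+	if err != nil {
+		return err
+	}
+	var ackIDs []string
+	for _, rm := range resp.GetReceivedMessages() {
+		// Process rm.GetMessage() here, then record its ack ID.
+		ackIDs = append(ackIDs, rm.GetAckId())
+	}
+	if len(ackIDs) == 0 {
+		return nil
+	}
+	_, err = c.Acknowledge(ctx, &AcknowledgeRequest{Subscription: sub, AckIds: ackIDs})
+	return err
+}
+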
+func (c *subscriberClient) StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Subscriber_serviceDesc.Streams[0], c.cc, "/google.pubsub.v1.Subscriber/StreamingPull", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &subscriberStreamingPullClient{stream}
+	return x, nil
+}
+
+type Subscriber_StreamingPullClient interface {
+	Send(*StreamingPullRequest) error
+	Recv() (*StreamingPullResponse, error)
+	grpc.ClientStream
+}
+
+type subscriberStreamingPullClient struct {
+	grpc.ClientStream
+}
+
+func (x *subscriberStreamingPullClient) Send(m *StreamingPullRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *subscriberStreamingPullClient) Recv() (*StreamingPullResponse, error) {
+	m := new(StreamingPullResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
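+// Illustrative sketch, not part of the generated API: driving the
+// bidirectional StreamingPull RPC as described above -- send the initial
+// request naming the subscription, then receive messages and stream
+// acknowledgements back on the same stream. The subscription name is a
+// placeholder and retry/flow-control concerns are omitted.
+func exampleStreamingPull(ctx context.Context, c SubscriberClient) error {
+	stream, err := c.StreamingPull(ctx)
+	if err != nil {
+		return err
+	}
+	if err := stream.Send(&StreamingPullRequest{
+		Subscription:             "projects/my-project/subscriptions/my-sub",
+		StreamAckDeadlineSeconds: 30,
+	}); err != nil {
+		return err
+	}
+	for {
+		resp, err := stream.Recv()
+		if err != nil {
+			// The server closes the stream with a status on any error.
+			return err
+		}
+		var ackIDs []string
+		for _, rm := range resp.GetReceivedMessages() {
+			ackIDs = append(ackIDs, rm.GetAckId())
+		}
+		if err := stream.Send(&StreamingPullRequest{AckIds: ackIDs}); err != nil {
+			return err
+		}
+	}
+}
+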
+func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyPushConfig", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) {
+	out := new(ListSnapshotsResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSnapshots", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) {
+	out := new(Snapshot)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSnapshot", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) {
+	out := new(Snapshot)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSnapshot", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSnapshot", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *subscriberClient) Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) {
+	out := new(SeekResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Seek", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Subscriber service
+
+type SubscriberServer interface {
+	// Creates a subscription to a given topic.
+	// If the subscription already exists, returns `ALREADY_EXISTS`.
+	// If the corresponding topic doesn't exist, returns `NOT_FOUND`.
+	//
+	// If the name is not provided in the request, the server will assign a random
+	// name for this subscription on the same project as the topic, conforming
+	// to the
+	// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+	// The generated name is populated in the returned Subscription object.
+	// Note that for REST API requests, you must specify a name in the request.
+	CreateSubscription(context.Context, *Subscription) (*Subscription, error)
+	// Gets the configuration details of a subscription.
+	GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error)
+	// Updates an existing subscription. Note that certain properties of a
+	// subscription, such as its topic, are not modifiable.
+	// NOTE:  The style guide requires body: "subscription" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error)
+	// Lists matching subscriptions.
+	ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error)
+	// Deletes an existing subscription. All messages retained in the subscription
+	// are immediately dropped. Calls to `Pull` after deletion will return
+	// `NOT_FOUND`. After a subscription is deleted, a new one may be created with
+	// the same name, but the new one has no association with the old
+	// subscription or its topic unless the same topic is specified.
+	DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*google_protobuf2.Empty, error)
+	// Modifies the ack deadline for a specific message. This method is useful
+	// to indicate that more time is needed to process a message by the
+	// subscriber, or to make the message available for redelivery if the
+	// processing was interrupted. Note that this does not modify the
+	// subscription-level `ackDeadlineSeconds` used for subsequent messages.
+	ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*google_protobuf2.Empty, error)
+	// Acknowledges the messages associated with the `ack_ids` in the
+	// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages
+	// from the subscription.
+	//
+	// Acknowledging a message whose ack deadline has expired may succeed,
+	// but such a message may be redelivered later. Acknowledging a message more
+	// than once will not result in an error.
+	Acknowledge(context.Context, *AcknowledgeRequest) (*google_protobuf2.Empty, error)
+	// Pulls messages from the server. Returns an empty list if there are no
+	// messages available in the backlog. The server may return `UNAVAILABLE` if
+	// there are too many concurrent pull requests pending for the given
+	// subscription.
+	Pull(context.Context, *PullRequest) (*PullResponse, error)
+	// (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
+	// respond with UNIMPLEMENTED errors unless you have been invited to test
+	// this feature. Contact cloud-pubsub@google.com with any questions.
+	//
+	// Establishes a stream with the server, which sends messages down to the
+	// client. The client streams acknowledgements and ack deadline modifications
+	// back to the server. The server will close the stream and return the status
+	// on any error. The server may close the stream with status `OK` to reassign
+	// server-side resources, in which case, the client should re-establish the
+	// stream. `UNAVAILABLE` may also be returned in the case of a transient error
+	// (e.g., a server restart). These should also be retried by the client. Flow
+	// control can be achieved by configuring the underlying RPC channel.
+	StreamingPull(Subscriber_StreamingPullServer) error
+	// Modifies the `PushConfig` for a specified subscription.
+	//
+	// This may be used to change a push subscription to a pull one (signified by
+	// an empty `PushConfig`) or vice versa, or change the endpoint URL and other
+	// attributes of a push subscription. Messages will accumulate for delivery
+	// continuously through the call regardless of changes to the `PushConfig`.
+	ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*google_protobuf2.Empty, error)
+	// Lists the existing snapshots.
+	ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error)
+	// Creates a snapshot from the requested subscription.
+	// If the snapshot already exists, returns `ALREADY_EXISTS`.
+	// If the requested subscription doesn't exist, returns `NOT_FOUND`.
+	//
+	// If the name is not provided in the request, the server will assign a random
+	// name for this snapshot on the same project as the subscription, conforming
+	// to the
+	// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
+	// The generated name is populated in the returned Snapshot object.
+	// Note that for REST API requests, you must specify a name in the request.
+	CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error)
+	// Updates an existing snapshot. Note that certain properties of a snapshot
+	// are not modifiable.
+	// NOTE:  The style guide requires body: "snapshot" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error)
+	// Removes an existing snapshot. All messages retained in the snapshot
+	// are immediately dropped. After a snapshot is deleted, a new one may be
+	// created with the same name, but the new one has no association with the old
+	// snapshot or its subscription, unless the same subscription is specified.
+	DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*google_protobuf2.Empty, error)
+	// Seeks an existing subscription to a point in time or to a given snapshot,
+	// whichever is provided in the request.
+	Seek(context.Context, *SeekRequest) (*SeekResponse, error)
+}
+
+func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) {
+	s.RegisterService(&_Subscriber_serviceDesc, srv)
+}
+
+func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Subscription)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).CreateSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/CreateSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).GetSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/GetSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_UpdateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).UpdateSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/UpdateSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).UpdateSubscription(ctx, req.(*UpdateSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSubscriptionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ListSubscriptions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ListSubscriptions",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).DeleteSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/DeleteSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ModifyAckDeadlineRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ModifyAckDeadline(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ModifyAckDeadline",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AcknowledgeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Acknowledge(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Acknowledge",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PullRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Pull(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Pull",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_StreamingPull_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(SubscriberServer).StreamingPull(&subscriberStreamingPullServer{stream})
+}
+
+type Subscriber_StreamingPullServer interface {
+	Send(*StreamingPullResponse) error
+	Recv() (*StreamingPullRequest, error)
+	grpc.ServerStream
+}
+
+type subscriberStreamingPullServer struct {
+	grpc.ServerStream
+}
+
+func (x *subscriberStreamingPullServer) Send(m *StreamingPullResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *subscriberStreamingPullServer) Recv() (*StreamingPullRequest, error) {
+	m := new(StreamingPullRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ModifyPushConfigRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ModifyPushConfig(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ModifyPushConfig",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSnapshotsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ListSnapshots(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ListSnapshots",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).CreateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/CreateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_UpdateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).UpdateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/UpdateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).UpdateSnapshot(ctx, req.(*UpdateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).DeleteSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/DeleteSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Seek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SeekRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Seek(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Seek",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Seek(ctx, req.(*SeekRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Subscriber_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.pubsub.v1.Subscriber",
+	HandlerType: (*SubscriberServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "CreateSubscription",
+			Handler:    _Subscriber_CreateSubscription_Handler,
+		},
+		{
+			MethodName: "GetSubscription",
+			Handler:    _Subscriber_GetSubscription_Handler,
+		},
+		{
+			MethodName: "UpdateSubscription",
+			Handler:    _Subscriber_UpdateSubscription_Handler,
+		},
+		{
+			MethodName: "ListSubscriptions",
+			Handler:    _Subscriber_ListSubscriptions_Handler,
+		},
+		{
+			MethodName: "DeleteSubscription",
+			Handler:    _Subscriber_DeleteSubscription_Handler,
+		},
+		{
+			MethodName: "ModifyAckDeadline",
+			Handler:    _Subscriber_ModifyAckDeadline_Handler,
+		},
+		{
+			MethodName: "Acknowledge",
+			Handler:    _Subscriber_Acknowledge_Handler,
+		},
+		{
+			MethodName: "Pull",
+			Handler:    _Subscriber_Pull_Handler,
+		},
+		{
+			MethodName: "ModifyPushConfig",
+			Handler:    _Subscriber_ModifyPushConfig_Handler,
+		},
+		{
+			MethodName: "ListSnapshots",
+			Handler:    _Subscriber_ListSnapshots_Handler,
+		},
+		{
+			MethodName: "CreateSnapshot",
+			Handler:    _Subscriber_CreateSnapshot_Handler,
+		},
+		{
+			MethodName: "UpdateSnapshot",
+			Handler:    _Subscriber_UpdateSnapshot_Handler,
+		},
+		{
+			MethodName: "DeleteSnapshot",
+			Handler:    _Subscriber_DeleteSnapshot_Handler,
+		},
+		{
+			MethodName: "Seek",
+			Handler:    _Subscriber_Seek_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "StreamingPull",
+			Handler:       _Subscriber_StreamingPull_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "google/pubsub/v1/pubsub.proto",
+}
+
+// Client API for Publisher service
+
+type PublisherClient interface {
+	// Creates the given topic with the given name.
+	CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error)
+	// Updates an existing topic. Note that certain properties of a topic are not
+	// modifiable.  Options settings follow the style guide:
+	// NOTE:  The style guide requires body: "topic" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error)
+	// Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
+	// does not exist. The message payload must not be empty; it must contain
+	// either a non-empty data field or at least one attribute.
+	Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error)
+	// Gets the configuration of a topic.
+	GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error)
+	// Lists matching topics.
+	ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error)
+	// Lists the names of the subscriptions for this topic.
+	ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error)
+	// Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
+	// does not exist. After a topic is deleted, a new topic may be created with
+	// the same name; this is an entirely new topic with none of the old
+	// configuration or subscriptions. Existing subscriptions to this topic are
+	// not deleted, but their `topic` field is set to `_deleted-topic_`.
+	DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+}
+
+type publisherClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewPublisherClient(cc *grpc.ClientConn) PublisherClient {
+	return &publisherClient{cc}
+}
+
+func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) {
+	out := new(Topic)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/CreateTopic", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) {
+	out := new(Topic)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/UpdateTopic", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) {
+	out := new(PublishResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/Publish", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) {
+	out := new(Topic)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/GetTopic", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) {
+	out := new(ListTopicsResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopics", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) {
+	out := new(ListTopicSubscriptionsResponse)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSubscriptions", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *publisherClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/DeleteTopic", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
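+// Illustrative sketch, not part of the generated API: publishing one message
+// with a PublisherClient. As noted on Publish, the message must carry a
+// non-empty data field or at least one attribute. The topic name is a
+// placeholder.
+func examplePublish(ctx context.Context, c PublisherClient) ([]string, error) {
+	resp, err := c.Publish(ctx, &PublishRequest{
+		Topic: "projects/my-project/topics/my-topic",
+		Messages: []*PubsubMessage{
+			{Data: []byte("hello"), Attributes: map[string]string{"origin": "example"}},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	// One server-assigned message ID is returned per published message, in order.
+	return resp.GetMessageIds(), nil
+}
+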
+// Server API for Publisher service
+
+type PublisherServer interface {
+	// Creates the given topic with the given name.
+	CreateTopic(context.Context, *Topic) (*Topic, error)
+	// Updates an existing topic. Note that certain properties of a topic are not
+	// modifiable.  Options settings follow the style guide:
+	// NOTE:  The style guide requires body: "topic" instead of body: "*".
+	// Keeping the latter for internal consistency in V1; however, it should be
+	// corrected in V2. See
+	// https://cloud.google.com/apis/design/standard_methods#update for details.
+	UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error)
+	// Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
+	// does not exist. The message payload must not be empty; it must contain
+	// either a non-empty data field or at least one attribute.
+	Publish(context.Context, *PublishRequest) (*PublishResponse, error)
+	// Gets the configuration of a topic.
+	GetTopic(context.Context, *GetTopicRequest) (*Topic, error)
+	// Lists matching topics.
+	ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error)
+	// Lists the names of the subscriptions for this topic.
+	ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error)
+	// Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
+	// does not exist. After a topic is deleted, a new topic may be created with
+	// the same name; this is an entirely new topic with none of the old
+	// configuration or subscriptions. Existing subscriptions to this topic are
+	// not deleted, but their `topic` field is set to `_deleted-topic_`.
+	DeleteTopic(context.Context, *DeleteTopicRequest) (*google_protobuf2.Empty, error)
+}
+
+func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) {
+	s.RegisterService(&_Publisher_serviceDesc, srv)
+}
+
+func _Publisher_CreateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Topic)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).CreateTopic(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/CreateTopic",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).CreateTopic(ctx, req.(*Topic))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_UpdateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateTopicRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).UpdateTopic(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/UpdateTopic",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).UpdateTopic(ctx, req.(*UpdateTopicRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PublishRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).Publish(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/Publish",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetTopicRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).GetTopic(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/GetTopic",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListTopicsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).ListTopics(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/ListTopics",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListTopicSubscriptionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).ListTopicSubscriptions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/ListTopicSubscriptions",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteTopicRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PublisherServer).DeleteTopic(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Publisher/DeleteTopic",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Publisher_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.pubsub.v1.Publisher",
+	HandlerType: (*PublisherServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "CreateTopic",
+			Handler:    _Publisher_CreateTopic_Handler,
+		},
+		{
+			MethodName: "UpdateTopic",
+			Handler:    _Publisher_UpdateTopic_Handler,
+		},
+		{
+			MethodName: "Publish",
+			Handler:    _Publisher_Publish_Handler,
+		},
+		{
+			MethodName: "GetTopic",
+			Handler:    _Publisher_GetTopic_Handler,
+		},
+		{
+			MethodName: "ListTopics",
+			Handler:    _Publisher_ListTopics_Handler,
+		},
+		{
+			MethodName: "ListTopicSubscriptions",
+			Handler:    _Publisher_ListTopicSubscriptions_Handler,
+		},
+		{
+			MethodName: "DeleteTopic",
+			Handler:    _Publisher_DeleteTopic_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "google/pubsub/v1/pubsub.proto",
+}
+
+func init() { proto.RegisterFile("google/pubsub/v1/pubsub.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 2011 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xcb, 0x73, 0xdb, 0xc6,
+	0x19, 0xcf, 0x92, 0x7a, 0x50, 0x1f, 0xf4, 0xf2, 0x56, 0xb2, 0x69, 0xf8, 0x25, 0xc1, 0x8a, 0x45,
+	0x33, 0x36, 0x29, 0x33, 0x13, 0x37, 0xb6, 0x2a, 0x67, 0x24, 0xcb, 0x75, 0xdc, 0xb1, 0x1b, 0x15,
+	0x72, 0xdb, 0x99, 0x1e, 0xca, 0x01, 0x89, 0x35, 0x8d, 0x90, 0x04, 0x10, 0x00, 0x54, 0xad, 0xb4,
+	0x9e, 0x49, 0x93, 0x4e, 0x67, 0x3a, 0xf5, 0xa1, 0x69, 0x6e, 0x9d, 0x1c, 0x3a, 0xd3, 0x5b, 0x8f,
+	0x9d, 0xe9, 0xb5, 0xff, 0x44, 0xff, 0x85, 0x1e, 0x7b, 0x6f, 0x8f, 0x99, 0x7d, 0x00, 0xc4, 0x63,
+	0x41, 0x8a, 0xb2, 0x7d, 0x03, 0xf6, 0xfb, 0x76, 0xbf, 0xdf, 0xf7, 0xfe, 0x80, 0x85, 0x4b, 0x1d,
+	0xc7, 0xe9, 0xf4, 0x48, 0xdd, 0x1d, 0xb4, 0xfc, 0x41, 0xab, 0x7e, 0x74, 0x4b, 0x3c, 0xd5, 0x5c,
+	0xcf, 0x09, 0x1c, 0xbc, 0xcc, 0xc9, 0x35, 0xb1, 0x78, 0x74, 0x4b, 0xbd, 0x28, 0x36, 0x18, 0xae,
+	0x55, 0x37, 0x6c, 0xdb, 0x09, 0x8c, 0xc0, 0x72, 0x6c, 0x9f, 0xf3, 0xab, 0x97, 0xc3, 0xe3, 0xe8,
+	0x5b, 0x6b, 0xf0, 0xac, 0x6e, 0x0e, 0x3c, 0xc6, 0x20, 0xe8, 0x17, 0xd2, 0x74, 0xd2, 0x77, 0x83,
+	0x63, 0x41, 0x5c, 0x4b, 0x13, 0x9f, 0x59, 0xa4, 0x67, 0x36, 0xfb, 0x86, 0xdf, 0x15, 0x1c, 0x57,
+	0xd2, 0x1c, 0x81, 0xd5, 0x27, 0x7e, 0x60, 0xf4, 0x5d, 0xce, 0xa0, 0x7d, 0x83, 0x60, 0xfa, 0xa9,
+	0xe3, 0x5a, 0x6d, 0x8c, 0x61, 0xca, 0x36, 0xfa, 0xa4, 0x8c, 0xd6, 0x50, 0x65, 0x4e, 0x67, 0xcf,
+	0x78, 0x1b, 0x66, 0x7a, 0x46, 0x8b, 0xf4, 0xfc, 0x72, 0x61, 0xad, 0x58, 0x51, 0x1a, 0x57, 0x6b,
+	0x69, 0xf5, 0x6a, 0x6c, 0x73, 0xed, 0x31, 0xe3, 0x7a, 0x60, 0x07, 0xde, 0xb1, 0x2e, 0xb6, 0xa8,
+	0x77, 0x40, 0x89, 0x2d, 0xe3, 0x65, 0x28, 0x76, 0xc9, 0xb1, 0x38, 0x9e, 0x3e, 0xe2, 0x15, 0x98,
+	0x3e, 0x32, 0x7a, 0x03, 0x52, 0x2e, 0xb0, 0x35, 0xfe, 0x72, 0xb7, 0xf0, 0x21, 0xd2, 0xbe, 0x2e,
+	0xc0, 0xc2, 0x01, 0x13, 0xf1, 0x84, 0xf8, 0xbe, 0xd1, 0x21, 0x14, 0x9d, 0x69, 0x04, 0x06, 0xdb,
+	0x3e, 0xaf, 0xb3, 0x67, 0xfc, 0x09, 0x80, 0x11, 0x04, 0x9e, 0xd5, 0x1a, 0x04, 0x24, 0x44, 0x58,
+	0xcf, 0x22, 0x4c, 0x1c, 0x54, 0xdb, 0x8d, 0x76, 0x70, 0xb4, 0xb1, 0x23, 0xf0, 0x25, 0x80, 0x3e,
+	0x67, 0x6b, 0x5a, 0x66, 0xb9, 0xc8, 0x50, 0xcd, 0x89, 0x95, 0x47, 0x26, 0xde, 0x81, 0x79, 0x77,
+	0xd0, 0xea, 0x59, 0xfe, 0xf3, 0x26, 0x35, 0x63, 0x79, 0x6a, 0x0d, 0x55, 0x94, 0x86, 0x1a, 0x49,
+	0x14, 0x36, 0xae, 0x3d, 0x0d, 0x6d, 0xac, 0x2b, 0x82, 0x9f, 0xae, 0xa8, 0x3b, 0xb0, 0x94, 0x12,
+	0x3e, 0x91, 0x4d, 0x36, 0x61, 0xe9, 0x21, 0x09, 0x98, 0xb9, 0x75, 0xf2, 0xd9, 0x80, 0xf8, 0x01,
+	0x65, 0x0e, 0xe8, 0xbb, 0x38, 0x80, 0xbf, 0x68, 0x5f, 0x20, 0xc0, 0x3f, 0x75, 0x4d, 0x23, 0x20,
+	0x09, 0xe6, 0x9b, 0x71, 0x66, 0xa5, 0x71, 0x2e, 0xc7, 0x95, 0xe2, 0x14, 0xbc, 0x0d, 0xca, 0x80,
+	0x1d, 0xc2, 0xc2, 0x89, 0xc1, 0x91, 0xe9, 0xfa, 0x43, 0x1a, 0x71, 0x4f, 0x0c, 0xbf, 0xab, 0x03,
+	0x67, 0xa7, 0xcf, 0x5a, 0x1b, 0x16, 0x0f, 0xb8, 0xe6, 0x23, 0xa1, 0xe2, 0x6d, 0x28, 0x09, 0xf3,
+	0x86, 0xfe, 0xbb, 0x32, 0xc6, 0x7f, 0x7a, 0xb4, 0x41, 0x6b, 0xc0, 0x52, 0x24, 0xc4, 0x77, 0x1d,
+	0xdb, 0x27, 0xf8, 0x0a, 0x28, 0x43, 0x07, 0xfa, 0x65, 0xb4, 0x56, 0xac, 0xcc, 0xe9, 0x10, 0x79,
+	0xd0, 0xd7, 0x2c, 0x38, 0xf3, 0xd8, 0xf2, 0xb9, 0x15, 0xfd, 0x10, 0x5b, 0x19, 0x66, 0x5d, 0xcf,
+	0xf9, 0x94, 0xb4, 0x03, 0x81, 0x2e, 0x7c, 0xc5, 0x17, 0x60, 0xce, 0xa5, 0x87, 0xf9, 0xd6, 0xe7,
+	0xdc, 0x23, 0xd3, 0x7a, 0x89, 0x2e, 0x1c, 0x5a, 0x9f, 0x13, 0x1a, 0x2d, 0x8c, 0x18, 0x38, 0x5d,
+	0x62, 0x87, 0xd1, 0x42, 0x57, 0x9e, 0xd2, 0x05, 0xad, 0x0f, 0x38, 0x2e, 0x4a, 0x20, 0xac, 0xc3,
+	0x0c, 0x53, 0x9d, 0x83, 0x1b, 0xe1, 0x06, 0xc1, 0x86, 0xaf, 0xc1, 0x92, 0x4d, 0x5e, 0x04, 0xcd,
+	0x98, 0x28, 0x1e, 0x1a, 0x0b, 0x74, 0xf9, 0x20, 0x12, 0xf7, 0x19, 0x5c, 0x8a, 0xc4, 0x1d, 0x0e,
+	0x5a, 0x7e, 0xdb, 0xb3, 0x5c, 0x56, 0x68, 0x46, 0x7b, 0xe0, 0x75, 0x34, 0xb4, 0xe1, 0x72, 0x9e,
+	0x48, 0xa1, 0xed, 0x06, 0x2c, 0xf8, 0x71, 0x82, 0xf0, 0x48, 0x72, 0xf1, 0xc4, 0x2a, 0x56, 0x01,
+	0xef, 0x93, 0x1e, 0x49, 0xc5, 0xb5, 0x3c, 0x09, 0xfe, 0x59, 0x84, 0xf9, 0x38, 0x26, 0x69, 0x79,
+	0x8b, 0xb6, 0x16, 0xe2, 0x26, 0xd9, 0x01, 0xc5, 0x1d, 0xf8, 0xcf, 0x9b, 0x6d, 0xc7, 0x7e, 0x66,
+	0x75, 0x44, 0x96, 0x5f, 0x94, 0xc5, 0xa5, 0xff, 0xfc, 0x3e, 0xe3, 0xd1, 0xc1, 0x8d, 0x9e, 0xf1,
+	0x16, 0xac, 0x18, 0xed, 0x6e, 0xd3, 0x24, 0x86, 0xd9, 0xb3, 0x6c, 0xd2, 0xf4, 0x49, 0xdb, 0xb1,
+	0x4d, 0xbf, 0x3c, 0xcd, 0x8c, 0x8b, 0x8d, 0x76, 0x77, 0x5f, 0x90, 0x0e, 0x39, 0x05, 0x37, 0x60,
+	0xd5, 0x23, 0x81, 0x61, 0xd9, 0x4d, 0xa3, 0xdd, 0x25, 0x66, 0x33, 0x4a, 0x89, 0xd9, 0x35, 0x54,
+	0x29, 0xe9, 0xdf, 0xe3, 0xc4, 0x5d, 0x4a, 0x13, 0x59, 0xe0, 0xe3, 0x9f, 0x83, 0x1a, 0x46, 0xba,
+	0x47, 0x02, 0x62, 0x53, 0x1d, 0x9b, 0x61, 0xef, 0x28, 0x97, 0x18, 0xe6, 0xf3, 0x99, 0x6c, 0xdd,
+	0x17, 0x0c, 0x7a, 0x59, 0x6c, 0xd6, 0xc3, 0xbd, 0x21, 0x05, 0xef, 0x45, 0x25, 0x7f, 0x8e, 0x05,
+	0x68, 0x35, 0xab, 0x78, 0xdc, 0xae, 0x6f, 0xba, 0xf2, 0xff, 0x0b, 0x01, 0x0c, 0x0d, 0x8b, 0xaf,
+	0xc2, 0x02, 0xf3, 0x05, 0xb1, 0x4d, 0xd7, 0xb1, 0xec, 0x30, 0x41, 0xe7, 0xe9, 0xe2, 0x03, 0xb1,
+	0x86, 0x1f, 0x4b, 0xfa, 0xc0, 0x8d, 0x51, 0xfe, 0x1a, 0xd5, 0x04, 0x5e, 0xb7, 0x4c, 0xb7, 0x61,
+	0x49, 0x27, 0x6d, 0x62, 0x1d, 0x45, 0xce, 0xc2, 0xab, 0x30, 0x43, 0x23, 0xc2, 0x32, 0xc3, 0x10,
+	0x35, 0xda, 0xdd, 0x47, 0x26, 0xbe, 0x03, 0xb3, 0xc2, 0x0b, 0xa2, 0xba, 0x8e, 0xad, 0x7d, 0x21,
+	0xbf, 0xf6, 0x03, 0x38, 0xfb, 0x90, 0x04, 0x71, 0x3f, 0x84, 0xd9, 0xa0, 0xc1, 0x7c, 0x3c, 0xb9,
+	0x42, 0x7b, 0xc5, 0xd7, 0xb4, 0x6f, 0x11, 0x9c, 0xe7, 0x0d, 0x42, 0x76, 0xc2, 0x9e, 0xe4, 0x04,
+	0xa5, 0x71, 0x79, 0x74, 0x18, 0x24, 0x25, 0xbc, 0x5e, 0xf3, 0x70, 0xa1, 0x4c, 0xcb, 0x8a, 0xb4,
+	0x88, 0xbd, 0x9d, 0x52, 0xfd, 0x07, 0x04, 0xe7, 0x25, 0x22, 0x45, 0x11, 0xdb, 0x97, 0x15, 0xb1,
+	0xf1, 0x16, 0x39, 0x65, 0x91, 0xfb, 0x08, 0xce, 0xf3, 0x22, 0x77, 0x5a, 0xef, 0xfe, 0x06, 0xce,
+	0x3d, 0x71, 0x4c, 0xeb, 0xd9, 0x71, 0xac, 0x3e, 0x9d, 0x7c, 0x7b, 0xba, 0xfa, 0x15, 0x26, 0xab,
+	0x7e, 0xda, 0x57, 0x08, 0x94, 0x83, 0x41, 0xaf, 0x37, 0x89, 0xc8, 0x9b, 0x80, 0x3d, 0x12, 0x0c,
+	0x3c, 0xbb, 0x69, 0xf5, 0xfb, 0xc4, 0xb4, 0x8c, 0x80, 0xf4, 0x8e, 0x99, 0xe4, 0x92, 0x7e, 0x86,
+	0x53, 0x1e, 0x0d, 0x09, 0x78, 0x1d, 0xe6, 0xfb, 0xc6, 0x8b, 0x61, 0x95, 0x2c, 0x32, 0x67, 0x2b,
+	0x7d, 0xe3, 0x45, 0x58, 0x1d, 0xb5, 0x5f, 0xc2, 0x3c, 0x07, 0x21, 0x5c, 0xf8, 0x63, 0x38, 0xe3,
+	0x89, 0xa4, 0x1c, 0xee, 0xe3, 0x6e, 0x5c, 0xcf, 0xaa, 0x96, 0xca, 0x5f, 0x7d, 0xd9, 0x4b, 0x2e,
+	0xf8, 0x34, 0x60, 0xca, 0xdc, 0xc8, 0xbb, 0xc3, 0x72, 0x3e, 0x89, 0xca, 0xe7, 0x60, 0x96, 0x97,
+	0x04, 0xbf, 0x3c, 0xc5, 0x5a, 0xe2, 0x0c, 0xab, 0x09, 0x7e, 0x6e, 0xf7, 0x28, 0xe6, 0x75, 0x0f,
+	0xed, 0x27, 0x80, 0x77, 0xdb, 0x5d, 0xdb, 0xf9, 0x55, 0x8f, 0x98, 0x9d, 0xd3, 0x82, 0x28, 0xc4,
+	0x41, 0x68, 0xbf, 0x2d, 0xc0, 0xca, 0x61, 0xe0, 0x11, 0xa3, 0x6f, 0xd9, 0x9d, 0x49, 0xbd, 0x99,
+	0x77, 0x2a, 0xbe, 0x0d, 0xe7, 0xfa, 0xcc, 0x66, 0x32, 0xed, 0x8a, 0x95, 0x69, 0x7d, 0x95, 0x93,
+	0xd3, 0xed, 0xf1, 0x83, 0xec, 0xbe, 0xa4, 0xed, 0x56, 0x92, 0xfb, 0x76, 0xb9, 0xb8, 0x1d, 0xb8,
+	0xe0, 0x33, 0x1d, 0x9a, 0x23, 0xda, 0x71, 0x99, 0xb3, 0xec, 0x66, 0xcd, 0xda, 0x81, 0xd5, 0x94,
+	0x09, 0xde, 0x52, 0x2c, 0x7d, 0x02, 0xab, 0xf7, 0x3d, 0x42, 0x8b, 0xb1, 0x6d, 0xb8, 0xfe, 0x73,
+	0x27, 0x08, 0x8d, 0x2d, 0x9b, 0x58, 0xd2, 0x0e, 0x28, 0x48, 0x0a, 0xc0, 0x2b, 0x04, 0xab, 0xa2,
+	0xbc, 0xa7, 0x4e, 0xbc, 0x0d, 0x25, 0x5f, 0x2c, 0x89, 0xb2, 0xae, 0x4a, 0x8a, 0x58, 0xb8, 0x29,
+	0xe2, 0x7d, 0xbd, 0x72, 0xfe, 0x5f, 0x04, 0xa5, 0xf0, 0xcc, 0x09, 0xa6, 0xb0, 0x6d, 0x50, 0xc8,
+	0x0b, 0xd7, 0xf2, 0x08, 0xff, 0xd6, 0x2a, 0x8e, 0xfd, 0xd6, 0x02, 0xce, 0x4e, 0x17, 0xf0, 0xbd,
+	0x68, 0x88, 0x99, 0x62, 0x8e, 0xb9, 0x96, 0xaf, 0xe6, 0x9b, 0x1e, 0x60, 0x7a, 0xb0, 0xc2, 0x5a,
+	0x89, 0x38, 0xfe, 0x2d, 0x77, 0xae, 0x63, 0x58, 0x4d, 0x49, 0x13, 0x51, 0xfa, 0x21, 0xcc, 0x85,
+	0xee, 0x0b, 0xa3, 0x73, 0x94, 0xaf, 0x87, 0xcc, 0x27, 0x6e, 0x54, 0xef, 0xc3, 0xaa, 0x68, 0x54,
+	0xa9, 0x28, 0x53, 0x53, 0x51, 0x36, 0x37, 0x8c, 0x24, 0xed, 0x8f, 0x08, 0x94, 0x43, 0x42, 0xba,
+	0x93, 0x14, 0x94, 0x2d, 0x98, 0x62, 0x21, 0x50, 0x18, 0x17, 0x02, 0x1f, 0xbf, 0xa3, 0x33, 0x4e,
+	0x7c, 0x31, 0x86, 0x80, 0x99, 0xec, 0xe3, 0x77, 0x86, 0x18, 0xf6, 0x4a, 0x30, 0x13, 0x18, 0x5e,
+	0x87, 0x04, 0xda, 0x22, 0xcc, 0x73, 0x30, 0xdc, 0x68, 0x8d, 0xff, 0x2d, 0x03, 0x88, 0xb6, 0xdb,
+	0x22, 0x1e, 0xfe, 0x3d, 0x02, 0x2c, 0x52, 0x33, 0x8e, 0x67, 0x4c, 0xe3, 0x57, 0xc7, 0xd0, 0xb5,
+	0xad, 0x2f, 0xff, 0xfd, 0x9f, 0x6f, 0x0a, 0x55, 0xf5, 0xdd, 0xfa, 0xd1, 0xad, 0xfa, 0xaf, 0x69,
+	0x0a, 0xec, 0x88, 0x50, 0xf0, 0xeb, 0xd5, 0x7a, 0x62, 0x6a, 0xa8, 0x57, 0x5f, 0xde, 0x45, 0x55,
+	0xfc, 0x67, 0xc4, 0xbe, 0xfd, 0x13, 0x28, 0x2a, 0x59, 0x29, 0xf2, 0x91, 0x70, 0x2c, 0x9e, 0x0f,
+	0x18, 0x9e, 0x3a, 0xbe, 0xc9, 0xf0, 0xc4, 0xe5, 0x8f, 0xc2, 0x85, 0xff, 0x1a, 0xfd, 0x66, 0x48,
+	0xe0, 0x7a, 0x2f, 0x2b, 0x2d, 0x77, 0xd6, 0x1c, 0x0b, 0x6d, 0x87, 0x41, 0xfb, 0x7e, 0xa3, 0x91,
+	0x81, 0x56, 0x3b, 0x89, 0xdd, 0xbe, 0x45, 0xfc, 0x73, 0x3f, 0x31, 0xd7, 0x61, 0xc9, 0x17, 0x4d,
+	0xde, 0xbc, 0xa9, 0xbe, 0x77, 0x22, 0x5e, 0x1e, 0x3e, 0x5a, 0x8d, 0xa1, 0xad, 0xe0, 0x6b, 0x0c,
+	0xad, 0xc0, 0x16, 0xc3, 0xf8, 0x32, 0x09, 0x12, 0xff, 0x09, 0x85, 0x1f, 0xb4, 0xe3, 0x2c, 0x98,
+	0x3b, 0x11, 0xaa, 0x67, 0x33, 0xe9, 0xf0, 0xa0, 0xef, 0x06, 0xc7, 0xa1, 0x53, 0xab, 0x13, 0x3a,
+	0xf5, 0x6f, 0x08, 0xce, 0x64, 0x06, 0x1b, 0x99, 0xc5, 0xf2, 0xa6, 0x9f, 0x5c, 0x40, 0x3f, 0x62,
+	0x80, 0xf6, 0xb5, 0x8f, 0x26, 0x02, 0x74, 0xb7, 0x9f, 0x96, 0x43, 0xfd, 0xfa, 0x35, 0x02, 0x25,
+	0x36, 0xf3, 0xe0, 0x8d, 0x2c, 0xbe, 0xec, 0x48, 0x94, 0x8b, 0x6c, 0x9f, 0x21, 0xbb, 0xa7, 0xdd,
+	0x99, 0x0c, 0x99, 0x31, 0x94, 0x40, 0x31, 0xfd, 0x0e, 0xc1, 0x14, 0x9d, 0x13, 0xf0, 0x25, 0xd9,
+	0xac, 0x1c, 0x8d, 0x50, 0xb2, 0x90, 0x8f, 0x8f, 0x17, 0x61, 0xc8, 0x6b, 0x8d, 0xc9, 0xd0, 0xb8,
+	0x83, 0x5e, 0x8f, 0xc2, 0x30, 0x61, 0x21, 0x31, 0xb6, 0x60, 0x59, 0xeb, 0x93, 0x8c, 0x76, 0xea,
+	0xe6, 0x58, 0x3e, 0x0e, 0xb0, 0x82, 0xb6, 0x10, 0xcd, 0xfd, 0xe5, 0xf4, 0x47, 0x06, 0xbe, 0x9e,
+	0x17, 0x25, 0x99, 0x0f, 0x91, 0x5c, 0x57, 0x3c, 0x62, 0xca, 0xdf, 0xd7, 0xee, 0x9d, 0x26, 0x48,
+	0x86, 0x62, 0xa8, 0x21, 0x5e, 0x21, 0x58, 0x48, 0xb4, 0x46, 0x99, 0x25, 0x64, 0x9d, 0x5a, 0x66,
+	0x09, 0x69, 0x8f, 0xd5, 0xaa, 0x0c, 0xed, 0x06, 0xd6, 0xf2, 0xf3, 0x3d, 0x12, 0xfe, 0x15, 0x82,
+	0xc5, 0xe4, 0x98, 0x87, 0x25, 0x72, 0xa4, 0x83, 0xa0, 0x3a, 0xa2, 0x71, 0x6b, 0x37, 0x18, 0x86,
+	0x6b, 0xea, 0xba, 0xbc, 0x99, 0x84, 0xf2, 0x45, 0x41, 0x7c, 0x85, 0x60, 0x31, 0x39, 0x1a, 0xca,
+	0x50, 0x48, 0x87, 0xc7, 0x91, 0x28, 0x44, 0xb5, 0x69, 0x54, 0xb9, 0xdf, 0xc2, 0xd1, 0x6a, 0x1c,
+	0x9c, 0x2f, 0x10, 0x2c, 0x26, 0x67, 0x08, 0x19, 0x1c, 0xe9, 0x94, 0x91, 0x1b, 0x42, 0x37, 0x19,
+	0x94, 0xcd, 0xea, 0xbb, 0x09, 0x28, 0x79, 0x28, 0x58, 0xda, 0xd2, 0x19, 0x40, 0x96, 0xb6, 0xb1,
+	0x41, 0x45, 0xda, 0xa9, 0x62, 0xa3, 0xc3, 0x69, 0xd3, 0xd6, 0x27, 0xa4, 0x7b, 0x17, 0x55, 0x1b,
+	0x7f, 0x99, 0x85, 0x39, 0xf1, 0x33, 0x9b, 0x78, 0xf8, 0x53, 0x50, 0x78, 0x24, 0xf0, 0x9b, 0x99,
+	0xbc, 0x7f, 0xc4, 0x6a, 0x1e, 0x41, 0xbb, 0xce, 0xd0, 0x5c, 0x55, 0x2f, 0x4b, 0xa3, 0x82, 0xff,
+	0x59, 0x16, 0x3e, 0x78, 0x09, 0x4a, 0xec, 0xb2, 0x40, 0x56, 0x4a, 0xb3, 0x77, 0x09, 0xf9, 0x82,
+	0xeb, 0x4c, 0xf0, 0xf5, 0xc6, 0x06, 0x13, 0xcc, 0x04, 0xd5, 0x46, 0x8a, 0xff, 0x12, 0xc1, 0xac,
+	0x50, 0x1c, 0xaf, 0x49, 0xff, 0x7f, 0xc5, 0x6e, 0x11, 0xd4, 0xf5, 0x11, 0x1c, 0xc2, 0x11, 0x0d,
+	0x86, 0xe0, 0x86, 0xb6, 0x39, 0x44, 0x20, 0x17, 0x2e, 0xae, 0x66, 0x28, 0x08, 0x07, 0x4a, 0xe1,
+	0xd5, 0x0a, 0x5e, 0x97, 0xce, 0x55, 0x27, 0xd3, 0x7e, 0x93, 0xc9, 0x5e, 0xc7, 0x57, 0xc6, 0xc8,
+	0xa6, 0x81, 0x0f, 0xc3, 0xcb, 0x01, 0x7c, 0x55, 0x5e, 0x71, 0x12, 0xb7, 0x14, 0xea, 0xc6, 0x68,
+	0x26, 0xa1, 0x7e, 0x12, 0x82, 0xac, 0x26, 0x89, 0x7b, 0x85, 0x7f, 0x20, 0x38, 0x2b, 0xff, 0x7b,
+	0x8f, 0xeb, 0x23, 0x24, 0x49, 0xa7, 0xa4, 0xad, 0x93, 0x6f, 0x10, 0x30, 0x93, 0x33, 0x67, 0xbe,
+	0xa5, 0x52, 0x13, 0x53, 0x00, 0x4a, 0xec, 0x06, 0x40, 0x16, 0xac, 0xd9, 0x0b, 0x82, 0xdc, 0x4a,
+	0x21, 0x4c, 0x55, 0x1d, 0xe7, 0xad, 0xbd, 0x63, 0x58, 0x69, 0x3b, 0xfd, 0x8c, 0xac, 0x3d, 0x85,
+	0xff, 0x9d, 0x3d, 0xa0, 0xc7, 0x1e, 0xa0, 0x5f, 0xdc, 0x16, 0x0c, 0x1d, 0xa7, 0x67, 0xd8, 0x9d,
+	0x9a, 0xe3, 0x75, 0xea, 0x1d, 0x62, 0x33, 0xa1, 0x75, 0x4e, 0x32, 0x5c, 0xcb, 0x1f, 0x5e, 0x1c,
+	0x6f, 0xf3, 0xa7, 0xff, 0x23, 0xf4, 0xf7, 0xc2, 0xd9, 0x87, 0x7c, 0xef, 0xfd, 0x9e, 0x33, 0x30,
+	0x69, 0x4c, 0x1f, 0x0e, 0x5a, 0xb5, 0x9f, 0xdd, 0x6a, 0xcd, 0xb0, 0xed, 0xef, 0x7f, 0x17, 0x00,
+	0x00, 0xff, 0xff, 0x68, 0xa2, 0x62, 0x98, 0x76, 0x1e, 0x00, 0x00,
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
index dcaf502..41de8b8 100644
--- a/vendor/gopkg.in/yaml.v2/emitterc.go
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -995,9 +995,9 @@
 		space_break    = false
 
 		preceded_by_whitespace = false
-		followed_by_whitespace  = false
-		previous_space          = false
-		previous_break          = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
 	)
 
 	emitter.scalar_data.value = value