Browse Source

Merge pull request #38882 from tonistiigi/buildkit-update

builder-next: update buildkit to c3541087 (v0.4.0)
Sebastiaan van Stijn 6 năm trước cách đây
mục cha
commit
38005cfc12
100 tập tin đã thay đổi với 5962 bổ sung3347 xóa
  1. 21 17
      builder/builder-next/adapters/containerimage/pull.go
  2. 163 0
      builder/builder-next/adapters/localinlinecache/inlinecache.go
  3. 11 1
      builder/builder-next/adapters/snapshot/layer.go
  4. 12 0
      builder/builder-next/builder.go
  5. 42 14
      builder/builder-next/controller.go
  6. 1 1
      builder/builder-next/exporter/export.go
  7. 9 1
      builder/builder-next/exporter/writer.go
  8. 96 22
      builder/builder-next/worker/worker.go
  9. 8 7
      vendor.conf
  10. 13 10
      vendor/github.com/containerd/containerd/README.md
  11. 86 40
      vendor/github.com/containerd/containerd/api/events/task.pb.go
  12. 3 0
      vendor/github.com/containerd/containerd/api/events/task.proto
  13. 1 1
      vendor/github.com/containerd/containerd/archive/time_unix.go
  14. 5 1
      vendor/github.com/containerd/containerd/cio/io_windows.go
  15. 83 154
      vendor/github.com/containerd/containerd/client.go
  16. 16 0
      vendor/github.com/containerd/containerd/client_opts.go
  17. 79 1
      vendor/github.com/containerd/containerd/container.go
  18. 155 0
      vendor/github.com/containerd/containerd/container_checkpoint_opts.go
  19. 5 4
      vendor/github.com/containerd/containerd/container_opts.go
  20. 0 69
      vendor/github.com/containerd/containerd/container_opts_unix.go
  21. 150 0
      vendor/github.com/containerd/containerd/container_restore_opts.go
  22. 4 0
      vendor/github.com/containerd/containerd/containers/containers.go
  23. 1 1
      vendor/github.com/containerd/containerd/errdefs/grpc.go
  24. 4 4
      vendor/github.com/containerd/containerd/events/exchange/exchange.go
  25. 6 19
      vendor/github.com/containerd/containerd/export.go
  26. 1 1
      vendor/github.com/containerd/containerd/filters/parser.go
  27. 1 1
      vendor/github.com/containerd/containerd/identifiers/validate.go
  28. 14 2
      vendor/github.com/containerd/containerd/images/handlers.go
  29. 8 5
      vendor/github.com/containerd/containerd/images/mediatypes.go
  30. 241 0
      vendor/github.com/containerd/containerd/images/oci/exporter.go
  31. 2 1
      vendor/github.com/containerd/containerd/install.go
  32. 83 7
      vendor/github.com/containerd/containerd/metadata/buckets.go
  33. 1 1
      vendor/github.com/containerd/containerd/metadata/containers.go
  34. 29 12
      vendor/github.com/containerd/containerd/metadata/content.go
  35. 24 2
      vendor/github.com/containerd/containerd/metadata/db.go
  36. 1 1
      vendor/github.com/containerd/containerd/metadata/images.go
  37. 1 1
      vendor/github.com/containerd/containerd/metadata/leases.go
  38. 1 1
      vendor/github.com/containerd/containerd/metadata/snapshot.go
  39. 1 10
      vendor/github.com/containerd/containerd/oci/spec.go
  40. 61 10
      vendor/github.com/containerd/containerd/oci/spec_opts.go
  41. 67 0
      vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
  42. 9 0
      vendor/github.com/containerd/containerd/plugin/plugin.go
  43. 190 0
      vendor/github.com/containerd/containerd/pull.go
  44. 6 2
      vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
  45. 22 7
      vendor/github.com/containerd/containerd/remotes/docker/resolver.go
  46. 6 3
      vendor/github.com/containerd/containerd/remotes/handlers.go
  47. 203 35
      vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
  48. 4 0
      vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto
  49. 14 3
      vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go
  50. 8 3
      vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go
  51. 1 0
      vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go
  52. 30 0
      vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
  53. 38 0
      vendor/github.com/containerd/containerd/runtime/v1/shim.go
  54. 28 6
      vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
  55. 7 0
      vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
  56. 195 0
      vendor/github.com/containerd/containerd/runtime/v2/README.md
  57. 17 0
      vendor/github.com/containerd/containerd/runtime/v2/runc/options/doc.go
  58. 1313 0
      vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
  59. 58 0
      vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
  60. 492 0
      vendor/github.com/containerd/containerd/services/content/service.go
  61. 71 0
      vendor/github.com/containerd/containerd/services/content/store.go
  62. 38 0
      vendor/github.com/containerd/containerd/services/server/config/config.go
  63. 36 0
      vendor/github.com/containerd/containerd/services/services.go
  64. 0 60
      vendor/github.com/containerd/containerd/signal_map_linux.go
  65. 0 58
      vendor/github.com/containerd/containerd/signal_map_unix.go
  66. 1 23
      vendor/github.com/containerd/containerd/signals.go
  67. 47 0
      vendor/github.com/containerd/containerd/signals_unix.go
  68. 24 0
      vendor/github.com/containerd/containerd/signals_windows.go
  69. 11 9
      vendor/github.com/containerd/containerd/snapshots/snapshotter.go
  70. 55 6
      vendor/github.com/containerd/containerd/task.go
  71. 54 0
      vendor/github.com/containerd/containerd/task_opts.go
  72. 40 18
      vendor/github.com/containerd/containerd/task_opts_unix.go
  73. 20 19
      vendor/github.com/containerd/containerd/vendor.conf
  74. 10 11
      vendor/github.com/containerd/containerd/version/version.go
  75. 10 0
      vendor/github.com/containerd/ttrpc/README.md
  76. 6 0
      vendor/github.com/containerd/ttrpc/client.go
  77. 15 0
      vendor/github.com/containerd/ttrpc/server.go
  78. 4 3
      vendor/github.com/containerd/ttrpc/types.go
  79. 27 0
      vendor/github.com/gofrs/flock/LICENSE
  80. 40 0
      vendor/github.com/gofrs/flock/README.md
  81. 127 0
      vendor/github.com/gofrs/flock/flock.go
  82. 195 0
      vendor/github.com/gofrs/flock/flock_unix.go
  83. 76 0
      vendor/github.com/gofrs/flock/flock_winapi.go
  84. 140 0
      vendor/github.com/gofrs/flock/flock_windows.go
  85. 2 3
      vendor/github.com/gogo/protobuf/LICENSE
  86. 60 20
      vendor/github.com/gogo/protobuf/README
  87. 23 3
      vendor/github.com/gogo/protobuf/Readme.md
  88. 1 1
      vendor/github.com/gogo/protobuf/gogoproto/doc.go
  89. 230 162
      vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
  90. 11 0
      vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
  91. 61 3
      vendor/github.com/gogo/protobuf/gogoproto/helper.go
  92. 14 15
      vendor/github.com/gogo/protobuf/io/varint.go
  93. 35 11
      vendor/github.com/gogo/protobuf/proto/clone.go
  94. 39 0
      vendor/github.com/gogo/protobuf/proto/custom_gogo.go
  95. 63 613
      vendor/github.com/gogo/protobuf/proto/decode.go
  96. 0 172
      vendor/github.com/gogo/protobuf/proto/decode_gogo.go
  97. 200 1
      vendor/github.com/gogo/protobuf/proto/discard.go
  98. 0 154
      vendor/github.com/gogo/protobuf/proto/duration_gogo.go
  99. 25 1184
      vendor/github.com/gogo/protobuf/proto/encode.go
  100. 0 317
      vendor/github.com/gogo/protobuf/proto/encode_gogo.go

+ 21 - 17
builder/builder-next/adapters/containerimage/pull.go

@@ -45,7 +45,6 @@ import (
 
 
 // SourceOpt is options for creating the image source
 // SourceOpt is options for creating the image source
 type SourceOpt struct {
 type SourceOpt struct {
-	SessionManager  *session.Manager
 	ContentStore    content.Store
 	ContentStore    content.Store
 	CacheAccessor   cache.Accessor
 	CacheAccessor   cache.Accessor
 	ReferenceStore  reference.Store
 	ReferenceStore  reference.Store
@@ -73,19 +72,19 @@ func (is *imageSource) ID() string {
 	return source.DockerImageScheme
 	return source.DockerImageScheme
 }
 }
 
 
-func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
+func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string, sm *session.Manager) remotes.Resolver {
 	opt := docker.ResolverOptions{
 	opt := docker.ResolverOptions{
 		Client: tracing.DefaultClient,
 		Client: tracing.DefaultClient,
 	}
 	}
 	if rfn != nil {
 	if rfn != nil {
 		opt = rfn(ref)
 		opt = rfn(ref)
 	}
 	}
-	opt.Credentials = is.getCredentialsFromSession(ctx)
+	opt.Credentials = is.getCredentialsFromSession(ctx, sm)
 	r := docker.NewResolver(opt)
 	r := docker.NewResolver(opt)
 	return r
 	return r
 }
 }
 
 
-func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
+func (is *imageSource) getCredentialsFromSession(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
 	id := session.FromContext(ctx)
 	id := session.FromContext(ctx)
 	if id == "" {
 	if id == "" {
 		// can be removed after containerd/containerd#2812
 		// can be removed after containerd/containerd#2812
@@ -97,7 +96,7 @@ func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(strin
 		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 		defer cancel()
 		defer cancel()
 
 
-		caller, err := is.SessionManager.Get(timeoutCtx, id)
+		caller, err := sm.Get(timeoutCtx, id)
 		if err != nil {
 		if err != nil {
 			return "", "", err
 			return "", "", err
 		}
 		}
@@ -122,13 +121,13 @@ func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
 	return img.RawJSON(), nil
 	return img.RawJSON(), nil
 }
 }
 
 
-func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
+func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager) (digest.Digest, []byte, error) {
 	type t struct {
 	type t struct {
 		dgst digest.Digest
 		dgst digest.Digest
 		dt   []byte
 		dt   []byte
 	}
 	}
 	res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
 	res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
-		dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref), is.ContentStore, platform)
+		dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, platform)
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
@@ -142,14 +141,14 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
 	return typed.dgst, typed.dt, nil
 	return typed.dgst, typed.dt, nil
 }
 }
 
 
-func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
+func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
 	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
 	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
 	if err != nil {
 	if err != nil {
 		return "", nil, err
 		return "", nil, err
 	}
 	}
 	switch resolveMode {
 	switch resolveMode {
 	case source.ResolveModeForcePull:
 	case source.ResolveModeForcePull:
-		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform)
+		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm)
 		// TODO: pull should fallback to local in case of failure to allow offline behavior
 		// TODO: pull should fallback to local in case of failure to allow offline behavior
 		// the fallback doesn't work currently
 		// the fallback doesn't work currently
 		return dgst, dt, err
 		return dgst, dt, err
@@ -171,13 +170,13 @@ func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt g
 			return "", dt, err
 			return "", dt, err
 		}
 		}
 		// fallback to remote
 		// fallback to remote
-		return is.resolveRemote(ctx, ref, opt.Platform)
+		return is.resolveRemote(ctx, ref, opt.Platform, sm)
 	}
 	}
 	// should never happen
 	// should never happen
 	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
 	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
 }
 }
 
 
-func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
+func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
 	imageIdentifier, ok := id.(*source.ImageIdentifier)
 	imageIdentifier, ok := id.(*source.ImageIdentifier)
 	if !ok {
 	if !ok {
 		return nil, errors.Errorf("invalid image identifier %v", id)
 		return nil, errors.Errorf("invalid image identifier %v", id)
@@ -191,8 +190,9 @@ func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (sourc
 	p := &puller{
 	p := &puller{
 		src:      imageIdentifier,
 		src:      imageIdentifier,
 		is:       is,
 		is:       is,
-		resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String()),
+		resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String(), sm),
 		platform: platform,
 		platform: platform,
+		sm:       sm,
 	}
 	}
 	return p, nil
 	return p, nil
 }
 }
@@ -208,6 +208,7 @@ type puller struct {
 	resolver         remotes.Resolver
 	resolver         remotes.Resolver
 	config           []byte
 	config           []byte
 	platform         ocispec.Platform
 	platform         ocispec.Platform
+	sm               *session.Manager
 }
 }
 
 
 func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) {
 func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) {
@@ -294,7 +295,7 @@ func (p *puller) resolve(ctx context.Context) error {
 				resolveProgressDone(err)
 				resolveProgressDone(err)
 				return
 				return
 			}
 			}
-			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)})
+			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm)
 			if err != nil {
 			if err != nil {
 				p.resolveErr = err
 				p.resolveErr = err
 				resolveProgressDone(err)
 				resolveProgressDone(err)
@@ -380,6 +381,7 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
+	platform := platforms.Only(p.platform)
 	var (
 	var (
 		schema1Converter *schema1.Converter
 		schema1Converter *schema1.Converter
 		handlers         []images.Handler
 		handlers         []images.Handler
@@ -412,7 +414,9 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		// Set any children labels for that content
 		// Set any children labels for that content
 		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
 		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
 		// Filter the children by the platform
 		// Filter the children by the platform
-		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())
+		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
+		// Limit manifests pulled to the best match in an index
+		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)
 
 
 		handlers = append(handlers,
 		handlers = append(handlers,
 			remotes.FetchHandler(p.is.ContentStore, fetcher),
 			remotes.FetchHandler(p.is.ContentStore, fetcher),
@@ -420,7 +424,7 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		)
 		)
 	}
 	}
 
 
-	if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil {
+	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
 		stopProgress()
 		stopProgress()
 		return nil, err
 		return nil, err
 	}
 	}
@@ -433,12 +437,12 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		}
 		}
 	}
 	}
 
 
-	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
+	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platforms.Default())
+	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}

+ 163 - 0
builder/builder-next/adapters/localinlinecache/inlinecache.go

@@ -0,0 +1,163 @@
+package localinlinecache
+
+import (
+	"context"
+	"encoding/json"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	distreference "github.com/docker/distribution/reference"
+	imagestore "github.com/docker/docker/image"
+	"github.com/docker/docker/reference"
+	"github.com/moby/buildkit/cache/remotecache"
+	registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/resolver"
+	"github.com/moby/buildkit/worker"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ResolveCacheImporterFunc returns a resolver function for local inline cache
+func ResolveCacheImporterFunc(sm *session.Manager, resolverOpt resolver.ResolveOptionsFunc, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
+
+	upstream := registryremotecache.ResolveCacheImporterFunc(sm, resolverOpt)
+
+	return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
+		if dt, err := tryImportLocal(rs, is, attrs["ref"]); err == nil {
+			return newLocalImporter(dt), specs.Descriptor{}, nil
+		}
+		return upstream(ctx, attrs)
+	}
+}
+
+func tryImportLocal(rs reference.Store, is imagestore.Store, refStr string) ([]byte, error) {
+	ref, err := distreference.ParseNormalizedNamed(refStr)
+	if err != nil {
+		return nil, err
+	}
+	dgst, err := rs.Get(ref)
+	if err != nil {
+		return nil, err
+	}
+	img, err := is.Get(imagestore.ID(dgst))
+	if err != nil {
+		return nil, err
+	}
+
+	return img.RawJSON(), nil
+}
+
+func newLocalImporter(dt []byte) remotecache.Importer {
+	return &localImporter{dt: dt}
+}
+
+type localImporter struct {
+	dt []byte
+}
+
+func (li *localImporter) Resolve(ctx context.Context, _ specs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+	cc := v1.NewCacheChains()
+	if err := li.importInlineCache(ctx, li.dt, cc); err != nil {
+		return nil, err
+	}
+
+	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+	if err != nil {
+		return nil, err
+	}
+	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+}
+
+func (li *localImporter) importInlineCache(ctx context.Context, dt []byte, cc solver.CacheExporterTarget) error {
+	var img image
+
+	if err := json.Unmarshal(dt, &img); err != nil {
+		return err
+	}
+
+	if img.Cache == nil {
+		return nil
+	}
+
+	var config v1.CacheConfig
+	if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
+		return err
+	}
+
+	createdDates, createdMsg, err := parseCreatedLayerInfo(img)
+	if err != nil {
+		return err
+	}
+
+	layers := v1.DescriptorProvider{}
+	for i, diffID := range img.Rootfs.DiffIDs {
+		dgst := digest.Digest(diffID.String())
+		desc := specs.Descriptor{
+			Digest:      dgst,
+			Size:        -1,
+			MediaType:   images.MediaTypeDockerSchema2Layer,
+			Annotations: map[string]string{},
+		}
+		if createdAt := createdDates[i]; createdAt != "" {
+			desc.Annotations["buildkit/createdat"] = createdAt
+		}
+		if createdBy := createdMsg[i]; createdBy != "" {
+			desc.Annotations["buildkit/description"] = createdBy
+		}
+		desc.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
+		layers[dgst] = v1.DescriptorProviderPair{
+			Descriptor: desc,
+			Provider:   &emptyProvider{},
+		}
+		config.Layers = append(config.Layers, v1.CacheLayer{
+			Blob:        dgst,
+			ParentIndex: i - 1,
+		})
+	}
+
+	return v1.ParseConfig(config, layers, cc)
+}
+
+type image struct {
+	Rootfs struct {
+		DiffIDs []digest.Digest `json:"diff_ids"`
+	} `json:"rootfs"`
+	Cache   []byte `json:"moby.buildkit.cache.v0"`
+	History []struct {
+		Created    *time.Time `json:"created,omitempty"`
+		CreatedBy  string     `json:"created_by,omitempty"`
+		EmptyLayer bool       `json:"empty_layer,omitempty"`
+	} `json:"history,omitempty"`
+}
+
+func parseCreatedLayerInfo(img image) ([]string, []string, error) {
+	dates := make([]string, 0, len(img.Rootfs.DiffIDs))
+	createdBy := make([]string, 0, len(img.Rootfs.DiffIDs))
+	for _, h := range img.History {
+		if !h.EmptyLayer {
+			str := ""
+			if h.Created != nil {
+				dt, err := h.Created.MarshalText()
+				if err != nil {
+					return nil, nil, err
+				}
+				str = string(dt)
+			}
+			dates = append(dates, str)
+			createdBy = append(createdBy, h.CreatedBy)
+		}
+	}
+	return dates, createdBy, nil
+}
+
+type emptyProvider struct {
+}
+
+func (p *emptyProvider) ReaderAt(ctx context.Context, dec specs.Descriptor) (content.ReaderAt, error) {
+	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
+}

+ 11 - 1
builder/builder-next/adapters/snapshot/layer.go

@@ -12,12 +12,22 @@ import (
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/errgroup"
 )
 )
 
 
-func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
+func (s *snapshotter) GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error) {
 	if l, err := s.getLayer(key, true); err != nil {
 	if l, err := s.getLayer(key, true); err != nil {
 		return nil, err
 		return nil, err
 	} else if l != nil {
 	} else if l != nil {
 		return getDiffChain(l), nil
 		return getDiffChain(l), nil
 	}
 	}
+	return nil, nil
+}
+
+func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
+	diffIDs, err := s.GetDiffIDs(ctx, key)
+	if err != nil {
+		return nil, err
+	} else if diffIDs != nil {
+		return diffIDs, nil
+	}
 
 
 	id, committed := s.getGraphDriverID(key)
 	id, committed := s.getGraphDriverID(key)
 	if !committed {
 	if !committed {

+ 12 - 0
builder/builder-next/builder.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"net"
 	"net"
+	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 	"time"
 	"time"
@@ -318,6 +319,16 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 		exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
 		exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
 	}
 	}
 
 
+	cache := controlapi.CacheOptions{}
+
+	if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil {
+		if b, err := strconv.ParseBool(*inlineCache); err == nil && b {
+			cache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{
+				Type: "inline",
+			})
+		}
+	}
+
 	req := &controlapi.SolveRequest{
 	req := &controlapi.SolveRequest{
 		Ref:           id,
 		Ref:           id,
 		Exporter:      "moby",
 		Exporter:      "moby",
@@ -325,6 +336,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 		Frontend:      "dockerfile.v0",
 		Frontend:      "dockerfile.v0",
 		FrontendAttrs: frontendAttrs,
 		FrontendAttrs: frontendAttrs,
 		Session:       opt.Options.SessionID,
 		Session:       opt.Options.SessionID,
+		Cache:         cache,
 	}
 	}
 
 
 	if opt.Options.NetworkMode == "host" {
 	if opt.Options.NetworkMode == "host" {

+ 42 - 14
builder/builder-next/controller.go

@@ -6,8 +6,10 @@ import (
 	"path/filepath"
 	"path/filepath"
 
 
 	"github.com/containerd/containerd/content/local"
 	"github.com/containerd/containerd/content/local"
+	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
+	"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
 	"github.com/docker/docker/builder/builder-next/adapters/snapshot"
 	"github.com/docker/docker/builder/builder-next/adapters/snapshot"
 	containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
 	containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
 	"github.com/docker/docker/builder/builder-next/imagerefchecker"
 	"github.com/docker/docker/builder/builder-next/imagerefchecker"
@@ -17,17 +19,19 @@ import (
 	units "github.com/docker/go-units"
 	units "github.com/docker/go-units"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/cache/metadata"
-	registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
+	"github.com/moby/buildkit/cache/remotecache"
+	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/control"
-	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend"
 	dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
 	dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
 	"github.com/moby/buildkit/frontend/gateway"
 	"github.com/moby/buildkit/frontend/gateway"
 	"github.com/moby/buildkit/frontend/gateway/forwarder"
 	"github.com/moby/buildkit/frontend/gateway/forwarder"
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/solver/bboltcachestorage"
 	"github.com/moby/buildkit/solver/bboltcachestorage"
+	"github.com/moby/buildkit/util/binfmt_misc"
 	"github.com/moby/buildkit/worker"
 	"github.com/moby/buildkit/worker"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
@@ -94,7 +98,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 	}
 	}
 
 
 	src, err := containerimage.NewSource(containerimage.SourceOpt{
 	src, err := containerimage.NewSource(containerimage.SourceOpt{
-		SessionManager:  opt.SessionManager,
 		CacheAccessor:   cm,
 		CacheAccessor:   cm,
 		ContentStore:    store,
 		ContentStore:    store,
 		DownloadManager: dist.DownloadManager,
 		DownloadManager: dist.DownloadManager,
@@ -136,9 +139,18 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, errors.Wrap(err, "could not get builder GC policy")
 		return nil, errors.Wrap(err, "could not get builder GC policy")
 	}
 	}
 
 
+	layers, ok := sbase.(mobyworker.LayerAccess)
+	if !ok {
+		return nil, errors.Errorf("snapshotter doesn't support differ")
+	}
+
+	p, err := parsePlatforms(binfmt_misc.SupportedPlatforms())
+	if err != nil {
+		return nil, err
+	}
+
 	wopt := mobyworker.Opt{
 	wopt := mobyworker.Opt{
 		ID:                "moby",
 		ID:                "moby",
-		SessionManager:    opt.SessionManager,
 		MetadataStore:     md,
 		MetadataStore:     md,
 		ContentStore:      store,
 		ContentStore:      store,
 		CacheManager:      cm,
 		CacheManager:      cm,
@@ -148,10 +160,10 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		ImageSource:       src,
 		ImageSource:       src,
 		DownloadManager:   dist.DownloadManager,
 		DownloadManager:   dist.DownloadManager,
 		V2MetadataService: dist.V2MetadataService,
 		V2MetadataService: dist.V2MetadataService,
-		Exporters: map[string]exporter.Exporter{
-			"moby": exp,
-		},
-		Transport: rt,
+		Exporter:          exp,
+		Transport:         rt,
+		Layers:            layers,
+		Platforms:         p,
 	}
 	}
 
 
 	wc := &worker.Controller{}
 	wc := &worker.Controller{}
@@ -167,12 +179,16 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 	}
 	}
 
 
 	return control.NewController(control.Opt{
 	return control.NewController(control.Opt{
-		SessionManager:           opt.SessionManager,
-		WorkerController:         wc,
-		Frontends:                frontends,
-		CacheKeyStorage:          cacheStorage,
-		ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt),
-		// TODO: set ResolveCacheExporterFunc for exporting cache
+		SessionManager:   opt.SessionManager,
+		WorkerController: wc,
+		Frontends:        frontends,
+		CacheKeyStorage:  cacheStorage,
+		ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
+			"registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt, dist.ReferenceStore, dist.ImageStore),
+		},
+		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
+			"inline": inlineremotecache.ResolveCacheExporterFunc(),
+		},
 	})
 	})
 }
 }
 
 
@@ -216,3 +232,15 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
 	}
 	}
 	return gcPolicy, nil
 	return gcPolicy, nil
 }
 }
+
+func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
+	out := make([]specs.Platform, 0, len(platformsStr))
+	for _, s := range platformsStr {
+		p, err := platforms.Parse(s)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, platforms.Normalize(p))
+	}
+	return out, nil
+}

+ 1 - 1
builder/builder-next/exporter/export.go

@@ -148,7 +148,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source)
 
 
 	diffs, history = normalizeLayersAndHistory(diffs, history, ref)
 	diffs, history = normalizeLayersAndHistory(diffs, history, ref)
 
 
-	config, err = patchImageConfig(config, diffs, history)
+	config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}

+ 9 - 1
builder/builder-next/exporter/writer.go

@@ -41,7 +41,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
 	return config.History, nil
 	return config.History, nil
 }
 }
 
 
-func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History) ([]byte, error) {
+func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
 	m := map[string]json.RawMessage{}
 	m := map[string]json.RawMessage{}
 	if err := json.Unmarshal(dt, &m); err != nil {
 	if err := json.Unmarshal(dt, &m); err != nil {
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -77,6 +77,14 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History)
 		m["created"] = dt
 		m["created"] = dt
 	}
 	}
 
 
+	if cache != nil {
+		dt, err := json.Marshal(cache)
+		if err != nil {
+			return nil, err
+		}
+		m["moby.buildkit.cache.v0"] = dt
+	}
+
 	dt, err = json.Marshal(m)
 	dt, err = json.Marshal(m)
 	return dt, errors.Wrap(err, "failed to marshal config after patch")
 	return dt, errors.Wrap(err, "failed to marshal config after patch")
 }
 }

+ 96 - 22
builder/builder-next/worker/worker.go

@@ -10,6 +10,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/docker/docker/distribution"
 	"github.com/docker/docker/distribution"
@@ -23,6 +24,7 @@ import (
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/exporter"
+	localexporter "github.com/moby/buildkit/exporter/local"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session"
@@ -42,22 +44,31 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 )
 )
 
 
+const labelCreatedAt = "buildkit/createdat"
+
+// LayerAccess provides access to a moby layer from a snapshot
+type LayerAccess interface {
+	GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
+	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
+}
+
 // Opt defines a structure for creating a worker.
 // Opt defines a structure for creating a worker.
 type Opt struct {
 type Opt struct {
 	ID                string
 	ID                string
 	Labels            map[string]string
 	Labels            map[string]string
 	GCPolicy          []client.PruneInfo
 	GCPolicy          []client.PruneInfo
-	SessionManager    *session.Manager
 	MetadataStore     *metadata.Store
 	MetadataStore     *metadata.Store
 	Executor          executor.Executor
 	Executor          executor.Executor
 	Snapshotter       snapshot.Snapshotter
 	Snapshotter       snapshot.Snapshotter
 	ContentStore      content.Store
 	ContentStore      content.Store
 	CacheManager      cache.Manager
 	CacheManager      cache.Manager
 	ImageSource       source.Source
 	ImageSource       source.Source
-	Exporters         map[string]exporter.Exporter
 	DownloadManager   distribution.RootFSDownloadManager
 	DownloadManager   distribution.RootFSDownloadManager
 	V2MetadataService distmetadata.V2MetadataService
 	V2MetadataService distmetadata.V2MetadataService
 	Transport         nethttp.RoundTripper
 	Transport         nethttp.RoundTripper
+	Exporter          exporter.Exporter
+	Layers            LayerAccess
+	Platforms         []ocispec.Platform
 }
 }
 
 
 // Worker is a local worker instance with dedicated snapshotter, cache, and so on.
 // Worker is a local worker instance with dedicated snapshotter, cache, and so on.
@@ -99,9 +110,8 @@ func NewWorker(opt Opt) (*Worker, error) {
 	}
 	}
 
 
 	ss, err := local.NewSource(local.Opt{
 	ss, err := local.NewSource(local.Opt{
-		SessionManager: opt.SessionManager,
-		CacheAccessor:  cm,
-		MetadataStore:  opt.MetadataStore,
+		CacheAccessor: cm,
+		MetadataStore: opt.MetadataStore,
 	})
 	})
 	if err == nil {
 	if err == nil {
 		sm.Register(ss)
 		sm.Register(ss)
@@ -127,8 +137,10 @@ func (w *Worker) Labels() map[string]string {
 
 
 // Platforms returns one or more platforms supported by the image.
 // Platforms returns one or more platforms supported by the image.
 func (w *Worker) Platforms() []ocispec.Platform {
 func (w *Worker) Platforms() []ocispec.Platform {
-	// does not handle lcow
-	return []ocispec.Platform{platforms.DefaultSpec()}
+	if len(w.Opt.Platforms) == 0 {
+		return []ocispec.Platform{platforms.DefaultSpec()}
+	}
+	return w.Opt.Platforms
 }
 }
 
 
 // GCPolicy returns automatic GC Policy
 // GCPolicy returns automatic GC Policy
@@ -146,13 +158,13 @@ func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) {
 }
 }
 
 
 // ResolveOp converts a LLB vertex into a LLB operation
 // ResolveOp converts a LLB vertex into a LLB operation
-func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) {
+func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
 	if baseOp, ok := v.Sys().(*pb.Op); ok {
 	if baseOp, ok := v.Sys().(*pb.Op); ok {
 		switch op := baseOp.Op.(type) {
 		switch op := baseOp.Op.(type) {
 		case *pb.Op_Source:
 		case *pb.Op_Source:
-			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, w)
+			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
 		case *pb.Op_Exec:
 		case *pb.Op_Exec:
-			return ops.NewExecOp(v, op, w.CacheManager, w.Opt.SessionManager, w.MetadataStore, w.Executor, w)
+			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w)
 		case *pb.Op_Build:
 		case *pb.Op_Build:
 			return ops.NewBuildOp(v, op, s, w)
 			return ops.NewBuildOp(v, op, s, w)
 		}
 		}
@@ -161,13 +173,13 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solve
 }
 }
 
 
 // ResolveImageConfig returns image config for an image
 // ResolveImageConfig returns image config for an image
-func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
+func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
 	// ImageSource is typically source/containerimage
 	// ImageSource is typically source/containerimage
 	resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
 	resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
 	if !ok {
 	if !ok {
 		return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
 		return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
 	}
 	}
-	return resolveImageConfig.ResolveImageConfig(ctx, ref, opt)
+	return resolveImageConfig.ResolveImageConfig(ctx, ref, opt, sm)
 }
 }
 
 
 // Exec executes a process directly on a worker
 // Exec executes a process directly on a worker
@@ -191,17 +203,51 @@ func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...cl
 }
 }
 
 
 // Exporter returns exporter by name
 // Exporter returns exporter by name
-func (w *Worker) Exporter(name string) (exporter.Exporter, error) {
-	exp, ok := w.Exporters[name]
-	if !ok {
+func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
+	switch name {
+	case "moby":
+		return w.Opt.Exporter, nil
+	case client.ExporterLocal:
+		return localexporter.New(localexporter.Opt{
+			SessionManager: sm,
+		})
+	default:
 		return nil, errors.Errorf("exporter %q could not be found", name)
 		return nil, errors.Errorf("exporter %q could not be found", name)
 	}
 	}
-	return exp, nil
 }
 }
 
 
 // GetRemote returns a remote snapshot reference for a local one
 // GetRemote returns a remote snapshot reference for a local one
 func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
 func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
-	return nil, errors.Errorf("getremote not implemented")
+	var diffIDs []layer.DiffID
+	var err error
+	if !createIfNeeded {
+		diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		if err := ref.Finalize(ctx, true); err != nil {
+			return nil, err
+		}
+		diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	descriptors := make([]ocispec.Descriptor, len(diffIDs))
+	for i, dgst := range diffIDs {
+		descriptors[i] = ocispec.Descriptor{
+			MediaType: images.MediaTypeDockerSchema2Layer,
+			Digest:    digest.Digest(dgst),
+			Size:      -1,
+		}
+	}
+
+	return &solver.Remote{
+		Descriptors: descriptors,
+		Provider:    &emptyProvider{},
+	}, nil
 }
 }
 
 
 // FromRemote converts a remote snapshot reference to a local one
 // FromRemote converts a remote snapshot reference to a local one
@@ -237,11 +283,32 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
 	}
 	}
 	defer release()
 	defer release()
 
 
-	ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
-	if err != nil {
-		return nil, err
+	if len(rootFS.DiffIDs) != len(layers) {
+		return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
 	}
 	}
-	return ref, nil
+
+	for i := range rootFS.DiffIDs {
+		tm := time.Now()
+		if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
+			if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
+				return nil, err
+			}
+		}
+		descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
+		if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
+			descr = v
+		}
+		ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(layer.CreateChainID(rootFS.DiffIDs[:i+1])), cache.WithDescription(descr), cache.WithCreationTime(tm))
+		if err != nil {
+			return nil, err
+		}
+		if i == len(remote.Descriptors)-1 {
+			return ref, nil
+		}
+		defer ref.Release(context.TODO())
+	}
+
+	return nil, errors.Errorf("unreachable")
 }
 }
 
 
 type discardProgress struct{}
 type discardProgress struct{}
@@ -338,5 +405,12 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
 }
 }
 
 
 type resolveImageConfig interface {
 type resolveImageConfig interface {
-	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
+}
+
+type emptyProvider struct {
+}
+
+func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
+	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
 }
 }

+ 8 - 7
vendor.conf

@@ -14,7 +14,7 @@ github.com/sirupsen/logrus v1.0.6
 github.com/tchap/go-patricia v2.2.6
 github.com/tchap/go-patricia v2.2.6
 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
 golang.org/x/net a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1
 golang.org/x/net a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1
-golang.org/x/sys 41f3e6584952bb034a481797859f6ab34b6803bd
+golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba
 github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
 github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
 github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
 github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
 golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
 golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
@@ -26,13 +26,14 @@ github.com/imdario/mergo v0.3.6
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
 
 
 # buildkit
 # buildkit
-github.com/moby/buildkit 34ff9c2366a878ada7938d2f9ede71741b0a220c
-github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca
+github.com/moby/buildkit c35410878ab9070498c66f6c67d3e8bc3b92241f
+github.com/tonistiigi/fsutil 1ec1983587cde7e8ac2978e354ff5360af622464
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
 github.com/opentracing-contrib/go-stdlib  b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
 github.com/opentracing-contrib/go-stdlib  b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
 github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
 github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
+github.com/gofrs/flock 7f43ea2e6a643ad441fc12d0ecc0d3388b300c53 # v0.7.0
 
 
 #get libnetwork packages
 #get libnetwork packages
 
 
@@ -88,7 +89,7 @@ github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/coreos/go-systemd v17
 github.com/coreos/go-systemd v17
 github.com/godbus/dbus v4.0.0
 github.com/godbus/dbus v4.0.0
 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/golang/protobuf v1.1.0
+github.com/golang/protobuf v1.2.0
 
 
 # gelf logging driver deps
 # gelf logging driver deps
 github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
 github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
@@ -118,19 +119,19 @@ github.com/googleapis/gax-go v2.0.0
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 
 
 # containerd
 # containerd
-github.com/containerd/containerd bb71b10fd8f58240ca47fbb579b9d1028eea7c84 # v1.2.5
+github.com/containerd/containerd a15b6e2097c48b632dbdc63254bad4c62b69e709
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
 github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
 github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
-github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
+github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 
 
 # cluster
 # cluster
 github.com/docker/swarmkit 415dc72789e2b733ea884f09188c286ca187d8ec
 github.com/docker/swarmkit 415dc72789e2b733ea884f09188c286ca187d8ec
-github.com/gogo/protobuf v1.0.0
+github.com/gogo/protobuf v1.2.0
 github.com/cloudflare/cfssl 1.3.2
 github.com/cloudflare/cfssl 1.3.2
 github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
 github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
 github.com/google/certificate-transparency-go v1.0.20
 github.com/google/certificate-transparency-go v1.0.20

+ 13 - 10
vendor/github.com/containerd/containerd/README.md

@@ -172,11 +172,9 @@ checkpoint, err := task.Checkpoint(context)
 err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
 err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
 
 
 // on a new machine pull the checkpoint and restore the redis container
 // on a new machine pull the checkpoint and restore the redis container
-image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
+checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master")
 
 
-checkpoint := image.Target()
-
-redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
+redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
 defer container.Delete(context)
 defer container.Delete(context)
 
 
 task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
 task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
@@ -212,11 +210,6 @@ See [PLUGINS.md](PLUGINS.md) for how to create plugins
 Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
 Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
 of containerd components.
 of containerd components.
 
 
-### Development reports.
-
-Weekly summary on the progress and what is being worked on.
-https://github.com/containerd/containerd/tree/master/reports
-
 ### Communication
 ### Communication
 
 
 For async communication and long running discussions please use issues and pull requests on the github repo.
 For async communication and long running discussions please use issues and pull requests on the github repo.
@@ -224,7 +217,12 @@ This will be the best place to discuss design and implementation.
 
 
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 
 
-**Slack:** https://join.slack.com/t/dockercommunity/shared_invite/enQtNDM4NjAwNDMyOTUwLWZlMDZmYWRjZjk4Zjc5ZGQ5NWZkOWI1Yjk2NGE3ZWVlYjYxM2VhYjczOWIyZDFhZTE3NTUwZWQzMjhmNGYyZTg
+**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
+[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)
+
+### Security audit
+
+A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory.
 
 
 ### Reporting security issues
 ### Reporting security issues
 
 
@@ -249,3 +247,8 @@ Please find all these core project documents, including the:
  * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
  * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
 
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Adoption
+
+Interested to see who is using containerd? Are you using containerd in a project?
+Please add yourself via pull request to our [ADOPTERS.md](./ADOPTERS.md) file.

+ 86 - 40
vendor/github.com/containerd/containerd/api/events/task.pb.go

@@ -55,6 +55,9 @@ type TaskDelete struct {
 	Pid         uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
 	Pid         uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
 	ExitStatus  uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
 	ExitStatus  uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
 	ExitedAt    time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
 	ExitedAt    time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	// id is the specific exec. By default if omitted will be `""` thus matches
+	// the init exec of the task matching `container_id`.
+	ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
 }
 }
 
 
 func (m *TaskDelete) Reset()                    { *m = TaskDelete{} }
 func (m *TaskDelete) Reset()                    { *m = TaskDelete{} }
@@ -210,6 +213,8 @@ func (m *TaskDelete) Field(fieldpath []string) (string, bool) {
 	// unhandled: exited_at
 	// unhandled: exited_at
 	case "container_id":
 	case "container_id":
 		return string(m.ContainerID), len(m.ContainerID) > 0
 		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "id":
+		return string(m.ID), len(m.ID) > 0
 	}
 	}
 	return "", false
 	return "", false
 }
 }
@@ -474,6 +479,12 @@ func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) {
 		return 0, err
 		return 0, err
 	}
 	}
 	i += n2
 	i += n2
+	if len(m.ID) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
 	return i, nil
 	return i, nil
 }
 }
 
 
@@ -806,6 +817,10 @@ func (m *TaskDelete) Size() (n int) {
 	}
 	}
 	l = types.SizeOfStdTime(m.ExitedAt)
 	l = types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTask(uint64(l))
 	n += 1 + l + sovTask(uint64(l))
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
 	return n
 	return n
 }
 }
 
 
@@ -975,6 +990,7 @@ func (this *TaskDelete) String() string {
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
 		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
 		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`}`,
 		`}`,
 	}, "")
 	}, "")
 	return s
 	return s
@@ -1522,6 +1538,35 @@ func (m *TaskDelete) Unmarshal(dAtA []byte) error {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
 			skippy, err := skipTask(dAtA[iNdEx:])
 			skippy, err := skipTask(dAtA[iNdEx:])
@@ -2566,45 +2611,46 @@ func init() {
 }
 }
 
 
 var fileDescriptorTask = []byte{
 var fileDescriptorTask = []byte{
-	// 637 bytes of a gzipped FileDescriptorProto
+	// 644 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40,
-	0x10, 0xc7, 0x63, 0xa7, 0x75, 0x93, 0x09, 0x55, 0x8b, 0x55, 0x41, 0xc8, 0xc1, 0x8e, 0xcc, 0x25,
-	0x27, 0x5b, 0x04, 0x89, 0x0b, 0x42, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x4a, 0x71, 0x7b, 0xa8, 0xb8,
-	0x44, 0x4e, 0x76, 0x93, 0x2c, 0x8d, 0xbd, 0x96, 0x3d, 0x46, 0x45, 0xe2, 0xc0, 0x23, 0xf0, 0x08,
-	0x3c, 0x05, 0xcf, 0xd0, 0x03, 0x07, 0x8e, 0x9c, 0x02, 0xf5, 0x03, 0x70, 0xe2, 0x01, 0xd0, 0x7a,
-	0x1d, 0xb7, 0x50, 0xf1, 0x65, 0x89, 0x53, 0x76, 0x66, 0x67, 0xff, 0x33, 0xf3, 0xdb, 0xc9, 0x1a,
-	0x1e, 0xcd, 0x19, 0x2e, 0x92, 0x89, 0x3d, 0xe5, 0xbe, 0x33, 0xe5, 0x01, 0x7a, 0x2c, 0xa0, 0x11,
-	0xb9, 0xbe, 0xf4, 0x42, 0xe6, 0xd0, 0x97, 0x34, 0xc0, 0xd8, 0x41, 0x2f, 0x3e, 0xb3, 0xc3, 0x88,
-	0x23, 0xd7, 0x6f, 0x5f, 0x45, 0xd8, 0x72, 0xb7, 0xb5, 0x37, 0xe7, 0x73, 0x9e, 0xed, 0x3a, 0x62,
-	0x25, 0x03, 0x5b, 0xe6, 0x9c, 0xf3, 0xf9, 0x92, 0x3a, 0x99, 0x35, 0x49, 0x66, 0x0e, 0x32, 0x9f,
-	0xc6, 0xe8, 0xf9, 0x61, 0x1e, 0xf0, 0x77, 0x15, 0xe0, 0xab, 0x90, 0xc6, 0x8e, 0xcf, 0x93, 0x00,
-	0xf3, 0x73, 0xfb, 0x7f, 0x3c, 0x57, 0xa4, 0x0c, 0x97, 0xc9, 0x9c, 0x05, 0xce, 0x8c, 0xd1, 0x25,
-	0x09, 0x3d, 0x5c, 0x48, 0x05, 0xeb, 0xab, 0x02, 0x70, 0xe2, 0xc5, 0x67, 0x07, 0x11, 0xf5, 0x90,
-	0xea, 0x5d, 0xb8, 0x55, 0x1c, 0x1e, 0x33, 0xd2, 0x54, 0xda, 0x4a, 0xa7, 0xde, 0xdf, 0x49, 0x57,
-	0x66, 0xe3, 0x60, 0xed, 0x1f, 0x0e, 0xdc, 0x46, 0x11, 0x34, 0x24, 0xfa, 0x1d, 0xd0, 0x26, 0x49,
-	0x40, 0x96, 0xb4, 0xa9, 0x8a, 0x68, 0x37, 0xb7, 0x74, 0x07, 0xb4, 0x88, 0x73, 0x9c, 0xc5, 0xcd,
-	0x6a, 0xbb, 0xda, 0x69, 0x74, 0xef, 0xda, 0xd7, 0x78, 0x65, 0xbd, 0xd8, 0x87, 0xa2, 0x17, 0x37,
-	0x0f, 0xd3, 0x1f, 0x80, 0xca, 0x78, 0x73, 0xa3, 0xad, 0x74, 0x1a, 0xdd, 0x7b, 0xf6, 0x0d, 0xb8,
-	0xb6, 0xa8, 0x73, 0x38, 0xea, 0x6b, 0xe9, 0xca, 0x54, 0x87, 0x23, 0x57, 0x65, 0x5c, 0x37, 0x00,
-	0xa6, 0x0b, 0x3a, 0x3d, 0x0b, 0x39, 0x0b, 0xb0, 0xb9, 0x99, 0xe5, 0xbf, 0xe6, 0xd1, 0x77, 0xa1,
-	0x1a, 0x32, 0xd2, 0xd4, 0xda, 0x4a, 0x67, 0xdb, 0x15, 0x4b, 0xeb, 0x19, 0xd4, 0x85, 0xce, 0x31,
-	0x7a, 0x11, 0x96, 0x6a, 0x37, 0x97, 0x54, 0xaf, 0x24, 0xdf, 0xe7, 0x0c, 0x07, 0x74, 0x49, 0x4b,
-	0x32, 0xbc, 0x21, 0xaa, 0x9b, 0xd0, 0xa0, 0xe7, 0x0c, 0xc7, 0x31, 0x7a, 0x98, 0x08, 0x84, 0x62,
-	0x07, 0x84, 0xeb, 0x38, 0xf3, 0xe8, 0x3d, 0xa8, 0x0b, 0x8b, 0x92, 0xb1, 0x87, 0x39, 0xb4, 0x96,
-	0x2d, 0x07, 0xcd, 0x5e, 0xdf, 0xba, 0x7d, 0xb2, 0x1e, 0xb4, 0x7e, 0xed, 0x62, 0x65, 0x56, 0xde,
-	0x7e, 0x36, 0x15, 0xb7, 0x26, 0x8f, 0xf5, 0xd0, 0x7a, 0x01, 0x9a, 0x64, 0xaa, 0xef, 0xc1, 0x66,
-	0x8c, 0x84, 0x05, 0xb2, 0x58, 0x57, 0x1a, 0xe2, 0x66, 0x63, 0x24, 0x3c, 0xc1, 0xf5, 0xcd, 0x4a,
-	0x2b, 0xf7, 0xd3, 0x28, 0xca, 0xca, 0x92, 0x7e, 0x1a, 0x45, 0x7a, 0x0b, 0x6a, 0x48, 0x23, 0x9f,
-	0x05, 0xde, 0x32, 0xab, 0xa8, 0xe6, 0x16, 0xb6, 0xf5, 0x41, 0x81, 0x9a, 0x48, 0xf6, 0xf4, 0x9c,
-	0x61, 0xc9, 0x31, 0x53, 0x73, 0x42, 0xf5, 0x7c, 0x04, 0x06, 0xae, 0xca, 0x0a, 0x74, 0xd5, 0x5f,
-	0xa2, 0xdb, 0xf8, 0x3d, 0xba, 0xcd, 0x52, 0xe8, 0x9e, 0xc0, 0x96, 0xe8, 0x66, 0x34, 0x3a, 0x2c,
-	0xd3, 0x8c, 0xb5, 0x80, 0x6d, 0x09, 0x83, 0x4e, 0x7b, 0x84, 0x50, 0x52, 0x8a, 0xc8, 0x7d, 0xd8,
-	0xa2, 0xe7, 0x74, 0x3a, 0x2e, 0xb0, 0x40, 0xba, 0x32, 0x35, 0xa1, 0x39, 0x1c, 0xb8, 0x9a, 0xd8,
-	0x1a, 0x12, 0xeb, 0x35, 0xec, 0xac, 0x33, 0x65, 0x33, 0xff, 0x1f, 0x73, 0xdd, 0xbc, 0x0a, 0x6b,
-	0x5f, 0xfe, 0x33, 0x8e, 0xbc, 0x24, 0x2e, 0x97, 0xd8, 0xea, 0x41, 0x43, 0x28, 0xb8, 0x34, 0x4e,
-	0xfc, 0x92, 0x12, 0x33, 0xd8, 0xcd, 0x9e, 0xb8, 0xe2, 0x59, 0x28, 0xc9, 0xe0, 0xc7, 0xc7, 0x46,
-	0xfd, 0xf9, 0xb1, 0xe9, 0x1f, 0x5d, 0x5c, 0x1a, 0x95, 0x4f, 0x97, 0x46, 0xe5, 0x4d, 0x6a, 0x28,
-	0x17, 0xa9, 0xa1, 0x7c, 0x4c, 0x0d, 0xe5, 0x4b, 0x6a, 0x28, 0xef, 0xbe, 0x19, 0xca, 0xf3, 0xee,
-	0x3f, 0x7c, 0x65, 0x1e, 0xcb, 0x9f, 0xd3, 0xca, 0x69, 0x75, 0xa2, 0x65, 0x13, 0xf9, 0xf0, 0x7b,
-	0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x69, 0x62, 0x9d, 0xa6, 0x06, 0x00, 0x00,
+	0x10, 0xc7, 0x63, 0xa7, 0x75, 0xd3, 0x09, 0x55, 0x8b, 0x55, 0x95, 0x90, 0x83, 0x1d, 0x99, 0x4b,
+	0x4e, 0xb6, 0x08, 0x12, 0x17, 0x84, 0xd4, 0xa4, 0xe1, 0x90, 0x43, 0x95, 0xe2, 0xf6, 0x50, 0x71,
+	0x89, 0x36, 0xd9, 0x4d, 0xb2, 0x34, 0xf1, 0x5a, 0xf6, 0x18, 0x15, 0x89, 0x03, 0x8f, 0xc0, 0x23,
+	0xf0, 0x38, 0x3d, 0x20, 0xc4, 0x91, 0x53, 0xa0, 0x7e, 0x00, 0x4e, 0x3c, 0x00, 0x5a, 0xaf, 0x93,
+	0xb6, 0x54, 0x7c, 0x59, 0xe2, 0x94, 0x9d, 0xd9, 0xd9, 0xff, 0xec, 0xfc, 0x76, 0x3c, 0x81, 0xc7,
+	0x13, 0x8e, 0xd3, 0x64, 0xe8, 0x8e, 0xc4, 0xdc, 0x1b, 0x89, 0x00, 0x09, 0x0f, 0x58, 0x44, 0xaf,
+	0x2f, 0x49, 0xc8, 0x3d, 0xf6, 0x8a, 0x05, 0x18, 0x7b, 0x48, 0xe2, 0x33, 0x37, 0x8c, 0x04, 0x0a,
+	0xf3, 0xee, 0x55, 0x84, 0xab, 0x76, 0xeb, 0xbb, 0x13, 0x31, 0x11, 0xd9, 0xae, 0x27, 0x57, 0x2a,
+	0xb0, 0x6e, 0x4f, 0x84, 0x98, 0xcc, 0x98, 0x97, 0x59, 0xc3, 0x64, 0xec, 0x21, 0x9f, 0xb3, 0x18,
+	0xc9, 0x3c, 0xcc, 0x03, 0xfe, 0xee, 0x06, 0xf8, 0x3a, 0x64, 0xb1, 0x37, 0x17, 0x49, 0x80, 0xf9,
+	0xb9, 0xfd, 0x3f, 0x9e, 0x5b, 0xa5, 0x0c, 0x67, 0xc9, 0x84, 0x07, 0xde, 0x98, 0xb3, 0x19, 0x0d,
+	0x09, 0x4e, 0x95, 0x82, 0xf3, 0x4d, 0x03, 0x38, 0x21, 0xf1, 0xd9, 0x41, 0xc4, 0x08, 0x32, 0xb3,
+	0x05, 0x77, 0x56, 0x87, 0x07, 0x9c, 0xd6, 0xb4, 0x86, 0xd6, 0xdc, 0xec, 0x6c, 0xa7, 0x0b, 0xbb,
+	0x7a, 0xb0, 0xf4, 0xf7, 0xba, 0x7e, 0x75, 0x15, 0xd4, 0xa3, 0xe6, 0x1e, 0x18, 0xc3, 0x24, 0xa0,
+	0x33, 0x56, 0xd3, 0x65, 0xb4, 0x9f, 0x5b, 0xa6, 0x07, 0x46, 0x24, 0x04, 0x8e, 0xe3, 0x5a, 0xb9,
+	0x51, 0x6e, 0x56, 0x5b, 0xf7, 0xdc, 0x6b, 0xbc, 0xb2, 0x5a, 0xdc, 0x43, 0x59, 0x8b, 0x9f, 0x87,
+	0x99, 0x0f, 0x41, 0xe7, 0xa2, 0xb6, 0xd6, 0xd0, 0x9a, 0xd5, 0xd6, 0x7d, 0xf7, 0x16, 0x5c, 0x57,
+	0xde, 0xb3, 0xd7, 0xef, 0x18, 0xe9, 0xc2, 0xd6, 0x7b, 0x7d, 0x5f, 0xe7, 0xc2, 0xb4, 0x00, 0x46,
+	0x53, 0x36, 0x3a, 0x0b, 0x05, 0x0f, 0xb0, 0xb6, 0x9e, 0xe5, 0xbf, 0xe6, 0x31, 0x77, 0xa0, 0x1c,
+	0x72, 0x5a, 0x33, 0x1a, 0x5a, 0x73, 0xcb, 0x97, 0x4b, 0xe7, 0x39, 0x6c, 0x4a, 0x9d, 0x63, 0x24,
+	0x11, 0x16, 0x2a, 0x37, 0x97, 0xd4, 0xaf, 0x24, 0x3f, 0xe6, 0x0c, 0xbb, 0x6c, 0xc6, 0x0a, 0x32,
+	0xbc, 0x25, 0x6a, 0xda, 0x50, 0x65, 0xe7, 0x1c, 0x07, 0x31, 0x12, 0x4c, 0x24, 0x42, 0xb9, 0x03,
+	0xd2, 0x75, 0x9c, 0x79, 0xcc, 0x36, 0x6c, 0x4a, 0x8b, 0xd1, 0x01, 0xc1, 0x1c, 0x5a, 0xdd, 0x55,
+	0x8d, 0xe6, 0x2e, 0x5f, 0xdd, 0x3d, 0x59, 0x36, 0x5a, 0xa7, 0x72, 0xb1, 0xb0, 0x4b, 0xef, 0xbe,
+	0xd8, 0x9a, 0x5f, 0x51, 0xc7, 0xda, 0x68, 0xee, 0x81, 0xce, 0xa9, 0xa2, 0x96, 0x53, 0xed, 0xfa,
+	0x3a, 0xa7, 0xce, 0x4b, 0x30, 0x14, 0x6b, 0x73, 0x17, 0xd6, 0x63, 0xa4, 0x3c, 0x50, 0x45, 0xf8,
+	0xca, 0x90, 0x2f, 0x1e, 0x23, 0x15, 0x09, 0x2e, 0x5f, 0x5c, 0x59, 0xb9, 0x9f, 0x45, 0x51, 0x76,
+	0x5d, 0xe5, 0x67, 0x51, 0x64, 0xd6, 0xa1, 0x82, 0x2c, 0x9a, 0xf3, 0x80, 0xcc, 0xb2, 0x9b, 0x56,
+	0xfc, 0x95, 0xed, 0x7c, 0xd0, 0xa0, 0x22, 0x93, 0x3d, 0x3b, 0xe7, 0x58, 0xb0, 0xfd, 0xf4, 0x9c,
+	0xdc, 0x8d, 0x22, 0x96, 0x48, 0xcb, 0xbf, 0x44, 0xba, 0xf6, 0x7b, 0xa4, 0xeb, 0x45, 0x90, 0x3a,
+	0x4f, 0x61, 0x43, 0x56, 0xd3, 0xef, 0x1f, 0x16, 0x29, 0xc6, 0x99, 0xc2, 0x96, 0x82, 0xc1, 0x46,
+	0x6d, 0x4a, 0x19, 0x2d, 0x44, 0xe4, 0x01, 0x6c, 0xb0, 0x73, 0x36, 0x1a, 0xac, 0xb0, 0x40, 0xba,
+	0xb0, 0x0d, 0xa9, 0xd9, 0xeb, 0xfa, 0x86, 0xdc, 0xea, 0x51, 0xe7, 0x0d, 0x6c, 0x2f, 0x33, 0x65,
+	0xdf, 0xc2, 0x7f, 0xcc, 0x75, 0xfb, 0x29, 0x9c, 0x7d, 0xf5, 0xc5, 0x1c, 0x91, 0x24, 0x2e, 0x96,
+	0xd8, 0x69, 0x43, 0x55, 0x2a, 0xf8, 0x2c, 0x4e, 0xe6, 0x05, 0x25, 0xc6, 0xb0, 0x93, 0x8d, 0xbe,
+	0xd5, 0xb8, 0x28, 0xc8, 0xe0, 0xe6, 0x10, 0xd2, 0x7f, 0x1e, 0x42, 0x9d, 0xa3, 0x8b, 0x4b, 0xab,
+	0xf4, 0xf9, 0xd2, 0x2a, 0xbd, 0x4d, 0x2d, 0xed, 0x22, 0xb5, 0xb4, 0x4f, 0xa9, 0xa5, 0x7d, 0x4d,
+	0x2d, 0xed, 0xfd, 0x77, 0x4b, 0x7b, 0xd1, 0xfa, 0x87, 0x7f, 0x9f, 0x27, 0xea, 0xe7, 0xb4, 0x74,
+	0x5a, 0x1e, 0x1a, 0x59, 0x47, 0x3e, 0xfa, 0x11, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0x0f, 0xec,
+	0xbe, 0x06, 0x00, 0x00,
 }
 }

+ 3 - 0
vendor/github.com/containerd/containerd/api/events/task.proto

@@ -29,6 +29,9 @@ message TaskDelete {
 	uint32 pid = 2;
 	uint32 pid = 2;
 	uint32 exit_status = 3;
 	uint32 exit_status = 3;
 	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	// id is the specific exec. By default if omitted will be `""` thus matches
+	// the init exec of the task matching `container_id`.
+	string id = 5;
 }
 }
 
 
 message TaskIO {
 message TaskIO {

+ 1 - 1
vendor/github.com/containerd/containerd/archive/time_unix.go

@@ -1,4 +1,4 @@
-// +build freebsd linux openbsd solaris
+// +build !windows
 
 
 /*
 /*
    Copyright The containerd Authors.
    Copyright The containerd Authors.

+ 5 - 1
vendor/github.com/containerd/containerd/cio/io_windows.go

@@ -31,11 +31,15 @@ const pipeRoot = `\\.\pipe`
 
 
 // NewFIFOSetInDir returns a new set of fifos for the task
 // NewFIFOSetInDir returns a new set of fifos for the task
 func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) {
 func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) {
+	stderrPipe := ""
+	if !terminal {
+		stderrPipe = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
+	}
 	return NewFIFOSet(Config{
 	return NewFIFOSet(Config{
 		Terminal: terminal,
 		Terminal: terminal,
 		Stdin:    fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
 		Stdin:    fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
 		Stdout:   fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
 		Stdout:   fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
-		Stderr:   fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
+		Stderr:   stderrPipe,
 	}, nil), nil
 	}, nil), nil
 }
 }
 
 

+ 83 - 154
vendor/github.com/containerd/containerd/client.go

@@ -17,11 +17,14 @@
 package containerd
 package containerd
 
 
 import (
 import (
+	"bytes"
 	"context"
 	"context"
+	"encoding/json"
 	"fmt"
 	"fmt"
 	"net/http"
 	"net/http"
 	"runtime"
 	"runtime"
 	"strconv"
 	"strconv"
+	"strings"
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
@@ -40,7 +43,6 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/content"
 	contentproxy "github.com/containerd/containerd/content/proxy"
 	contentproxy "github.com/containerd/containerd/content/proxy"
 	"github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/defaults"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/leases"
@@ -51,7 +53,6 @@ import (
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/remotes/docker"
-	"github.com/containerd/containerd/remotes/docker/schema1"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/containerd/snapshots"
 	snproxy "github.com/containerd/containerd/snapshots/proxy"
 	snproxy "github.com/containerd/containerd/snapshots/proxy"
 	"github.com/containerd/typeurl"
 	"github.com/containerd/typeurl"
@@ -280,6 +281,12 @@ type RemoteContext struct {
 	// handlers.
 	// handlers.
 	BaseHandlers []images.Handler
 	BaseHandlers []images.Handler
 
 
+	// HandlerWrapper wraps the handler which gets sent to dispatch.
+	// Unlike BaseHandlers, this can run before and after the built
+	// in handlers, allowing operations to run on the descriptor
+	// after it has completed transferring.
+	HandlerWrapper func(images.Handler) images.Handler
+
 	// ConvertSchema1 is whether to convert Docker registry schema 1
 	// ConvertSchema1 is whether to convert Docker registry schema 1
 	// manifests. If this option is false then any image which resolves
 	// manifests. If this option is false then any image which resolves
 	// to schema 1 will return an error since schema 1 is not supported.
 	// to schema 1 will return an error since schema 1 is not supported.
@@ -290,6 +297,9 @@ type RemoteContext struct {
 	// platforms will be used to create a PlatformMatcher with no ordering
 	// platforms will be used to create a PlatformMatcher with no ordering
 	// preference.
 	// preference.
 	Platforms []string
 	Platforms []string
+
+	// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
+	MaxConcurrentDownloads int
 }
 }
 
 
 func defaultRemoteContext() *RemoteContext {
 func defaultRemoteContext() *RemoteContext {
@@ -341,157 +351,6 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
 	return c.fetch(ctx, fetchCtx, ref, 0)
 	return c.fetch(ctx, fetchCtx, ref, 0)
 }
 }
 
 
-// Pull downloads the provided content into containerd's content store
-// and returns a platform specific image object
-func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
-	pullCtx := defaultRemoteContext()
-	for _, o := range opts {
-		if err := o(c, pullCtx); err != nil {
-			return nil, err
-		}
-	}
-
-	if pullCtx.PlatformMatcher == nil {
-		if len(pullCtx.Platforms) > 1 {
-			return nil, errors.New("cannot pull multiplatform image locally, try Fetch")
-		} else if len(pullCtx.Platforms) == 0 {
-			pullCtx.PlatformMatcher = platforms.Default()
-		} else {
-			p, err := platforms.Parse(pullCtx.Platforms[0])
-			if err != nil {
-				return nil, errors.Wrapf(err, "invalid platform %s", pullCtx.Platforms[0])
-			}
-
-			pullCtx.PlatformMatcher = platforms.Only(p)
-		}
-	}
-
-	ctx, done, err := c.WithLease(ctx)
-	if err != nil {
-		return nil, err
-	}
-	defer done(ctx)
-
-	img, err := c.fetch(ctx, pullCtx, ref, 1)
-	if err != nil {
-		return nil, err
-	}
-
-	i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)
-
-	if pullCtx.Unpack {
-		if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil {
-			return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
-		}
-	}
-
-	return i, nil
-}
-
-func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, limit int) (images.Image, error) {
-	store := c.ContentStore()
-	name, desc, err := rCtx.Resolver.Resolve(ctx, ref)
-	if err != nil {
-		return images.Image{}, errors.Wrapf(err, "failed to resolve reference %q", ref)
-	}
-
-	fetcher, err := rCtx.Resolver.Fetcher(ctx, name)
-	if err != nil {
-		return images.Image{}, errors.Wrapf(err, "failed to get fetcher for %q", name)
-	}
-
-	var (
-		handler images.Handler
-
-		isConvertible bool
-		converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error)
-	)
-
-	if desc.MediaType == images.MediaTypeDockerSchema1Manifest && rCtx.ConvertSchema1 {
-		schema1Converter := schema1.NewConverter(store, fetcher)
-
-		handler = images.Handlers(append(rCtx.BaseHandlers, schema1Converter)...)
-
-		isConvertible = true
-
-		converterFunc = func(ctx context.Context, _ ocispec.Descriptor) (ocispec.Descriptor, error) {
-			return schema1Converter.Convert(ctx)
-		}
-	} else {
-		// Get all the children for a descriptor
-		childrenHandler := images.ChildrenHandler(store)
-		// Set any children labels for that content
-		childrenHandler = images.SetChildrenLabels(store, childrenHandler)
-		// Filter children by platforms
-		childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
-		// Sort and limit manifests if a finite number is needed
-		if limit > 0 {
-			childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
-		}
-
-		// set isConvertible to true if there is application/octet-stream media type
-		convertibleHandler := images.HandlerFunc(
-			func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-				if desc.MediaType == docker.LegacyConfigMediaType {
-					isConvertible = true
-				}
-
-				return []ocispec.Descriptor{}, nil
-			},
-		)
-
-		handler = images.Handlers(append(rCtx.BaseHandlers,
-			remotes.FetchHandler(store, fetcher),
-			convertibleHandler,
-			childrenHandler,
-		)...)
-
-		converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
-			return docker.ConvertManifest(ctx, store, desc)
-		}
-	}
-
-	if err := images.Dispatch(ctx, handler, desc); err != nil {
-		return images.Image{}, err
-	}
-
-	if isConvertible {
-		if desc, err = converterFunc(ctx, desc); err != nil {
-			return images.Image{}, err
-		}
-	}
-
-	img := images.Image{
-		Name:   name,
-		Target: desc,
-		Labels: rCtx.Labels,
-	}
-
-	is := c.ImageService()
-	for {
-		if created, err := is.Create(ctx, img); err != nil {
-			if !errdefs.IsAlreadyExists(err) {
-				return images.Image{}, err
-			}
-
-			updated, err := is.Update(ctx, img)
-			if err != nil {
-				// if image was removed, try create again
-				if errdefs.IsNotFound(err) {
-					continue
-				}
-				return images.Image{}, err
-			}
-
-			img = updated
-		} else {
-			img = created
-		}
-
-		return img, nil
-	}
-}
-
 // Push uploads the provided content to a remote resource
 // Push uploads the provided content to a remote resource
 func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
 func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
 	pushCtx := defaultRemoteContext()
 	pushCtx := defaultRemoteContext()
@@ -521,7 +380,21 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
 		return err
 		return err
 	}
 	}
 
 
-	return remotes.PushContent(ctx, pusher, desc, c.ContentStore(), pushCtx.PlatformMatcher, pushCtx.BaseHandlers...)
+	var wrapper func(images.Handler) images.Handler
+
+	if len(pushCtx.BaseHandlers) > 0 {
+		wrapper = func(h images.Handler) images.Handler {
+			h = images.Handlers(append(pushCtx.BaseHandlers, h)...)
+			if pushCtx.HandlerWrapper != nil {
+				h = pushCtx.HandlerWrapper(h)
+			}
+			return h
+		}
+	} else if pushCtx.HandlerWrapper != nil {
+		wrapper = pushCtx.HandlerWrapper
+	}
+
+	return remotes.PushContent(ctx, pusher, desc, c.ContentStore(), pushCtx.PlatformMatcher, wrapper)
 }
 }
 
 
 // GetImage returns an existing image
 // GetImage returns an existing image
@@ -546,6 +419,45 @@ func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, er
 	return images, nil
 	return images, nil
 }
 }
 
 
+// Restore restores a container from a checkpoint
+func (c *Client) Restore(ctx context.Context, id string, checkpoint Image, opts ...RestoreOpts) (Container, error) {
+	store := c.ContentStore()
+	index, err := decodeIndex(ctx, store, checkpoint.Target())
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, done, err := c.WithLease(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer done(ctx)
+
+	copts := []NewContainerOpts{}
+	for _, o := range opts {
+		copts = append(copts, o(ctx, id, c, checkpoint, index))
+	}
+
+	ctr, err := c.NewContainer(ctx, id, copts...)
+	if err != nil {
+		return nil, err
+	}
+
+	return ctr, nil
+}
+
+func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref string) (d ocispec.Descriptor, err error) {
+	labels := map[string]string{}
+	for i, m := range index.Manifests {
+		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = m.Digest.String()
+	}
+	data, err := json.Marshal(index)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels))
+}
+
 // Subscribe to events that match one or more of the provided filters.
 // Subscribe to events that match one or more of the provided filters.
 //
 //
 // Callers should listen on both the envelope and errs channels. If the errs
 // Callers should listen on both the envelope and errs channels. If the errs
@@ -703,3 +615,20 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 		Revision: response.Revision,
 		Revision: response.Revision,
 	}, nil
 	}, nil
 }
 }
+
+// CheckRuntime returns true if the current runtime matches the expected
+// runtime. Providing various parts of the runtime schema will match those
+// parts of the expected runtime
+func CheckRuntime(current, expected string) bool {
+	cp := strings.Split(current, ".")
+	l := len(cp)
+	for i, p := range strings.Split(expected, ".") {
+		if i > l {
+			return false
+		}
+		if p != cp[i] {
+			return false
+		}
+	}
+	return true
+}

+ 16 - 0
vendor/github.com/containerd/containerd/client_opts.go

@@ -178,3 +178,19 @@ func WithImageHandler(h images.Handler) RemoteOpt {
 		return nil
 		return nil
 	}
 	}
 }
 }
+
+// WithImageHandlerWrapper wraps the handlers to be called on dispatch.
+func WithImageHandlerWrapper(w func(images.Handler) images.Handler) RemoteOpt {
+	return func(client *Client, c *RemoteContext) error {
+		c.HandlerWrapper = w
+		return nil
+	}
+}
+
+// WithMaxConcurrentDownloads sets max concurrent download limit.
+func WithMaxConcurrentDownloads(max int) RemoteOpt {
+	return func(client *Client, c *RemoteContext) error {
+		c.MaxConcurrentDownloads = max
+		return nil
+	}
+}

+ 79 - 1
vendor/github.com/containerd/containerd/container.go

@@ -28,12 +28,22 @@ import (
 	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/oci"
+	"github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
 	"github.com/containerd/typeurl"
 	prototypes "github.com/gogo/protobuf/types"
 	prototypes "github.com/gogo/protobuf/types"
+	ver "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
+const (
+	checkpointImageNameLabel       = "org.opencontainers.image.ref.name"
+	checkpointRuntimeNameLabel     = "io.containerd.checkpoint.runtime"
+	checkpointSnapshotterNameLabel = "io.containerd.checkpoint.snapshotter"
+)
+
 // Container is a metadata object for container resources and task creation
 // Container is a metadata object for container resources and task creation
 type Container interface {
 type Container interface {
 	// ID identifies the container
 	// ID identifies the container
@@ -64,6 +74,8 @@ type Container interface {
 	Extensions(context.Context) (map[string]prototypes.Any, error)
 	Extensions(context.Context) (map[string]prototypes.Any, error)
 	// Update a container
 	// Update a container
 	Update(context.Context, ...UpdateContainerOpts) error
 	Update(context.Context, ...UpdateContainerOpts) error
+	// Checkpoint creates a checkpoint image of the current container
+	Checkpoint(context.Context, string, ...CheckpointOpts) (Image, error)
 }
 }
 
 
 func containerFromRecord(client *Client, c containers.Container) *container {
 func containerFromRecord(client *Client, c containers.Container) *container {
@@ -217,7 +229,9 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
 			})
 			})
 		}
 		}
 	}
 	}
-	var info TaskInfo
+	info := TaskInfo{
+		runtime: r.Runtime.Name,
+	}
 	for _, o := range opts {
 	for _, o := range opts {
 		if err := o(ctx, c.client, &info); err != nil {
 		if err := o(ctx, c.client, &info); err != nil {
 			return nil, err
 			return nil, err
@@ -272,6 +286,70 @@ func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) err
 	return nil
 	return nil
 }
 }
 
 
+func (c *container) Checkpoint(ctx context.Context, ref string, opts ...CheckpointOpts) (Image, error) {
+	index := &ocispec.Index{
+		Versioned: ver.Versioned{
+			SchemaVersion: 2,
+		},
+		Annotations: make(map[string]string),
+	}
+	copts := &options.CheckpointOptions{
+		Exit:                false,
+		OpenTcp:             false,
+		ExternalUnixSockets: false,
+		Terminal:            false,
+		FileLocks:           true,
+		EmptyNamespaces:     nil,
+	}
+	info, err := c.Info(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	img, err := c.Image(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, done, err := c.client.WithLease(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer done(ctx)
+
+	// add image name to manifest
+	index.Annotations[checkpointImageNameLabel] = img.Name()
+	// add runtime info to index
+	index.Annotations[checkpointRuntimeNameLabel] = info.Runtime.Name
+	// add snapshotter info to index
+	index.Annotations[checkpointSnapshotterNameLabel] = info.Snapshotter
+
+	// process remaining opts
+	for _, o := range opts {
+		if err := o(ctx, c.client, &info, index, copts); err != nil {
+			err = errdefs.FromGRPC(err)
+			if !errdefs.IsAlreadyExists(err) {
+				return nil, err
+			}
+		}
+	}
+
+	desc, err := writeIndex(ctx, index, c.client, c.ID()+"index")
+	if err != nil {
+		return nil, err
+	}
+	i := images.Image{
+		Name:   ref,
+		Target: desc,
+	}
+	checkpoint, err := c.client.ImageService().Create(ctx, i)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewImage(c.client, checkpoint), nil
+}
+
 func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
 func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
 	response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
 	response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
 		ContainerID: c.id,
 		ContainerID: c.id,

+ 155 - 0
vendor/github.com/containerd/containerd/container_checkpoint_opts.go

@@ -0,0 +1,155 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"runtime"
+
+	tasks "github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/rootfs"
+	"github.com/containerd/containerd/runtime/v2/runc/options"
+	"github.com/containerd/typeurl"
+	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrCheckpointRWUnsupported is returned if the container runtime does not support checkpoint
+	ErrCheckpointRWUnsupported = errors.New("rw checkpoint is only supported on v2 runtimes")
+	// ErrMediaTypeNotFound returns an error when a media type in the manifest is unknown
+	ErrMediaTypeNotFound = errors.New("media type not found")
+)
+
+// CheckpointOpts are options to manage the checkpoint operation
+type CheckpointOpts func(context.Context, *Client, *containers.Container, *imagespec.Index, *options.CheckpointOptions) error
+
+// WithCheckpointImage includes the container image in the checkpoint
+func WithCheckpointImage(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
+	ir, err := client.ImageService().Get(ctx, c.Image)
+	if err != nil {
+		return err
+	}
+	index.Manifests = append(index.Manifests, ir.Target)
+	return nil
+}
+
+// WithCheckpointTask includes the running task
+func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
+	any, err := typeurl.MarshalAny(copts)
+	if err != nil {
+		return nil
+	}
+	task, err := client.TaskService().Checkpoint(ctx, &tasks.CheckpointTaskRequest{
+		ContainerID: c.ID,
+		Options:     any,
+	})
+	if err != nil {
+		return err
+	}
+	for _, d := range task.Descriptors {
+		platformSpec := platforms.DefaultSpec()
+		index.Manifests = append(index.Manifests, imagespec.Descriptor{
+			MediaType: d.MediaType,
+			Size:      d.Size_,
+			Digest:    d.Digest,
+			Platform:  &platformSpec,
+		})
+	}
+	// save copts
+	data, err := any.Marshal()
+	if err != nil {
+		return err
+	}
+	r := bytes.NewReader(data)
+	desc, err := writeContent(ctx, client.ContentStore(), images.MediaTypeContainerd1CheckpointOptions, c.ID+"-checkpoint-options", r)
+	if err != nil {
+		return err
+	}
+	desc.Platform = &imagespec.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+	}
+	index.Manifests = append(index.Manifests, desc)
+	return nil
+}
+
+// WithCheckpointRuntime includes the container runtime info
+func WithCheckpointRuntime(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
+	if c.Runtime.Options != nil {
+		data, err := c.Runtime.Options.Marshal()
+		if err != nil {
+			return err
+		}
+		r := bytes.NewReader(data)
+		desc, err := writeContent(ctx, client.ContentStore(), images.MediaTypeContainerd1CheckpointRuntimeOptions, c.ID+"-runtime-options", r)
+		if err != nil {
+			return err
+		}
+		desc.Platform = &imagespec.Platform{
+			OS:           runtime.GOOS,
+			Architecture: runtime.GOARCH,
+		}
+		index.Manifests = append(index.Manifests, desc)
+	}
+	return nil
+}
+
+// WithCheckpointRW includes the rw in the checkpoint
+func WithCheckpointRW(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
+	diffOpts := []diff.Opt{
+		diff.WithReference(fmt.Sprintf("checkpoint-rw-%s", c.SnapshotKey)),
+	}
+	rw, err := rootfs.CreateDiff(ctx,
+		c.SnapshotKey,
+		client.SnapshotService(c.Snapshotter),
+		client.DiffService(),
+		diffOpts...,
+	)
+	if err != nil {
+		return err
+
+	}
+	rw.Platform = &imagespec.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+	}
+	index.Manifests = append(index.Manifests, rw)
+	return nil
+}
+
+// WithCheckpointTaskExit causes the task to exit after checkpoint
+func WithCheckpointTaskExit(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
+	copts.Exit = true
+	return nil
+}
+
+// GetIndexByMediaType returns the index in a manifest for the specified media type
+func GetIndexByMediaType(index *imagespec.Index, mt string) (*imagespec.Descriptor, error) {
+	for _, d := range index.Manifests {
+		if d.MediaType == mt {
+			return &d, nil
+		}
+	}
+	return nil, ErrMediaTypeNotFound
+}

+ 5 - 4
vendor/github.com/containerd/containerd/container_opts.go

@@ -23,6 +23,7 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/typeurl"
 	"github.com/containerd/typeurl"
 	"github.com/gogo/protobuf/types"
 	"github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/identity"
@@ -118,7 +119,7 @@ func WithSnapshot(id string) NewContainerOpts {
 
 
 // WithNewSnapshot allocates a new snapshot to be used by the container as the
 // WithNewSnapshot allocates a new snapshot to be used by the container as the
 // root filesystem in read-write mode
 // root filesystem in read-write mode
-func WithNewSnapshot(id string, i Image) NewContainerOpts {
+func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
 		if err != nil {
 		if err != nil {
@@ -126,7 +127,7 @@ func WithNewSnapshot(id string, i Image) NewContainerOpts {
 		}
 		}
 		setSnapshotterIfEmpty(c)
 		setSnapshotterIfEmpty(c)
 		parent := identity.ChainID(diffIDs).String()
 		parent := identity.ChainID(diffIDs).String()
-		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent); err != nil {
+		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
 			return err
 			return err
 		}
 		}
 		c.SnapshotKey = id
 		c.SnapshotKey = id
@@ -148,7 +149,7 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
 
 
 // WithNewSnapshotView allocates a new snapshot to be used by the container as the
 // WithNewSnapshotView allocates a new snapshot to be used by the container as the
 // root filesystem in read-only mode
 // root filesystem in read-only mode
-func WithNewSnapshotView(id string, i Image) NewContainerOpts {
+func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
 		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
 		if err != nil {
 		if err != nil {
@@ -156,7 +157,7 @@ func WithNewSnapshotView(id string, i Image) NewContainerOpts {
 		}
 		}
 		setSnapshotterIfEmpty(c)
 		setSnapshotterIfEmpty(c)
 		parent := identity.ChainID(diffIDs).String()
 		parent := identity.ChainID(diffIDs).String()
-		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent); err != nil {
+		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
 			return err
 			return err
 		}
 		}
 		c.SnapshotKey = id
 		c.SnapshotKey = id

+ 0 - 69
vendor/github.com/containerd/containerd/container_opts_unix.go

@@ -26,81 +26,12 @@ import (
 	"syscall"
 	"syscall"
 
 
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/platforms"
-	"github.com/gogo/protobuf/proto"
-	protobuf "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/identity"
-	"github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 )
 
 
-// WithCheckpoint allows a container to be created from the checkpointed information
-// provided by the descriptor. The image, snapshot, and runtime specifications are
-// restored on the container
-func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts {
-	// set image and rw, and spec
-	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		var (
-			desc  = im.Target()
-			store = client.ContentStore()
-		)
-		index, err := decodeIndex(ctx, store, desc)
-		if err != nil {
-			return err
-		}
-		var rw *v1.Descriptor
-		for _, m := range index.Manifests {
-			switch m.MediaType {
-			case v1.MediaTypeImageLayer:
-				fk := m
-				rw = &fk
-			case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList:
-				config, err := images.Config(ctx, store, m, platforms.Default())
-				if err != nil {
-					return errors.Wrap(err, "unable to resolve image config")
-				}
-				diffIDs, err := images.RootFS(ctx, store, config)
-				if err != nil {
-					return errors.Wrap(err, "unable to get rootfs")
-				}
-				setSnapshotterIfEmpty(c)
-				if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil {
-					if !errdefs.IsAlreadyExists(err) {
-						return err
-					}
-				}
-				c.Image = index.Annotations["image.name"]
-			case images.MediaTypeContainerd1CheckpointConfig:
-				data, err := content.ReadBlob(ctx, store, m)
-				if err != nil {
-					return errors.Wrap(err, "unable to read checkpoint config")
-				}
-				var any protobuf.Any
-				if err := proto.Unmarshal(data, &any); err != nil {
-					return err
-				}
-				c.Spec = &any
-			}
-		}
-		if rw != nil {
-			// apply the rw snapshot to the new rw layer
-			mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey)
-			if err != nil {
-				return errors.Wrapf(err, "unable to get mounts for %s", snapshotKey)
-			}
-			if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
-				return errors.Wrap(err, "unable to apply rw diff")
-			}
-		}
-		c.SnapshotKey = snapshotKey
-		return nil
-	}
-}
-
 // WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
 // WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
 // filesystem to be used by a container with user namespaces
 // filesystem to be used by a container with user namespaces
 func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
 func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {

+ 150 - 0
vendor/github.com/containerd/containerd/container_restore_opts.go

@@ -0,0 +1,150 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/gogo/protobuf/proto"
+	ptypes "github.com/gogo/protobuf/types"
+	"github.com/opencontainers/image-spec/identity"
+	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrImageNameNotFoundInIndex is returned when the image name is not found in the index
+	ErrImageNameNotFoundInIndex = errors.New("image name not found in index")
+	// ErrRuntimeNameNotFoundInIndex is returned when the runtime is not found in the index
+	ErrRuntimeNameNotFoundInIndex = errors.New("runtime not found in index")
+	// ErrSnapshotterNameNotFoundInIndex is returned when the snapshotter is not found in the index
+	ErrSnapshotterNameNotFoundInIndex = errors.New("snapshotter not found in index")
+)
+
+// RestoreOpts are options to manage the restore operation
+type RestoreOpts func(context.Context, string, *Client, Image, *imagespec.Index) NewContainerOpts
+
+// WithRestoreImage restores the image for the container
+func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		name, ok := index.Annotations[checkpointImageNameLabel]
+		if !ok || name == "" {
+			return ErrImageNameNotFoundInIndex
+		}
+		snapshotter, ok := index.Annotations[checkpointSnapshotterNameLabel]
+		if !ok || snapshotter == "" {
+			return ErrSnapshotterNameNotFoundInIndex
+		}
+		i, err := client.GetImage(ctx, name)
+		if err != nil {
+			return err
+		}
+
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		if err != nil {
+			return err
+		}
+		parent := identity.ChainID(diffIDs).String()
+		if _, err := client.SnapshotService(snapshotter).Prepare(ctx, id, parent); err != nil {
+			return err
+		}
+		c.Image = i.Name()
+		c.SnapshotKey = id
+		c.Snapshotter = snapshotter
+		return nil
+	}
+}
+
+// WithRestoreRuntime restores the runtime for the container
+func WithRestoreRuntime(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		name, ok := index.Annotations[checkpointRuntimeNameLabel]
+		if !ok {
+			return ErrRuntimeNameNotFoundInIndex
+		}
+
+		// restore options if present
+		m, err := GetIndexByMediaType(index, images.MediaTypeContainerd1CheckpointRuntimeOptions)
+		if err != nil {
+			if err != ErrMediaTypeNotFound {
+				return err
+			}
+		}
+		var options *ptypes.Any
+		if m != nil {
+			store := client.ContentStore()
+			data, err := content.ReadBlob(ctx, store, *m)
+			if err != nil {
+				return errors.Wrap(err, "unable to read checkpoint runtime")
+			}
+			// allocate before unmarshaling: proto.Unmarshal into a nil
+			// message pointer cannot populate it
+			options = &ptypes.Any{}
+			if err := proto.Unmarshal(data, options); err != nil {
+				return err
+			}
+		}
+
+		c.Runtime = containers.RuntimeInfo{
+			Name:    name,
+			Options: options,
+		}
+		return nil
+	}
+}
+
+// WithRestoreSpec restores the spec from the checkpoint for the container
+func WithRestoreSpec(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		m, err := GetIndexByMediaType(index, images.MediaTypeContainerd1CheckpointConfig)
+		if err != nil {
+			return err
+		}
+		store := client.ContentStore()
+		data, err := content.ReadBlob(ctx, store, *m)
+		if err != nil {
+			return errors.Wrap(err, "unable to read checkpoint config")
+		}
+		var any ptypes.Any
+		if err := proto.Unmarshal(data, &any); err != nil {
+			return err
+		}
+		c.Spec = &any
+		return nil
+	}
+}
+
+// WithRestoreRW restores the rw layer from the checkpoint for the container
+func WithRestoreRW(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		// apply rw layer
+		rw, err := GetIndexByMediaType(index, imagespec.MediaTypeImageLayerGzip)
+		if err != nil {
+			return err
+		}
+		mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, c.SnapshotKey)
+		if err != nil {
+			return err
+		}
+
+		if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
+			return err
+		}
+		return nil
+	}
+}

+ 4 - 0
vendor/github.com/containerd/containerd/containers/containers.go

@@ -86,6 +86,10 @@ type RuntimeInfo struct {
 
 
 // Store interacts with the underlying container storage
 // Store interacts with the underlying container storage
 type Store interface {
 type Store interface {
+	// Get a container using the id.
+	//
+	// Container object is returned on success. If the id is not known to the
+	// store, an error will be returned.
 	Get(ctx context.Context, id string) (Container, error)
 	Get(ctx context.Context, id string) (Container, error)
 
 
 	// List returns containers that match one or more of the provided filters.
 	// List returns containers that match one or more of the provided filters.

+ 1 - 1
vendor/github.com/containerd/containerd/errdefs/grpc.go

@@ -95,7 +95,7 @@ func FromGRPC(err error) error {
 
 
 	msg := rebaseMessage(cls, err)
 	msg := rebaseMessage(cls, err)
 	if msg != "" {
 	if msg != "" {
-		err = errors.Wrapf(cls, msg)
+		err = errors.Wrap(cls, msg)
 	} else {
 	} else {
 		err = errors.WithStack(cls)
 		err = errors.WithStack(cls)
 	}
 	}

+ 4 - 4
vendor/github.com/containerd/containerd/events/exchange/exchange.go

@@ -138,10 +138,10 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
 	)
 	)
 
 
 	closeAll := func() {
 	closeAll := func() {
-		defer close(errq)
-		defer e.broadcaster.Remove(dst)
-		defer queue.Close()
-		defer channel.Close()
+		channel.Close()
+		queue.Close()
+		e.broadcaster.Remove(dst)
+		close(errq)
 	}
 	}
 
 
 	ch = evch
 	ch = evch

+ 6 - 19
vendor/github.com/containerd/containerd/export.go

@@ -20,36 +20,23 @@ import (
 	"context"
 	"context"
 	"io"
 	"io"
 
 
-	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/images/oci"
+
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
-type exportOpts struct {
-}
-
-// ExportOpt allows the caller to specify export-specific options
-type ExportOpt func(c *exportOpts) error
-
-func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
-	var eopts exportOpts
-	for _, o := range opts {
-		if err := o(&eopts); err != nil {
-			return eopts, err
-		}
-	}
-	return eopts, nil
-}
-
 // Export exports an image to a Tar stream.
 // Export exports an image to a Tar stream.
 // OCI format is used by default.
 // OCI format is used by default.
 // It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
 // It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
 // TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
 // TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
-func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
-	_, err := resolveExportOpt(opts...) // unused now
+func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...oci.V1ExporterOpt) (io.ReadCloser, error) {
+
+	exporter, err := oci.ResolveV1ExportOpt(opts...)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+
 	pr, pw := io.Pipe()
 	pr, pw := io.Pipe()
 	go func() {
 	go func() {
 		pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed"))
 		pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed"))

+ 1 - 1
vendor/github.com/containerd/containerd/filters/parser.go

@@ -71,7 +71,7 @@ func ParseAll(ss ...string) (Filter, error) {
 	for _, s := range ss {
 	for _, s := range ss {
 		f, err := Parse(s)
 		f, err := Parse(s)
 		if err != nil {
 		if err != nil {
-			return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+			return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
 		}
 		}
 
 
 		fs = append(fs, f)
 		fs = append(fs, f)

+ 1 - 1
vendor/github.com/containerd/containerd/identifiers/validate.go

@@ -45,7 +45,7 @@ var (
 // Validate return nil if the string s is a valid identifier.
 // Validate return nil if the string s is a valid identifier.
 //
 //
 // identifiers must be valid domain names according to RFC 1035, section 2.3.1.  To
 // identifiers must be valid domain names according to RFC 1035, section 2.3.1.  To
-// enforce case insensitvity, all characters must be lower case.
+// enforce case insensitivity, all characters must be lower case.
 //
 //
 // In general, identifiers that pass this validation, should be safe for use as
 // In general, identifiers that pass this validation, should be safe for use as
 // a domain names or filesystem path component.
 // a domain names or filesystem path component.

+ 14 - 2
vendor/github.com/containerd/containerd/images/handlers.go

@@ -26,6 +26,7 @@ import (
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/errgroup"
+	"golang.org/x/sync/semaphore"
 )
 )
 
 
 var (
 var (
@@ -108,19 +109,30 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
 // handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
 // handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
 // any children.
 // any children.
 //
 //
+// A concurrency limiter can be passed in to limit the number of concurrent
+// handlers running. When limiter is nil, there is no limit.
+//
 // Typically, this function will be used with `FetchHandler`, often composed
 // Typically, this function will be used with `FetchHandler`, often composed
 // with other handlers.
 // with other handlers.
 //
 //
 // If any handler returns an error, the dispatch session will be canceled.
 // If any handler returns an error, the dispatch session will be canceled.
-func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
 	eg, ctx := errgroup.WithContext(ctx)
 	eg, ctx := errgroup.WithContext(ctx)
 	for _, desc := range descs {
 	for _, desc := range descs {
 		desc := desc
 		desc := desc
 
 
+		if limiter != nil {
+			if err := limiter.Acquire(ctx, 1); err != nil {
+				return err
+			}
+		}
 		eg.Go(func() error {
 		eg.Go(func() error {
 			desc := desc
 			desc := desc
 
 
 			children, err := handler.Handle(ctx, desc)
 			children, err := handler.Handle(ctx, desc)
+			if limiter != nil {
+				limiter.Release(1)
+			}
 			if err != nil {
 			if err != nil {
 				if errors.Cause(err) == ErrSkipDesc {
 				if errors.Cause(err) == ErrSkipDesc {
 					return nil // don't traverse the children.
 					return nil // don't traverse the children.
@@ -129,7 +141,7 @@ func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor)
 			}
 			}
 
 
 			if len(children) > 0 {
 			if len(children) > 0 {
-				return Dispatch(ctx, handler, children...)
+				return Dispatch(ctx, handler, limiter, children...)
 			}
 			}
 
 
 			return nil
 			return nil

+ 8 - 5
vendor/github.com/containerd/containerd/images/mediatypes.go

@@ -29,11 +29,14 @@ const (
 	MediaTypeDockerSchema2Manifest         = "application/vnd.docker.distribution.manifest.v2+json"
 	MediaTypeDockerSchema2Manifest         = "application/vnd.docker.distribution.manifest.v2+json"
 	MediaTypeDockerSchema2ManifestList     = "application/vnd.docker.distribution.manifest.list.v2+json"
 	MediaTypeDockerSchema2ManifestList     = "application/vnd.docker.distribution.manifest.list.v2+json"
 	// Checkpoint/Restore Media Types
 	// Checkpoint/Restore Media Types
-	MediaTypeContainerd1Checkpoint        = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
-	MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
-	MediaTypeContainerd1Resource          = "application/vnd.containerd.container.resource.tar"
-	MediaTypeContainerd1RW                = "application/vnd.containerd.container.rw.tar"
-	MediaTypeContainerd1CheckpointConfig  = "application/vnd.containerd.container.checkpoint.config.v1+proto"
+	MediaTypeContainerd1Checkpoint               = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
+	MediaTypeContainerd1CheckpointPreDump        = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
+	MediaTypeContainerd1Resource                 = "application/vnd.containerd.container.resource.tar"
+	MediaTypeContainerd1RW                       = "application/vnd.containerd.container.rw.tar"
+	MediaTypeContainerd1CheckpointConfig         = "application/vnd.containerd.container.checkpoint.config.v1+proto"
+	MediaTypeContainerd1CheckpointOptions        = "application/vnd.containerd.container.checkpoint.options.v1+proto"
+	MediaTypeContainerd1CheckpointRuntimeName    = "application/vnd.containerd.container.checkpoint.runtime.name"
+	MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto"
 	// Legacy Docker schema1 manifest
 	// Legacy Docker schema1 manifest
 	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 )
 )

+ 241 - 0
vendor/github.com/containerd/containerd/images/oci/exporter.go

@@ -0,0 +1,241 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package oci
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"sort"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	ocispecs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// V1Exporter implements OCI Image Spec v1.
+// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
+//
+// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types,
+//                    e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
+//                         -> application/vnd.oci.image.layer.v1.tar+gzip
+type V1Exporter struct {
+	AllPlatforms bool
+}
+
+// V1ExporterOpt allows the caller to set additional options to a new V1Exporter
+type V1ExporterOpt func(c *V1Exporter) error
+
+// DefaultV1Exporter return a default V1Exporter pointer
+func DefaultV1Exporter() *V1Exporter {
+	return &V1Exporter{
+		AllPlatforms: false,
+	}
+}
+
+// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt
+func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) {
+	exporter := DefaultV1Exporter()
+	for _, o := range opts {
+		if err := o(exporter); err != nil {
+			return exporter, err
+		}
+	}
+	return exporter, nil
+}
+
+// WithAllPlatforms set V1Exporter`s AllPlatforms option
+func WithAllPlatforms(allPlatforms bool) V1ExporterOpt {
+	return func(c *V1Exporter) error {
+		c.AllPlatforms = allPlatforms
+		return nil
+	}
+}
+
+// Export implements Exporter.
+func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error {
+	tw := tar.NewWriter(writer)
+	defer tw.Close()
+
+	records := []tarRecord{
+		ociLayoutFile(""),
+		ociIndexRecord(desc),
+	}
+
+	algorithms := map[string]struct{}{}
+	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		records = append(records, blobRecord(store, desc))
+		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
+		return nil, nil
+	}
+
+	childrenHandler := images.ChildrenHandler(store)
+
+	if !oe.AllPlatforms {
+		// get local default platform to fetch image manifest
+		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec()))
+	}
+
+	handlers := images.Handlers(
+		childrenHandler,
+		images.HandlerFunc(exportHandler),
+	)
+
+	// Walk sequentially since the number of fetches is likely one and doing in
+	// parallel requires locking the export handler
+	if err := images.Walk(ctx, handlers, desc); err != nil {
+		return err
+	}
+
+	if len(algorithms) > 0 {
+		records = append(records, directoryRecord("blobs/", 0755))
+		for alg := range algorithms {
+			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
+		}
+	}
+
+	return writeTar(ctx, tw, records)
+}
+
+type tarRecord struct {
+	Header *tar.Header
+	CopyTo func(context.Context, io.Writer) (int64, error)
+}
+
+func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
+	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     path,
+			Mode:     0444,
+			Size:     desc.Size,
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			r, err := cs.ReaderAt(ctx, desc)
+			if err != nil {
+				return 0, errors.Wrap(err, "failed to get reader")
+			}
+			defer r.Close()
+
+			// Verify digest
+			dgstr := desc.Digest.Algorithm().Digester()
+
+			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
+			if err != nil {
+				return 0, errors.Wrap(err, "failed to copy to tar")
+			}
+			if dgstr.Digest() != desc.Digest {
+				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
+			}
+			return n, nil
+		},
+	}
+}
+
+func directoryRecord(name string, mode int64) tarRecord {
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     name,
+			Mode:     mode,
+			Typeflag: tar.TypeDir,
+		},
+	}
+}
+
+func ociLayoutFile(version string) tarRecord {
+	if version == "" {
+		version = ocispec.ImageLayoutVersion
+	}
+	layout := ocispec.ImageLayout{
+		Version: version,
+	}
+
+	b, err := json.Marshal(layout)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     ocispec.ImageLayoutFile,
+			Mode:     0444,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+
+}
+
+func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
+	index := ocispec.Index{
+		Versioned: ocispecs.Versioned{
+			SchemaVersion: 2,
+		},
+		Manifests: manifests,
+	}
+
+	b, err := json.Marshal(index)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     "index.json",
+			Mode:     0644,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+}
+
+func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
+	sort.Slice(records, func(i, j int) bool {
+		return records[i].Header.Name < records[j].Header.Name
+	})
+
+	for _, record := range records {
+		if err := tw.WriteHeader(record.Header); err != nil {
+			return err
+		}
+		if record.CopyTo != nil {
+			n, err := record.CopyTo(ctx, tw)
+			if err != nil {
+				return err
+			}
+			if n != record.Header.Size {
+				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
+			}
+		} else if record.Header.Size > 0 {
+			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
+		}
+	}
+	return nil
+}

+ 2 - 1
vendor/github.com/containerd/containerd/install.go

@@ -59,7 +59,6 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		defer r.Close()
 		if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) {
 		if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) {
 			d := filepath.Dir(hdr.Name)
 			d := filepath.Dir(hdr.Name)
 			result := d == "bin"
 			result := d == "bin"
@@ -73,8 +72,10 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 			}
 			}
 			return result, nil
 			return result, nil
 		})); err != nil {
 		})); err != nil {
+			r.Close()
 			return err
 			return err
 		}
 		}
+		r.Close()
 	}
 	}
 	return nil
 	return nil
 }
 }

+ 83 - 7
vendor/github.com/containerd/containerd/metadata/buckets.go

@@ -14,13 +14,11 @@
    limitations under the License.
    limitations under the License.
 */
 */
 
 
-package metadata
-
-import (
-	digest "github.com/opencontainers/go-digest"
-	bolt "go.etcd.io/bbolt"
-)
-
+// Package metadata stores all labels and object specific metadata by namespace.
+// This package also contains the main garbage collection logic  for cleaning up
+// resources consistently and atomically. Resources used by backends will be
+// tracked in the metadata store to be exposed to consumers of this package.
+//
 // The layout where a "/" delineates a bucket is described in the following
 // The layout where a "/" delineates a bucket is described in the following
 // section. Please try to follow this as closely as possible when adding
 // section. Please try to follow this as closely as possible when adding
 // functionality. We can bolster this with helpers and more structure if that
 // functionality. We can bolster this with helpers and more structure if that
@@ -43,6 +41,84 @@ import (
 //
 //
 // key: object-specific key identifying the storage bucket for the objects
 // key: object-specific key identifying the storage bucket for the objects
 // contents.
 // contents.
+//
+// Below is the current database schema. This should be updated each time
+// the structure is changed in addition to adding a migration and incrementing
+// the database version. Note that `╘══*...*` refers to maps with arbitrary
+// keys.
+//  ├──version : <varint>                        - Latest version, see migrations
+//  └──v1                                        - Schema version bucket
+//     ╘══*namespace*
+//        ├──labels
+//        │  ╘══*key* : <string>                 - Label value
+//        ├──image
+//        │  ╘══*image name*
+//        │     ├──createdat : <binary time>     - Created at
+//        │     ├──updatedat : <binary time>     - Updated at
+//        │     ├──target
+//        │     │  ├──digest : <digest>          - Descriptor digest
+//        │     │  ├──mediatype : <string>       - Descriptor media type
+//        │     │  └──size : <varint>            - Descriptor size
+//        │     └──labels
+//        │        ╘══*key* : <string>           - Label value
+//        ├──containers
+//        │  ╘══*container id*
+//        │     ├──createdat : <binary time>     - Created at
+//        │     ├──updatedat : <binary time>     - Updated at
+//        │     ├──spec : <binary>               - Proto marshaled spec
+//        │     ├──image : <string>              - Image name
+//        │     ├──snapshotter : <string>        - Snapshotter name
+//        │     ├──snapshotKey : <string>        - Snapshot key
+//        │     ├──runtime
+//        │     │  ├──name : <string>            - Runtime name
+//        │     │  ├──extensions
+//        │     │  │  ╘══*name* : <binary>       - Proto marshaled extension
+//        │     │  └──options : <binary>         - Proto marshaled options
+//        │     └──labels
+//        │        ╘══*key* : <string>           - Label value
+//        ├──snapshots
+//        │  ╘══*snapshotter*
+//        │     ╘══*snapshot key*
+//        │        ├──name : <string>            - Snapshot name in backend
+//        │        ├──createdat : <binary time>  - Created at
+//        │        ├──updatedat : <binary time>  - Updated at
+//        │        ├──parent : <string>          - Parent snapshot name
+//        │        ├──children
+//        │        │  ╘══*snapshot key* : <nil>  - Child snapshot reference
+//        │        └──labels
+//        │           ╘══*key* : <string>        - Label value
+//        ├──content
+//        │  ├──blob
+//        │  │  ╘══*blob digest*
+//        │  │     ├──createdat : <binary time>  - Created at
+//        │  │     ├──updatedat : <binary time>  - Updated at
+//        │  │     ├──size : <varint>            - Blob size
+//        │  │     └──labels
+//        │  │        ╘══*key* : <string>        - Label value
+//        │  └──ingests
+//        │     ╘══*ingest reference*
+//        │        ├──ref : <string>             - Ingest reference in backend
+//        │        ├──expireat : <binary time>   - Time to expire ingest
+//        │        └──expected : <digest>        - Expected commit digest
+//        └──leases
+//           ╘══*lease id*
+//              ├──createdat : <binary time>     - Created at
+//              ├──labels
+//              │  ╘══*key* : <string>           - Label value
+//              ├──snapshots
+//              │  ╘══*snapshotter*
+//              │     ╘══*snapshot key* : <nil>  - Snapshot reference
+//              ├──content
+//              │  ╘══*blob digest* : <nil>      - Content blob reference
+//              └──ingests
+//                 ╘══*ingest reference* : <nil> - Content ingest reference
+package metadata
+
+import (
+	digest "github.com/opencontainers/go-digest"
+	bolt "go.etcd.io/bbolt"
+)
+
 var (
 var (
 	bucketKeyVersion          = []byte(schemaVersion)
 	bucketKeyVersion          = []byte(schemaVersion)
 	bucketKeyDBVersion        = []byte("version")    // stores the version of the schema
 	bucketKeyDBVersion        = []byte("version")    // stores the version of the schema

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/containers.go

@@ -72,7 +72,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C
 
 
 	filter, err := filters.ParseAll(fs...)
 	filter, err := filters.ParseAll(fs...)
 	if err != nil {
 	if err != nil {
-		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+		return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
 	}
 	}
 
 
 	bkt := getContainersBucket(s.tx, namespace)
 	bkt := getContainersBucket(s.tx, namespace)

+ 29 - 12
vendor/github.com/containerd/containerd/metadata/content.go

@@ -38,16 +38,31 @@ import (
 
 
 type contentStore struct {
 type contentStore struct {
 	content.Store
 	content.Store
-	db *DB
-	l  sync.RWMutex
+	db     *DB
+	shared bool
+	l      sync.RWMutex
 }
 }
 
 
 // newContentStore returns a namespaced content store using an existing
 // newContentStore returns a namespaced content store using an existing
 // content store interface.
 // content store interface.
-func newContentStore(db *DB, cs content.Store) *contentStore {
+// policy defines the sharing behavior for content between namespaces. Both
+// modes will result in shared storage in the backend for committed. Choose
+// "shared" to prevent separate namespaces from having to pull the same content
+// twice.  Choose "isolated" if the content must not be shared between
+// namespaces.
+//
+// If the policy is "shared", writes will try to resolve the "expected" digest
+// against the backend, allowing imports of content from other namespaces. In
+// "isolated" mode, the client must prove they have the content by providing
+// the entire blob before the content can be added to another namespace.
+//
+// Since we have only two policies right now, it's simpler using bool to
+// represent it internally.
+func newContentStore(db *DB, shared bool, cs content.Store) *contentStore {
 	return &contentStore{
 	return &contentStore{
-		Store: cs,
-		db:    db,
+		Store:  cs,
+		db:     db,
+		shared: shared,
 	}
 	}
 }
 }
 
 
@@ -383,13 +398,15 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (
 				return nil
 				return nil
 			}
 			}
 
 
-			if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil {
-				// Ensure the expected size is the same, it is likely
-				// an error if the size is mismatched but the caller
-				// must resolve this on commit
-				if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size {
-					shared = true
-					wOpts.Desc.Size = st.Size
+			if cs.shared {
+				if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil {
+					// Ensure the expected size is the same, it is likely
+					// an error if the size is mismatched but the caller
+					// must resolve this on commit
+					if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size {
+						shared = true
+						wOpts.Desc.Size = st.Size
+					}
 				}
 				}
 			}
 			}
 		}
 		}

+ 24 - 2
vendor/github.com/containerd/containerd/metadata/db.go

@@ -46,6 +46,19 @@ const (
 	dbVersion = 3
 	dbVersion = 3
 )
 )
 
 
+// DBOpt configures how we set up the DB
+type DBOpt func(*dbOptions)
+
+// WithPolicyIsolated isolates contents between namespaces
+func WithPolicyIsolated(o *dbOptions) {
+	o.shared = false
+}
+
+// dbOptions configure db options.
+type dbOptions struct {
+	shared bool
+}
+
 // DB represents a metadata database backed by a bolt
 // DB represents a metadata database backed by a bolt
 // database. The database is fully namespaced and stores
 // database. The database is fully namespaced and stores
 // image, container, namespace, snapshot, and content data
 // image, container, namespace, snapshot, and content data
@@ -72,19 +85,28 @@ type DB struct {
 	// mutationCallbacks are called after each mutation with the flag
 	// mutationCallbacks are called after each mutation with the flag
 	// set indicating whether any dirty flags are set
 	// set indicating whether any dirty flags are set
 	mutationCallbacks []func(bool)
 	mutationCallbacks []func(bool)
+
+	dbopts dbOptions
 }
 }
 
 
 // NewDB creates a new metadata database using the provided
 // NewDB creates a new metadata database using the provided
 // bolt database, content store, and snapshotters.
 // bolt database, content store, and snapshotters.
-func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter) *DB {
+func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter, opts ...DBOpt) *DB {
 	m := &DB{
 	m := &DB{
 		db:      db,
 		db:      db,
 		ss:      make(map[string]*snapshotter, len(ss)),
 		ss:      make(map[string]*snapshotter, len(ss)),
 		dirtySS: map[string]struct{}{},
 		dirtySS: map[string]struct{}{},
+		dbopts: dbOptions{
+			shared: true,
+		},
+	}
+
+	for _, opt := range opts {
+		opt(&m.dbopts)
 	}
 	}
 
 
 	// Initialize data stores
 	// Initialize data stores
-	m.cs = newContentStore(m, cs)
+	m.cs = newContentStore(m, m.dbopts.shared, cs)
 	for name, sn := range ss {
 	for name, sn := range ss {
 		m.ss[name] = newSnapshotter(m, name, sn)
 		m.ss[name] = newSnapshotter(m, name, sn)
 	}
 	}

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/images.go

@@ -84,7 +84,7 @@ func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, er
 
 
 	filter, err := filters.ParseAll(fs...)
 	filter, err := filters.ParseAll(fs...)
 	if err != nil {
 	if err != nil {
-		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+		return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
 	}
 	}
 
 
 	var m []images.Image
 	var m []images.Image

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/leases.go

@@ -122,7 +122,7 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease,
 
 
 	filter, err := filters.ParseAll(fs...)
 	filter, err := filters.ParseAll(fs...)
 	if err != nil {
 	if err != nil {
-		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+		return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
 	}
 	}
 
 
 	var ll []leases.Lease
 	var ll []leases.Lease

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/snapshot.go

@@ -232,7 +232,7 @@ func overlayInfo(info, overlay snapshots.Info) snapshots.Info {
 		info.Labels = overlay.Labels
 		info.Labels = overlay.Labels
 	} else {
 	} else {
 		for k, v := range overlay.Labels {
 		for k, v := range overlay.Labels {
-			overlay.Labels[k] = v
+			info.Labels[k] = v
 		}
 		}
 	}
 	}
 	return info
 	return info

+ 1 - 10
vendor/github.com/containerd/containerd/oci/spec.go

@@ -247,17 +247,8 @@ func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error {
 		Root:    &specs.Root{},
 		Root:    &specs.Root{},
 		Process: &specs.Process{
 		Process: &specs.Process{
 			Cwd: `C:\`,
 			Cwd: `C:\`,
-			ConsoleSize: &specs.Box{
-				Width:  80,
-				Height: 20,
-			},
-		},
-		Windows: &specs.Windows{
-			IgnoreFlushesDuringBoot: true,
-			Network: &specs.WindowsNetwork{
-				AllowUnqualifiedDNSQuery: true,
-			},
 		},
 		},
+		Windows: &specs.Windows{},
 	}
 	}
 	return nil
 	return nil
 }
 }

+ 61 - 10
vendor/github.com/containerd/containerd/oci/spec_opts.go

@@ -141,8 +141,10 @@ func WithEnv(environmentVariables []string) SpecOpts {
 // replaced by env key or appended to the list
 // replaced by env key or appended to the list
 func replaceOrAppendEnvValues(defaults, overrides []string) []string {
 func replaceOrAppendEnvValues(defaults, overrides []string) []string {
 	cache := make(map[string]int, len(defaults))
 	cache := make(map[string]int, len(defaults))
+	results := make([]string, 0, len(defaults))
 	for i, e := range defaults {
 	for i, e := range defaults {
 		parts := strings.SplitN(e, "=", 2)
 		parts := strings.SplitN(e, "=", 2)
+		results = append(results, e)
 		cache[parts[0]] = i
 		cache[parts[0]] = i
 	}
 	}
 
 
@@ -150,7 +152,7 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string {
 		// Values w/o = means they want this env to be removed/unset.
 		// Values w/o = means they want this env to be removed/unset.
 		if !strings.Contains(value, "=") {
 		if !strings.Contains(value, "=") {
 			if i, exists := cache[value]; exists {
 			if i, exists := cache[value]; exists {
-				defaults[i] = "" // Used to indicate it should be removed
+				results[i] = "" // Used to indicate it should be removed
 			}
 			}
 			continue
 			continue
 		}
 		}
@@ -158,21 +160,21 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string {
 		// Just do a normal set/update
 		// Just do a normal set/update
 		parts := strings.SplitN(value, "=", 2)
 		parts := strings.SplitN(value, "=", 2)
 		if i, exists := cache[parts[0]]; exists {
 		if i, exists := cache[parts[0]]; exists {
-			defaults[i] = value
+			results[i] = value
 		} else {
 		} else {
-			defaults = append(defaults, value)
+			results = append(results, value)
 		}
 		}
 	}
 	}
 
 
 	// Now remove all entries that we want to "unset"
 	// Now remove all entries that we want to "unset"
-	for i := 0; i < len(defaults); i++ {
-		if defaults[i] == "" {
-			defaults = append(defaults[:i], defaults[i+1:]...)
+	for i := 0; i < len(results); i++ {
+		if results[i] == "" {
+			results = append(results[:i], results[i+1:]...)
 			i--
 			i--
 		}
 		}
 	}
 	}
 
 
-	return defaults
+	return results
 }
 }
 
 
 // WithProcessArgs replaces the args on the generated spec
 // WithProcessArgs replaces the args on the generated spec
@@ -310,7 +312,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
 
 
 		setProcess(s)
 		setProcess(s)
 		if s.Linux != nil {
 		if s.Linux != nil {
-			s.Process.Env = append(s.Process.Env, config.Env...)
+			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
 			cmd := config.Cmd
 			cmd := config.Cmd
 			if len(args) > 0 {
 			if len(args) > 0 {
 				cmd = args
 				cmd = args
@@ -332,8 +334,14 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
 			// even if there is no specified user in the image config
 			// even if there is no specified user in the image config
 			return WithAdditionalGIDs("root")(ctx, client, c, s)
 			return WithAdditionalGIDs("root")(ctx, client, c, s)
 		} else if s.Windows != nil {
 		} else if s.Windows != nil {
-			s.Process.Env = config.Env
-			s.Process.Args = append(config.Entrypoint, config.Cmd...)
+			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
+			cmd := config.Cmd
+			if len(args) > 0 {
+				cmd = args
+			}
+			s.Process.Args = append(config.Entrypoint, cmd...)
+
+			s.Process.Cwd = config.WorkingDir
 			s.Process.User = specs.User{
 			s.Process.User = specs.User{
 				Username: config.User,
 				Username: config.User,
 			}
 			}
@@ -1026,3 +1034,46 @@ func WithWindowsHyperV(_ context.Context, _ Client, _ *containers.Container, s *
 	}
 	}
 	return nil
 	return nil
 }
 }
+
+// WithMemoryLimit sets the `Linux.LinuxResources.Memory.Limit` section to the
+// `limit` specified if the `Linux` section is not `nil`. Additionally sets the
+// `Windows.WindowsResources.Memory.Limit` section if the `Windows` section is
+// not `nil`.
+func WithMemoryLimit(limit uint64) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Linux != nil {
+			if s.Linux.Resources == nil {
+				s.Linux.Resources = &specs.LinuxResources{}
+			}
+			if s.Linux.Resources.Memory == nil {
+				s.Linux.Resources.Memory = &specs.LinuxMemory{}
+			}
+			l := int64(limit)
+			s.Linux.Resources.Memory.Limit = &l
+		}
+		if s.Windows != nil {
+			if s.Windows.Resources == nil {
+				s.Windows.Resources = &specs.WindowsResources{}
+			}
+			if s.Windows.Resources.Memory == nil {
+				s.Windows.Resources.Memory = &specs.WindowsMemoryResources{}
+			}
+			s.Windows.Resources.Memory.Limit = &limit
+		}
+		return nil
+	}
+}
+
+// WithAnnotations appends or replaces the annotations on the spec with the
+// provided annotations
+func WithAnnotations(annotations map[string]string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Annotations == nil {
+			s.Annotations = make(map[string]string)
+		}
+		for k, v := range annotations {
+			s.Annotations[k] = v
+		}
+		return nil
+	}
+}

+ 67 - 0
vendor/github.com/containerd/containerd/oci/spec_opts_windows.go

@@ -0,0 +1,67 @@
+// +build windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package oci
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/containers"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the
+// `count` specified.
+func WithWindowsCPUCount(count uint64) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows.Resources == nil {
+			s.Windows.Resources = &specs.WindowsResources{}
+		}
+		if s.Windows.Resources.CPU == nil {
+			s.Windows.Resources.CPU = &specs.WindowsCPUResources{}
+		}
+		s.Windows.Resources.CPU.Count = &count
+		return nil
+	}
+}
+
+// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`.
+func WithWindowsIgnoreFlushesDuringBoot() SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows == nil {
+			s.Windows = &specs.Windows{}
+		}
+		s.Windows.IgnoreFlushesDuringBoot = true
+		return nil
+	}
+}
+
+// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.IgnoreFlushesDuringBoot`.
+func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows == nil {
+			s.Windows = &specs.Windows{}
+		}
+		if s.Windows.Network == nil {
+			s.Windows.Network = &specs.WindowsNetwork{}
+		}
+
+		s.Windows.Network.AllowUnqualifiedDNSQuery = true
+		return nil
+	}
+}

+ 9 - 0
vendor/github.com/containerd/containerd/plugin/plugin.go

@@ -75,6 +75,15 @@ const (
 	GCPlugin Type = "io.containerd.gc.v1"
 	GCPlugin Type = "io.containerd.gc.v1"
 )
 )
 
 
+const (
+	// RuntimeLinuxV1 is the legacy linux runtime
+	RuntimeLinuxV1 = "io.containerd.runtime.v1.linux"
+	// RuntimeRuncV1 is the runc runtime that supports a single container
+	RuntimeRuncV1 = "io.containerd.runc.v1"
+	// RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
+	RuntimeRuncV2 = "io.containerd.runc.v2"
+)
+
 // Registration contains information for registering a plugin
 // Registration contains information for registering a plugin
 type Registration struct {
 type Registration struct {
 	// Type of the plugin
 	// Type of the plugin

+ 190 - 0
vendor/github.com/containerd/containerd/pull.go

@@ -0,0 +1,190 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/remotes/docker/schema1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/semaphore"
+)
+
+// Pull downloads the provided content into containerd's content store
+// and returns a platform specific image object
+func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
+	pullCtx := defaultRemoteContext()
+	for _, o := range opts {
+		if err := o(c, pullCtx); err != nil {
+			return nil, err
+		}
+	}
+
+	if pullCtx.PlatformMatcher == nil {
+		if len(pullCtx.Platforms) > 1 {
+			return nil, errors.New("cannot pull multiplatform image locally, try Fetch")
+		} else if len(pullCtx.Platforms) == 0 {
+			pullCtx.PlatformMatcher = platforms.Default()
+		} else {
+			p, err := platforms.Parse(pullCtx.Platforms[0])
+			if err != nil {
+				return nil, errors.Wrapf(err, "invalid platform %s", pullCtx.Platforms[0])
+			}
+
+			pullCtx.PlatformMatcher = platforms.Only(p)
+		}
+	}
+
+	ctx, done, err := c.WithLease(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer done(ctx)
+
+	img, err := c.fetch(ctx, pullCtx, ref, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)
+
+	if pullCtx.Unpack {
+		if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil {
+			return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
+		}
+	}
+
+	return i, nil
+}
+
+func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, limit int) (images.Image, error) {
+	store := c.ContentStore()
+	name, desc, err := rCtx.Resolver.Resolve(ctx, ref)
+	if err != nil {
+		return images.Image{}, errors.Wrapf(err, "failed to resolve reference %q", ref)
+	}
+
+	fetcher, err := rCtx.Resolver.Fetcher(ctx, name)
+	if err != nil {
+		return images.Image{}, errors.Wrapf(err, "failed to get fetcher for %q", name)
+	}
+
+	var (
+		handler images.Handler
+
+		isConvertible bool
+		converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error)
+		limiter       *semaphore.Weighted
+	)
+
+	if desc.MediaType == images.MediaTypeDockerSchema1Manifest && rCtx.ConvertSchema1 {
+		schema1Converter := schema1.NewConverter(store, fetcher)
+
+		handler = images.Handlers(append(rCtx.BaseHandlers, schema1Converter)...)
+
+		isConvertible = true
+
+		converterFunc = func(ctx context.Context, _ ocispec.Descriptor) (ocispec.Descriptor, error) {
+			return schema1Converter.Convert(ctx)
+		}
+	} else {
+		// Get all the children for a descriptor
+		childrenHandler := images.ChildrenHandler(store)
+		// Set any children labels for that content
+		childrenHandler = images.SetChildrenLabels(store, childrenHandler)
+		// Filter children by platforms
+		childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
+		// Sort and limit manifests if a finite number is needed
+		if limit > 0 {
+			childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
+		}
+
+		// set isConvertible to true if there is application/octet-stream media type
+		convertibleHandler := images.HandlerFunc(
+			func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+				if desc.MediaType == docker.LegacyConfigMediaType {
+					isConvertible = true
+				}
+
+				return []ocispec.Descriptor{}, nil
+			},
+		)
+
+		handler = images.Handlers(append(rCtx.BaseHandlers,
+			remotes.FetchHandler(store, fetcher),
+			convertibleHandler,
+			childrenHandler,
+		)...)
+
+		converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
+			return docker.ConvertManifest(ctx, store, desc)
+		}
+	}
+
+	if rCtx.HandlerWrapper != nil {
+		handler = rCtx.HandlerWrapper(handler)
+	}
+
+	if rCtx.MaxConcurrentDownloads > 0 {
+		limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
+	}
+	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
+		return images.Image{}, err
+	}
+
+	if isConvertible {
+		if desc, err = converterFunc(ctx, desc); err != nil {
+			return images.Image{}, err
+		}
+	}
+
+	img := images.Image{
+		Name:   name,
+		Target: desc,
+		Labels: rCtx.Labels,
+	}
+
+	is := c.ImageService()
+	for {
+		if created, err := is.Create(ctx, img); err != nil {
+			if !errdefs.IsAlreadyExists(err) {
+				return images.Image{}, err
+			}
+
+			updated, err := is.Update(ctx, img)
+			if err != nil {
+				// if image was removed, try create again
+				if errdefs.IsNotFound(err) {
+					continue
+				}
+				return images.Image{}, err
+			}
+
+			img = updated
+		} else {
+			img = created
+		}
+
+		return img, nil
+	}
+}

+ 6 - 2
vendor/github.com/containerd/containerd/remotes/docker/authorizer.go

@@ -81,7 +81,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 			// TODO(dmcg): Store challenge, not token
 			// TODO(dmcg): Store challenge, not token
 			// Move token fetching to authorize
 			// Move token fetching to authorize
 			return a.setTokenAuth(ctx, host, c.parameters)
 			return a.setTokenAuth(ctx, host, c.parameters)
-		} else if c.scheme == basicAuth {
+		} else if c.scheme == basicAuth && a.credentials != nil {
 			// TODO: Resolve credentials on authorize
 			// TODO: Resolve credentials on authorize
 			username, secret, err := a.credentials(host)
 			username, secret, err := a.credentials(host)
 			if err != nil {
 			if err != nil {
@@ -194,7 +194,11 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti
 		form.Set("password", to.secret)
 		form.Set("password", to.secret)
 	}
 	}
 
 
-	resp, err := ctxhttp.PostForm(ctx, a.client, to.realm, form)
+	resp, err := ctxhttp.Post(
+		ctx, a.client, to.realm,
+		"application/x-www-form-urlencoded; charset=utf-8",
+		strings.NewReader(form.Encode()),
+	)
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}

+ 22 - 7
vendor/github.com/containerd/containerd/remotes/docker/resolver.go

@@ -29,6 +29,7 @@ import (
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/version"
 	digest "github.com/opencontainers/go-digest"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
@@ -75,13 +76,16 @@ type ResolverOptions struct {
 
 
 	// Credentials provides username and secret given a host.
 	// Credentials provides username and secret given a host.
 	// If username is empty but a secret is given, that secret
 	// If username is empty but a secret is given, that secret
-	// is interpretted as a long lived token.
+	// is interpreted as a long lived token.
 	// Deprecated: use Authorizer
 	// Deprecated: use Authorizer
 	Credentials func(string) (string, string, error)
 	Credentials func(string) (string, string, error)
 
 
 	// Host provides the hostname given a namespace.
 	// Host provides the hostname given a namespace.
 	Host func(string) (string, error)
 	Host func(string) (string, error)
 
 
+	// Headers are the HTTP request header fields sent by the resolver
+	Headers http.Header
+
 	// PlainHTTP specifies to use plain http and not https
 	// PlainHTTP specifies to use plain http and not https
 	PlainHTTP bool
 	PlainHTTP bool
 
 
@@ -105,6 +109,7 @@ func DefaultHost(ns string) (string, error) {
 type dockerResolver struct {
 type dockerResolver struct {
 	auth      Authorizer
 	auth      Authorizer
 	host      func(string) (string, error)
 	host      func(string) (string, error)
+	headers   http.Header
 	plainHTTP bool
 	plainHTTP bool
 	client    *http.Client
 	client    *http.Client
 	tracker   StatusTracker
 	tracker   StatusTracker
@@ -118,12 +123,27 @@ func NewResolver(options ResolverOptions) remotes.Resolver {
 	if options.Host == nil {
 	if options.Host == nil {
 		options.Host = DefaultHost
 		options.Host = DefaultHost
 	}
 	}
+	if options.Headers == nil {
+		options.Headers = make(http.Header)
+	}
+	if _, ok := options.Headers["Accept"]; !ok {
+		// set headers for all the types we support for resolution.
+		options.Headers.Set("Accept", strings.Join([]string{
+			images.MediaTypeDockerSchema2Manifest,
+			images.MediaTypeDockerSchema2ManifestList,
+			ocispec.MediaTypeImageManifest,
+			ocispec.MediaTypeImageIndex, "*"}, ", "))
+	}
+	if _, ok := options.Headers["User-Agent"]; !ok {
+		options.Headers.Set("User-Agent", "containerd/"+version.Version)
+	}
 	if options.Authorizer == nil {
 	if options.Authorizer == nil {
 		options.Authorizer = NewAuthorizer(options.Client, options.Credentials)
 		options.Authorizer = NewAuthorizer(options.Client, options.Credentials)
 	}
 	}
 	return &dockerResolver{
 	return &dockerResolver{
 		auth:      options.Authorizer,
 		auth:      options.Authorizer,
 		host:      options.Host,
 		host:      options.Host,
+		headers:   options.Headers,
 		plainHTTP: options.PlainHTTP,
 		plainHTTP: options.PlainHTTP,
 		client:    options.Client,
 		client:    options.Client,
 		tracker:   options.Tracker,
 		tracker:   options.Tracker,
@@ -182,12 +202,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 			return "", ocispec.Descriptor{}, err
 			return "", ocispec.Descriptor{}, err
 		}
 		}
 
 
-		// set headers for all the types we support for resolution.
-		req.Header.Set("Accept", strings.Join([]string{
-			images.MediaTypeDockerSchema2Manifest,
-			images.MediaTypeDockerSchema2ManifestList,
-			ocispec.MediaTypeImageManifest,
-			ocispec.MediaTypeImageIndex, "*"}, ", "))
+		req.Header = r.headers
 
 
 		log.G(ctx).Debug("resolving")
 		log.G(ctx).Debug("resolving")
 		resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
 		resp, err := fetcher.doRequestWithRetries(ctx, req, nil)

+ 6 - 3
vendor/github.com/containerd/containerd/remotes/handlers.go

@@ -156,7 +156,7 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc
 //
 //
 // Base handlers can be provided which will be called before any push specific
 // Base handlers can be provided which will be called before any push specific
 // handlers.
 // handlers.
-func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, baseHandlers ...images.Handler) error {
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
 	var m sync.Mutex
 	var m sync.Mutex
 	manifestStack := []ocispec.Descriptor{}
 	manifestStack := []ocispec.Descriptor{}
 
 
@@ -175,13 +175,16 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr
 
 
 	pushHandler := PushHandler(pusher, provider)
 	pushHandler := PushHandler(pusher, provider)
 
 
-	handlers := append(baseHandlers,
+	var handler images.Handler = images.Handlers(
 		images.FilterPlatforms(images.ChildrenHandler(provider), platform),
 		images.FilterPlatforms(images.ChildrenHandler(provider), platform),
 		filterHandler,
 		filterHandler,
 		pushHandler,
 		pushHandler,
 	)
 	)
+	if wrapper != nil {
+		handler = wrapper(handler)
+	}
 
 
-	if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
+	if err := images.Dispatch(ctx, handler, nil, desc); err != nil {
 		return err
 		return err
 	}
 	}
 
 

+ 203 - 35
vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go

@@ -60,6 +60,8 @@ type CreateOptions struct {
 	ShimCgroup          string   `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
 	ShimCgroup          string   `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
 	IoUid               uint32   `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
 	IoUid               uint32   `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
 	IoGid               uint32   `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
 	IoGid               uint32   `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
+	CriuWorkPath        string   `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
+	CriuImagePath       string   `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
 }
 }
 
 
 func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
 func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
@@ -74,6 +76,8 @@ type CheckpointOptions struct {
 	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
 	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
 	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
 	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
 	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
 	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+	WorkPath            string   `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
+	ImagePath           string   `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
 }
 }
 
 
 func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
 func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
@@ -252,6 +256,18 @@ func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
 		i++
 		i++
 		i = encodeVarintRunc(dAtA, i, uint64(m.IoGid))
 		i = encodeVarintRunc(dAtA, i, uint64(m.IoGid))
 	}
 	}
+	if len(m.CriuWorkPath) > 0 {
+		dAtA[i] = 0x62
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuWorkPath)))
+		i += copy(dAtA[i:], m.CriuWorkPath)
+	}
+	if len(m.CriuImagePath) > 0 {
+		dAtA[i] = 0x6a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath)))
+		i += copy(dAtA[i:], m.CriuImagePath)
+	}
 	return i, nil
 	return i, nil
 }
 }
 
 
@@ -341,6 +357,18 @@ func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
 		i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
 		i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
 		i += copy(dAtA[i:], m.CgroupsMode)
 		i += copy(dAtA[i:], m.CgroupsMode)
 	}
 	}
+	if len(m.WorkPath) > 0 {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.WorkPath)))
+		i += copy(dAtA[i:], m.WorkPath)
+	}
+	if len(m.ImagePath) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath)))
+		i += copy(dAtA[i:], m.ImagePath)
+	}
 	return i, nil
 	return i, nil
 }
 }
 
 
@@ -439,6 +467,14 @@ func (m *CreateOptions) Size() (n int) {
 	if m.IoGid != 0 {
 	if m.IoGid != 0 {
 		n += 1 + sovRunc(uint64(m.IoGid))
 		n += 1 + sovRunc(uint64(m.IoGid))
 	}
 	}
+	l = len(m.CriuWorkPath)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	l = len(m.CriuImagePath)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
 	return n
 	return n
 }
 }
 
 
@@ -470,6 +506,14 @@ func (m *CheckpointOptions) Size() (n int) {
 	if l > 0 {
 	if l > 0 {
 		n += 1 + l + sovRunc(uint64(l))
 		n += 1 + l + sovRunc(uint64(l))
 	}
 	}
+	l = len(m.WorkPath)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	l = len(m.ImagePath)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
 	return n
 	return n
 }
 }
 
 
@@ -525,6 +569,8 @@ func (this *CreateOptions) String() string {
 		`ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`,
 		`ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`,
 		`IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`,
 		`IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`,
 		`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
 		`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
+		`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
+		`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
 		`}`,
 		`}`,
 	}, "")
 	}, "")
 	return s
 	return s
@@ -541,6 +587,8 @@ func (this *CheckpointOptions) String() string {
 		`FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`,
 		`FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`,
 		`EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`,
 		`EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`,
 		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
 		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
+		`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
+		`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
 		`}`,
 		`}`,
 	}, "")
 	}, "")
 	return s
 	return s
@@ -994,6 +1042,64 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error {
 					break
 					break
 				}
 				}
 			}
 			}
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuWorkPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuImagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
 			skippy, err := skipRunc(dAtA[iNdEx:])
 			skippy, err := skipRunc(dAtA[iNdEx:])
@@ -1202,6 +1308,64 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
 			}
 			}
 			m.CgroupsMode = string(dAtA[iNdEx:postIndex])
 			m.CgroupsMode = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WorkPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
 			skippy, err := skipRunc(dAtA[iNdEx:])
 			skippy, err := skipRunc(dAtA[iNdEx:])
@@ -1412,39 +1576,43 @@ func init() {
 }
 }
 
 
 var fileDescriptorRunc = []byte{
 var fileDescriptorRunc = []byte{
-	// 541 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0xc1, 0x6e, 0xd3, 0x40,
-	0x10, 0x86, 0x6b, 0xda, 0x26, 0xce, 0xa4, 0x29, 0xb0, 0x50, 0xc9, 0x14, 0x91, 0x86, 0x00, 0x52,
-	0xb8, 0xa4, 0x12, 0x88, 0x13, 0xb7, 0xa6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x95, 0x10, 0x42, 0x5a,
-	0xb9, 0xeb, 0x21, 0x59, 0xc5, 0xde, 0x59, 0x79, 0xd7, 0xd4, 0xb9, 0xf5, 0x09, 0x78, 0xae, 0x1e,
-	0x39, 0x72, 0x42, 0x34, 0x2f, 0x02, 0xf2, 0xda, 0x0e, 0x9c, 0x39, 0x72, 0xfb, 0xe7, 0xfb, 0xc7,
-	0x9e, 0xd1, 0xbf, 0x1a, 0x98, 0x4c, 0xa5, 0x9d, 0xe5, 0x67, 0x63, 0x41, 0xe9, 0xbe, 0x20, 0x65,
-	0x23, 0xa9, 0x30, 0x8b, 0xff, 0x96, 0x59, 0xae, 0xac, 0x4c, 0x71, 0x3f, 0x91, 0x2a, 0x2f, 0xca,
-	0x4a, 0xd8, 0x85, 0x46, 0xe3, 0xd4, 0x58, 0x67, 0x64, 0x89, 0xed, 0xfc, 0x69, 0x1f, 0xbb, 0xb6,
-	0x71, 0x69, 0xee, 0xde, 0x9e, 0xd2, 0x94, 0x5c, 0xc7, 0x7e, 0xa9, 0xaa, 0xe6, 0xe1, 0x57, 0x0f,
-	0xba, 0x61, 0xae, 0xc4, 0x5b, 0x6d, 0x25, 0x29, 0xc3, 0x02, 0x68, 0xd7, 0x23, 0x02, 0x6f, 0xe0,
-	0x8d, 0x3a, 0x61, 0x53, 0xb2, 0xfb, 0xb0, 0x55, 0x4b, 0x9e, 0x11, 0xd9, 0xe0, 0x9a, 0xb3, 0xbb,
-	0x35, 0x0b, 0x89, 0x2c, 0xbb, 0x0b, 0x1d, 0x91, 0xc9, 0x9c, 0xeb, 0xc8, 0xce, 0x82, 0x75, 0xe7,
-	0xfb, 0x25, 0x38, 0x89, 0xec, 0x8c, 0x3d, 0x82, 0x6d, 0xb3, 0x30, 0x16, 0xd3, 0x98, 0x8b, 0x69,
-	0x46, 0xb9, 0x0e, 0x36, 0x06, 0xde, 0xc8, 0x0f, 0x7b, 0x35, 0x9d, 0x38, 0x38, 0xbc, 0x58, 0x87,
-	0xde, 0x24, 0xc3, 0xc8, 0x62, 0xb3, 0xd2, 0x10, 0x7a, 0x8a, 0xb8, 0x96, 0x5f, 0xc8, 0x56, 0x93,
-	0x3d, 0xf7, 0x5d, 0x57, 0xd1, 0x49, 0xc9, 0xdc, 0xe4, 0x3b, 0xe0, 0x93, 0x46, 0xc5, 0xad, 0xd0,
-	0x6e, 0x31, 0x3f, 0x6c, 0x97, 0xf5, 0x7b, 0xa1, 0xd9, 0x13, 0xd8, 0xc1, 0xc2, 0x62, 0xa6, 0xa2,
-	0x84, 0xe7, 0x4a, 0x16, 0xdc, 0x90, 0x98, 0xa3, 0x35, 0x6e, 0x41, 0x3f, 0xbc, 0xd5, 0x98, 0xa7,
-	0x4a, 0x16, 0xef, 0x2a, 0x8b, 0xed, 0x82, 0x6f, 0x31, 0x4b, 0xa5, 0x8a, 0x92, 0x7a, 0xcb, 0x55,
-	0xcd, 0xee, 0x01, 0x7c, 0x96, 0x09, 0xf2, 0x84, 0xc4, 0xdc, 0x04, 0x9b, 0xce, 0xed, 0x94, 0xe4,
-	0x75, 0x09, 0xd8, 0x63, 0xb8, 0x81, 0xa9, 0xb6, 0x0b, 0xae, 0xa2, 0x14, 0x8d, 0x8e, 0x04, 0x9a,
-	0xa0, 0x35, 0x58, 0x1f, 0x75, 0xc2, 0xeb, 0x8e, 0x1f, 0xaf, 0x70, 0x99, 0x68, 0x95, 0x84, 0xe1,
-	0x29, 0xc5, 0x18, 0xb4, 0xab, 0x44, 0x6b, 0xf6, 0x86, 0x62, 0x64, 0x0f, 0x61, 0x5b, 0x11, 0x57,
-	0x78, 0xce, 0xe7, 0xb8, 0xc8, 0xa4, 0x9a, 0x06, 0xbe, 0x1b, 0xb8, 0xa5, 0xe8, 0x18, 0xcf, 0x5f,
-	0x55, 0x8c, 0xed, 0x41, 0xd7, 0xcc, 0x64, 0xda, 0xe4, 0xda, 0x71, 0xff, 0x81, 0x12, 0x55, 0xa1,
-	0xb2, 0x1d, 0x68, 0x49, 0xe2, 0xb9, 0x8c, 0x03, 0x18, 0x78, 0xa3, 0x5e, 0xb8, 0x29, 0xe9, 0x54,
-	0xc6, 0x35, 0x9e, 0xca, 0x38, 0xe8, 0x36, 0xf8, 0xa5, 0x8c, 0x87, 0xbf, 0x3c, 0xb8, 0x39, 0x99,
-	0xa1, 0x98, 0x6b, 0x92, 0xca, 0x36, 0xcf, 0xc0, 0x60, 0x03, 0x0b, 0xd9, 0xa4, 0xef, 0xf4, 0xff,
-	0x1a, 0xfb, 0xf0, 0x19, 0x6c, 0x9f, 0x64, 0x24, 0xd0, 0x98, 0x43, 0xb4, 0x91, 0x4c, 0x0c, 0x7b,
-	0x00, 0x6d, 0x2c, 0x50, 0x70, 0x19, 0x57, 0x77, 0x71, 0x00, 0xcb, 0x1f, 0x7b, 0xad, 0x17, 0x05,
-	0x8a, 0xa3, 0xc3, 0xb0, 0x55, 0x5a, 0x47, 0xf1, 0xc1, 0xa7, 0xcb, 0xab, 0xfe, 0xda, 0xf7, 0xab,
-	0xfe, 0xda, 0xc5, 0xb2, 0xef, 0x5d, 0x2e, 0xfb, 0xde, 0xb7, 0x65, 0xdf, 0xfb, 0xb9, 0xec, 0x7b,
-	0x1f, 0x0f, 0xfe, 0xf5, 0xb0, 0x9f, 0xaf, 0xd4, 0x87, 0xb5, 0xb3, 0x96, 0xbb, 0xd9, 0xa7, 0xbf,
-	0x03, 0x00, 0x00, 0xff, 0xff, 0x18, 0xa1, 0x4b, 0x5b, 0x27, 0x04, 0x00, 0x00,
+	// 604 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f,
+	0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48,
+	0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1,
+	0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd,
+	0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25,
+	0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1,
+	0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57,
+	0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4,
+	0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58,
+	0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09,
+	0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99,
+	0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60,
+	0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d,
+	0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a,
+	0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53,
+	0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b,
+	0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8,
+	0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4,
+	0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a,
+	0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15,
+	0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89,
+	0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23,
+	0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61,
+	0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73,
+	0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e,
+	0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed,
+	0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46,
+	0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8,
+	0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32,
+	0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95,
+	0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7,
+	0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a,
+	0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7,
+	0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95,
+	0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c,
+	0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06,
+	0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00,
 }
 }

+ 4 - 0
vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto

@@ -25,6 +25,8 @@ message CreateOptions {
 	string shim_cgroup = 9;
 	string shim_cgroup = 9;
 	uint32 io_uid = 10;
 	uint32 io_uid = 10;
 	uint32 io_gid = 11;
 	uint32 io_gid = 11;
+	string criu_work_path = 12;
+	string criu_image_path = 13;
 }
 }
 
 
 message CheckpointOptions {
 message CheckpointOptions {
@@ -35,6 +37,8 @@ message CheckpointOptions {
 	bool file_locks = 5;
 	bool file_locks = 5;
 	repeated string empty_namespaces = 6;
 	repeated string empty_namespaces = 6;
 	string cgroups_mode = 7;
 	string cgroups_mode = 7;
+	string work_path = 8;
+	string image_path = 9;
 }
 }
 
 
 message ProcessDetails {
 message ProcessDetails {

+ 14 - 3
vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go

@@ -20,6 +20,7 @@ package linux
 
 
 import (
 import (
 	"context"
 	"context"
+	"fmt"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -114,12 +115,12 @@ func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientO
 
 
 // Delete deletes the bundle from disk
 // Delete deletes the bundle from disk
 func (b *bundle) Delete() error {
 func (b *bundle) Delete() error {
-	err := os.RemoveAll(b.path)
+	err := atomicDelete(b.path)
 	if err == nil {
 	if err == nil {
-		return os.RemoveAll(b.workDir)
+		return atomicDelete(b.workDir)
 	}
 	}
 	// error removing the bundle path; still attempt removing work dir
 	// error removing the bundle path; still attempt removing work dir
-	err2 := os.RemoveAll(b.workDir)
+	err2 := atomicDelete(b.workDir)
 	if err2 == nil {
 	if err2 == nil {
 		return err
 		return err
 	}
 	}
@@ -152,3 +153,13 @@ func (b *bundle) shimConfig(namespace string, c *Config, runcOptions *runctypes.
 		SystemdCgroup: systemdCgroup,
 		SystemdCgroup: systemdCgroup,
 	}
 	}
 }
 }
+
+// atomicDelete renames the path to a hidden file before removal
+func atomicDelete(path string) error {
+	// create a hidden dir for an atomic removal
+	atomicPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path)))
+	if err := os.Rename(path, atomicPath); err != nil {
+		return err
+	}
+	return os.RemoveAll(atomicPath)
+}

+ 8 - 3
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go

@@ -76,6 +76,7 @@ type Init struct {
 	IoGID        int
 	IoGID        int
 	NoPivotRoot  bool
 	NoPivotRoot  bool
 	NoNewKeyring bool
 	NoNewKeyring bool
+	CriuWorkPath string
 }
 }
 
 
 // NewRunc returns a new runc instance for a process
 // NewRunc returns a new runc instance for a process
@@ -132,7 +133,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 		opts := &runc.RestoreOpts{
 		opts := &runc.RestoreOpts{
 			CheckpointOpts: runc.CheckpointOpts{
 			CheckpointOpts: runc.CheckpointOpts{
 				ImagePath:  r.Checkpoint,
 				ImagePath:  r.Checkpoint,
-				WorkDir:    p.WorkDir,
+				WorkDir:    p.CriuWorkPath,
 				ParentPath: r.ParentCheckpoint,
 				ParentPath: r.ParentCheckpoint,
 			},
 			},
 			PidFile:     pidFile,
 			PidFile:     pidFile,
@@ -425,8 +426,12 @@ func (p *Init) checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	if !r.Exit {
 	if !r.Exit {
 		actions = append(actions, runc.LeaveRunning)
 		actions = append(actions, runc.LeaveRunning)
 	}
 	}
-	work := filepath.Join(p.WorkDir, "criu-work")
-	defer os.RemoveAll(work)
+	// keep criu work directory if criu work dir is set
+	work := r.WorkDir
+	if work == "" {
+		work = filepath.Join(p.WorkDir, "criu-work")
+		defer os.RemoveAll(work)
+	}
 	if err := p.runtime.Checkpoint(ctx, p.id, &runc.CheckpointOpts{
 	if err := p.runtime.Checkpoint(ctx, p.id, &runc.CheckpointOpts{
 		WorkDir:                  work,
 		WorkDir:                  work,
 		ImagePath:                r.Path,
 		ImagePath:                r.Path,

+ 1 - 0
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go

@@ -55,6 +55,7 @@ type ExecConfig struct {
 
 
 // CheckpointConfig holds task checkpoint configuration
 // CheckpointConfig holds task checkpoint configuration
 type CheckpointConfig struct {
 type CheckpointConfig struct {
+	WorkDir                  string
 	Path                     string
 	Path                     string
 	Exit                     bool
 	Exit                     bool
 	AllowOpenTCP             bool
 	AllowOpenTCP             bool

+ 30 - 0
vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go

@@ -21,6 +21,7 @@ package linux
 import (
 import (
 	"context"
 	"context"
 	"fmt"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -40,6 +41,7 @@ import (
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/runtime"
 	"github.com/containerd/containerd/runtime"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
+	"github.com/containerd/containerd/runtime/v1"
 	"github.com/containerd/containerd/runtime/v1/linux/proc"
 	"github.com/containerd/containerd/runtime/v1/linux/proc"
 	shim "github.com/containerd/containerd/runtime/v1/shim/v1"
 	shim "github.com/containerd/containerd/runtime/v1/shim/v1"
 	runc "github.com/containerd/go-runc"
 	runc "github.com/containerd/go-runc"
@@ -288,6 +290,10 @@ func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) {
 			continue
 			continue
 		}
 		}
 		name := namespace.Name()
 		name := namespace.Name()
+		// skip hidden directories
+		if len(name) > 0 && name[0] == '.' {
+			continue
+		}
 		log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace")
 		log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace")
 		tasks, err := r.loadTasks(ctx, name)
 		tasks, err := r.loadTasks(ctx, name)
 		if err != nil {
 		if err != nil {
@@ -351,6 +357,30 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
 			continue
 			continue
 		}
 		}
 
 
+		logDirPath := filepath.Join(r.root, ns, id)
+
+		shimStdoutLog, err := v1.OpenShimStdoutLog(ctx, logDirPath)
+		if err != nil {
+			log.G(ctx).WithError(err).WithFields(logrus.Fields{
+				"id":         id,
+				"namespace":  ns,
+				"logDirPath": logDirPath,
+			}).Error("opening shim stdout log pipe")
+			continue
+		}
+		go io.Copy(os.Stdout, shimStdoutLog)
+
+		shimStderrLog, err := v1.OpenShimStderrLog(ctx, logDirPath)
+		if err != nil {
+			log.G(ctx).WithError(err).WithFields(logrus.Fields{
+				"id":         id,
+				"namespace":  ns,
+				"logDirPath": logDirPath,
+			}).Error("opening shim stderr log pipe")
+			continue
+		}
+		go io.Copy(os.Stderr, shimStderrLog)
+
 		t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle)
 		t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle)
 		if err != nil {
 		if err != nil {
 			log.G(ctx).WithError(err).Error("loading task type")
 			log.G(ctx).WithError(err).Error("loading task type")

+ 38 - 0
vendor/github.com/containerd/containerd/runtime/v1/shim.go

@@ -0,0 +1,38 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package v1
+
+import (
+	"context"
+	"io"
+	"path/filepath"
+
+	"github.com/containerd/fifo"
+	"golang.org/x/sys/unix"
+)
+
+// OpenShimStdoutLog opens the shim stdout log fifo for reading
+func OpenShimStdoutLog(ctx context.Context, logDirPath string) (io.ReadWriteCloser, error) {
+	return fifo.OpenFifo(ctx, filepath.Join(logDirPath, "shim.stdout.log"), unix.O_RDWR|unix.O_CREAT, 0700)
+}
+
+// OpenShimStderrLog opens the shim stderr log fifo for reading
+func OpenShimStderrLog(ctx context.Context, logDirPath string) (io.ReadWriteCloser, error) {
+	return fifo.OpenFifo(ctx, filepath.Join(logDirPath, "shim.stderr.log"), unix.O_RDWR|unix.O_CREAT, 0700)
+}

+ 28 - 6
vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go

@@ -37,6 +37,7 @@ import (
 
 
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/log"
+	v1 "github.com/containerd/containerd/runtime/v1"
 	"github.com/containerd/containerd/runtime/v1/shim"
 	"github.com/containerd/containerd/runtime/v1/shim"
 	shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
 	shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
 	"github.com/containerd/containerd/sys"
 	"github.com/containerd/containerd/sys"
@@ -62,7 +63,24 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
 		}
 		}
 		defer f.Close()
 		defer f.Close()
 
 
-		cmd, err := newCommand(binary, daemonAddress, debug, config, f)
+		var stdoutLog io.ReadWriteCloser
+		var stderrLog io.ReadWriteCloser
+		if debug {
+			stdoutLog, err = v1.OpenShimStdoutLog(ctx, config.WorkDir)
+			if err != nil {
+				return nil, nil, errors.Wrapf(err, "failed to create stdout log")
+			}
+
+			stderrLog, err = v1.OpenShimStderrLog(ctx, config.WorkDir)
+			if err != nil {
+				return nil, nil, errors.Wrapf(err, "failed to create stderr log")
+			}
+
+			go io.Copy(os.Stdout, stdoutLog)
+			go io.Copy(os.Stderr, stderrLog)
+		}
+
+		cmd, err := newCommand(binary, daemonAddress, debug, config, f, stdoutLog, stderrLog)
 		if err != nil {
 		if err != nil {
 			return nil, nil, err
 			return nil, nil, err
 		}
 		}
@@ -77,6 +95,12 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
 		go func() {
 		go func() {
 			cmd.Wait()
 			cmd.Wait()
 			exitHandler()
 			exitHandler()
+			if stdoutLog != nil {
+				stdoutLog.Close()
+			}
+			if stderrLog != nil {
+				stderrLog.Close()
+			}
 		}()
 		}()
 		log.G(ctx).WithFields(logrus.Fields{
 		log.G(ctx).WithFields(logrus.Fields{
 			"pid":     cmd.Process.Pid,
 			"pid":     cmd.Process.Pid,
@@ -104,7 +128,7 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
 	}
 	}
 }
 }
 
 
-func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) (*exec.Cmd, error) {
+func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) {
 	selfExe, err := os.Executable()
 	selfExe, err := os.Executable()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
@@ -137,10 +161,8 @@ func newCommand(binary, daemonAddress string, debug bool, config shim.Config, so
 	cmd.SysProcAttr = getSysProcAttr()
 	cmd.SysProcAttr = getSysProcAttr()
 	cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
 	cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
 	cmd.Env = append(os.Environ(), "GOMAXPROCS=2")
 	cmd.Env = append(os.Environ(), "GOMAXPROCS=2")
-	if debug {
-		cmd.Stdout = os.Stdout
-		cmd.Stderr = os.Stderr
-	}
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
 	return cmd, nil
 	return cmd, nil
 }
 }
 
 

+ 7 - 0
vendor/github.com/containerd/containerd/runtime/v1/shim/service.go

@@ -448,6 +448,7 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque
 		AllowTerminal:            options.Terminal,
 		AllowTerminal:            options.Terminal,
 		FileLocks:                options.FileLocks,
 		FileLocks:                options.FileLocks,
 		EmptyNamespaces:          options.EmptyNamespaces,
 		EmptyNamespaces:          options.EmptyNamespaces,
+		WorkDir:                  options.WorkPath,
 	}); err != nil {
 	}); err != nil {
 		return nil, errdefs.ToGRPC(err)
 		return nil, errdefs.ToGRPC(err)
 	}
 	}
@@ -657,5 +658,11 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu st
 	p.IoGID = int(options.IoGid)
 	p.IoGID = int(options.IoGid)
 	p.NoPivotRoot = options.NoPivotRoot
 	p.NoPivotRoot = options.NoPivotRoot
 	p.NoNewKeyring = options.NoNewKeyring
 	p.NoNewKeyring = options.NoNewKeyring
+	p.CriuWorkPath = options.CriuWorkPath
+	if p.CriuWorkPath == "" {
+		// if criu work path not set, use container WorkDir
+		p.CriuWorkPath = p.WorkDir
+	}
+
 	return p, nil
 	return p, nil
 }
 }

+ 195 - 0
vendor/github.com/containerd/containerd/runtime/v2/README.md

@@ -0,0 +1,195 @@
+# Runtime v2
+
+Runtime v2 introduces a first class shim API for runtime authors to integrate with containerd.
+The shim API is minimal and scoped to the execution lifecycle of a container.
+
+## Binary Naming
+
+Users specify the runtime they wish to use when creating a container.
+The runtime can also be changed via a container update.
+
+```bash
+> ctr run --runtime io.containerd.runc.v1
+```
+
+When a user specifies a runtime name, `io.containerd.runc.v1`, they will specify the name and version of the runtime.
+This will be translated by containerd into a binary name for the shim.
+
+`io.containerd.runc.v1` -> `containerd-shim-runc-v1`
+
+containerd keeps the `containerd-shim-*` prefix so that users can `ps aux | grep containerd-shim` to see running shims on their system.
+
+## Shim Authoring
+
+This section is dedicated to runtime authors wishing to build a shim.
+It will detail how the API works and different considerations when building a shim.
+
+### Commands
+
+Container information is provided to a shim in two ways.
+The OCI Runtime Bundle and on the `Create` rpc request.
+
+#### `start`
+
+Each shim MUST implement a `start` subcommand.
+This command will launch new shims.
+The start command MUST accept the following flags:
+
+* `-namespace` the namespace for the container
+* `-address` the address of the containerd's main socket
+* `-publish-binary` the binary path to publish events back to containerd
+* `-id` the id of the container
+
+The start command, as well as all binary calls to the shim, has the bundle for the container set as the `cwd`.
+
+The start command MUST return an address to a shim for containerd to issue API requests for container operations.
+
+The start command can either start a new shim or return an address to an existing shim based on the shim's logic.
+
+#### `delete`
+
+Each shim MUST implement a `delete` subcommand.
+This command allows containerd to delete any container resources created, mounted, and/or run by a shim when containerd can no longer communicate over rpc.
+This happens if a shim is SIGKILL'd with a running container.
+These resources will need to be cleaned up when containerd loses the connection to a shim.
+This is also used when containerd boots and reconnects to shims.
+If a bundle is still on disk but containerd cannot connect to a shim, the delete command is invoked.
+
+The delete command MUST accept the following flags:
+
+* `-namespace` the namespace for the container
+* `-address` the address of the containerd's main socket
+* `-publish-binary` the binary path to publish events back to containerd
+* `-id` the id of the container
+* `-bundle` the path to the bundle to delete. On non-Windows platforms this will match `cwd`
+
+The delete command will be executed in the container's bundle as its `cwd` except for on the Windows platform.
+
+### Host Level Shim Configuration
+
+containerd does not provide any host level configuration for shims via the API.
+If a shim needs configuration from the user with host level information across all instances, a shim specific configuration file can be setup.
+
+### Container Level Shim Configuration
+
+On the create request, there is a generic `*protobuf.Any` that allows a user to specify container level configuration for the shim.
+
+```proto
+message CreateTaskRequest {
+	string id = 1;
+	...
+	google.protobuf.Any options = 10;
+}
+```
+
+A shim author can create their own protobuf message for configuration and clients can import and provide this information as needed.
+
+### I/O
+
+I/O for a container is provided by the client to the shim via fifo on Linux, named pipes on Windows, or log files on disk.
+The paths to these files are provided on the `Create` rpc for the initial creation and on the `Exec` rpc for additional processes.
+
+```proto
+message CreateTaskRequest {
+	string id = 1;
+	bool terminal = 4;
+	string stdin = 5;
+	string stdout = 6;
+	string stderr = 7;
+}
+```
+
+```proto
+message ExecProcessRequest {
+	string id = 1;
+	string exec_id = 2;
+	bool terminal = 3;
+	string stdin = 4;
+	string stdout = 5;
+	string stderr = 6;
+}
+```
+
+Containers that are to be launched with an interactive terminal will have the `terminal` field set to `true`; data is still copied over the files (fifos, pipes) in the same way as non-interactive containers.
+
+### Root Filesystems
+
+The root filesystem for the containers is provided by on the `Create` rpc.
+Shims are responsible for managing the lifecycle of the filesystem mount during the lifecycle of a container.
+
+```proto
+message CreateTaskRequest {
+	string id = 1;
+	string bundle = 2;
+	repeated containerd.types.Mount rootfs = 3;
+	...
+}
+```
+
+The mount protobuf message is:
+
+```proto
+message Mount {
+	// Type defines the nature of the mount.
+	string type = 1;
+	// Source specifies the name of the mount. Depending on mount type, this
+	// may be a volume name or a host path, or even ignored.
+	string source = 2;
+	// Target path in container
+	string target = 3;
+	// Options specifies zero or more fstab style mount options.
+	repeated string options = 4;
+}
+```
+
+Shims are responsible for mounting the filesystem into the `rootfs/` directory of the bundle.
+Shims are also responsible for unmounting of the filesystem.
+During a `delete` binary call, the shim MUST ensure that filesystem is also unmounted.
+Filesystems are provided by the containerd snapshotters.
+
+### Events
+
+The Runtime v2 supports an async event model. In order for an upstream caller (such as Docker) to get these events in the correct order a Runtime v2 shim MUST implement the following events where `Compliance=MUST`. This avoids race conditions between the shim and shim client where for example a call to `Start` can signal a `TaskExitEventTopic` before even returning the results from the `Start` call. With these guarantees of a Runtime v2 shim a call to `Start` is required to have published the async event `TaskStartEventTopic` before the shim can publish the `TaskExitEventTopic`.
+
+#### Tasks
+
+| Topic | Compliance | Description |
+| ----- | ---------- | ----------- |
+| `runtime.TaskCreateEventTopic`       | MUST                                                                          | When a task is successfully created |
+| `runtime.TaskStartEventTopic`        | MUST (follow `TaskCreateEventTopic`)                                          | When a task is successfully started |
+| `runtime.TaskExitEventTopic`         | MUST (follow `TaskStartEventTopic`)                                           | When a task exits expected or unexpected |
+| `runtime.TaskDeleteEventTopic`       | MUST (follow `TaskExitEventTopic` or `TaskCreateEventTopic` if never started) | When a task is removed from a shim |
+| `runtime.TaskPausedEventTopic`       | SHOULD                                                                        | When a task is successfully paused |
+| `runtime.TaskResumedEventTopic`      | SHOULD (follow `TaskPausedEventTopic`)                                        | When a task is successfully resumed |
+| `runtime.TaskCheckpointedEventTopic` | SHOULD                                                                        | When a task is checkpointed |
+| `runtime.TaskOOMEventTopic`          | SHOULD                                                                        | If the shim collects Out of Memory events |
+
+#### Execs
+
+| Topic | Compliance | Description |
+| ----- | ---------- | ----------- |
+| `runtime.TaskExecAddedEventTopic`   | MUST (follow `TaskCreateEventTopic` )     | When an exec is successfully added |
+| `runtime.TaskExecStartedEventTopic` | MUST (follow `TaskExecAddedEventTopic`)   | When an exec is successfully started |
+| `runtime.TaskExitEventTopic`        | MUST (follow `TaskExecStartedEventTopic`) | When an exec (other than the init exec) exits expected or unexpected |
+| `runtime.TaskDeleteEventTopic`      | SHOULD (follow `TaskExitEventTopic` or `TaskExecAddedEventTopic` if never started) | When an exec is removed from a shim |
+
+### Other
+
+#### Unsupported rpcs
+
+If a shim does not or cannot implement an rpc call, it MUST return a `github.com/containerd/containerd/errdefs.ErrNotImplemented` error.
+
+#### Debugging and Shim Logs
+
+A fifo on unix or named pipe on Windows will be provided to the shim.
+It can be located inside the `cwd` of the shim named "log".
+The shims can use the existing `github.com/containerd/containerd/log` package to log debug messages.
+Messages will automatically be output in the containerd's daemon logs with the correct fields and runtime set.
+
+#### ttrpc
+
+[ttrpc](https://github.com/containerd/ttrpc) is the only currently supported protocol for shims.
+It works with standard protobufs and GRPC services as well as generating clients.
+The only difference between grpc and ttrpc is the wire protocol.
+ttrpc removes the http stack in order to save memory and binary size to keep shims small.
+It is recommended to use ttrpc in your shim but grpc support is also in development.

+ 17 - 0
vendor/github.com/containerd/containerd/runtime/v2/runc/options/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package options

+ 1313 - 0
vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go

@@ -0,0 +1,1313 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
+
+/*
+	Package options is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
+
+	It has these top-level messages:
+		Options
+		CheckpointOptions
+		ProcessDetails
+*/
+package options
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Options struct {
+	// disable pivot root when creating a container
+	NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
+	// create a new keyring for the container
+	NoNewKeyring bool `protobuf:"varint,2,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
+	// place the shim in a cgroup
+	ShimCgroup string `protobuf:"bytes,3,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
+	// set the I/O's pipes uid
+	IoUid uint32 `protobuf:"varint,4,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
+	// set the I/O's pipes gid
+	IoGid uint32 `protobuf:"varint,5,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
+	// binary name of the runc binary
+	BinaryName string `protobuf:"bytes,6,opt,name=binary_name,json=binaryName,proto3" json:"binary_name,omitempty"`
+	// runc root directory
+	Root string `protobuf:"bytes,7,opt,name=root,proto3" json:"root,omitempty"`
+	// criu binary path
+	CriuPath string `protobuf:"bytes,8,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
+	// enable systemd cgroups
+	SystemdCgroup bool `protobuf:"varint,9,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
+	// criu image path
+	CriuImagePath string `protobuf:"bytes,10,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
+	// criu work path
+	CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
+}
+
+func (m *Options) Reset()                    { *m = Options{} }
+func (*Options) ProtoMessage()               {}
+func (*Options) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{0} }
+
+type CheckpointOptions struct {
+	// exit the container after a checkpoint
+	Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
+	// checkpoint open tcp connections
+	OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
+	// checkpoint external unix sockets
+	ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
+	// checkpoint terminals (ptys)
+	Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	// allow checkpointing of file locks
+	FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
+	// restore provided namespaces as empty namespaces
+	EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
+	// set the cgroups mode, soft, full, strict
+	CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+	// checkpoint image path
+	ImagePath string `protobuf:"bytes,8,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
+	// checkpoint work path
+	WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
+}
+
+func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
+func (*CheckpointOptions) ProtoMessage()               {}
+func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{1} }
+
+type ProcessDetails struct {
+	// exec process id if the process is managed by a shim
+	ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage()               {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{2} }
+
+func init() {
+	proto.RegisterType((*Options)(nil), "containerd.runc.v1.Options")
+	proto.RegisterType((*CheckpointOptions)(nil), "containerd.runc.v1.CheckpointOptions")
+	proto.RegisterType((*ProcessDetails)(nil), "containerd.runc.v1.ProcessDetails")
+}
+func (m *Options) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Options) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.NoPivotRoot {
+		dAtA[i] = 0x8
+		i++
+		if m.NoPivotRoot {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.NoNewKeyring {
+		dAtA[i] = 0x10
+		i++
+		if m.NoNewKeyring {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.ShimCgroup) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.ShimCgroup)))
+		i += copy(dAtA[i:], m.ShimCgroup)
+	}
+	if m.IoUid != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(m.IoUid))
+	}
+	if m.IoGid != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(m.IoGid))
+	}
+	if len(m.BinaryName) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.BinaryName)))
+		i += copy(dAtA[i:], m.BinaryName)
+	}
+	if len(m.Root) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.Root)))
+		i += copy(dAtA[i:], m.Root)
+	}
+	if len(m.CriuPath) > 0 {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.CriuPath)))
+		i += copy(dAtA[i:], m.CriuPath)
+	}
+	if m.SystemdCgroup {
+		dAtA[i] = 0x48
+		i++
+		if m.SystemdCgroup {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.CriuImagePath) > 0 {
+		dAtA[i] = 0x52
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.CriuImagePath)))
+		i += copy(dAtA[i:], m.CriuImagePath)
+	}
+	if len(m.CriuWorkPath) > 0 {
+		dAtA[i] = 0x5a
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath)))
+		i += copy(dAtA[i:], m.CriuWorkPath)
+	}
+	return i, nil
+}
+
+func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Exit {
+		dAtA[i] = 0x8
+		i++
+		if m.Exit {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.OpenTcp {
+		dAtA[i] = 0x10
+		i++
+		if m.OpenTcp {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ExternalUnixSockets {
+		dAtA[i] = 0x18
+		i++
+		if m.ExternalUnixSockets {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Terminal {
+		dAtA[i] = 0x20
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.FileLocks {
+		dAtA[i] = 0x28
+		i++
+		if m.FileLocks {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			dAtA[i] = 0x32
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.CgroupsMode) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.CgroupsMode)))
+		i += copy(dAtA[i:], m.CgroupsMode)
+	}
+	if len(m.ImagePath) > 0 {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.ImagePath)))
+		i += copy(dAtA[i:], m.ImagePath)
+	}
+	if len(m.WorkPath) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath)))
+		i += copy(dAtA[i:], m.WorkPath)
+	}
+	return i, nil
+}
+
+func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func encodeVarintOci(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Options) Size() (n int) {
+	var l int
+	_ = l
+	if m.NoPivotRoot {
+		n += 2
+	}
+	if m.NoNewKeyring {
+		n += 2
+	}
+	l = len(m.ShimCgroup)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	if m.IoUid != 0 {
+		n += 1 + sovOci(uint64(m.IoUid))
+	}
+	if m.IoGid != 0 {
+		n += 1 + sovOci(uint64(m.IoGid))
+	}
+	l = len(m.BinaryName)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	l = len(m.Root)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	l = len(m.CriuPath)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	if m.SystemdCgroup {
+		n += 2
+	}
+	l = len(m.CriuImagePath)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	l = len(m.CriuWorkPath)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	return n
+}
+
+func (m *CheckpointOptions) Size() (n int) {
+	var l int
+	_ = l
+	if m.Exit {
+		n += 2
+	}
+	if m.OpenTcp {
+		n += 2
+	}
+	if m.ExternalUnixSockets {
+		n += 2
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.FileLocks {
+		n += 2
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			l = len(s)
+			n += 1 + l + sovOci(uint64(l))
+		}
+	}
+	l = len(m.CgroupsMode)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	l = len(m.ImagePath)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	l = len(m.WorkPath)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	return n
+}
+
+func (m *ProcessDetails) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovOci(uint64(l))
+	}
+	return n
+}
+
+func sovOci(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozOci(x uint64) (n int) {
+	return sovOci(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Options) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Options{`,
+		`NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`,
+		`NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`,
+		`ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`,
+		`IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`,
+		`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
+		`BinaryName:` + fmt.Sprintf("%v", this.BinaryName) + `,`,
+		`Root:` + fmt.Sprintf("%v", this.Root) + `,`,
+		`CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`,
+		`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
+		`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
+		`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CheckpointOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CheckpointOptions{`,
+		`Exit:` + fmt.Sprintf("%v", this.Exit) + `,`,
+		`OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`,
+		`ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`,
+		`EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`,
+		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
+		`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
+		`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProcessDetails) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProcessDetails{`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringOci(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Options) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowOci
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Options: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoPivotRoot = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoNewKeyring = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ShimCgroup = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType)
+			}
+			m.IoUid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IoUid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType)
+			}
+			m.IoGid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IoGid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BinaryName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BinaryName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Root = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SystemdCgroup = bool(v != 0)
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuImagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuWorkPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipOci(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthOci
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowOci
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Exit = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.OpenTcp = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExternalUnixSockets = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FileLocks = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CgroupsMode = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WorkPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipOci(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthOci
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowOci
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOci
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipOci(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthOci
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipOci(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowOci
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowOci
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthOci
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowOci
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipOci(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowOci   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptorOci)
+}
+
+var fileDescriptorOci = []byte{
+	// 587 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82,
+	0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97,
+	0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4,
+	0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3,
+	0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49,
+	0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29,
+	0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf,
+	0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff,
+	0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41,
+	0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6,
+	0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e,
+	0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8,
+	0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12,
+	0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c,
+	0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c,
+	0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81,
+	0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f,
+	0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b,
+	0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d,
+	0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd,
+	0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99,
+	0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61,
+	0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a,
+	0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3,
+	0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed,
+	0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f,
+	0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04,
+	0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74,
+	0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca,
+	0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd,
+	0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3,
+	0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1,
+	0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0,
+	0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7,
+	0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00,
+	0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00,
+}

+ 58 - 0
vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto

@@ -0,0 +1,58 @@
+syntax = "proto3";
+
+package containerd.runc.v1;
+
+import weak "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/runtime/v2/runc/options;options";
+
+message Options {
+	// disable pivot root when creating a container
+	bool no_pivot_root = 1;
+	// create a new keyring for the container
+	bool no_new_keyring = 2;
+	// place the shim in a cgroup
+	string shim_cgroup = 3;
+	// set the I/O's pipes uid
+	uint32 io_uid = 4;
+	// set the I/O's pipes gid
+	uint32 io_gid = 5;
+	// binary name of the runc binary
+	string binary_name = 6;
+	// runc root directory
+	string root = 7;
+	// criu binary path
+	string criu_path = 8;
+	// enable systemd cgroups
+	bool systemd_cgroup = 9;
+	// criu image path
+	string criu_image_path = 10;
+	// criu work path
+	string criu_work_path = 11;
+}
+
+message CheckpointOptions {
+	// exit the container after a checkpoint
+	bool exit = 1;
+	// checkpoint open tcp connections
+	bool open_tcp = 2;
+	// checkpoint external unix sockets
+	bool external_unix_sockets = 3;
+	// checkpoint terminals (ptys)
+	bool terminal = 4;
+	// allow checkpointing of file locks
+	bool file_locks = 5;
+	// restore provided namespaces as empty namespaces
+	repeated string empty_namespaces = 6;
+	// set the cgroups mode, soft, full, strict
+	string cgroups_mode = 7;
+	// checkpoint image path
+	string image_path = 8;
+	// checkpoint work path
+	string work_path = 9;
+}
+
+message ProcessDetails {
+	// exec process id if the process is managed by a shim
+	string exec_id = 1;
+}

+ 492 - 0
vendor/github.com/containerd/containerd/services/content/service.go

@@ -0,0 +1,492 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	api "github.com/containerd/containerd/api/services/content/v1"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/services"
+	ptypes "github.com/gogo/protobuf/types"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type service struct {
+	store content.Store
+}
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 1<<20)
+		return &buffer
+	},
+}
+
+var _ api.ContentServer = &service{}
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.GRPCPlugin,
+		ID:   "content",
+		Requires: []plugin.Type{
+			plugin.ServicePlugin,
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			plugins, err := ic.GetByType(plugin.ServicePlugin)
+			if err != nil {
+				return nil, err
+			}
+			p, ok := plugins[services.ContentService]
+			if !ok {
+				return nil, errors.New("content store service not found")
+			}
+			cs, err := p.Instance()
+			if err != nil {
+				return nil, err
+			}
+			return NewService(cs.(content.Store)), nil
+		},
+	})
+}
+
+// NewService returns the content GRPC server
+func NewService(cs content.Store) api.ContentServer {
+	return &service{store: cs}
+}
+
+func (s *service) Register(server *grpc.Server) error {
+	api.RegisterContentServer(server, s)
+	return nil
+}
+
+func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
+	if err := req.Digest.Validate(); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
+	}
+
+	bi, err := s.store.Info(ctx, req.Digest)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &api.InfoResponse{
+		Info: infoToGRPC(bi),
+	}, nil
+}
+
+func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
+	if err := req.Info.Digest.Validate(); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
+	}
+
+	info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &api.UpdateResponse{
+		Info: infoToGRPC(info),
+	}, nil
+}
+
+func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
+	var (
+		buffer    []api.Info
+		sendBlock = func(block []api.Info) error {
+			// send last block
+			return session.Send(&api.ListContentResponse{
+				Info: block,
+			})
+		}
+	)
+
+	if err := s.store.Walk(session.Context(), func(info content.Info) error {
+		buffer = append(buffer, api.Info{
+			Digest:    info.Digest,
+			Size_:     info.Size,
+			CreatedAt: info.CreatedAt,
+			Labels:    info.Labels,
+		})
+
+		if len(buffer) >= 100 {
+			if err := sendBlock(buffer); err != nil {
+				return err
+			}
+
+			buffer = buffer[:0]
+		}
+
+		return nil
+	}, req.Filters...); err != nil {
+		return err
+	}
+
+	if len(buffer) > 0 {
+		// send last block
+		if err := sendBlock(buffer); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
+	log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
+	if err := req.Digest.Validate(); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, err.Error())
+	}
+
+	if err := s.store.Delete(ctx, req.Digest); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &ptypes.Empty{}, nil
+}
+
+func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
+	if err := req.Digest.Validate(); err != nil {
+		return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
+	}
+
+	oi, err := s.store.Info(session.Context(), req.Digest)
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+
+	ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+	defer ra.Close()
+
+	var (
+		offset = req.Offset
+		// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
+		// offset+size can be larger than oi.Size.
+		size = req.Size_
+
+		// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
+		// little inefficient for work over a fast network. We can tune this later.
+		p = bufPool.Get().(*[]byte)
+	)
+	defer bufPool.Put(p)
+
+	if offset < 0 {
+		offset = 0
+	}
+
+	if offset > oi.Size {
+		return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
+	}
+
+	if size <= 0 || offset+size > oi.Size {
+		size = oi.Size - offset
+	}
+
+	_, err = io.CopyBuffer(
+		&readResponseWriter{session: session},
+		io.NewSectionReader(ra, offset, size), *p)
+	return errdefs.ToGRPC(err)
+}
+
+// readResponseWriter is a writer that places the output into ReadContentRequest messages.
+//
+// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
+// into the buffer size.
+type readResponseWriter struct {
+	offset  int64
+	session api.Content_ReadServer
+}
+
+func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
+	if err := rw.session.Send(&api.ReadContentResponse{
+		Offset: rw.offset,
+		Data:   p,
+	}); err != nil {
+		return 0, err
+	}
+
+	rw.offset += int64(len(p))
+	return len(p), nil
+}
+
+func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
+	status, err := s.store.Status(ctx, req.Ref)
+	if err != nil {
+		return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
+	}
+
+	var resp api.StatusResponse
+	resp.Status = &api.Status{
+		StartedAt: status.StartedAt,
+		UpdatedAt: status.UpdatedAt,
+		Ref:       status.Ref,
+		Offset:    status.Offset,
+		Total:     status.Total,
+		Expected:  status.Expected,
+	}
+
+	return &resp, nil
+}
+
+func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
+	statuses, err := s.store.ListStatuses(ctx, req.Filters...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	var resp api.ListStatusesResponse
+	for _, status := range statuses {
+		resp.Statuses = append(resp.Statuses, api.Status{
+			StartedAt: status.StartedAt,
+			UpdatedAt: status.UpdatedAt,
+			Ref:       status.Ref,
+			Offset:    status.Offset,
+			Total:     status.Total,
+			Expected:  status.Expected,
+		})
+	}
+
+	return &resp, nil
+}
+
+func (s *service) Write(session api.Content_WriteServer) (err error) {
+	var (
+		ctx      = session.Context()
+		msg      api.WriteContentResponse
+		req      *api.WriteContentRequest
+		ref      string
+		total    int64
+		expected digest.Digest
+	)
+
+	defer func(msg *api.WriteContentResponse) {
+		// pump through the last message if no error was encountered
+		if err != nil {
+			if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
+				// TODO(stevvooe): Really need a log line here to track which
+				// errors are actually causing failure on the server side. May want
+				// to configure the service with an interceptor to make this work
+				// identically across all GRPC methods.
+				//
+				// This is pretty noisy, so we can remove it but leave it for now.
+				log.G(ctx).WithError(err).Error("(*service).Write failed")
+			}
+
+			return
+		}
+
+		err = session.Send(msg)
+	}(&msg)
+
+	// handle the very first request!
+	req, err = session.Recv()
+	if err != nil {
+		return err
+	}
+
+	ref = req.Ref
+
+	if ref == "" {
+		return status.Errorf(codes.InvalidArgument, "first message must have a reference")
+	}
+
+	fields := logrus.Fields{
+		"ref": ref,
+	}
+	total = req.Total
+	expected = req.Expected
+	if total > 0 {
+		fields["total"] = total
+	}
+
+	if expected != "" {
+		fields["expected"] = expected
+	}
+
+	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
+
+	log.G(ctx).Debug("(*service).Write started")
+	// this action locks the writer for the session.
+	wr, err := s.store.Writer(ctx,
+		content.WithRef(ref),
+		content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+	defer wr.Close()
+
+	for {
+		msg.Action = req.Action
+		ws, err := wr.Status()
+		if err != nil {
+			return errdefs.ToGRPC(err)
+		}
+
+		msg.Offset = ws.Offset // always set the offset.
+
+		// NOTE(stevvooe): In general, there are two cases underwhich a remote
+		// writer is used.
+		//
+		// For pull, we almost always have this before fetching large content,
+		// through descriptors. We allow predeclaration of the expected size
+		// and digest.
+		//
+		// For push, it is more complex. If we want to cut through content into
+		// storage, we may have no expectation until we are done processing the
+		// content. The case here is the following:
+		//
+		// 	1. Start writing content.
+		// 	2. Compress inline.
+		// 	3. Validate digest and size (maybe).
+		//
+		// Supporting these two paths is quite awkward but it lets both API
+		// users use the same writer style for each with a minimum of overhead.
+		if req.Expected != "" {
+			if expected != "" && expected != req.Expected {
+				log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
+			}
+			expected = req.Expected
+
+			if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
+				if err := wr.Close(); err != nil {
+					log.G(ctx).WithError(err).Error("failed to close writer")
+				}
+				if err := s.store.Abort(session.Context(), ref); err != nil {
+					log.G(ctx).WithError(err).Error("failed to abort write")
+				}
+
+				return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
+			}
+		}
+
+		if req.Total > 0 {
+			// Update the expected total. Typically, this could be seen at
+			// negotiation time or on a commit message.
+			if total > 0 && req.Total != total {
+				log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
+			}
+			total = req.Total
+		}
+
+		switch req.Action {
+		case api.WriteActionStat:
+			msg.Digest = wr.Digest()
+			msg.StartedAt = ws.StartedAt
+			msg.UpdatedAt = ws.UpdatedAt
+			msg.Total = total
+		case api.WriteActionWrite, api.WriteActionCommit:
+			if req.Offset > 0 {
+				// validate the offset if provided
+				if req.Offset != ws.Offset {
+					return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
+				}
+			}
+
+			if req.Offset == 0 && ws.Offset > 0 {
+				if err := wr.Truncate(req.Offset); err != nil {
+					return errors.Wrapf(err, "truncate failed")
+				}
+				msg.Offset = req.Offset
+			}
+
+			// issue the write if we actually have data.
+			if len(req.Data) > 0 {
+				// While this looks like we could use io.WriterAt here, because we
+				// maintain the offset as append only, we just issue the write.
+				n, err := wr.Write(req.Data)
+				if err != nil {
+					return errdefs.ToGRPC(err)
+				}
+
+				if n != len(req.Data) {
+					// TODO(stevvooe): Perhaps, we can recover this by including it
+					// in the offset on the write return.
+					return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
+				}
+
+				msg.Offset += int64(n)
+			}
+
+			if req.Action == api.WriteActionCommit {
+				var opts []content.Opt
+				if req.Labels != nil {
+					opts = append(opts, content.WithLabels(req.Labels))
+				}
+				if err := wr.Commit(ctx, total, expected, opts...); err != nil {
+					return errdefs.ToGRPC(err)
+				}
+			}
+
+			msg.Digest = wr.Digest()
+		}
+
+		if err := session.Send(&msg); err != nil {
+			return err
+		}
+
+		req, err = session.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+	}
+}
+
+func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
+	if err := s.store.Abort(ctx, req.Ref); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &ptypes.Empty{}, nil
+}
+
+func infoToGRPC(info content.Info) api.Info {
+	return api.Info{
+		Digest:    info.Digest,
+		Size_:     info.Size,
+		CreatedAt: info.CreatedAt,
+		UpdatedAt: info.UpdatedAt,
+		Labels:    info.Labels,
+	}
+}
+
+func infoFromGRPC(info api.Info) content.Info {
+	return content.Info{
+		Digest:    info.Digest,
+		Size:      info.Size_,
+		CreatedAt: info.CreatedAt,
+		UpdatedAt: info.UpdatedAt,
+		Labels:    info.Labels,
+	}
+}

+ 71 - 0
vendor/github.com/containerd/containerd/services/content/store.go

@@ -0,0 +1,71 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+
+	eventstypes "github.com/containerd/containerd/api/events"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/services"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// store wraps content.Store with proper event published.
+type store struct {
+	content.Store
+	publisher events.Publisher
+}
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.ServicePlugin,
+		ID:   services.ContentService,
+		Requires: []plugin.Type{
+			plugin.MetadataPlugin,
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			m, err := ic.Get(plugin.MetadataPlugin)
+			if err != nil {
+				return nil, err
+			}
+
+			s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events)
+			return s, err
+		},
+	})
+}
+
+func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) {
+	return &store{
+		Store:     cs,
+		publisher: publisher,
+	}, nil
+}
+
+func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
+	if err := s.Store.Delete(ctx, dgst); err != nil {
+		return err
+	}
+	// TODO: Consider whether we should return error here.
+	return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{
+		Digest: dgst,
+	})
+}

+ 38 - 0
vendor/github.com/containerd/containerd/services/server/config/config.go

@@ -83,6 +83,44 @@ type ProxyPlugin struct {
 	Address string `toml:"address"`
 	Address string `toml:"address"`
 }
 }
 
 
+// BoltConfig defines the configuration values for the bolt plugin, which is
+// loaded here, rather than back registered in the metadata package.
+type BoltConfig struct {
+	// ContentSharingPolicy sets the sharing policy for content between
+	// namespaces.
+	//
+	// The default mode "shared" will make blobs available in all
+	// namespaces once it is pulled into any namespace. The blob will be pulled
+	// into the namespace if a writer is opened with the "Expected" digest that
+	// is already present in the backend.
+	//
+	// The alternative mode, "isolated" requires that clients prove they have
+	// access to the content by providing all of the content to the ingest
+	// before the blob is added to the namespace.
+	//
+	// Both modes share backing data, while "shared" will reduce total
+	// bandwidth across namespaces, at the cost of allowing access to any blob
+	// just by knowing its digest.
+	ContentSharingPolicy string `toml:"content_sharing_policy"`
+}
+
+const (
+	// SharingPolicyShared represents the "shared" sharing policy
+	SharingPolicyShared = "shared"
+	// SharingPolicyIsolated represents the "isolated" sharing policy
+	SharingPolicyIsolated = "isolated"
+)
+
+// Validate validates if BoltConfig is valid
+func (bc *BoltConfig) Validate() error {
+	switch bc.ContentSharingPolicy {
+	case SharingPolicyShared, SharingPolicyIsolated:
+		return nil
+	default:
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "unknown policy: %s", bc.ContentSharingPolicy)
+	}
+}
+
 // Decode unmarshals a plugin specific configuration by plugin id
 // Decode unmarshals a plugin specific configuration by plugin id
 func (c *Config) Decode(id string, v interface{}) (interface{}, error) {
 func (c *Config) Decode(id string, v interface{}) (interface{}, error) {
 	data, ok := c.Plugins[id]
 	data, ok := c.Plugins[id]

+ 36 - 0
vendor/github.com/containerd/containerd/services/services.go

@@ -0,0 +1,36 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package services
+
+const (
+	// ContentService is id of content service.
+	ContentService = "content-service"
+	// SnapshotsService is id of snapshots service.
+	SnapshotsService = "snapshots-service"
+	// ImagesService is id of images service.
+	ImagesService = "images-service"
+	// ContainersService is id of containers service.
+	ContainersService = "containers-service"
+	// TasksService is id of tasks service.
+	TasksService = "tasks-service"
+	// NamespacesService is id of namespaces service.
+	NamespacesService = "namespaces-service"
+	// LeasesService is id of leases service.
+	LeasesService = "leases-service"
+	// DiffService is id of diff service.
+	DiffService = "diff-service"
+)

+ 0 - 60
vendor/github.com/containerd/containerd/signal_map_linux.go

@@ -1,60 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package containerd
-
-import (
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-var signalMap = map[string]syscall.Signal{
-	"ABRT":   unix.SIGABRT,
-	"ALRM":   unix.SIGALRM,
-	"BUS":    unix.SIGBUS,
-	"CHLD":   unix.SIGCHLD,
-	"CLD":    unix.SIGCLD,
-	"CONT":   unix.SIGCONT,
-	"FPE":    unix.SIGFPE,
-	"HUP":    unix.SIGHUP,
-	"ILL":    unix.SIGILL,
-	"INT":    unix.SIGINT,
-	"IO":     unix.SIGIO,
-	"IOT":    unix.SIGIOT,
-	"KILL":   unix.SIGKILL,
-	"PIPE":   unix.SIGPIPE,
-	"POLL":   unix.SIGPOLL,
-	"PROF":   unix.SIGPROF,
-	"PWR":    unix.SIGPWR,
-	"QUIT":   unix.SIGQUIT,
-	"SEGV":   unix.SIGSEGV,
-	"STKFLT": unix.SIGSTKFLT,
-	"STOP":   unix.SIGSTOP,
-	"SYS":    unix.SIGSYS,
-	"TERM":   unix.SIGTERM,
-	"TRAP":   unix.SIGTRAP,
-	"TSTP":   unix.SIGTSTP,
-	"TTIN":   unix.SIGTTIN,
-	"TTOU":   unix.SIGTTOU,
-	"URG":    unix.SIGURG,
-	"USR1":   unix.SIGUSR1,
-	"USR2":   unix.SIGUSR2,
-	"VTALRM": unix.SIGVTALRM,
-	"WINCH":  unix.SIGWINCH,
-	"XCPU":   unix.SIGXCPU,
-	"XFSZ":   unix.SIGXFSZ,
-}

+ 0 - 58
vendor/github.com/containerd/containerd/signal_map_unix.go

@@ -1,58 +0,0 @@
-// +build darwin freebsd solaris
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package containerd
-
-import (
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-var signalMap = map[string]syscall.Signal{
-	"ABRT":   unix.SIGABRT,
-	"ALRM":   unix.SIGALRM,
-	"BUS":    unix.SIGBUS,
-	"CHLD":   unix.SIGCHLD,
-	"CONT":   unix.SIGCONT,
-	"FPE":    unix.SIGFPE,
-	"HUP":    unix.SIGHUP,
-	"ILL":    unix.SIGILL,
-	"INT":    unix.SIGINT,
-	"IO":     unix.SIGIO,
-	"IOT":    unix.SIGIOT,
-	"KILL":   unix.SIGKILL,
-	"PIPE":   unix.SIGPIPE,
-	"PROF":   unix.SIGPROF,
-	"QUIT":   unix.SIGQUIT,
-	"SEGV":   unix.SIGSEGV,
-	"STOP":   unix.SIGSTOP,
-	"SYS":    unix.SIGSYS,
-	"TERM":   unix.SIGTERM,
-	"TRAP":   unix.SIGTRAP,
-	"TSTP":   unix.SIGTSTP,
-	"TTIN":   unix.SIGTTIN,
-	"TTOU":   unix.SIGTTOU,
-	"URG":    unix.SIGURG,
-	"USR1":   unix.SIGUSR1,
-	"USR2":   unix.SIGUSR2,
-	"VTALRM": unix.SIGVTALRM,
-	"WINCH":  unix.SIGWINCH,
-	"XCPU":   unix.SIGXCPU,
-	"XFSZ":   unix.SIGXFSZ,
-}

+ 1 - 23
vendor/github.com/containerd/containerd/signals.go

@@ -20,13 +20,11 @@ import (
 	"context"
 	"context"
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
-	"strconv"
-	"strings"
 	"syscall"
 	"syscall"
 
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 )
 
 
 // StopSignalLabel is a well-known containerd label for storing the stop
 // StopSignalLabel is a well-known containerd label for storing the stop
@@ -83,23 +81,3 @@ func GetOCIStopSignal(ctx context.Context, image Image, defaultSignal string) (s
 
 
 	return config.StopSignal, nil
 	return config.StopSignal, nil
 }
 }
-
-// ParseSignal parses a given string into a syscall.Signal
-// it checks that the signal exists in the platform-appropriate signalMap
-func ParseSignal(rawSignal string) (syscall.Signal, error) {
-	s, err := strconv.Atoi(rawSignal)
-	if err == nil {
-		sig := syscall.Signal(s)
-		for _, msig := range signalMap {
-			if sig == msig {
-				return sig, nil
-			}
-		}
-		return -1, fmt.Errorf("unknown signal %q", rawSignal)
-	}
-	signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
-	if !ok {
-		return -1, fmt.Errorf("unknown signal %q", rawSignal)
-	}
-	return signal, nil
-}

+ 47 - 0
vendor/github.com/containerd/containerd/signals_unix.go

@@ -0,0 +1,47 @@
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// ParseSignal parses a given string into a syscall.Signal
+// the rawSignal can be a string with "SIG" prefix,
+// or a signal number in string format.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+	s, err := strconv.Atoi(rawSignal)
+	if err == nil {
+		signal := syscall.Signal(s)
+		if unix.SignalName(signal) != "" {
+			return signal, nil
+		}
+		return -1, fmt.Errorf("unknown signal %q", rawSignal)
+	}
+	signal := unix.SignalNum(strings.ToUpper(rawSignal))
+	if signal == 0 {
+		return -1, fmt.Errorf("unknown signal %q", rawSignal)
+	}
+	return signal, nil
+}

+ 24 - 0
vendor/github.com/containerd/containerd/signal_map_windows.go → vendor/github.com/containerd/containerd/signals_windows.go

@@ -17,6 +17,9 @@
 package containerd
 package containerd
 
 
 import (
 import (
+	"fmt"
+	"strconv"
+	"strings"
 	"syscall"
 	"syscall"
 
 
 	"golang.org/x/sys/windows"
 	"golang.org/x/sys/windows"
@@ -37,3 +40,24 @@ var signalMap = map[string]syscall.Signal{
 	"ALRM":   syscall.Signal(windows.SIGALRM),
 	"ALRM":   syscall.Signal(windows.SIGALRM),
 	"TERM":   syscall.Signal(windows.SIGTERM),
 	"TERM":   syscall.Signal(windows.SIGTERM),
 }
 }
+
+// ParseSignal parses a given string into a syscall.Signal
+// the rawSignal can be a string with "SIG" prefix,
+// or a signal number in string format.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+	s, err := strconv.Atoi(rawSignal)
+	if err == nil {
+		sig := syscall.Signal(s)
+		for _, msig := range signalMap {
+			if sig == msig {
+				return sig, nil
+			}
+		}
+		return -1, fmt.Errorf("unknown signal %q", rawSignal)
+	}
+	signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+	if !ok {
+		return -1, fmt.Errorf("unknown signal %q", rawSignal)
+	}
+	return signal, nil
+}

+ 11 - 9
vendor/github.com/containerd/containerd/snapshots/snapshotter.go

@@ -160,9 +160,13 @@ func (u *Usage) Add(other Usage) {
 //	layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file.
 //	layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file.
 //
 //
 // We start by using a Snapshotter to Prepare a new snapshot transaction, using a
 // We start by using a Snapshotter to Prepare a new snapshot transaction, using a
-// key and descending from the empty parent "":
+// key and descending from the empty parent "". To prevent our layer from being
+// garbage collected during unpacking, we add the `containerd.io/gc.root` label:
 //
 //
-//	mounts, err := snapshotter.Prepare(ctx, key, "")
+//	noGcOpt := snapshots.WithLabels(map[string]string{
+//		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+//	})
+//	mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt)
 // 	if err != nil { ... }
 // 	if err != nil { ... }
 //
 //
 // We get back a list of mounts from Snapshotter.Prepare, with the key identifying
 // We get back a list of mounts from Snapshotter.Prepare, with the key identifying
@@ -191,15 +195,13 @@ func (u *Usage) Add(other Usage) {
 //
 //
 // Now that we've verified and unpacked our layer, we commit the active
 // Now that we've verified and unpacked our layer, we commit the active
 // snapshot to a name. For this example, we are just going to use the layer
 // snapshot to a name. For this example, we are just going to use the layer
-// digest, but in practice, this will probably be the ChainID:
+// digest, but in practice, this will probably be the ChainID. This also removes
+// the active snapshot:
 //
 //
-//	if err := snapshotter.Commit(ctx, digest.String(), key); err != nil { ... }
+//	if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... }
 //
 //
 // Now, we have a layer in the Snapshotter that can be accessed with the digest
 // Now, we have a layer in the Snapshotter that can be accessed with the digest
-// provided during commit. Once you have committed the snapshot, the active
-// snapshot can be removed with the following:
-//
-// 	snapshotter.Remove(ctx, key)
+// provided during commit.
 //
 //
 // Importing the Next Layer
 // Importing the Next Layer
 //
 //
@@ -207,7 +209,7 @@ func (u *Usage) Add(other Usage) {
 // above except that the parent is provided as parent when calling
 // above except that the parent is provided as parent when calling
 // Manager.Prepare, assuming a clean, unique key identifier:
 // Manager.Prepare, assuming a clean, unique key identifier:
 //
 //
-// 	mounts, err := snapshotter.Prepare(ctx, key, parentDigest)
+// 	mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt)
 //
 //
 // We then mount, apply and commit, as we did above. The new snapshot will be
 // We then mount, apply and commit, as we did above. The new snapshot will be
 // based on the content of the previous one.
 // based on the content of the previous one.

+ 55 - 6
vendor/github.com/containerd/containerd/task.go

@@ -37,11 +37,13 @@ import (
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/containerd/rootfs"
+	"github.com/containerd/containerd/runtime/linux/runctypes"
+	"github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
 	"github.com/containerd/typeurl"
 	google_protobuf "github.com/gogo/protobuf/types"
 	google_protobuf "github.com/gogo/protobuf/types"
 	digest "github.com/opencontainers/go-digest"
 	digest "github.com/opencontainers/go-digest"
 	is "github.com/opencontainers/image-spec/specs-go"
 	is "github.com/opencontainers/image-spec/specs-go"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
@@ -115,6 +117,13 @@ type CheckpointTaskInfo struct {
 	ParentCheckpoint digest.Digest
 	ParentCheckpoint digest.Digest
 	// Options hold runtime specific settings for checkpointing a task
 	// Options hold runtime specific settings for checkpointing a task
 	Options interface{}
 	Options interface{}
+
+	runtime string
+}
+
+// Runtime name for the container
+func (i *CheckpointTaskInfo) Runtime() string {
+	return i.runtime
 }
 }
 
 
 // CheckpointTaskOpts allows the caller to set checkpoint options
 // CheckpointTaskOpts allows the caller to set checkpoint options
@@ -129,6 +138,12 @@ type TaskInfo struct {
 	RootFS []mount.Mount
 	RootFS []mount.Mount
 	// Options hold runtime specific settings for task creation
 	// Options hold runtime specific settings for task creation
 	Options interface{}
 	Options interface{}
+	runtime string
+}
+
+// Runtime name for the container
+func (i *TaskInfo) Runtime() string {
+	return i.runtime
 }
 }
 
 
 // Task is the executable object within containerd
 // Task is the executable object within containerd
@@ -147,6 +162,8 @@ type Task interface {
 	// OCI Index that can be push and pulled from a remote resource.
 	// OCI Index that can be push and pulled from a remote resource.
 	//
 	//
 	// Additional software like CRIU maybe required to checkpoint and restore tasks
 	// Additional software like CRIU maybe required to checkpoint and restore tasks
+	// NOTE: Checkpoint supports to dump task information to a directory, in this way,
+	// an empty OCI Index will be returned.
 	Checkpoint(context.Context, ...CheckpointTaskOpts) (Image, error)
 	Checkpoint(context.Context, ...CheckpointTaskOpts) (Image, error)
 	// Update modifies executing tasks with updated settings
 	// Update modifies executing tasks with updated settings
 	Update(context.Context, ...UpdateTaskOpts) error
 	Update(context.Context, ...UpdateTaskOpts) error
@@ -389,17 +406,25 @@ func (t *task) Resize(ctx context.Context, w, h uint32) error {
 	return errdefs.FromGRPC(err)
 	return errdefs.FromGRPC(err)
 }
 }
 
 
+// NOTE: Checkpoint supports to dump task information to a directory, in this way, an empty
+// OCI Index will be returned.
 func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Image, error) {
 func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Image, error) {
 	ctx, done, err := t.client.WithLease(ctx)
 	ctx, done, err := t.client.WithLease(ctx)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	defer done(ctx)
 	defer done(ctx)
+	cr, err := t.client.ContainerService().Get(ctx, t.id)
+	if err != nil {
+		return nil, err
+	}
 
 
 	request := &tasks.CheckpointTaskRequest{
 	request := &tasks.CheckpointTaskRequest{
 		ContainerID: t.id,
 		ContainerID: t.id,
 	}
 	}
-	var i CheckpointTaskInfo
+	i := CheckpointTaskInfo{
+		runtime: cr.Runtime.Name,
+	}
 	for _, o := range opts {
 	for _, o := range opts {
 		if err := o(&i); err != nil {
 		if err := o(&i); err != nil {
 			return nil, err
 			return nil, err
@@ -422,10 +447,6 @@ func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Imag
 		return nil, err
 		return nil, err
 	}
 	}
 	defer t.Resume(ctx)
 	defer t.Resume(ctx)
-	cr, err := t.client.ContainerService().Get(ctx, t.id)
-	if err != nil {
-		return nil, err
-	}
 	index := v1.Index{
 	index := v1.Index{
 		Versioned: is.Versioned{
 		Versioned: is.Versioned{
 			SchemaVersion: 2,
 			SchemaVersion: 2,
@@ -435,6 +456,12 @@ func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Imag
 	if err := t.checkpointTask(ctx, &index, request); err != nil {
 	if err := t.checkpointTask(ctx, &index, request); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	// if checkpoint image path passed, jump checkpoint image,
+	// return an empty image
+	if isCheckpointPathExist(cr.Runtime.Name, i.Options) {
+		return NewImage(t.client, images.Image{}), nil
+	}
+
 	if cr.Image != "" {
 	if cr.Image != "" {
 		if err := t.checkpointImage(ctx, &index, cr.Image); err != nil {
 		if err := t.checkpointImage(ctx, &index, cr.Image); err != nil {
 			return nil, err
 			return nil, err
@@ -544,6 +571,7 @@ func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tas
 	if err != nil {
 	if err != nil {
 		return errdefs.FromGRPC(err)
 		return errdefs.FromGRPC(err)
 	}
 	}
+	// NOTE: response.Descriptors can be an empty slice if checkpoint image is jumped
 	// add the checkpoint descriptors to the index
 	// add the checkpoint descriptors to the index
 	for _, d := range response.Descriptors {
 	for _, d := range response.Descriptors {
 		index.Manifests = append(index.Manifests, v1.Descriptor{
 		index.Manifests = append(index.Manifests, v1.Descriptor{
@@ -621,3 +649,24 @@ func writeContent(ctx context.Context, store content.Ingester, mediaType, ref st
 		Size:      size,
 		Size:      size,
 	}, nil
 	}, nil
 }
 }
+
+// isCheckpointPathExist only suitable for runc runtime now
+func isCheckpointPathExist(runtime string, v interface{}) bool {
+	if v == nil {
+		return false
+	}
+
+	switch runtime {
+	case plugin.RuntimeRuncV1, plugin.RuntimeRuncV2:
+		if opts, ok := v.(*options.CheckpointOptions); ok && opts.ImagePath != "" {
+			return true
+		}
+
+	case plugin.RuntimeLinuxV1:
+		if opts, ok := v.(*runctypes.CheckpointOptions); ok && opts.ImagePath != "" {
+			return true
+		}
+	}
+
+	return false
+}

+ 54 - 0
vendor/github.com/containerd/containerd/task_opts.go

@@ -27,6 +27,8 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/runtime/linux/runctypes"
+	"github.com/containerd/containerd/runtime/v2/runc/options"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
@@ -89,6 +91,58 @@ func WithCheckpointName(name string) CheckpointTaskOpts {
 	}
 	}
 }
 }
 
 
+// WithCheckpointImagePath sets image path for checkpoint option
+func WithCheckpointImagePath(path string) CheckpointTaskOpts {
+	return func(r *CheckpointTaskInfo) error {
+		if CheckRuntime(r.Runtime(), "io.containerd.runc") {
+			if r.Options == nil {
+				r.Options = &options.CheckpointOptions{}
+			}
+			opts, ok := r.Options.(*options.CheckpointOptions)
+			if !ok {
+				return errors.New("invalid v2 shim checkpoint options format")
+			}
+			opts.ImagePath = path
+		} else {
+			if r.Options == nil {
+				r.Options = &runctypes.CheckpointOptions{}
+			}
+			opts, ok := r.Options.(*runctypes.CheckpointOptions)
+			if !ok {
+				return errors.New("invalid v1 shim checkpoint options format")
+			}
+			opts.ImagePath = path
+		}
+		return nil
+	}
+}
+
+// WithRestoreImagePath sets image path for create option
+func WithRestoreImagePath(path string) NewTaskOpts {
+	return func(ctx context.Context, c *Client, ti *TaskInfo) error {
+		if CheckRuntime(ti.Runtime(), "io.containerd.runc") {
+			if ti.Options == nil {
+				ti.Options = &options.Options{}
+			}
+			opts, ok := ti.Options.(*options.Options)
+			if !ok {
+				return errors.New("invalid v2 shim create options format")
+			}
+			opts.CriuImagePath = path
+		} else {
+			if ti.Options == nil {
+				ti.Options = &runctypes.CreateOptions{}
+			}
+			opts, ok := ti.Options.(*runctypes.CreateOptions)
+			if !ok {
+				return errors.New("invalid v1 shim create options format")
+			}
+			opts.CriuImagePath = path
+		}
+		return nil
+	}
+}
+
 // ProcessDeleteOpts allows the caller to set options for the deletion of a task
 // ProcessDeleteOpts allows the caller to set options for the deletion of a task
 type ProcessDeleteOpts func(context.Context, Process) error
 type ProcessDeleteOpts func(context.Context, Process) error
 
 

+ 40 - 18
vendor/github.com/containerd/containerd/task_opts_unix.go

@@ -22,36 +22,58 @@ import (
 	"context"
 	"context"
 
 
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
+	"github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
 // WithNoNewKeyring causes tasks not to be created with a new keyring for secret storage.
 // WithNoNewKeyring causes tasks not to be created with a new keyring for secret storage.
 // There is an upper limit on the number of keyrings in a linux system
 // There is an upper limit on the number of keyrings in a linux system
 func WithNoNewKeyring(ctx context.Context, c *Client, ti *TaskInfo) error {
 func WithNoNewKeyring(ctx context.Context, c *Client, ti *TaskInfo) error {
-	if ti.Options == nil {
-		ti.Options = &runctypes.CreateOptions{}
-	}
-	opts, ok := ti.Options.(*runctypes.CreateOptions)
-	if !ok {
-		return errors.New("could not cast TaskInfo Options to CreateOptions")
+	if CheckRuntime(ti.Runtime(), "io.containerd.runc") {
+		if ti.Options == nil {
+			ti.Options = &options.Options{}
+		}
+		opts, ok := ti.Options.(*options.Options)
+		if !ok {
+			return errors.New("invalid v2 shim create options format")
+		}
+		opts.NoNewKeyring = true
+	} else {
+		if ti.Options == nil {
+			ti.Options = &runctypes.CreateOptions{}
+		}
+		opts, ok := ti.Options.(*runctypes.CreateOptions)
+		if !ok {
+			return errors.New("could not cast TaskInfo Options to CreateOptions")
+		}
+		opts.NoNewKeyring = true
 	}
 	}
-
-	opts.NoNewKeyring = true
 	return nil
 	return nil
 }
 }
 
 
 // WithNoPivotRoot instructs the runtime not to you pivot_root
 // WithNoPivotRoot instructs the runtime not to you pivot_root
-func WithNoPivotRoot(_ context.Context, _ *Client, info *TaskInfo) error {
-	if info.Options == nil {
-		info.Options = &runctypes.CreateOptions{
-			NoPivotRoot: true,
+func WithNoPivotRoot(_ context.Context, _ *Client, ti *TaskInfo) error {
+	if CheckRuntime(ti.Runtime(), "io.containerd.runc") {
+		if ti.Options == nil {
+			ti.Options = &options.Options{}
 		}
 		}
-		return nil
-	}
-	opts, ok := info.Options.(*runctypes.CreateOptions)
-	if !ok {
-		return errors.New("invalid options type, expected runctypes.CreateOptions")
+		opts, ok := ti.Options.(*options.Options)
+		if !ok {
+			return errors.New("invalid v2 shim create options format")
+		}
+		opts.NoPivotRoot = true
+	} else {
+		if ti.Options == nil {
+			ti.Options = &runctypes.CreateOptions{
+				NoPivotRoot: true,
+			}
+			return nil
+		}
+		opts, ok := ti.Options.(*runctypes.CreateOptions)
+		if !ok {
+			return errors.New("invalid options type, expected runctypes.CreateOptions")
+		}
+		opts.NoPivotRoot = true
 	}
 	}
-	opts.NoPivotRoot = true
 	return nil
 	return nil
 }
 }

+ 20 - 19
vendor/github.com/containerd/containerd/vendor.conf

@@ -19,31 +19,32 @@ github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/gogo/protobuf v1.0.0
 github.com/gogo/protobuf v1.0.0
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/golang/protobuf v1.1.0
 github.com/golang/protobuf v1.1.0
-github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
+github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
 github.com/opencontainers/runc 2b18fe1d885ee5083ef9f0838fee39b62d653e30
 github.com/opencontainers/runc 2b18fe1d885ee5083ef9f0838fee39b62d653e30
-github.com/sirupsen/logrus v1.0.0
+github.com/konsorten/go-windows-terminal-sequences v1.0.1
+github.com/sirupsen/logrus v1.3.0
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 google.golang.org/grpc v1.12.0
 google.golang.org/grpc v1.12.0
 github.com/pkg/errors v0.8.0
 github.com/pkg/errors v0.8.0
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
-golang.org/x/sys 41f3e6584952bb034a481797859f6ab34b6803bd https://github.com/golang/sys
+golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys
 github.com/opencontainers/image-spec v1.0.1
 github.com/opencontainers/image-spec v1.0.1
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
+golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-github.com/Microsoft/go-winio v0.4.11
-github.com/Microsoft/hcsshim v0.8.1
+github.com/Microsoft/go-winio v0.4.12
+github.com/Microsoft/hcsshim v0.8.5
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
+github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
 gotest.tools v2.1.0
 gotest.tools v2.1.0
 github.com/google/go-cmp v0.1.0
 github.com/google/go-cmp v0.1.0
-go.etcd.io/bbolt v1.3.1-etcd.8
+go.etcd.io/bbolt v1.3.2
 
 
 # cri dependencies
 # cri dependencies
-github.com/containerd/cri a92c40017473cbe0239ce180125f12669757e44f # release/1.2 branch
+github.com/containerd/cri 4dd6735020f5596dd41738f8c4f5cb07fa804c5e # master
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
 github.com/blang/semver v3.1.0
 github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/cni v0.6.0
@@ -53,8 +54,6 @@ github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
 github.com/emicklei/go-restful v2.2.1
 github.com/emicklei/go-restful v2.2.1
-github.com/ghodss/yaml v1.0.0
-github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
@@ -73,17 +72,19 @@ golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 v2.2.1
 gopkg.in/yaml.v2 v2.2.1
-k8s.io/api kubernetes-1.12.0
-k8s.io/apimachinery kubernetes-1.12.0
-k8s.io/apiserver kubernetes-1.12.0
-k8s.io/client-go kubernetes-1.12.0
-k8s.io/kubernetes v1.12.0
-k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb
+k8s.io/api kubernetes-1.13.0
+k8s.io/apimachinery kubernetes-1.13.0
+k8s.io/apiserver kubernetes-1.13.0
+k8s.io/client-go kubernetes-1.13.0
+k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
+k8s.io/kubernetes v1.13.0
+k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
+sigs.k8s.io/yaml v1.1.0
 
 
 # zfs dependencies
 # zfs dependencies
-github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec
+github.com/containerd/zfs 9f6ef3b1fe5144bd91fe5855b4eba81bc0d17d03
 github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
 github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
 github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
 github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
 
 
 # aufs dependencies
 # aufs dependencies
-github.com/containerd/aufs ffa39970e26ad01d81f540b21e65f9c1841a5f92
+github.com/containerd/aufs da3cf16bfbe68ba8f114f1536a05c01528a25434

+ 10 - 11
vendor/github.com/containerd/containerd/archive/time_darwin.go → vendor/github.com/containerd/containerd/version/version.go

@@ -14,17 +14,16 @@
    limitations under the License.
    limitations under the License.
 */
 */
 
 
-package archive
+package version
 
 
-import (
-	"time"
+var (
+	// Package is filled at linking time
+	Package = "github.com/containerd/containerd"
 
 
-	"github.com/pkg/errors"
-)
+	// Version holds the complete version number. Filled in at linking time.
+	Version = "1.2.0+unknown"
 
 
-// as at MacOS 10.12 there is apparently no way to set timestamps
-// with nanosecond precision. We could fall back to utimes/lutimes
-// and lose the precision as a temporary workaround.
-func chtimes(path string, atime, mtime time.Time) error {
-	return errors.New("OSX missing UtimesNanoAt")
-}
+	// Revision is filled with the VCS (e.g. git) revision being used to build
+	// the program at linking time.
+	Revision = ""
+)

+ 10 - 0
vendor/github.com/containerd/ttrpc/README.md

@@ -50,3 +50,13 @@ TODO:
 - [ ] Document protocol layout
 - [ ] Document protocol layout
 - [ ] Add testing under concurrent load to ensure
 - [ ] Add testing under concurrent load to ensure
 - [ ] Verify connection error handling
 - [ ] Verify connection error handling
+
+# Project details
+
+ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.

+ 6 - 0
vendor/github.com/containerd/ttrpc/client.go

@@ -24,6 +24,7 @@ import (
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 	"syscall"
 	"syscall"
+	"time"
 
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/proto"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
@@ -86,6 +87,10 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int
 		cresp = &Response{}
 		cresp = &Response{}
 	)
 	)
 
 
+	if dl, ok := ctx.Deadline(); ok {
+		creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds()
+	}
+
 	if err := c.dispatch(ctx, creq, cresp); err != nil {
 	if err := c.dispatch(ctx, creq, cresp); err != nil {
 		return err
 		return err
 	}
 	}
@@ -104,6 +109,7 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int
 func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
 func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
 	errs := make(chan error, 1)
 	errs := make(chan error, 1)
 	call := &callRequest{
 	call := &callRequest{
+		ctx:  ctx,
 		req:  req,
 		req:  req,
 		resp: resp,
 		resp: resp,
 		errs: errs,
 		errs: errs,

+ 15 - 0
vendor/github.com/containerd/ttrpc/server.go

@@ -414,6 +414,9 @@ func (c *serverConn) run(sctx context.Context) {
 		case request := <-requests:
 		case request := <-requests:
 			active++
 			active++
 			go func(id uint32) {
 			go func(id uint32) {
+				ctx, cancel := getRequestContext(ctx, request.req)
+				defer cancel()
+
 				p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload)
 				p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload)
 				resp := &Response{
 				resp := &Response{
 					Status:  status.Proto(),
 					Status:  status.Proto(),
@@ -454,3 +457,15 @@ func (c *serverConn) run(sctx context.Context) {
 		}
 		}
 	}
 	}
 }
 }
+
+var noopFunc = func() {}
+
+func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) {
+	cancel = noopFunc
+	if req.TimeoutNano == 0 {
+		return ctx, cancel
+	}
+
+	ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano))
+	return ctx, cancel
+}

+ 4 - 3
vendor/github.com/containerd/ttrpc/types.go

@@ -23,9 +23,10 @@ import (
 )
 )
 
 
 type Request struct {
 type Request struct {
-	Service string `protobuf:"bytes,1,opt,name=service,proto3"`
-	Method  string `protobuf:"bytes,2,opt,name=method,proto3"`
-	Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
+	Service     string `protobuf:"bytes,1,opt,name=service,proto3"`
+	Method      string `protobuf:"bytes,2,opt,name=method,proto3"`
+	Payload     []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
+	TimeoutNano int64  `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
 }
 }
 
 
 func (r *Request) Reset()         { *r = Request{} }
 func (r *Request) Reset()         { *r = Request{} }

+ 27 - 0
vendor/github.com/gofrs/flock/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2015, Tim Heckman
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of linode-netint nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 40 - 0
vendor/github.com/gofrs/flock/README.md

@@ -0,0 +1,40 @@
+# flock
+[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock)
+[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock)
+[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE)
+
+`flock` implements a thread-safe sync.Locker interface for file locking. It also
+includes a non-blocking TryLock() function to allow locking without blocking execution.
+
+## License
+`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.
+
+## Go Compatibility
+This package makes use of the `context` package that was introduced in Go 1.7. As such, this
+package has an implicit dependency on Go 1.7+.
+
+## Installation
+```
+go get -u github.com/gofrs/flock
+```
+
+## Usage
+```Go
+import "github.com/gofrs/flock"
+
+fileLock := flock.New("/var/lock/go-lock.lock")
+
+locked, err := fileLock.TryLock()
+
+if err != nil {
+	// handle locking error
+}
+
+if locked {
+	// do work
+	fileLock.Unlock()
+}
+```
+
+For more detailed usage information take a look at the package API docs on
+[GoDoc](https://godoc.org/github.com/gofrs/flock).

+ 127 - 0
vendor/github.com/gofrs/flock/flock.go

@@ -0,0 +1,127 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// Package flock implements a thread-safe sync.Locker interface for file locking.
+// It also includes a non-blocking TryLock() function to allow locking
+// without blocking execution.
+//
+// Package flock is released under the BSD 3-Clause License. See the LICENSE file
+// for more details.
+//
+// While using this library, remember that the locking behaviors are not
+// guaranteed to be the same on each platform. For example, some UNIX-like
+// operating systems will transparently convert a shared lock to an exclusive
+// lock. If you Unlock() the flock from a location where you believe that you
+// have the shared lock, you may accidently drop the exclusive lock.
+package flock
+
+import (
+	"context"
+	"os"
+	"sync"
+	"time"
+)
+
+// Flock is the struct type to handle file locking. All fields are unexported,
+// with access to some of the fields provided by getter methods (Path() and Locked()).
+type Flock struct {
+	path string
+	m    sync.RWMutex
+	fh   *os.File
+	l    bool
+	r    bool
+}
+
+// New returns a new instance of *Flock. The only parameter
+// it takes is the path to the desired lockfile.
+func New(path string) *Flock {
+	return &Flock{path: path}
+}
+
+// NewFlock returns a new instance of *Flock. The only parameter
+// it takes is the path to the desired lockfile.
+//
+// Deprecated: Use New instead.
+func NewFlock(path string) *Flock {
+	return New(path)
+}
+
+// Close is equivalent to calling Unlock.
+//
+// This will release the lock and close the underlying file descriptor.
+// It will not remove the file from disk, that's up to your application.
+func (f *Flock) Close() error {
+	return f.Unlock()
+}
+
+// Path returns the path as provided in NewFlock().
+func (f *Flock) Path() string {
+	return f.path
+}
+
+// Locked returns the lock state (locked: true, unlocked: false).
+//
+// Warning: by the time you use the returned value, the state may have changed.
+func (f *Flock) Locked() bool {
+	f.m.RLock()
+	defer f.m.RUnlock()
+	return f.l
+}
+
+// RLocked returns the read lock state (locked: true, unlocked: false).
+//
+// Warning: by the time you use the returned value, the state may have changed.
+func (f *Flock) RLocked() bool {
+	f.m.RLock()
+	defer f.m.RUnlock()
+	return f.r
+}
+
+func (f *Flock) String() string {
+	return f.path
+}
+
+// TryLockContext repeatedly tries to take an exclusive lock until one of the
+// conditions is met: TryLock succeeds, TryLock fails with error, or Context
+// Done channel is closed.
+func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
+	return tryCtx(f.TryLock, ctx, retryDelay)
+}
+
+// TryRLockContext repeatedly tries to take a shared lock until one of the
+// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
+// Done channel is closed.
+func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
+	return tryCtx(f.TryRLock, ctx, retryDelay)
+}
+
+func tryCtx(fn func() (bool, error), ctx context.Context, retryDelay time.Duration) (bool, error) {
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+	for {
+		if ok, err := fn(); ok || err != nil {
+			return ok, err
+		}
+		select {
+		case <-ctx.Done():
+			return false, ctx.Err()
+		case <-time.After(retryDelay):
+			// try again
+		}
+	}
+}
+
+func (f *Flock) setFh() error {
+	// open a new os.File instance
+	// create it if it doesn't exist, and open the file read-only.
+	fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600))
+	if err != nil {
+		return err
+	}
+
+	// set the filehandle on the struct
+	f.fh = fh
+	return nil
+}

+ 195 - 0
vendor/github.com/gofrs/flock/flock_unix.go

@@ -0,0 +1,195 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package flock
+
+import (
+	"os"
+	"syscall"
+)
+
+// Lock is a blocking call to try and take an exclusive file lock. It will wait
+// until it is able to obtain the exclusive file lock. It's recommended that
+// TryLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already exclusive-locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+//
+// If the *Flock has a shared lock (RLock), this may transparently replace the
+// shared lock with an exclusive lock on some UNIX-like operating systems. Be
+// careful when using exclusive locks in conjunction with shared locks
+// (RLock()), because calling Unlock() may accidentally release the exclusive
+// lock that was once a shared lock.
+func (f *Flock) Lock() error {
+	return f.lock(&f.l, syscall.LOCK_EX)
+}
+
+// RLock is a blocking call to try and take a shared file lock. It will wait
+// until it is able to obtain the shared file lock. It's recommended that
+// TryRLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already shared-locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+func (f *Flock) RLock() error {
+	return f.lock(&f.r, syscall.LOCK_SH)
+}
+
+func (f *Flock) lock(locked *bool, flag int) error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return err
+		}
+	}
+
+	if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+		shouldRetry, reopenErr := f.reopenFDOnError(err)
+		if reopenErr != nil {
+			return reopenErr
+		}
+
+		if !shouldRetry {
+			return err
+		}
+
+		if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+			return err
+		}
+	}
+
+	*locked = true
+	return nil
+}
+
+// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
+// while it is running the Locked() and RLocked() functions will be blocked.
+//
+// This function short-circuits if we are unlocked already. If not, it calls
+// syscall.LOCK_UN on the file and closes the file descriptor. It does not
+// remove the file from disk. It's up to your application to do.
+//
+// Please note, if your shared lock became an exclusive lock this may
+// unintentionally drop the exclusive lock if called by the consumer that
+// believes they have a shared lock. Please see Lock() for more details.
+func (f *Flock) Unlock() error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	// if we aren't locked or if the lockfile instance is nil
+	// just return a nil error because we are unlocked
+	if (!f.l && !f.r) || f.fh == nil {
+		return nil
+	}
+
+	// mark the file as unlocked
+	if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
+		return err
+	}
+
+	f.fh.Close()
+
+	f.l = false
+	f.r = false
+	f.fh = nil
+
+	return nil
+}
+
+// TryLock is the preferred function for taking an exclusive file lock. This
+// function takes an RW-mutex lock before it tries to lock the file, so there is
+// the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the exclusive
+// file lock, the function will return false instead of waiting for the lock. If
+// we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+	return f.try(&f.l, syscall.LOCK_EX)
+}
+
+// TryRLock is the preferred function for taking a shared file lock. This
+// function takes an RW-mutex lock before it tries to lock the file, so there is
+// the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the shared file
+// lock, the function will return false instead of waiting for the lock. If we
+// get the lock, we also set the *Flock instance as being share-locked.
+func (f *Flock) TryRLock() (bool, error) {
+	return f.try(&f.r, syscall.LOCK_SH)
+}
+
+func (f *Flock) try(locked *bool, flag int) (bool, error) {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return true, nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return false, err
+		}
+	}
+
+	var retried bool
+retry:
+	err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
+
+	switch err {
+	case syscall.EWOULDBLOCK:
+		return false, nil
+	case nil:
+		*locked = true
+		return true, nil
+	}
+	if !retried {
+		if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
+			return false, reopenErr
+		} else if shouldRetry {
+			retried = true
+			goto retry
+		}
+	}
+
+	return false, err
+}
+
+// reopenFDOnError determines whether we should reopen the file handle
+// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
+//  Since Linux 3.4 (commit 55725513)
+//  Probably NFSv4 where flock() is emulated by fcntl().
+func (f *Flock) reopenFDOnError(err error) (bool, error) {
+	if err != syscall.EIO && err != syscall.EBADF {
+		return false, nil
+	}
+	if st, err := f.fh.Stat(); err == nil {
+		// if the file is able to be read and written
+		if st.Mode()&0600 == 0600 {
+			f.fh.Close()
+			f.fh = nil
+
+			// reopen in read-write mode and set the filehandle
+			fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
+			if err != nil {
+				return false, err
+			}
+			f.fh = fh
+			return true, nil
+		}
+	}
+
+	return false, nil
+}

+ 76 - 0
vendor/github.com/gofrs/flock/flock_winapi.go

@@ -0,0 +1,76 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package flock
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32, _         = syscall.LoadLibrary("kernel32.dll")
+	procLockFileEx, _   = syscall.GetProcAddress(kernel32, "LockFileEx")
+	procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
+)
+
+const (
+	winLockfileFailImmediately = 0x00000001
+	winLockfileExclusiveLock   = 0x00000002
+	winLockfileSharedLock      = 0x00000000
+)
+
+// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows
+// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
+//
+// > The function requests an exclusive lock. Otherwise, it requests a shared
+// > lock.
+//
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+
+func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
+	r1, _, errNo := syscall.Syscall6(
+		uintptr(procLockFileEx),
+		6,
+		uintptr(handle),
+		uintptr(flags),
+		uintptr(reserved),
+		uintptr(numberOfBytesToLockLow),
+		uintptr(numberOfBytesToLockHigh),
+		uintptr(unsafe.Pointer(offset)))
+
+	if r1 != 1 {
+		if errNo == 0 {
+			return false, syscall.EINVAL
+		}
+
+		return false, errNo
+	}
+
+	return true, 0
+}
+
+func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
+	r1, _, errNo := syscall.Syscall6(
+		uintptr(procUnlockFileEx),
+		5,
+		uintptr(handle),
+		uintptr(reserved),
+		uintptr(numberOfBytesToLockLow),
+		uintptr(numberOfBytesToLockHigh),
+		uintptr(unsafe.Pointer(offset)),
+		0)
+
+	if r1 != 1 {
+		if errNo == 0 {
+			return false, syscall.EINVAL
+		}
+
+		return false, errNo
+	}
+
+	return true, 0
+}

+ 140 - 0
vendor/github.com/gofrs/flock/flock_windows.go

@@ -0,0 +1,140 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+package flock
+
+import (
+	"syscall"
+)
+
+// ErrorLockViolation is the error code returned from the Windows syscall when a
+// lock would block and you ask to fail immediately.
+const ErrorLockViolation syscall.Errno = 0x21 // 33
+
+// Lock is a blocking call to try and take an exclusive file lock. It will wait
+// until it is able to obtain the exclusive file lock. It's recommended that
+// TryLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+func (f *Flock) Lock() error {
+	return f.lock(&f.l, winLockfileExclusiveLock)
+}
+
+// RLock is a blocking call to try and take a shared file lock. It will wait
+// until it is able to obtain the shared file lock. It's recommended that
+// TryRLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+func (f *Flock) RLock() error {
+	return f.lock(&f.r, winLockfileSharedLock)
+}
+
+func (f *Flock) lock(locked *bool, flag uint32) error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return err
+		}
+	}
+
+	if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
+		return errNo
+	}
+
+	*locked = true
+	return nil
+}
+
+// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
+// while it is running the Locked() and RLocked() functions will be blocked.
+//
+// This function short-circuits if we are unlocked already. If not, it calls
+// UnlockFileEx() on the file and closes the file descriptor. It does not remove
+// the file from disk. It's up to your application to do.
+func (f *Flock) Unlock() error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	// if we aren't locked or if the lockfile instance is nil
+	// just return a nil error because we are unlocked
+	if (!f.l && !f.r) || f.fh == nil {
+		return nil
+	}
+
+	// mark the file as unlocked
+	if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
+		return errNo
+	}
+
+	f.fh.Close()
+
+	f.l = false
+	f.r = false
+	f.fh = nil
+
+	return nil
+}
+
+// TryLock is the preferred function for taking an exclusive file lock. This
+// function does take a RW-mutex lock before it tries to lock the file, so there
+// is the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the exclusive
+// file lock, the function will return false instead of waiting for the lock. If
+// we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+	return f.try(&f.l, winLockfileExclusiveLock)
+}
+
+// TryRLock is the preferred function for taking a shared file lock. This
+// function does take a RW-mutex lock before it tries to lock the file, so there
+// is the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the shared file
+// lock, the function will return false instead of waiting for the lock. If we
+// get the lock, we also set the *Flock instance as being shared-locked.
+func (f *Flock) TryRLock() (bool, error) {
+	return f.try(&f.r, winLockfileSharedLock)
+}
+
+func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return true, nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return false, err
+		}
+	}
+
+	_, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
+
+	if errNo > 0 {
+		if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
+			return false, nil
+		}
+
+		return false, errNo
+	}
+
+	*locked = true
+
+	return true, nil
+}

+ 2 - 3
vendor/github.com/gogo/protobuf/LICENSE

@@ -1,7 +1,6 @@
-Protocol Buffers for Go with Gadgets
-
 Copyright (c) 2013, The GoGo Authors. All rights reserved.
 Copyright (c) 2013, The GoGo Authors. All rights reserved.
-http://github.com/gogo/protobuf
+
+Protocol Buffers for Go with Gadgets
 
 
 Go support for Protocol Buffers - Google's data interchange format
 Go support for Protocol Buffers - Google's data interchange format
 
 

+ 60 - 20
vendor/github.com/gogo/protobuf/README

@@ -1,13 +1,18 @@
-GoGoProtobuf http://github.com/gogo/protobuf extends 
+Protocol Buffers for Go with Gadgets
+
+GoGoProtobuf http://github.com/gogo/protobuf extends
 GoProtobuf http://github.com/golang/protobuf
 GoProtobuf http://github.com/golang/protobuf
 
 
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+
 # Go support for Protocol Buffers
 # Go support for Protocol Buffers
 
 
 Google's data interchange format.
 Google's data interchange format.
 Copyright 2010 The Go Authors.
 Copyright 2010 The Go Authors.
 https://github.com/golang/protobuf
 https://github.com/golang/protobuf
 
 
-This package and the code it generates requires at least Go 1.4.
+This package and the code it generates requires at least Go 1.6.
 
 
 This software implements Go bindings for protocol buffers.  For
 This software implements Go bindings for protocol buffers.  For
 information about protocol buffers themselves, see
 information about protocol buffers themselves, see
@@ -58,6 +63,45 @@ parameter set to the directory you want to output the Go code to.
 The generated files will be suffixed .pb.go.  See the Test code below
 The generated files will be suffixed .pb.go.  See the Test code below
 for an example using such a file.
 for an example using such a file.
 
 
+## Packages and input paths ##
+
+The protocol buffer language has a concept of "packages" which does not
+correspond well to the Go notion of packages. In generated Go code,
+each source `.proto` file is associated with a single Go package. The
+name and import path for this package is specified with the `go_package`
+proto option:
+
+	option go_package = "github.com/gogo/protobuf/types";
+
+The protocol buffer compiler will attempt to derive a package name and
+import path if a `go_package` option is not present, but it is
+best to always specify one explicitly.
+
+There is a one-to-one relationship between source `.proto` files and
+generated `.pb.go` files, but any number of `.pb.go` files may be
+contained in the same Go package.
+
+The output name of a generated file is produced by replacing the
+`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
+However, the output directory is selected in one of two ways.  Let
+us say we have `inputs/x.proto` with a `go_package` option of
+`github.com/golang/protobuf/p`. The corresponding output file may
+be:
+
+- Relative to the import path:
+
+	protoc --gogo_out=. inputs/x.proto
+	# writes ./github.com/gogo/protobuf/p/x.pb.go
+
+  (This can work well with `--gogo_out=$GOPATH`.)
+
+- Relative to the input file:
+
+	protoc --gogo_out=paths=source_relative:. inputs/x.proto
+	# generate ./inputs/x.pb.go
+
+## Generated code ##
+
 The package comment for the proto library contains text describing
 The package comment for the proto library contains text describing
 the interface provided in Go for protocol buffers. Here is an edited
 the interface provided in Go for protocol buffers. Here is an edited
 version.
 version.
@@ -125,16 +169,13 @@ Consider file test.proto, containing
 ```proto
 ```proto
 	syntax = "proto2";
 	syntax = "proto2";
 	package example;
 	package example;
-	
+
 	enum FOO { X = 17; };
 	enum FOO { X = 17; };
-	
+
 	message Test {
 	message Test {
 	  required string label = 1;
 	  required string label = 1;
 	  optional int32 type = 2 [default=77];
 	  optional int32 type = 2 [default=77];
 	  repeated int64 reps = 3;
 	  repeated int64 reps = 3;
-	  optional group OptionalGroup = 4 {
-	    required string RequiredField = 5;
-	  }
 	}
 	}
 ```
 ```
 
 
@@ -151,13 +192,10 @@ To create and play with a Test object from the example package,
 	)
 	)
 
 
 	func main() {
 	func main() {
-		test := &example.Test {
+		test := &example.Test{
 			Label: proto.String("hello"),
 			Label: proto.String("hello"),
 			Type:  proto.Int32(17),
 			Type:  proto.Int32(17),
 			Reps:  []int64{1, 2, 3},
 			Reps:  []int64{1, 2, 3},
-			Optionalgroup: &example.Test_OptionalGroup {
-				RequiredField: proto.String("good bye"),
-			},
 		}
 		}
 		data, err := proto.Marshal(test)
 		data, err := proto.Marshal(test)
 		if err != nil {
 		if err != nil {
@@ -185,19 +223,23 @@ parameter list separated from the output directory by a colon:
 
 
 	protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto
 	protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto
 
 
-
-- `import_prefix=xxx` - a prefix that is added onto the beginning of
-  all imports. Useful for things like generating protos in a
-  subdirectory, or regenerating vendored protobufs in-place.
-- `import_path=foo/bar` - used as the package if no input files
-  declare `go_package`. If it contains slashes, everything up to the
-  rightmost slash is ignored.
+- `paths=(import | source_relative)` - specifies how the paths of
+  generated files are structured. See the "Packages and imports paths"
+  section above. The default is `import`.
 - `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
 - `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
   load. The only plugin in this repo is `grpc`.
   load. The only plugin in this repo is `grpc`.
 - `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
 - `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
   associated with Go package quux/shme.  This is subject to the
   associated with Go package quux/shme.  This is subject to the
   import_prefix parameter.
   import_prefix parameter.
 
 
+The following parameters are deprecated and should not be used:
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+  all imports.
+- `import_path=foo/bar` - used as the package if no input files
+  declare `go_package`. If it contains slashes, everything up to the
+  rightmost slash is ignored.
+
 ## gRPC Support ##
 ## gRPC Support ##
 
 
 If a proto file specifies RPC services, protoc-gen-go can be instructed to
 If a proto file specifies RPC services, protoc-gen-go can be instructed to
@@ -251,8 +293,6 @@ generated code and declare a new package-level constant whose name incorporates
 the latest version number.  Removing a compatibility constant is considered a
 the latest version number.  Removing a compatibility constant is considered a
 breaking change and would be subject to the announcement policy stated above.
 breaking change and would be subject to the announcement policy stated above.
 
 
-## Plugins ##
-
 The `protoc-gen-go/generator` package exposes a plugin interface,
 The `protoc-gen-go/generator` package exposes a plugin interface,
 which is used by the gRPC code generation. This interface is not
 which is used by the gRPC code generation. This interface is not
 supported and is subject to incompatible changes without notice.
 supported and is subject to incompatible changes without notice.

+ 23 - 3
vendor/github.com/gogo/protobuf/Readme.md

@@ -1,6 +1,7 @@
 # Protocol Buffers for Go with Gadgets
 # Protocol Buffers for Go with Gadgets
 
 
 [![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf)
 [![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf)
+[![GoDoc](https://godoc.org/github.com/gogo/protobuf?status.svg)](http://godoc.org/github.com/gogo/protobuf)
 
 
 gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features.
 gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features.
 
 
@@ -45,6 +46,9 @@ These projects use gogoprotobuf:
   - <a href="https://github.com/go-graphite">carbonzipper stack</a>
   - <a href="https://github.com/go-graphite">carbonzipper stack</a>
   - <a href="https://sendgrid.com/">sendgrid</a>
   - <a href="https://sendgrid.com/">sendgrid</a>
   - <a href="https://github.com/zero-os/0-stor">zero-os/0-stor</a>
   - <a href="https://github.com/zero-os/0-stor">zero-os/0-stor</a>
+  - <a href="https://github.com/spacemeshos/go-spacemesh">go-spacemesh</a>
+  - <a href="https://github.com/weaveworks/cortex">cortex</a> - <a href="https://github.com/weaveworks/cortex/blob/fee02a59729d3771ef888f7bf0fd050e1197c56e/pkg/ingester/client/cortex.proto">sample proto file</a>
+  - <a href="http://skywalking.apache.org/">Apache SkyWalking APM</a> - Istio telemetry receiver based on Mixer bypass protocol
 
 
 Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
 Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
 
 
@@ -53,7 +57,13 @@ Please let us know if you are using gogoprotobuf by posting on our <a href="http
   - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
   - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
   - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a>
   - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a>
   - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
   - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
-
+  - <a href="http://agniva.me/go/2017/11/18/gogoproto.html">Go faster with gogoproto - Agniva De Sarker</a>
+  - <a href="https://www.youtube.com/watch?v=CY9T020HLP8">Evolution of protobuf (Gource Visualization) - Landon Wilkins</a>
+  - <a href="https://fosdem.org/2018/schedule/event/gopherjs/">Creating GopherJS Apps with gRPC-Web - Johan Brandhorst</a>
+  - <a href="https://jbrandhorst.com/post/gogoproto/">So you want to use GoGo Protobuf - Johan Brandhorst</a>
+  - <a href="https://jbrandhorst.com/post/grpc-errors/">Advanced gRPC Error Usage - Johan Brandhorst</a>
+  - <a href="https://www.udemy.com/grpc-golang/?couponCode=GITHUB10">gRPC Golang Course on Udemy - Stephane Maarek</a>
+  
 ## Getting Started
 ## Getting Started
 
 
 There are several ways to use gogoprotobuf, but for all you need to install go and protoc.
 There are several ways to use gogoprotobuf, but for all you need to install go and protoc.
@@ -65,7 +75,8 @@ After that you can choose:
 
 
 ### Installation
 ### Installation
 
 
-To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Latest patch versions of Go 1.8, 1.9 and 1.10 are continuously tested.
+To install it, you must first have Go (at least version 1.6.3 or 1.9 if you are using gRPC) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). 
+Latest patch versions of 1.9 and 1.10 are continuously tested.
 
 
 Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
 Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
 Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested.
 Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested.
@@ -114,7 +125,7 @@ To use proto files from "google/protobuf" you need to add additional args to pro
     Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
     Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
     Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. \
     Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. \
     myproto.proto
     myproto.proto
-    
+
 Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen".
 Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen".
 
 
 ### Most Speed and most customization
 ### Most Speed and most customization
@@ -137,3 +148,12 @@ It works the same as golang/protobuf, simply specify the plugin.
 Here is an example using gofast:
 Here is an example using gofast:
 
 
     protoc --gofast_out=plugins=grpc:. my.proto
     protoc --gofast_out=plugins=grpc:. my.proto
+
+See [https://github.com/gogo/grpc-example](https://github.com/gogo/grpc-example) for an example of using gRPC with gogoprotobuf and the wider grpc-ecosystem.
+
+
+## License
+This software is licensed under the 3-Clause BSD License
+("BSD License 2.0", "Revised BSD License", "New BSD License", or "Modified BSD License").
+
+  

+ 1 - 1
vendor/github.com/gogo/protobuf/gogoproto/doc.go

@@ -162,7 +162,7 @@ The most complete way to see examples is to look at
 	github.com/gogo/protobuf/test/thetest.proto
 	github.com/gogo/protobuf/test/thetest.proto
 
 
 Gogoprototest is a seperate project,
 Gogoprototest is a seperate project,
-because we want to keep gogoprotobuf independant of goprotobuf,
+because we want to keep gogoprotobuf independent of goprotobuf,
 but we still want to test it thoroughly.
 but we still want to test it thoroughly.
 
 
 */
 */

+ 230 - 162
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go

@@ -1,20 +1,12 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: gogo.proto
 // source: gogo.proto
 
 
-/*
-Package gogoproto is a generated protocol buffer package.
-
-It is generated from these files:
-	gogo.proto
-
-It has these top-level messages:
-*/
-package gogoproto
+package gogoproto // import "github.com/gogo/protobuf/gogoproto"
 
 
 import proto "github.com/gogo/protobuf/proto"
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import fmt "fmt"
 import math "math"
 import math "math"
-import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = proto.Marshal
@@ -28,7 +20,7 @@ var _ = math.Inf
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 
 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         62001,
 	Field:         62001,
 	Name:          "gogoproto.goproto_enum_prefix",
 	Name:          "gogoproto.goproto_enum_prefix",
@@ -37,7 +29,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoEnumStringer = &proto.ExtensionDesc{
 var E_GoprotoEnumStringer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         62021,
 	Field:         62021,
 	Name:          "gogoproto.goproto_enum_stringer",
 	Name:          "gogoproto.goproto_enum_stringer",
@@ -46,7 +38,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EnumStringer = &proto.ExtensionDesc{
 var E_EnumStringer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         62022,
 	Field:         62022,
 	Name:          "gogoproto.enum_stringer",
 	Name:          "gogoproto.enum_stringer",
@@ -55,7 +47,7 @@ var E_EnumStringer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EnumCustomname = &proto.ExtensionDesc{
 var E_EnumCustomname = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         62023,
 	Field:         62023,
 	Name:          "gogoproto.enum_customname",
 	Name:          "gogoproto.enum_customname",
@@ -64,7 +56,7 @@ var E_EnumCustomname = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Enumdecl = &proto.ExtensionDesc{
 var E_Enumdecl = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         62024,
 	Field:         62024,
 	Name:          "gogoproto.enumdecl",
 	Name:          "gogoproto.enumdecl",
@@ -73,7 +65,7 @@ var E_Enumdecl = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EnumvalueCustomname = &proto.ExtensionDesc{
 var E_EnumvalueCustomname = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.EnumValueOptions)(nil),
+	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         66001,
 	Field:         66001,
 	Name:          "gogoproto.enumvalue_customname",
 	Name:          "gogoproto.enumvalue_customname",
@@ -82,7 +74,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoGettersAll = &proto.ExtensionDesc{
 var E_GoprotoGettersAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63001,
 	Field:         63001,
 	Name:          "gogoproto.goproto_getters_all",
 	Name:          "gogoproto.goproto_getters_all",
@@ -91,7 +83,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
 var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63002,
 	Field:         63002,
 	Name:          "gogoproto.goproto_enum_prefix_all",
 	Name:          "gogoproto.goproto_enum_prefix_all",
@@ -100,7 +92,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoStringerAll = &proto.ExtensionDesc{
 var E_GoprotoStringerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63003,
 	Field:         63003,
 	Name:          "gogoproto.goproto_stringer_all",
 	Name:          "gogoproto.goproto_stringer_all",
@@ -109,7 +101,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_VerboseEqualAll = &proto.ExtensionDesc{
 var E_VerboseEqualAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63004,
 	Field:         63004,
 	Name:          "gogoproto.verbose_equal_all",
 	Name:          "gogoproto.verbose_equal_all",
@@ -118,7 +110,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_FaceAll = &proto.ExtensionDesc{
 var E_FaceAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63005,
 	Field:         63005,
 	Name:          "gogoproto.face_all",
 	Name:          "gogoproto.face_all",
@@ -127,7 +119,7 @@ var E_FaceAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GostringAll = &proto.ExtensionDesc{
 var E_GostringAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63006,
 	Field:         63006,
 	Name:          "gogoproto.gostring_all",
 	Name:          "gogoproto.gostring_all",
@@ -136,7 +128,7 @@ var E_GostringAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_PopulateAll = &proto.ExtensionDesc{
 var E_PopulateAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63007,
 	Field:         63007,
 	Name:          "gogoproto.populate_all",
 	Name:          "gogoproto.populate_all",
@@ -145,7 +137,7 @@ var E_PopulateAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_StringerAll = &proto.ExtensionDesc{
 var E_StringerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63008,
 	Field:         63008,
 	Name:          "gogoproto.stringer_all",
 	Name:          "gogoproto.stringer_all",
@@ -154,7 +146,7 @@ var E_StringerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_OnlyoneAll = &proto.ExtensionDesc{
 var E_OnlyoneAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63009,
 	Field:         63009,
 	Name:          "gogoproto.onlyone_all",
 	Name:          "gogoproto.onlyone_all",
@@ -163,7 +155,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EqualAll = &proto.ExtensionDesc{
 var E_EqualAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63013,
 	Field:         63013,
 	Name:          "gogoproto.equal_all",
 	Name:          "gogoproto.equal_all",
@@ -172,7 +164,7 @@ var E_EqualAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_DescriptionAll = &proto.ExtensionDesc{
 var E_DescriptionAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63014,
 	Field:         63014,
 	Name:          "gogoproto.description_all",
 	Name:          "gogoproto.description_all",
@@ -181,7 +173,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_TestgenAll = &proto.ExtensionDesc{
 var E_TestgenAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63015,
 	Field:         63015,
 	Name:          "gogoproto.testgen_all",
 	Name:          "gogoproto.testgen_all",
@@ -190,7 +182,7 @@ var E_TestgenAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_BenchgenAll = &proto.ExtensionDesc{
 var E_BenchgenAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63016,
 	Field:         63016,
 	Name:          "gogoproto.benchgen_all",
 	Name:          "gogoproto.benchgen_all",
@@ -199,7 +191,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_MarshalerAll = &proto.ExtensionDesc{
 var E_MarshalerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63017,
 	Field:         63017,
 	Name:          "gogoproto.marshaler_all",
 	Name:          "gogoproto.marshaler_all",
@@ -208,7 +200,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_UnmarshalerAll = &proto.ExtensionDesc{
 var E_UnmarshalerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63018,
 	Field:         63018,
 	Name:          "gogoproto.unmarshaler_all",
 	Name:          "gogoproto.unmarshaler_all",
@@ -217,7 +209,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_StableMarshalerAll = &proto.ExtensionDesc{
 var E_StableMarshalerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63019,
 	Field:         63019,
 	Name:          "gogoproto.stable_marshaler_all",
 	Name:          "gogoproto.stable_marshaler_all",
@@ -226,7 +218,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_SizerAll = &proto.ExtensionDesc{
 var E_SizerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63020,
 	Field:         63020,
 	Name:          "gogoproto.sizer_all",
 	Name:          "gogoproto.sizer_all",
@@ -235,7 +227,7 @@ var E_SizerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
 var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63021,
 	Field:         63021,
 	Name:          "gogoproto.goproto_enum_stringer_all",
 	Name:          "gogoproto.goproto_enum_stringer_all",
@@ -244,7 +236,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EnumStringerAll = &proto.ExtensionDesc{
 var E_EnumStringerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63022,
 	Field:         63022,
 	Name:          "gogoproto.enum_stringer_all",
 	Name:          "gogoproto.enum_stringer_all",
@@ -253,7 +245,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
 var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63023,
 	Field:         63023,
 	Name:          "gogoproto.unsafe_marshaler_all",
 	Name:          "gogoproto.unsafe_marshaler_all",
@@ -262,7 +254,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
 var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63024,
 	Field:         63024,
 	Name:          "gogoproto.unsafe_unmarshaler_all",
 	Name:          "gogoproto.unsafe_unmarshaler_all",
@@ -271,7 +263,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
 var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63025,
 	Field:         63025,
 	Name:          "gogoproto.goproto_extensions_map_all",
 	Name:          "gogoproto.goproto_extensions_map_all",
@@ -280,7 +272,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
 var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63026,
 	Field:         63026,
 	Name:          "gogoproto.goproto_unrecognized_all",
 	Name:          "gogoproto.goproto_unrecognized_all",
@@ -289,7 +281,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GogoprotoImport = &proto.ExtensionDesc{
 var E_GogoprotoImport = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63027,
 	Field:         63027,
 	Name:          "gogoproto.gogoproto_import",
 	Name:          "gogoproto.gogoproto_import",
@@ -298,7 +290,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{
 }
 }
 
 
 var E_ProtosizerAll = &proto.ExtensionDesc{
 var E_ProtosizerAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63028,
 	Field:         63028,
 	Name:          "gogoproto.protosizer_all",
 	Name:          "gogoproto.protosizer_all",
@@ -307,7 +299,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_CompareAll = &proto.ExtensionDesc{
 var E_CompareAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63029,
 	Field:         63029,
 	Name:          "gogoproto.compare_all",
 	Name:          "gogoproto.compare_all",
@@ -316,7 +308,7 @@ var E_CompareAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_TypedeclAll = &proto.ExtensionDesc{
 var E_TypedeclAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63030,
 	Field:         63030,
 	Name:          "gogoproto.typedecl_all",
 	Name:          "gogoproto.typedecl_all",
@@ -325,7 +317,7 @@ var E_TypedeclAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_EnumdeclAll = &proto.ExtensionDesc{
 var E_EnumdeclAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63031,
 	Field:         63031,
 	Name:          "gogoproto.enumdecl_all",
 	Name:          "gogoproto.enumdecl_all",
@@ -334,7 +326,7 @@ var E_EnumdeclAll = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoRegistration = &proto.ExtensionDesc{
 var E_GoprotoRegistration = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtendedType:  (*descriptor.FileOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         63032,
 	Field:         63032,
 	Name:          "gogoproto.goproto_registration",
 	Name:          "gogoproto.goproto_registration",
@@ -342,8 +334,35 @@ var E_GoprotoRegistration = &proto.ExtensionDesc{
 	Filename:      "gogo.proto",
 	Filename:      "gogo.proto",
 }
 }
 
 
+var E_MessagenameAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63033,
+	Name:          "gogoproto.messagename_all",
+	Tag:           "varint,63033,opt,name=messagename_all,json=messagenameAll",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63034,
+	Name:          "gogoproto.goproto_sizecache_all",
+	Tag:           "varint,63034,opt,name=goproto_sizecache_all,json=goprotoSizecacheAll",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63035,
+	Name:          "gogoproto.goproto_unkeyed_all",
+	Tag:           "varint,63035,opt,name=goproto_unkeyed_all,json=goprotoUnkeyedAll",
+	Filename:      "gogo.proto",
+}
+
 var E_GoprotoGetters = &proto.ExtensionDesc{
 var E_GoprotoGetters = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64001,
 	Field:         64001,
 	Name:          "gogoproto.goproto_getters",
 	Name:          "gogoproto.goproto_getters",
@@ -352,7 +371,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoStringer = &proto.ExtensionDesc{
 var E_GoprotoStringer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64003,
 	Field:         64003,
 	Name:          "gogoproto.goproto_stringer",
 	Name:          "gogoproto.goproto_stringer",
@@ -361,7 +380,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_VerboseEqual = &proto.ExtensionDesc{
 var E_VerboseEqual = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64004,
 	Field:         64004,
 	Name:          "gogoproto.verbose_equal",
 	Name:          "gogoproto.verbose_equal",
@@ -370,7 +389,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Face = &proto.ExtensionDesc{
 var E_Face = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64005,
 	Field:         64005,
 	Name:          "gogoproto.face",
 	Name:          "gogoproto.face",
@@ -379,7 +398,7 @@ var E_Face = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Gostring = &proto.ExtensionDesc{
 var E_Gostring = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64006,
 	Field:         64006,
 	Name:          "gogoproto.gostring",
 	Name:          "gogoproto.gostring",
@@ -388,7 +407,7 @@ var E_Gostring = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Populate = &proto.ExtensionDesc{
 var E_Populate = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64007,
 	Field:         64007,
 	Name:          "gogoproto.populate",
 	Name:          "gogoproto.populate",
@@ -397,7 +416,7 @@ var E_Populate = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Stringer = &proto.ExtensionDesc{
 var E_Stringer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         67008,
 	Field:         67008,
 	Name:          "gogoproto.stringer",
 	Name:          "gogoproto.stringer",
@@ -406,7 +425,7 @@ var E_Stringer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Onlyone = &proto.ExtensionDesc{
 var E_Onlyone = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64009,
 	Field:         64009,
 	Name:          "gogoproto.onlyone",
 	Name:          "gogoproto.onlyone",
@@ -415,7 +434,7 @@ var E_Onlyone = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Equal = &proto.ExtensionDesc{
 var E_Equal = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64013,
 	Field:         64013,
 	Name:          "gogoproto.equal",
 	Name:          "gogoproto.equal",
@@ -424,7 +443,7 @@ var E_Equal = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Description = &proto.ExtensionDesc{
 var E_Description = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64014,
 	Field:         64014,
 	Name:          "gogoproto.description",
 	Name:          "gogoproto.description",
@@ -433,7 +452,7 @@ var E_Description = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Testgen = &proto.ExtensionDesc{
 var E_Testgen = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64015,
 	Field:         64015,
 	Name:          "gogoproto.testgen",
 	Name:          "gogoproto.testgen",
@@ -442,7 +461,7 @@ var E_Testgen = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Benchgen = &proto.ExtensionDesc{
 var E_Benchgen = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64016,
 	Field:         64016,
 	Name:          "gogoproto.benchgen",
 	Name:          "gogoproto.benchgen",
@@ -451,7 +470,7 @@ var E_Benchgen = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Marshaler = &proto.ExtensionDesc{
 var E_Marshaler = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64017,
 	Field:         64017,
 	Name:          "gogoproto.marshaler",
 	Name:          "gogoproto.marshaler",
@@ -460,7 +479,7 @@ var E_Marshaler = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Unmarshaler = &proto.ExtensionDesc{
 var E_Unmarshaler = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64018,
 	Field:         64018,
 	Name:          "gogoproto.unmarshaler",
 	Name:          "gogoproto.unmarshaler",
@@ -469,7 +488,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{
 }
 }
 
 
 var E_StableMarshaler = &proto.ExtensionDesc{
 var E_StableMarshaler = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64019,
 	Field:         64019,
 	Name:          "gogoproto.stable_marshaler",
 	Name:          "gogoproto.stable_marshaler",
@@ -478,7 +497,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Sizer = &proto.ExtensionDesc{
 var E_Sizer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64020,
 	Field:         64020,
 	Name:          "gogoproto.sizer",
 	Name:          "gogoproto.sizer",
@@ -487,7 +506,7 @@ var E_Sizer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_UnsafeMarshaler = &proto.ExtensionDesc{
 var E_UnsafeMarshaler = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64023,
 	Field:         64023,
 	Name:          "gogoproto.unsafe_marshaler",
 	Name:          "gogoproto.unsafe_marshaler",
@@ -496,7 +515,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{
 }
 }
 
 
 var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
 var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64024,
 	Field:         64024,
 	Name:          "gogoproto.unsafe_unmarshaler",
 	Name:          "gogoproto.unsafe_unmarshaler",
@@ -505,7 +524,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
 var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64025,
 	Field:         64025,
 	Name:          "gogoproto.goproto_extensions_map",
 	Name:          "gogoproto.goproto_extensions_map",
@@ -514,7 +533,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
 }
 }
 
 
 var E_GoprotoUnrecognized = &proto.ExtensionDesc{
 var E_GoprotoUnrecognized = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64026,
 	Field:         64026,
 	Name:          "gogoproto.goproto_unrecognized",
 	Name:          "gogoproto.goproto_unrecognized",
@@ -523,7 +542,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Protosizer = &proto.ExtensionDesc{
 var E_Protosizer = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64028,
 	Field:         64028,
 	Name:          "gogoproto.protosizer",
 	Name:          "gogoproto.protosizer",
@@ -532,7 +551,7 @@ var E_Protosizer = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Compare = &proto.ExtensionDesc{
 var E_Compare = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64029,
 	Field:         64029,
 	Name:          "gogoproto.compare",
 	Name:          "gogoproto.compare",
@@ -541,7 +560,7 @@ var E_Compare = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Typedecl = &proto.ExtensionDesc{
 var E_Typedecl = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         64030,
 	Field:         64030,
 	Name:          "gogoproto.typedecl",
 	Name:          "gogoproto.typedecl",
@@ -549,8 +568,35 @@ var E_Typedecl = &proto.ExtensionDesc{
 	Filename:      "gogo.proto",
 	Filename:      "gogo.proto",
 }
 }
 
 
+var E_Messagename = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64033,
+	Name:          "gogoproto.messagename",
+	Tag:           "varint,64033,opt,name=messagename",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoSizecache = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64034,
+	Name:          "gogoproto.goproto_sizecache",
+	Tag:           "varint,64034,opt,name=goproto_sizecache,json=goprotoSizecache",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnkeyed = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64035,
+	Name:          "gogoproto.goproto_unkeyed",
+	Tag:           "varint,64035,opt,name=goproto_unkeyed,json=goprotoUnkeyed",
+	Filename:      "gogo.proto",
+}
+
 var E_Nullable = &proto.ExtensionDesc{
 var E_Nullable = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         65001,
 	Field:         65001,
 	Name:          "gogoproto.nullable",
 	Name:          "gogoproto.nullable",
@@ -559,7 +605,7 @@ var E_Nullable = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Embed = &proto.ExtensionDesc{
 var E_Embed = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         65002,
 	Field:         65002,
 	Name:          "gogoproto.embed",
 	Name:          "gogoproto.embed",
@@ -568,7 +614,7 @@ var E_Embed = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Customtype = &proto.ExtensionDesc{
 var E_Customtype = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65003,
 	Field:         65003,
 	Name:          "gogoproto.customtype",
 	Name:          "gogoproto.customtype",
@@ -577,7 +623,7 @@ var E_Customtype = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Customname = &proto.ExtensionDesc{
 var E_Customname = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65004,
 	Field:         65004,
 	Name:          "gogoproto.customname",
 	Name:          "gogoproto.customname",
@@ -586,7 +632,7 @@ var E_Customname = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Jsontag = &proto.ExtensionDesc{
 var E_Jsontag = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65005,
 	Field:         65005,
 	Name:          "gogoproto.jsontag",
 	Name:          "gogoproto.jsontag",
@@ -595,7 +641,7 @@ var E_Jsontag = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Moretags = &proto.ExtensionDesc{
 var E_Moretags = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65006,
 	Field:         65006,
 	Name:          "gogoproto.moretags",
 	Name:          "gogoproto.moretags",
@@ -604,7 +650,7 @@ var E_Moretags = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Casttype = &proto.ExtensionDesc{
 var E_Casttype = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65007,
 	Field:         65007,
 	Name:          "gogoproto.casttype",
 	Name:          "gogoproto.casttype",
@@ -613,7 +659,7 @@ var E_Casttype = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Castkey = &proto.ExtensionDesc{
 var E_Castkey = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65008,
 	Field:         65008,
 	Name:          "gogoproto.castkey",
 	Name:          "gogoproto.castkey",
@@ -622,7 +668,7 @@ var E_Castkey = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Castvalue = &proto.ExtensionDesc{
 var E_Castvalue = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*string)(nil),
 	ExtensionType: (*string)(nil),
 	Field:         65009,
 	Field:         65009,
 	Name:          "gogoproto.castvalue",
 	Name:          "gogoproto.castvalue",
@@ -631,7 +677,7 @@ var E_Castvalue = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Stdtime = &proto.ExtensionDesc{
 var E_Stdtime = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         65010,
 	Field:         65010,
 	Name:          "gogoproto.stdtime",
 	Name:          "gogoproto.stdtime",
@@ -640,7 +686,7 @@ var E_Stdtime = &proto.ExtensionDesc{
 }
 }
 
 
 var E_Stdduration = &proto.ExtensionDesc{
 var E_Stdduration = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
 	ExtensionType: (*bool)(nil),
 	ExtensionType: (*bool)(nil),
 	Field:         65011,
 	Field:         65011,
 	Name:          "gogoproto.stdduration",
 	Name:          "gogoproto.stdduration",
@@ -648,6 +694,15 @@ var E_Stdduration = &proto.ExtensionDesc{
 	Filename:      "gogo.proto",
 	Filename:      "gogo.proto",
 }
 }
 
 
+var E_Wktpointer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65012,
+	Name:          "gogoproto.wktpointer",
+	Tag:           "varint,65012,opt,name=wktpointer",
+	Filename:      "gogo.proto",
+}
+
 func init() {
 func init() {
 	proto.RegisterExtension(E_GoprotoEnumPrefix)
 	proto.RegisterExtension(E_GoprotoEnumPrefix)
 	proto.RegisterExtension(E_GoprotoEnumStringer)
 	proto.RegisterExtension(E_GoprotoEnumStringer)
@@ -684,6 +739,9 @@ func init() {
 	proto.RegisterExtension(E_TypedeclAll)
 	proto.RegisterExtension(E_TypedeclAll)
 	proto.RegisterExtension(E_EnumdeclAll)
 	proto.RegisterExtension(E_EnumdeclAll)
 	proto.RegisterExtension(E_GoprotoRegistration)
 	proto.RegisterExtension(E_GoprotoRegistration)
+	proto.RegisterExtension(E_MessagenameAll)
+	proto.RegisterExtension(E_GoprotoSizecacheAll)
+	proto.RegisterExtension(E_GoprotoUnkeyedAll)
 	proto.RegisterExtension(E_GoprotoGetters)
 	proto.RegisterExtension(E_GoprotoGetters)
 	proto.RegisterExtension(E_GoprotoStringer)
 	proto.RegisterExtension(E_GoprotoStringer)
 	proto.RegisterExtension(E_VerboseEqual)
 	proto.RegisterExtension(E_VerboseEqual)
@@ -707,6 +765,9 @@ func init() {
 	proto.RegisterExtension(E_Protosizer)
 	proto.RegisterExtension(E_Protosizer)
 	proto.RegisterExtension(E_Compare)
 	proto.RegisterExtension(E_Compare)
 	proto.RegisterExtension(E_Typedecl)
 	proto.RegisterExtension(E_Typedecl)
+	proto.RegisterExtension(E_Messagename)
+	proto.RegisterExtension(E_GoprotoSizecache)
+	proto.RegisterExtension(E_GoprotoUnkeyed)
 	proto.RegisterExtension(E_Nullable)
 	proto.RegisterExtension(E_Nullable)
 	proto.RegisterExtension(E_Embed)
 	proto.RegisterExtension(E_Embed)
 	proto.RegisterExtension(E_Customtype)
 	proto.RegisterExtension(E_Customtype)
@@ -718,87 +779,94 @@ func init() {
 	proto.RegisterExtension(E_Castvalue)
 	proto.RegisterExtension(E_Castvalue)
 	proto.RegisterExtension(E_Stdtime)
 	proto.RegisterExtension(E_Stdtime)
 	proto.RegisterExtension(E_Stdduration)
 	proto.RegisterExtension(E_Stdduration)
-}
-
-func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
-
-var fileDescriptorGogo = []byte{
-	// 1220 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45,
-	0x10, 0x80, 0x85, 0x48, 0x14, 0x6f, 0xd9, 0x8e, 0xf1, 0xda, 0x98, 0x10, 0x81, 0x08, 0x9c, 0x38,
-	0xd9, 0xa7, 0x08, 0xa5, 0xad, 0xc8, 0x72, 0x2c, 0xc7, 0x4a, 0x84, 0xc1, 0x98, 0x38, 0xbc, 0x0e,
-	0xab, 0xd9, 0xdd, 0xf6, 0x78, 0x60, 0x66, 0x7a, 0x98, 0xe9, 0x89, 0xe2, 0xdc, 0x50, 0x78, 0x08,
-	0x21, 0xde, 0x48, 0x90, 0x90, 0x04, 0x38, 0xf0, 0x7e, 0x86, 0xf7, 0x91, 0x0b, 0x8f, 0x2b, 0xff,
-	0x81, 0x0b, 0x60, 0xde, 0xbe, 0xf9, 0x82, 0x6a, 0xb6, 0x6a, 0xb6, 0x67, 0xbd, 0x52, 0xf7, 0xde,
-	0xc6, 0xeb, 0xfe, 0xbe, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x59, 0x00, 0x5f, 0xf9, 0x6a, 0x3a, 0x49,
-	0x95, 0x56, 0xf5, 0x1a, 0x5e, 0x17, 0x97, 0x07, 0x0f, 0xf9, 0x4a, 0xf9, 0xa1, 0x9c, 0x29, 0xfe,
-	0x6a, 0xe6, 0xeb, 0x33, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, 0xc1,
-	0x04, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0xd7, 0x83, 0xb3, 0xf5, 0x9b, 0xa6, 0x3b,
-	0xe4, 0x34, 0x93, 0xd3, 0x8b, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0x81, 0xab, 0xbf,
-	0x5c, 0x7b, 0xe8, 0x9a, 0xdb, 0x87, 0x56, 0xc7, 0x09, 0xc5, 0xff, 0xad, 0x14, 0xa0, 0x58, 0x85,
-	0xeb, 0x2b, 0xbe, 0x4c, 0xa7, 0x41, 0xec, 0xcb, 0xd4, 0x62, 0xfc, 0x9e, 0x8c, 0x13, 0x86, 0xf1,
-	0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x81, 0x5c, 0x23, 0xd2, 0x94, 0x2c, 0xc1, 0x58,
-	0x21, 0x69, 0xe5, 0x99, 0x56, 0x51, 0xec, 0x45, 0xd2, 0xa2, 0xf9, 0xb1, 0xd0, 0xd4, 0x56, 0xf7,
-	0x23, 0xb6, 0x50, 0x52, 0x42, 0xc0, 0x10, 0x7e, 0xd2, 0x96, 0xad, 0xd0, 0x62, 0xf8, 0x89, 0x02,
-	0x29, 0xd7, 0x8b, 0xd3, 0x30, 0x89, 0xd7, 0x67, 0xbc, 0x30, 0x97, 0x66, 0x24, 0xb7, 0xf6, 0xf5,
-	0x9c, 0xc6, 0x65, 0x2c, 0xfb, 0xf9, 0xfc, 0x9e, 0x22, 0x9c, 0x89, 0x52, 0x60, 0xc4, 0x64, 0x54,
-	0xd1, 0x97, 0x5a, 0xcb, 0x34, 0x6b, 0x78, 0x61, 0xbf, 0xf0, 0x8e, 0x07, 0x61, 0x69, 0xbc, 0xb0,
-	0x55, 0xad, 0xe2, 0x52, 0x87, 0x9c, 0x0f, 0x43, 0xb1, 0x06, 0x37, 0xf4, 0x79, 0x2a, 0x1c, 0x9c,
-	0x17, 0xc9, 0x39, 0xb9, 0xeb, 0xc9, 0x40, 0xed, 0x0a, 0xf0, 0xe7, 0x65, 0x2d, 0x1d, 0x9c, 0xaf,
-	0x93, 0xb3, 0x4e, 0x2c, 0x97, 0x14, 0x8d, 0x27, 0x61, 0xfc, 0x8c, 0x4c, 0x9b, 0x2a, 0x93, 0x0d,
-	0xf9, 0x68, 0xee, 0x85, 0x0e, 0xba, 0x4b, 0xa4, 0x1b, 0x23, 0x70, 0x11, 0x39, 0x74, 0x1d, 0x81,
-	0xa1, 0x75, 0xaf, 0x25, 0x1d, 0x14, 0x97, 0x49, 0xb1, 0x0f, 0xd7, 0x23, 0x3a, 0x0f, 0x23, 0xbe,
-	0xea, 0xdc, 0x92, 0x03, 0x7e, 0x85, 0xf0, 0x61, 0x66, 0x48, 0x91, 0xa8, 0x24, 0x0f, 0x3d, 0xed,
-	0x12, 0xc1, 0x1b, 0xac, 0x60, 0x86, 0x14, 0x03, 0xa4, 0xf5, 0x4d, 0x56, 0x64, 0x46, 0x3e, 0xe7,
-	0x60, 0x58, 0xc5, 0xe1, 0xa6, 0x8a, 0x5d, 0x82, 0x78, 0x8b, 0x0c, 0x40, 0x08, 0x0a, 0x66, 0xa1,
-	0xe6, 0x5a, 0x88, 0xb7, 0xb7, 0x78, 0x7b, 0x70, 0x05, 0x96, 0x60, 0x8c, 0x1b, 0x54, 0xa0, 0x62,
-	0x07, 0xc5, 0x3b, 0xa4, 0xd8, 0x6f, 0x60, 0x74, 0x1b, 0x5a, 0x66, 0xda, 0x97, 0x2e, 0x92, 0x77,
-	0xf9, 0x36, 0x08, 0xa1, 0x54, 0x36, 0x65, 0xdc, 0xda, 0x70, 0x33, 0xbc, 0xc7, 0xa9, 0x64, 0x06,
-	0x15, 0x0b, 0x30, 0x1a, 0x79, 0x69, 0xb6, 0xe1, 0x85, 0x4e, 0xe5, 0x78, 0x9f, 0x1c, 0x23, 0x25,
-	0x44, 0x19, 0xc9, 0xe3, 0x41, 0x34, 0x1f, 0x70, 0x46, 0x0c, 0x8c, 0xb6, 0x5e, 0xa6, 0xbd, 0x66,
-	0x28, 0x1b, 0x83, 0xd8, 0x3e, 0xe4, 0xad, 0xd7, 0x61, 0x97, 0x4d, 0xe3, 0x2c, 0xd4, 0xb2, 0xe0,
-	0x9c, 0x93, 0xe6, 0x23, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x00, 0xdc, 0xd8, 0x77, 0x4c, 0x38, 0xc8,
-	0x3e, 0x26, 0xd9, 0x54, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0x27, 0xdc, 0x12, 0x64, 0x8f,
-	0x6b, 0x05, 0x26, 0xf3, 0x38, 0xf3, 0xd6, 0x07, 0xcb, 0xda, 0xa7, 0x9c, 0xb5, 0x0e, 0x5b, 0xc9,
-	0xda, 0x29, 0x98, 0x22, 0xe3, 0x60, 0x75, 0xfd, 0x8c, 0x1b, 0x6b, 0x87, 0x5e, 0xab, 0x56, 0xf7,
-	0x21, 0x38, 0x58, 0xa6, 0xf3, 0xac, 0x96, 0x71, 0x86, 0x4c, 0x23, 0xf2, 0x12, 0x07, 0xf3, 0x55,
-	0x32, 0x73, 0xc7, 0x5f, 0x2c, 0x05, 0xcb, 0x5e, 0x82, 0xf2, 0xfb, 0xe1, 0x00, 0xcb, 0xf3, 0x38,
-	0x95, 0x2d, 0xe5, 0xc7, 0xc1, 0x39, 0xd9, 0x76, 0x50, 0x7f, 0xde, 0x53, 0xaa, 0x35, 0x03, 0x47,
-	0xf3, 0x09, 0xb8, 0xae, 0x3c, 0xab, 0x34, 0x82, 0x28, 0x51, 0xa9, 0xb6, 0x18, 0xbf, 0xe0, 0x4a,
-	0x95, 0xdc, 0x89, 0x02, 0x13, 0x8b, 0xb0, 0xbf, 0xf8, 0xd3, 0xf5, 0x91, 0xfc, 0x92, 0x44, 0xa3,
-	0x5d, 0x8a, 0x1a, 0x47, 0x4b, 0x45, 0x89, 0x97, 0xba, 0xf4, 0xbf, 0xaf, 0xb8, 0x71, 0x10, 0x42,
-	0x8d, 0x43, 0x6f, 0x26, 0x12, 0xa7, 0xbd, 0x83, 0xe1, 0x6b, 0x6e, 0x1c, 0xcc, 0x90, 0x82, 0x0f,
-	0x0c, 0x0e, 0x8a, 0x6f, 0x58, 0xc1, 0x0c, 0x2a, 0xee, 0xe9, 0x0e, 0xda, 0x54, 0xfa, 0x41, 0xa6,
-	0x53, 0x0f, 0x57, 0x5b, 0x54, 0xdf, 0x6e, 0x55, 0x0f, 0x61, 0xab, 0x06, 0x2a, 0x4e, 0xc2, 0x58,
-	0xcf, 0x11, 0xa3, 0x7e, 0xcb, 0x2e, 0xdb, 0xb2, 0xcc, 0x32, 0xcf, 0x2f, 0x85, 0x8f, 0x6d, 0x53,
-	0x33, 0xaa, 0x9e, 0x30, 0xc4, 0x9d, 0x58, 0xf7, 0xea, 0x39, 0xc0, 0x2e, 0x3b, 0xbf, 0x5d, 0x96,
-	0xbe, 0x72, 0x0c, 0x10, 0xc7, 0x61, 0xb4, 0x72, 0x06, 0xb0, 0xab, 0x1e, 0x27, 0xd5, 0x88, 0x79,
-	0x04, 0x10, 0x87, 0x61, 0x0f, 0xce, 0x73, 0x3b, 0xfe, 0x04, 0xe1, 0xc5, 0x72, 0x71, 0x14, 0x86,
-	0x78, 0x8e, 0xdb, 0xd1, 0x27, 0x09, 0x2d, 0x11, 0xc4, 0x79, 0x86, 0xdb, 0xf1, 0xa7, 0x18, 0x67,
-	0x04, 0x71, 0xf7, 0x14, 0x7e, 0xf7, 0xcc, 0x1e, 0xea, 0xc3, 0x9c, 0xbb, 0x59, 0xd8, 0x47, 0xc3,
-	0xdb, 0x4e, 0x3f, 0x4d, 0x5f, 0xce, 0x84, 0xb8, 0x03, 0xf6, 0x3a, 0x26, 0xfc, 0x59, 0x42, 0x3b,
-	0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x03, 0xdb, 0x8e, 0x3f, 0x47, 0xb8, 0x49, 0x61, 0xe8, 0x34, 0xb0,
-	0xed, 0x82, 0xe7, 0x39, 0x74, 0x22, 0x30, 0x6d, 0x3c, 0xab, 0xed, 0xf4, 0x0b, 0x9c, 0x75, 0x46,
-	0xc4, 0x1c, 0xd4, 0xca, 0xfe, 0x6b, 0xe7, 0x5f, 0x24, 0xbe, 0xcb, 0x60, 0x06, 0x8c, 0xfe, 0x6f,
-	0x57, 0xbc, 0xc4, 0x19, 0x30, 0x28, 0xdc, 0x46, 0xbd, 0x33, 0xdd, 0x6e, 0x7a, 0x99, 0xb7, 0x51,
-	0xcf, 0x48, 0xc7, 0x6a, 0x16, 0x6d, 0xd0, 0xae, 0x78, 0x85, 0xab, 0x59, 0xac, 0xc7, 0x30, 0x7a,
-	0x87, 0xa4, 0xdd, 0xf1, 0x2a, 0x87, 0xd1, 0x33, 0x23, 0xc5, 0x0a, 0xd4, 0x77, 0x0f, 0x48, 0xbb,
-	0xef, 0x35, 0xf2, 0x8d, 0xef, 0x9a, 0x8f, 0xe2, 0x3e, 0x98, 0xea, 0x3f, 0x1c, 0xed, 0xd6, 0x0b,
-	0xdb, 0x3d, 0xaf, 0x33, 0xe6, 0x6c, 0x14, 0xa7, 0xba, 0x5d, 0xd6, 0x1c, 0x8c, 0x76, 0xed, 0xc5,
-	0xed, 0x6a, 0xa3, 0x35, 0xe7, 0xa2, 0x98, 0x07, 0xe8, 0xce, 0x24, 0xbb, 0xeb, 0x12, 0xb9, 0x0c,
-	0x08, 0xb7, 0x06, 0x8d, 0x24, 0x3b, 0x7f, 0x99, 0xb7, 0x06, 0x11, 0xb8, 0x35, 0x78, 0x1a, 0xd9,
-	0xe9, 0x2b, 0xbc, 0x35, 0x18, 0x11, 0xb3, 0x30, 0x14, 0xe7, 0x61, 0x88, 0xcf, 0x56, 0xfd, 0xe6,
-	0x3e, 0xe3, 0x46, 0x86, 0x6d, 0x86, 0x7f, 0xdd, 0x21, 0x98, 0x01, 0x71, 0x18, 0xf6, 0xca, 0xa8,
-	0x29, 0xdb, 0x36, 0xf2, 0xb7, 0x1d, 0xee, 0x27, 0xb8, 0x5a, 0xcc, 0x01, 0x74, 0x5e, 0xa6, 0x31,
-	0x0a, 0x1b, 0xfb, 0xfb, 0x4e, 0xe7, 0xbd, 0xde, 0x40, 0xba, 0x82, 0xe2, 0x6d, 0xdc, 0x22, 0xd8,
-	0xaa, 0x0a, 0x8a, 0x17, 0xf0, 0x23, 0xb0, 0xef, 0xe1, 0x4c, 0xc5, 0xda, 0xf3, 0x6d, 0xf4, 0x1f,
-	0x44, 0xf3, 0x7a, 0x4c, 0x58, 0xa4, 0x52, 0xa9, 0x3d, 0x3f, 0xb3, 0xb1, 0x7f, 0x12, 0x5b, 0x02,
-	0x08, 0xb7, 0xbc, 0x4c, 0xbb, 0xdc, 0xf7, 0x5f, 0x0c, 0x33, 0x80, 0x41, 0xe3, 0xf5, 0x23, 0x72,
-	0xd3, 0xc6, 0xfe, 0xcd, 0x41, 0xd3, 0x7a, 0x71, 0x14, 0x6a, 0x78, 0x59, 0xfc, 0x0e, 0x61, 0x83,
-	0xff, 0x21, 0xb8, 0x4b, 0xe0, 0x37, 0x67, 0xba, 0xad, 0x03, 0x7b, 0xb2, 0xff, 0xa5, 0x4a, 0xf3,
-	0x7a, 0x31, 0x0f, 0xc3, 0x99, 0x6e, 0xb7, 0x73, 0x3a, 0xd1, 0x58, 0xf0, 0xff, 0x76, 0xca, 0x97,
-	0xdc, 0x92, 0x39, 0xb6, 0x08, 0x13, 0x2d, 0x15, 0xf5, 0x82, 0xc7, 0x60, 0x49, 0x2d, 0xa9, 0x95,
-	0x62, 0x17, 0x3d, 0x78, 0x9b, 0x1f, 0xe8, 0x8d, 0xbc, 0x39, 0xdd, 0x52, 0xd1, 0x0c, 0x1e, 0x35,
-	0xbb, 0xbf, 0xa0, 0x95, 0x07, 0xcf, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, 0x5f, 0x6c, 0x20,
-	0x74, 0x13, 0x00, 0x00,
+	proto.RegisterExtension(E_Wktpointer)
+}
+
+func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_b95f77e237336c7c) }
+
+var fileDescriptor_gogo_b95f77e237336c7c = []byte{
+	// 1328 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
+	0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
+	0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
+	0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
+	0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
+	0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
+	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
+	0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
+	0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
+	0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
+	0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
+	0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
+	0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
+	0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
+	0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
+	0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
+	0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
+	0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
+	0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
+	0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
+	0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
+	0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
+	0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
+	0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
+	0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
+	0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
+	0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
+	0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
+	0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
+	0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
+	0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
+	0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
+	0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
+	0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
+	0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
+	0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
+	0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
+	0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
+	0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
+	0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
+	0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
+	0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
+	0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
+	0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
+	0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
+	0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
+	0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
+	0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
+	0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
+	0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
+	0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
+	0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
+	0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
+	0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
+	0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
+	0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
+	0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
+	0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
+	0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
+	0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
+	0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
+	0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
+	0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
+	0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
+	0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
+	0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
+	0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
+	0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
+	0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
+	0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
+	0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
+	0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
+	0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
+	0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
+	0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
+	0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
+	0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
+	0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
+	0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
+	0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
+	0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
+	0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
+	0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
 }
 }

+ 11 - 0
vendor/github.com/gogo/protobuf/gogoproto/gogo.proto

@@ -83,6 +83,10 @@ extend google.protobuf.FileOptions {
     optional bool enumdecl_all = 63031;
     optional bool enumdecl_all = 63031;
 
 
 	optional bool goproto_registration = 63032;
 	optional bool goproto_registration = 63032;
+	optional bool messagename_all = 63033;
+
+	optional bool goproto_sizecache_all = 63034;
+	optional bool goproto_unkeyed_all = 63035;
 }
 }
 
 
 extend google.protobuf.MessageOptions {
 extend google.protobuf.MessageOptions {
@@ -115,6 +119,11 @@ extend google.protobuf.MessageOptions {
 	optional bool compare = 64029;
 	optional bool compare = 64029;
 
 
 	optional bool typedecl = 64030;
 	optional bool typedecl = 64030;
+
+	optional bool messagename = 64033;
+
+	optional bool goproto_sizecache = 64034;
+	optional bool goproto_unkeyed = 64035;
 }
 }
 
 
 extend google.protobuf.FieldOptions {
 extend google.protobuf.FieldOptions {
@@ -130,4 +139,6 @@ extend google.protobuf.FieldOptions {
 
 
 	optional bool stdtime = 65010;
 	optional bool stdtime = 65010;
 	optional bool stdduration = 65011;
 	optional bool stdduration = 65011;
+	optional bool wktpointer = 65012;
+
 }
 }

+ 61 - 3
vendor/github.com/gogo/protobuf/gogoproto/helper.go

@@ -47,6 +47,55 @@ func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
 	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
 	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
 }
 }
 
 
+func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
+}
+
+func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
+}
+
+func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
+}
+
+func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
+}
+
+func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
+}
+
+func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
+}
+
+func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
+}
+
+func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
+}
+
+func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
+}
+
+func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
+	return (IsStdTime(field) || IsStdDuration(field) ||
+		IsStdDouble(field) || IsStdFloat(field) ||
+		IsStdInt64(field) || IsStdUInt64(field) ||
+		IsStdInt32(field) || IsStdUInt32(field) ||
+		IsStdBool(field) ||
+		IsStdString(field) || IsStdBytes(field))
+}
+
+func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
+}
+
 func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
 func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
 	nullable := IsNullable(field)
 	nullable := IsNullable(field)
 	if field.IsMessage() || IsCustomType(field) {
 	if field.IsMessage() || IsCustomType(field) {
@@ -334,9 +383,6 @@ func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google
 }
 }
 
 
 func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
 func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
-	if IsProto3(file) {
-		return false
-	}
 	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
 	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
 }
 }
 
 
@@ -355,3 +401,15 @@ func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_proto
 func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
 func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
 	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
 	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
 }
 }
+
+func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
+}
+
+func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
+}
+
+func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
+}

+ 14 - 15
vendor/github.com/gogo/protobuf/io/varint.go

@@ -42,7 +42,7 @@ var (
 )
 )
 
 
 func NewDelimitedWriter(w io.Writer) WriteCloser {
 func NewDelimitedWriter(w io.Writer) WriteCloser {
-	return &varintWriter{w, make([]byte, 10), nil}
+	return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil}
 }
 }
 
 
 type varintWriter struct {
 type varintWriter struct {
@@ -55,26 +55,25 @@ func (this *varintWriter) WriteMsg(msg proto.Message) (err error) {
 	var data []byte
 	var data []byte
 	if m, ok := msg.(marshaler); ok {
 	if m, ok := msg.(marshaler); ok {
 		n, ok := getSize(m)
 		n, ok := getSize(m)
-		if !ok {
-			data, err = proto.Marshal(msg)
+		if ok {
+			if n+binary.MaxVarintLen64 >= len(this.buffer) {
+				this.buffer = make([]byte, n+binary.MaxVarintLen64)
+			}
+			lenOff := binary.PutUvarint(this.buffer, uint64(n))
+			_, err = m.MarshalTo(this.buffer[lenOff:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
-		}
-		if n >= len(this.buffer) {
-			this.buffer = make([]byte, n)
-		}
-		_, err = m.MarshalTo(this.buffer)
-		if err != nil {
-			return err
-		}
-		data = this.buffer[:n]
-	} else {
-		data, err = proto.Marshal(msg)
-		if err != nil {
+			_, err = this.w.Write(this.buffer[:lenOff+n])
 			return err
 			return err
 		}
 		}
 	}
 	}
+
+	// fallback
+	data, err = proto.Marshal(msg)
+	if err != nil {
+		return err
+	}
 	length := uint64(len(data))
 	length := uint64(len(data))
 	n := binary.PutUvarint(this.lenBuf, length)
 	n := binary.PutUvarint(this.lenBuf, length)
 	_, err = this.w.Write(this.lenBuf[:n])
 	_, err = this.w.Write(this.lenBuf[:n])

+ 35 - 11
vendor/github.com/gogo/protobuf/proto/clone.go

@@ -35,22 +35,39 @@
 package proto
 package proto
 
 
 import (
 import (
+	"fmt"
 	"log"
 	"log"
 	"reflect"
 	"reflect"
 	"strings"
 	"strings"
 )
 )
 
 
 // Clone returns a deep copy of a protocol buffer.
 // Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
 	if in.IsNil() {
 	if in.IsNil() {
-		return pb
+		return src
 	}
 	}
-
 	out := reflect.New(in.Type().Elem())
 	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generate Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
 }
 }
 
 
 // Merge merges src into dst.
 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 func Merge(dst, src Message) {
 func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
 	in := reflect.ValueOf(src)
 	in := reflect.ValueOf(src)
 	out := reflect.ValueOf(dst)
 	out := reflect.ValueOf(dst)
 	if out.IsNil() {
 	if out.IsNil() {
 		panic("proto: nil destination")
 		panic("proto: nil destination")
 	}
 	}
 	if in.Type() != out.Type() {
 	if in.Type() != out.Type() {
-		// Explicit test prior to mergeStruct so that mistyped nils will fail
-		panic("proto: type mismatch")
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
 	}
 	}
 	if in.IsNil() {
 	if in.IsNil() {
-		// Merging nil into non-nil is a quiet no-op
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
 		return
 		return
 	}
 	}
 	mergeStruct(out.Elem(), in.Elem())
 	mergeStruct(out.Elem(), in.Elem())
@@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) {
 		bIn := emIn.GetExtensions()
 		bIn := emIn.GetExtensions()
 		bOut := emOut.GetExtensions()
 		bOut := emOut.GetExtensions()
 		*bOut = append(*bOut, *bIn...)
 		*bOut = append(*bOut, *bIn...)
-	} else if emIn, ok := extendable(in.Addr().Interface()); ok {
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
 		emOut, _ := extendable(out.Addr().Interface())
 		emOut, _ := extendable(out.Addr().Interface())
 		mIn, muIn := emIn.extensionsRead()
 		mIn, muIn := emIn.extensionsRead()
 		if mIn != nil {
 		if mIn != nil {

+ 39 - 0
vendor/github.com/gogo/protobuf/proto/custom_gogo.go

@@ -0,0 +1,39 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "reflect"
+
+type custom interface {
+	Marshal() ([]byte, error)
+	Unmarshal(data []byte) error
+	Size() int
+}
+
+var customType = reflect.TypeOf((*custom)(nil)).Elem()

+ 63 - 613
vendor/github.com/gogo/protobuf/proto/decode.go

@@ -39,8 +39,6 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
-	"os"
-	"reflect"
 )
 )
 
 
 // errOverflow is returned when an integer is too large to be represented.
 // errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
 // wire type is encountered. It does not get returned to user code.
 // wire type is encountered. It does not get returned to user code.
 var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
 var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
 
 
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
 // DecodeVarint reads a varint-encoded integer from the slice.
 // DecodeVarint reads a varint-encoded integer from the slice.
 // It returns the integer and the number of bytes consumed, or
 // It returns the integer and the number of bytes consumed, or
 // zero if there is not enough.
 // zero if there is not enough.
@@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
 	return
 	return
 }
 }
 
 
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
 // DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
 // DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
 // This is the format used for the bytes protocol buffer
 // This is the format used for the bytes protocol buffer
 // type and for embedded messages.
 // type and for embedded messages.
@@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
 	return string(buf), nil
 	return string(buf), nil
 }
 }
 
 
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
-	oi := o.index
-
-	err := o.skip(t, tag, wire)
-	if err != nil {
-		return err
-	}
-
-	if !unrecField.IsValid() {
-		return nil
-	}
-
-	ptr := structPointer_Bytes(base, unrecField)
-
-	// Add the skipped field to struct field
-	obuf := o.buf
-
-	o.buf = *ptr
-	o.EncodeVarint(uint64(tag<<3 | wire))
-	*ptr = append(o.buf, obuf[oi:o.index]...)
-
-	o.buf = obuf
-
-	return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
-	var u uint64
-	var err error
-
-	switch wire {
-	case WireVarint:
-		_, err = o.DecodeVarint()
-	case WireFixed64:
-		_, err = o.DecodeFixed64()
-	case WireBytes:
-		_, err = o.DecodeRawBytes(false)
-	case WireFixed32:
-		_, err = o.DecodeFixed32()
-	case WireStartGroup:
-		for {
-			u, err = o.DecodeVarint()
-			if err != nil {
-				break
-			}
-			fwire := int(u & 0x7)
-			if fwire == WireEndGroup {
-				break
-			}
-			ftag := int(u >> 3)
-			err = o.skip(t, ftag, fwire)
-			if err != nil {
-				break
-			}
-		}
-	default:
-		err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
-	}
-	return err
-}
-
 // Unmarshaler is the interface representing objects that can
 // Unmarshaler is the interface representing objects that can
-// unmarshal themselves.  The method should reset the receiver before
-// decoding starts.  The argument points to data that may be
+// unmarshal themselves.  The argument points to data that may be
 // overwritten, so implementations should not keep references to the
 // overwritten, so implementations should not keep references to the
 // buffer.
 // buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
 type Unmarshaler interface {
 type Unmarshaler interface {
 	Unmarshal([]byte) error
 	Unmarshal([]byte) error
 }
 }
 
 
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
 // Unmarshal parses the protocol buffer representation in buf and places the
 // Unmarshal parses the protocol buffer representation in buf and places the
 // decoded result in pb.  If the struct underlying pb does not match
 // decoded result in pb.  If the struct underlying pb does not match
 // the data in buf, the results can be unpredictable.
 // the data in buf, the results can be unpredictable.
@@ -395,7 +334,13 @@ type Unmarshaler interface {
 // to preserve and append to existing data.
 // to preserve and append to existing data.
 func Unmarshal(buf []byte, pb Message) error {
 func Unmarshal(buf []byte, pb Message) error {
 	pb.Reset()
 	pb.Reset()
-	return UnmarshalMerge(buf, pb)
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
 }
 }
 
 
 // UnmarshalMerge parses the protocol buffer representation in buf and
 // UnmarshalMerge parses the protocol buffer representation in buf and
@@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
 // UnmarshalMerge merges into existing data in pb.
 // UnmarshalMerge merges into existing data in pb.
 // Most code should use Unmarshal instead.
 // Most code should use Unmarshal instead.
 func UnmarshalMerge(buf []byte, pb Message) error {
 func UnmarshalMerge(buf []byte, pb Message) error {
-	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
 	if u, ok := pb.(Unmarshaler); ok {
 	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto have unfortunately been inconsistent
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
 		return u.Unmarshal(buf)
 		return u.Unmarshal(buf)
 	}
 	}
 	return NewBuffer(buf).Unmarshal(pb)
 	return NewBuffer(buf).Unmarshal(pb)
@@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
 }
 }
 
 
 // DecodeGroup reads a tag-delimited group from the Buffer.
 // DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
 func (p *Buffer) DecodeGroup(pb Message) error {
 func (p *Buffer) DecodeGroup(pb Message) error {
-	typ, base, err := getbase(pb)
-	if err != nil {
-		return err
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
 	}
 	}
-	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
 }
 }
 
 
 // Unmarshal parses the protocol buffer representation in the
 // Unmarshal parses the protocol buffer representation in the
@@ -438,541 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
 func (p *Buffer) Unmarshal(pb Message) error {
 	// If the object can unmarshal itself, let it.
 	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(Unmarshaler); ok {
-		err := u.Unmarshal(p.buf[p.index:])
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
 		p.index = len(p.buf)
 		p.index = len(p.buf)
 		return err
 		return err
 	}
 	}
-
-	typ, base, err := getbase(pb)
-	if err != nil {
-		return err
-	}
-
-	err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
-	if collectStats {
-		stats.Decode++
-	}
-
-	return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
-	var state errorState
-	required, reqFields := prop.reqCount, uint64(0)
-
-	var err error
-	for err == nil && o.index < len(o.buf) {
-		oi := o.index
-		var u uint64
-		u, err = o.DecodeVarint()
-		if err != nil {
-			break
-		}
-		wire := int(u & 0x7)
-		if wire == WireEndGroup {
-			if is_group {
-				if required > 0 {
-					// Not enough information to determine the exact field.
-					// (See below.)
-					return &RequiredNotSetError{"{Unknown}"}
-				}
-				return nil // input is satisfied
-			}
-			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
-		}
-		tag := int(u >> 3)
-		if tag <= 0 {
-			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
-		}
-		fieldnum, ok := prop.decoderTags.get(tag)
-		if !ok {
-			// Maybe it's an extension?
-			if prop.extendable {
-				if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
-					if isExtensionField(e, int32(tag)) {
-						if err = o.skip(st, tag, wire); err == nil {
-							ext := e.GetExtensions()
-							*ext = append(*ext, o.buf[oi:o.index]...)
-						}
-						continue
-					}
-				} else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
-					if err = o.skip(st, tag, wire); err == nil {
-						extmap := e.extensionsWrite()
-						ext := extmap[int32(tag)] // may be missing
-						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
-						extmap[int32(tag)] = ext
-					}
-					continue
-				}
-			}
-			// Maybe it's a oneof?
-			if prop.oneofUnmarshaler != nil {
-				m := structPointer_Interface(base, st).(Message)
-				// First return value indicates whether tag is a oneof field.
-				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
-				if err == ErrInternalBadWireType {
-					// Map the error to something more descriptive.
-					// Do the formatting here to save generated code space.
-					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
-				}
-				if ok {
-					continue
-				}
-			}
-			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
-			continue
-		}
-		p := prop.Prop[fieldnum]
-
-		if p.dec == nil {
-			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
-			continue
-		}
-		dec := p.dec
-		if wire != WireStartGroup && wire != p.WireType {
-			if wire == WireBytes && p.packedDec != nil {
-				// a packable field
-				dec = p.packedDec
-			} else {
-				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
-				continue
-			}
-		}
-		decErr := dec(o, p, base)
-		if decErr != nil && !state.shouldContinue(decErr, p) {
-			err = decErr
-		}
-		if err == nil && p.Required {
-			// Successfully decoded a required field.
-			if tag <= 64 {
-				// use bitmap for fields 1-64 to catch field reuse.
-				var mask uint64 = 1 << uint64(tag-1)
-				if reqFields&mask == 0 {
-					// new required field
-					reqFields |= mask
-					required--
-				}
-			} else {
-				// This is imprecise. It can be fooled by a required field
-				// with a tag > 64 that is encoded twice; that's very rare.
-				// A fully correct implementation would require allocating
-				// a data structure, which we would like to avoid.
-				required--
-			}
-		}
-	}
-	if err == nil {
-		if is_group {
-			return io.ErrUnexpectedEOF
-		}
-		if state.err != nil {
-			return state.err
-		}
-		if required > 0 {
-			// Not enough information to determine the exact field. If we use extra
-			// CPU, we could determine the field only if the missing required field
-			// has a tag <= 64 and we check reqFields.
-			return &RequiredNotSetError{"{Unknown}"}
-		}
-	}
-	return err
-}
-
-// Individual type decoders
-// For each,
-//	u is the decoded value,
-//	v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
-	boolPoolSize   = 16
-	uint32PoolSize = 8
-	uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	if len(o.bools) == 0 {
-		o.bools = make([]bool, boolPoolSize)
-	}
-	o.bools[0] = u != 0
-	*structPointer_Bool(base, p.field) = &o.bools[0]
-	o.bools = o.bools[1:]
-	return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	*structPointer_BoolVal(base, p.field) = u != 0
-	return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
-	return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
-	return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word64_Set(structPointer_Word64(base, p.field), o, u)
-	return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
-	return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	*structPointer_String(base, p.field) = &s
-	return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	*structPointer_StringVal(base, p.field) = s
-	return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	*structPointer_Bytes(base, p.field) = b
-	return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	v := structPointer_BoolSlice(base, p.field)
-	*v = append(*v, u != 0)
-	return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
-	v := structPointer_BoolSlice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded bools
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-
-	y := *v
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		y = append(y, u != 0)
-	}
-
-	*v = y
-	return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-	structPointer_Word32Slice(base, p.field).Append(uint32(u))
-	return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Slice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded int32s
-
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		v.Append(uint32(u))
-	}
-	return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
-	u, err := p.valDec(o)
-	if err != nil {
-		return err
-	}
-
-	structPointer_Word64Slice(base, p.field).Append(u)
-	return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64Slice(base, p.field)
-
-	nn, err := o.DecodeVarint()
-	if err != nil {
-		return err
-	}
-	nb := int(nn) // number of bytes of encoded int64s
-
-	fin := o.index + nb
-	if fin < o.index {
-		return errOverflow
-	}
-	for o.index < fin {
-		u, err := p.valDec(o)
-		if err != nil {
-			return err
-		}
-		v.Append(u)
-	}
-	return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
-	s, err := o.DecodeStringBytes()
-	if err != nil {
-		return err
-	}
-	v := structPointer_StringSlice(base, p.field)
-	*v = append(*v, s)
-	return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	v := structPointer_BytesSlice(base, p.field)
-	*v = append(*v, b)
-	return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
-	raw, err := o.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	oi := o.index       // index at the end of this map entry
-	o.index -= len(raw) // move buffer back to start of map entry
-
-	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
-	if mptr.Elem().IsNil() {
-		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
-	}
-	v := mptr.Elem() // map[K]V
-
-	// Prepare addressable doubly-indirect placeholders for the key and value types.
-	// See enc_new_map for why.
-	keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
-	keybase := toStructPointer(keyptr.Addr())                  // **K
-
-	var valbase structPointer
-	var valptr reflect.Value
-	switch p.mtype.Elem().Kind() {
-	case reflect.Slice:
-		// []byte
-		var dummy []byte
-		valptr = reflect.ValueOf(&dummy)  // *[]byte
-		valbase = toStructPointer(valptr) // *[]byte
-	case reflect.Ptr:
-		// message; valptr is **Msg; need to allocate the intermediate pointer
-		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
-		valptr.Set(reflect.New(valptr.Type().Elem()))
-		valbase = toStructPointer(valptr)
-	default:
-		// everything else
-		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
-		valbase = toStructPointer(valptr.Addr())                   // **V
-	}
-
-	// Decode.
-	// This parses a restricted wire format, namely the encoding of a message
-	// with two fields. See enc_new_map for the format.
-	for o.index < oi {
-		// tagcode for key and value properties are always a single byte
-		// because they have tags 1 and 2.
-		tagcode := o.buf[o.index]
-		o.index++
-		switch tagcode {
-		case p.mkeyprop.tagcode[0]:
-			if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
-				return err
-			}
-		case p.mvalprop.tagcode[0]:
-			if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
-				return err
-			}
-		default:
-			// TODO: Should we silently skip this instead?
-			return fmt.Errorf("proto: bad map data tag %d", raw[0])
-		}
-	}
-	keyelem, valelem := keyptr.Elem(), valptr.Elem()
-	if !keyelem.IsValid() {
-		keyelem = reflect.Zero(p.mtype.Key())
-	}
-	if !valelem.IsValid() {
-		valelem = reflect.Zero(p.mtype.Elem())
-	}
-
-	v.SetMapIndex(keyelem, valelem)
-	return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
-	bas := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(bas) {
-		// allocate new nested message
-		bas = toStructPointer(reflect.New(p.stype))
-		structPointer_SetStructPointer(base, p.field, bas)
-	}
-	return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
-	raw, e := o.DecodeRawBytes(false)
-	if e != nil {
-		return e
-	}
-
-	bas := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(bas) {
-		// allocate new nested message
-		bas = toStructPointer(reflect.New(p.stype))
-		structPointer_SetStructPointer(base, p.field, bas)
-	}
-
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		iv := structPointer_Interface(bas, p.stype)
-		return iv.(Unmarshaler).Unmarshal(raw)
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	err = o.unmarshalType(p.stype, p.sprop, false, bas)
-	o.buf = obuf
-	o.index = oi
-
-	return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
-	return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
-	return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
-	v := reflect.New(p.stype)
-	bas := toStructPointer(v)
-	structPointer_StructPointerSlice(base, p.field).Append(bas)
-
-	if is_group {
-		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
-		return err
-	}
-
-	raw, err := o.DecodeRawBytes(false)
-	if err != nil {
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto have unfortunately been inconsistent
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
 		return err
 		return err
 	}
 	}
 
 
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		iv := v.Interface()
-		return iv.(Unmarshaler).Unmarshal(raw)
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
-	o.buf = obuf
-	o.index = oi
-
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
 	return err
 	return err
 }
 }

+ 0 - 172
vendor/github.com/gogo/protobuf/proto/decode_gogo.go

@@ -1,172 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"reflect"
-)
-
-// Decode a reference to a struct pointer.
-func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
-	raw, e := o.DecodeRawBytes(false)
-	if e != nil {
-		return e
-	}
-
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		panic("not supported, since this is a pointer receiver")
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	bas := structPointer_FieldPointer(base, p.field)
-
-	err = o.unmarshalType(p.stype, p.sprop, false, bas)
-	o.buf = obuf
-	o.index = oi
-
-	return err
-}
-
-// Decode a slice of references to struct pointers ([]struct).
-func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
-	newBas := appendStructPointer(base, p.field, p.sstype)
-
-	if is_group {
-		panic("not supported, maybe in future, if requested.")
-	}
-
-	raw, err := o.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-
-	// If the object can unmarshal itself, let it.
-	if p.isUnmarshaler {
-		panic("not supported, since this is not a pointer receiver.")
-	}
-
-	obuf := o.buf
-	oi := o.index
-	o.buf = raw
-	o.index = 0
-
-	err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
-
-	o.buf = obuf
-	o.index = oi
-
-	return err
-}
-
-// Decode a slice of references to struct pointers.
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
-	return o.dec_slice_ref_struct(p, false, base)
-}
-
-func setPtrCustomType(base structPointer, f field, v interface{}) {
-	if v == nil {
-		return
-	}
-	structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v)))
-}
-
-func setCustomType(base structPointer, f field, value interface{}) {
-	if value == nil {
-		return
-	}
-	v := reflect.ValueOf(value).Elem()
-	t := reflect.TypeOf(value).Elem()
-	kind := t.Kind()
-	switch kind {
-	case reflect.Slice:
-		slice := reflect.MakeSlice(t, v.Len(), v.Cap())
-		reflect.Copy(slice, v)
-		oldHeader := structPointer_GetSliceHeader(base, f)
-		oldHeader.Data = slice.Pointer()
-		oldHeader.Len = v.Len()
-		oldHeader.Cap = v.Cap()
-	default:
-		size := reflect.TypeOf(value).Elem().Size()
-		structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size))
-	}
-}
-
-func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	i := reflect.New(p.ctype.Elem()).Interface()
-	custom := (i).(Unmarshaler)
-	if err := custom.Unmarshal(b); err != nil {
-		return err
-	}
-	setPtrCustomType(base, p.field, custom)
-	return nil
-}
-
-func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	i := reflect.New(p.ctype).Interface()
-	custom := (i).(Unmarshaler)
-	if err := custom.Unmarshal(b); err != nil {
-		return err
-	}
-	if custom != nil {
-		setCustomType(base, p.field, custom)
-	}
-	return nil
-}
-
-// Decode a slice of bytes ([]byte) into a slice of custom types.
-func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return err
-	}
-	i := reflect.New(p.ctype.Elem()).Interface()
-	custom := (i).(Unmarshaler)
-	if err := custom.Unmarshal(b); err != nil {
-		return err
-	}
-	newBas := appendStructPointer(base, p.field, p.ctype)
-
-	var zero field
-	setCustomType(newBas, zero, custom)
-
-	return nil
-}

+ 200 - 1
vendor/github.com/gogo/protobuf/proto/discard.go

@@ -35,8 +35,14 @@ import (
 	"fmt"
 	"fmt"
 	"reflect"
 	"reflect"
 	"strings"
 	"strings"
+	"sync"
+	"sync/atomic"
 )
 )
 
 
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
 // DiscardUnknown recursively discards all unknown fields from this message
 // DiscardUnknown recursively discards all unknown fields from this message
 // and all embedded messages.
 // and all embedded messages.
 //
 //
@@ -49,9 +55,202 @@ import (
 // For proto2 messages, the unknown fields of message extensions are only
 // For proto2 messages, the unknown fields of message extensions are only
 // discarded from messages that have been accessed via GetExtension.
 // discarded from messages that have been accessed via GetExtension.
 func DiscardUnknown(m Message) {
 func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
 	discardLegacy(m)
 	discardLegacy(m)
 }
 }
 
 
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							discardInfo.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						discardInfo.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
 func discardLegacy(m Message) {
 func discardLegacy(m Message) {
 	v := reflect.ValueOf(m)
 	v := reflect.ValueOf(m)
 	if v.Kind() != reflect.Ptr || v.IsNil() {
 	if v.Kind() != reflect.Ptr || v.IsNil() {
@@ -139,7 +338,7 @@ func discardLegacy(m Message) {
 
 
 	// For proto2 messages, only discard unknown fields in message extensions
 	// For proto2 messages, only discard unknown fields in message extensions
 	// that have been accessed via GetExtension.
 	// that have been accessed via GetExtension.
-	if em, ok := extendable(m); ok {
+	if em, err := extendable(m); err == nil {
 		// Ignore lock since discardLegacy is not concurrency safe.
 		// Ignore lock since discardLegacy is not concurrency safe.
 		emm, _ := em.extensionsRead()
 		emm, _ := em.extensionsRead()
 		for _, mx := range emm {
 		for _, mx := range emm {

+ 0 - 154
vendor/github.com/gogo/protobuf/proto/duration_gogo.go

@@ -47,157 +47,3 @@ func (*duration) String() string { return "duration<string>" }
 func init() {
 func init() {
 	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
 	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
 }
 }
-
-func (o *Buffer) decDuration() (time.Duration, error) {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return 0, err
-	}
-	dproto := &duration{}
-	if err := Unmarshal(b, dproto); err != nil {
-		return 0, err
-	}
-	return durationFromProto(dproto)
-}
-
-func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
-	d, err := o.decDuration()
-	if err != nil {
-		return err
-	}
-	word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
-	return nil
-}
-
-func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
-	d, err := o.decDuration()
-	if err != nil {
-		return err
-	}
-	word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
-	return nil
-}
-
-func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
-	d, err := o.decDuration()
-	if err != nil {
-		return err
-	}
-	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
-	var zero field
-	setPtrCustomType(newBas, zero, &d)
-	return nil
-}
-
-func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
-	d, err := o.decDuration()
-	if err != nil {
-		return err
-	}
-	structPointer_Word64Slice(base, p.field).Append(uint64(d))
-	return nil
-}
-
-func size_duration(p *Properties, base structPointer) (n int) {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return 0
-	}
-	dur := structPointer_Interface(structp, durationType).(*time.Duration)
-	d := durationProto(*dur)
-	size := Size(d)
-	return size + sizeVarint(uint64(size)) + len(p.tagcode)
-}
-
-func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return ErrNil
-	}
-	dur := structPointer_Interface(structp, durationType).(*time.Duration)
-	d := durationProto(*dur)
-	data, err := Marshal(d)
-	if err != nil {
-		return err
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(data)
-	return nil
-}
-
-func size_ref_duration(p *Properties, base structPointer) (n int) {
-	dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
-	d := durationProto(*dur)
-	size := Size(d)
-	return size + sizeVarint(uint64(size)) + len(p.tagcode)
-}
-
-func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
-	dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
-	d := durationProto(*dur)
-	data, err := Marshal(d)
-	if err != nil {
-		return err
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(data)
-	return nil
-}
-
-func size_slice_duration(p *Properties, base structPointer) (n int) {
-	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
-	durs := *pdurs
-	for i := 0; i < len(durs); i++ {
-		if durs[i] == nil {
-			return 0
-		}
-		dproto := durationProto(*durs[i])
-		size := Size(dproto)
-		n += len(p.tagcode) + size + sizeVarint(uint64(size))
-	}
-	return n
-}
-
-func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
-	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
-	durs := *pdurs
-	for i := 0; i < len(durs); i++ {
-		if durs[i] == nil {
-			return errRepeatedHasNil
-		}
-		dproto := durationProto(*durs[i])
-		data, err := Marshal(dproto)
-		if err != nil {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-	}
-	return nil
-}
-
-func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
-	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
-	durs := *pdurs
-	for i := 0; i < len(durs); i++ {
-		dproto := durationProto(durs[i])
-		size := Size(dproto)
-		n += len(p.tagcode) + size + sizeVarint(uint64(size))
-	}
-	return n
-}
-
-func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
-	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
-	durs := *pdurs
-	for i := 0; i < len(durs); i++ {
-		dproto := durationProto(durs[i])
-		data, err := Marshal(dproto)
-		if err != nil {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-	}
-	return nil
-}

+ 25 - 1184
vendor/github.com/gogo/protobuf/proto/encode.go

@@ -37,28 +37,9 @@ package proto
 
 
 import (
 import (
 	"errors"
 	"errors"
-	"fmt"
 	"reflect"
 	"reflect"
-	"sort"
 )
 )
 
 
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
-	field string
-}
-
-func (e *RequiredNotSetError) Error() string {
-	return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
 var (
 var (
 	// errRepeatedHasNil is the error returned if Marshal is called with
 	// errRepeatedHasNil is the error returned if Marshal is called with
 	// a struct with a repeated field containing a nil element.
 	// a struct with a repeated field containing a nil element.
@@ -82,10 +63,6 @@ var (
 
 
 const maxVarintBytes = 10 // maximum length of a varint
 const maxVarintBytes = 10 // maximum length of a varint
 
 
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
 // EncodeVarint returns the varint encoding of x.
 // EncodeVarint returns the varint encoding of x.
 // This is the format for the
 // This is the format for the
 // int32, int64, uint32, uint64, bool, and enum
 // int32, int64, uint32, uint64, bool, and enum
@@ -119,18 +96,27 @@ func (p *Buffer) EncodeVarint(x uint64) error {
 
 
 // SizeVarint returns the varint encoding size of an integer.
 // SizeVarint returns the varint encoding size of an integer.
 func SizeVarint(x uint64) int {
 func SizeVarint(x uint64) int {
-	return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
 }
 }
 
 
 // EncodeFixed64 writes a 64-bit integer to the Buffer.
 // EncodeFixed64 writes a 64-bit integer to the Buffer.
@@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error {
 	return nil
 	return nil
 }
 }
 
 
-func sizeFixed64(x uint64) int {
-	return 8
-}
-
 // EncodeFixed32 writes a 32-bit integer to the Buffer.
 // EncodeFixed32 writes a 32-bit integer to the Buffer.
 // This is the format for the
 // This is the format for the
 // fixed32, sfixed32, and float protocol buffer types.
 // fixed32, sfixed32, and float protocol buffer types.
@@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error {
 	return nil
 	return nil
 }
 }
 
 
-func sizeFixed32(x uint64) int {
-	return 4
-}
-
 // EncodeZigzag64 writes a zigzag-encoded 64-bit integer
 // EncodeZigzag64 writes a zigzag-encoded 64-bit integer
 // to the Buffer.
 // to the Buffer.
 // This is the format used for the sint64 protocol buffer type.
 // This is the format used for the sint64 protocol buffer type.
 func (p *Buffer) EncodeZigzag64(x uint64) error {
 func (p *Buffer) EncodeZigzag64(x uint64) error {
 	// use signed number to get arithmetic right shift.
 	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
-}
-
-func sizeZigzag64(x uint64) int {
-	return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
 }
 
 
 // EncodeZigzag32 writes a zigzag-encoded 32-bit integer
 // EncodeZigzag32 writes a zigzag-encoded 32-bit integer
@@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error {
 	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
 	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
 }
 }
 
 
-func sizeZigzag32(x uint64) int {
-	return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
 // EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
 // EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
 // This is the format used for the bytes protocol buffer
 // This is the format used for the bytes protocol buffer
 // type and for embedded messages.
 // type and for embedded messages.
@@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error {
 	return nil
 	return nil
 }
 }
 
 
-func sizeRawBytes(b []byte) int {
-	return sizeVarint(uint64(len(b))) +
-		len(b)
-}
-
 // EncodeStringBytes writes an encoded string to the Buffer.
 // EncodeStringBytes writes an encoded string to the Buffer.
 // This is the format used for the proto2 string type.
 // This is the format used for the proto2 string type.
 func (p *Buffer) EncodeStringBytes(s string) error {
 func (p *Buffer) EncodeStringBytes(s string) error {
@@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error {
 	return nil
 	return nil
 }
 }
 
 
-func sizeStringBytes(s string) int {
-	return sizeVarint(uint64(len(s))) +
-		len(s)
-}
-
 // Marshaler is the interface representing objects that can marshal themselves.
 // Marshaler is the interface representing objects that can marshal themselves.
 type Marshaler interface {
 type Marshaler interface {
 	Marshal() ([]byte, error)
 	Marshal() ([]byte, error)
 }
 }
 
 
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
-	// Can the object marshal itself?
-	if m, ok := pb.(Marshaler); ok {
-		return m.Marshal()
-	}
-	p := NewBuffer(nil)
-	err := p.Marshal(pb)
-	if p.buf == nil && err == nil {
-		// Return a non-nil slice on success.
-		return []byte{}, nil
-	}
-	return p.buf, err
-}
-
 // EncodeMessage writes the protocol buffer to the Buffer,
 // EncodeMessage writes the protocol buffer to the Buffer,
 // prefixed by a varint-encoded length.
 // prefixed by a varint-encoded length.
 func (p *Buffer) EncodeMessage(pb Message) error {
 func (p *Buffer) EncodeMessage(pb Message) error {
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return ErrNil
-	}
-	if err == nil {
-		var state errorState
-		err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
-	}
-	return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
-	// Can the object marshal itself?
-	if m, ok := pb.(Marshaler); ok {
-		data, err := m.Marshal()
-		p.buf = append(p.buf, data...)
-		return err
-	}
-
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return ErrNil
-	}
-	if err == nil {
-		err = p.enc_struct(GetProperties(t.Elem()), base)
-	}
-
-	if collectStats {
-		(stats).Encode++ // Parens are to work around a goimports bug.
-	}
-
-	if len(p.buf) > maxMarshalSize {
-		return ErrTooLarge
-	}
-	return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
-	// Can the object marshal itself?  If so, Size is slow.
-	// TODO: add Size to Marshaler, or add a Sizer interface.
-	if m, ok := pb.(Marshaler); ok {
-		b, _ := m.Marshal()
-		return len(b)
-	}
-
-	t, base, err := getbase(pb)
-	if structPointer_IsNil(base) {
-		return 0
-	}
-	if err == nil {
-		n = size_struct(GetProperties(t.Elem()), base)
-	}
-
-	if collectStats {
-		(stats).Size++ // Parens are to work around a goimports bug.
-	}
-
-	return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
-	v := *structPointer_Bool(base, p.field)
-	if v == nil {
-		return ErrNil
-	}
-	x := 0
-	if *v {
-		x = 1
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
-	v := *structPointer_BoolVal(base, p.field)
-	if !v {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, 1)
-	return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
-	v := *structPointer_Bool(base, p.field)
-	if v == nil {
-		return 0
-	}
-	return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
-	v := *structPointer_BoolVal(base, p.field)
-	if !v && !p.oneof {
-		return 0
-	}
-	return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return ErrNil
-	}
-	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return 0
-	}
-	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return ErrNil
-	}
-	x := word32_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32(base, p.field)
-	if word32_IsNil(v) {
-		return 0
-	}
-	x := word32_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64(base, p.field)
-	if word64_IsNil(v) {
-		return ErrNil
-	}
-	x := word64_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, x)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	if x == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, x)
-	return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word64(base, p.field)
-	if word64_IsNil(v) {
-		return 0
-	}
-	x := word64_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(x)
-	return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	if x == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += p.valSize(x)
-	return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
-	v := *structPointer_String(base, p.field)
-	if v == nil {
-		return ErrNil
-	}
-	x := *v
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeStringBytes(x)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
-	v := *structPointer_StringVal(base, p.field)
-	if v == "" {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeStringBytes(v)
-	return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
-	v := *structPointer_String(base, p.field)
-	if v == nil {
-		return 0
-	}
-	x := *v
-	n += len(p.tagcode)
-	n += sizeStringBytes(x)
-	return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
-	v := *structPointer_StringVal(base, p.field)
-	if v == "" && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeStringBytes(v)
-	return
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
 }
 }
 
 
 // All protocol buffer fields are nillable, but be careful.
 // All protocol buffer fields are nillable, but be careful.
@@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool {
 	}
 	}
 	return false
 	return false
 }
 }
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return ErrNil
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, err := m.Marshal()
-		if err != nil && !state.shouldContinue(err, nil) {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-		return state.err
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return 0
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, _ := m.Marshal()
-		n0 := len(p.tagcode)
-		n1 := sizeRawBytes(data)
-		return n0 + n1
-	}
-
-	n0 := len(p.tagcode)
-	n1 := size_struct(p.sprop, structp)
-	n2 := sizeVarint(uint64(n1)) // size of encoded length
-	return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
-	var state errorState
-	b := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(b) {
-		return ErrNil
-	}
-
-	o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-	err := o.enc_struct(p.sprop, b)
-	if err != nil && !state.shouldContinue(err, nil) {
-		return err
-	}
-	o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
-	b := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(b) {
-		return 0
-	}
-
-	n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
-	n += size_struct(p.sprop, b)
-	n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return ErrNil
-	}
-	for _, x := range s {
-		o.buf = append(o.buf, p.tagcode...)
-		v := uint64(0)
-		if x {
-			v = 1
-		}
-		p.valEnc(o, v)
-	}
-	return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return 0
-	}
-	return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
-	for _, x := range s {
-		v := uint64(0)
-		if x {
-			v = 1
-		}
-		p.valEnc(o, v)
-	}
-	return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
-	s := *structPointer_BoolSlice(base, p.field)
-	l := len(s)
-	if l == 0 {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(l))
-	n += l // each bool takes exactly one byte
-	return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(s)
-	return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
-	s := *structPointer_Bytes(base, p.field)
-	if len(s) == 0 {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(s)
-	return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeRawBytes(s)
-	return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
-	s := *structPointer_Bytes(base, p.field)
-	if len(s) == 0 && !p.oneof {
-		return 0
-	}
-	n += len(p.tagcode)
-	n += sizeRawBytes(s)
-	return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		p.valEnc(o, uint64(x))
-	}
-	return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		n += p.valSize(uint64(x))
-	}
-	return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		p.valEnc(buf, uint64(x))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
-		bufSize += p.valSize(uint64(x))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		x := s.Index(i)
-		p.valEnc(o, uint64(x))
-	}
-	return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		x := s.Index(i)
-		n += p.valSize(uint64(x))
-	}
-	return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		p.valEnc(buf, uint64(s.Index(i)))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word32Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		bufSize += p.valSize(uint64(s.Index(i)))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		p.valEnc(o, s.Index(i))
-	}
-	return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	for i := 0; i < l; i++ {
-		n += len(p.tagcode)
-		n += p.valSize(s.Index(i))
-	}
-	return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return ErrNil
-	}
-	// TODO: Reuse a Buffer.
-	buf := NewBuffer(nil)
-	for i := 0; i < l; i++ {
-		p.valEnc(buf, s.Index(i))
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeVarint(uint64(len(buf.buf)))
-	o.buf = append(o.buf, buf.buf...)
-	return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
-	s := structPointer_Word64Slice(base, p.field)
-	l := s.Len()
-	if l == 0 {
-		return 0
-	}
-	var bufSize int
-	for i := 0; i < l; i++ {
-		bufSize += p.valSize(s.Index(i))
-	}
-
-	n += len(p.tagcode)
-	n += sizeVarint(uint64(bufSize))
-	n += bufSize
-	return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
-	ss := *structPointer_BytesSlice(base, p.field)
-	l := len(ss)
-	if l == 0 {
-		return ErrNil
-	}
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(ss[i])
-	}
-	return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
-	ss := *structPointer_BytesSlice(base, p.field)
-	l := len(ss)
-	if l == 0 {
-		return 0
-	}
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		n += sizeRawBytes(ss[i])
-	}
-	return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
-	ss := *structPointer_StringSlice(base, p.field)
-	l := len(ss)
-	for i := 0; i < l; i++ {
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeStringBytes(ss[i])
-	}
-	return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
-	ss := *structPointer_StringSlice(base, p.field)
-	l := len(ss)
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		n += sizeStringBytes(ss[i])
-	}
-	return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	for i := 0; i < l; i++ {
-		structp := s.Index(i)
-		if structPointer_IsNil(structp) {
-			return errRepeatedHasNil
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, err := m.Marshal()
-			if err != nil && !state.shouldContinue(err, nil) {
-				return err
-			}
-			o.buf = append(o.buf, p.tagcode...)
-			o.EncodeRawBytes(data)
-			continue
-		}
-
-		o.buf = append(o.buf, p.tagcode...)
-		err := o.enc_len_struct(p.sprop, structp, &state)
-		if err != nil && !state.shouldContinue(err, nil) {
-			if err == ErrNil {
-				return errRepeatedHasNil
-			}
-			return err
-		}
-	}
-	return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		structp := s.Index(i)
-		if structPointer_IsNil(structp) {
-			return // return the size up to this point
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, _ := m.Marshal()
-			n += sizeRawBytes(data)
-			continue
-		}
-
-		n0 := size_struct(p.sprop, structp)
-		n1 := sizeVarint(uint64(n0)) // size of encoded length
-		n += n0 + n1
-	}
-	return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
-	var state errorState
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	for i := 0; i < l; i++ {
-		b := s.Index(i)
-		if structPointer_IsNil(b) {
-			return errRepeatedHasNil
-		}
-
-		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
-		err := o.enc_struct(p.sprop, b)
-
-		if err != nil && !state.shouldContinue(err, nil) {
-			if err == ErrNil {
-				return errRepeatedHasNil
-			}
-			return err
-		}
-
-		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
-	}
-	return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
-	s := structPointer_StructPointerSlice(base, p.field)
-	l := s.Len()
-
-	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
-	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
-	for i := 0; i < l; i++ {
-		b := s.Index(i)
-		if structPointer_IsNil(b) {
-			return // return size up to this point
-		}
-
-		n += size_struct(p.sprop, b)
-	}
-	return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
-	exts := structPointer_ExtMap(base, p.field)
-	if err := encodeExtensionsMap(*exts); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
-	exts := structPointer_Extensions(base, p.field)
-
-	v, mu := exts.extensionsRead()
-	if v == nil {
-		return nil
-	}
-
-	mu.Lock()
-	defer mu.Unlock()
-	if err := encodeExtensionsMap(v); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
-	// Fast-path for common cases: zero or one extensions.
-	if len(v) <= 1 {
-		for _, e := range v {
-			o.buf = append(o.buf, e.enc...)
-		}
-		return nil
-	}
-
-	// Sort keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(v))
-	for k := range v {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, k := range keys {
-		o.buf = append(o.buf, v[int32(k)].enc...)
-	}
-	return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
-	v := structPointer_ExtMap(base, p.field)
-	return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
-	v := structPointer_Extensions(base, p.field)
-	return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
-	var state errorState // XXX: or do we need to plumb this through?
-
-	/*
-		A map defined as
-			map<key_type, value_type> map_field = N;
-		is encoded in the same way as
-			message MapFieldEntry {
-				key_type key = 1;
-				value_type value = 2;
-			}
-			repeated MapFieldEntry map_field = N;
-	*/
-
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-	if v.Len() == 0 {
-		return nil
-	}
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	enc := func() error {
-		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
-			return err
-		}
-		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
-			return err
-		}
-		return nil
-	}
-
-	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		o.buf = append(o.buf, p.tagcode...)
-		if err := o.enc_len_thing(enc, &state); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	n := 0
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		// Tag codes for key and val are the responsibility of the sub-sizer.
-		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
-		valsize := p.mvalprop.size(p.mvalprop, valbase)
-		entry := keysize + valsize
-		// Add on tag code and length of map entry itself.
-		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
-	}
-	return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
-	// Prepare addressable doubly-indirect placeholders for the key and value types.
-	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
-	keycopy = reflect.New(mapType.Key()).Elem()                 // addressable K
-	keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
-	keyptr.Set(keycopy.Addr())                                  //
-	keybase = toStructPointer(keyptr.Addr())                    // **K
-
-	// Value types are more varied and require special handling.
-	switch mapType.Elem().Kind() {
-	case reflect.Slice:
-		// []byte
-		var dummy []byte
-		valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
-		valbase = toStructPointer(valcopy.Addr())
-	case reflect.Ptr:
-		// message; the generated field type is map[K]*Msg (so V is *Msg),
-		// so we only need one level of indirection.
-		valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
-		valbase = toStructPointer(valcopy.Addr())
-	default:
-		// everything else
-		valcopy = reflect.New(mapType.Elem()).Elem()                // addressable V
-		valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
-		valptr.Set(valcopy.Addr())                                  //
-		valbase = toStructPointer(valptr.Addr())                    // **V
-	}
-	return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
-	var state errorState
-	// Encode fields in tag order so that decoders may use optimizations
-	// that depend on the ordering.
-	// https://developers.google.com/protocol-buffers/docs/encoding#order
-	for _, i := range prop.order {
-		p := prop.Prop[i]
-		if p.enc != nil {
-			err := p.enc(o, p, base)
-			if err != nil {
-				if err == ErrNil {
-					if p.Required && state.err == nil {
-						state.err = &RequiredNotSetError{p.Name}
-					}
-				} else if err == errRepeatedHasNil {
-					// Give more context to nil values in repeated fields.
-					return errors.New("repeated field " + p.OrigName + " has nil element")
-				} else if !state.shouldContinue(err, p) {
-					return err
-				}
-			}
-			if len(o.buf) > maxMarshalSize {
-				return ErrTooLarge
-			}
-		}
-	}
-
-	// Do oneof fields.
-	if prop.oneofMarshaler != nil {
-		m := structPointer_Interface(base, prop.stype).(Message)
-		if err := prop.oneofMarshaler(m, o); err == ErrNil {
-			return errOneofHasNil
-		} else if err != nil {
-			return err
-		}
-	}
-
-	// Add unrecognized fields at the end.
-	if prop.unrecField.IsValid() {
-		v := *structPointer_Bytes(base, prop.unrecField)
-		if len(o.buf)+len(v) > maxMarshalSize {
-			return ErrTooLarge
-		}
-		if len(v) > 0 {
-			o.buf = append(o.buf, v...)
-		}
-	}
-
-	return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
-	for _, i := range prop.order {
-		p := prop.Prop[i]
-		if p.size != nil {
-			n += p.size(p, base)
-		}
-	}
-
-	// Add unrecognized fields at the end.
-	if prop.unrecField.IsValid() {
-		v := *structPointer_Bytes(base, prop.unrecField)
-		n += len(v)
-	}
-
-	// Factor in any oneof fields.
-	if prop.oneofSizer != nil {
-		m := structPointer_Interface(base, prop.stype).(Message)
-		n += prop.oneofSizer(m)
-	}
-
-	return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
-	return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
-	iLen := len(o.buf)
-	o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
-	iMsg := len(o.buf)
-	err := enc()
-	if err != nil && !state.shouldContinue(err, nil) {
-		return err
-	}
-	lMsg := len(o.buf) - iMsg
-	lLen := sizeVarint(uint64(lMsg))
-	switch x := lLen - (iMsg - iLen); {
-	case x > 0: // actual length is x bytes larger than the space we reserved
-		// Move msg x bytes right.
-		o.buf = append(o.buf, zeroes[:x]...)
-		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
-	case x < 0: // actual length is x bytes smaller than the space we reserved
-		// Move msg x bytes left.
-		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
-		o.buf = o.buf[:len(o.buf)+x] // x is negative
-	}
-	// Encode the length in the reserved space.
-	o.buf = o.buf[:iLen]
-	o.EncodeVarint(uint64(lMsg))
-	o.buf = o.buf[:len(o.buf)+lMsg]
-	return state.err
-}
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
-	err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
-	// Ignore unset required fields.
-	reqNotSet, ok := err.(*RequiredNotSetError)
-	if !ok {
-		return false
-	}
-	if s.err == nil {
-		if prop != nil {
-			err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
-		}
-		s.err = err
-	}
-	return true
-}

+ 0 - 317
vendor/github.com/gogo/protobuf/proto/encode_gogo.go

@@ -3,11 +3,6 @@
 // Copyright (c) 2013, The GoGo Authors. All rights reserved.
 // Copyright (c) 2013, The GoGo Authors. All rights reserved.
 // http://github.com/gogo/protobuf
 // http://github.com/gogo/protobuf
 //
 //
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// http://github.com/golang/protobuf/
-//
 // Redistribution and use in source and binary forms, with or without
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // modification, are permitted provided that the following conditions are
 // met:
 // met:
@@ -18,9 +13,6 @@
 // copyright notice, this list of conditions and the following disclaimer
 // copyright notice, this list of conditions and the following disclaimer
 // in the documentation and/or other materials provided with the
 // in the documentation and/or other materials provided with the
 // distribution.
 // distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
 //
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -36,315 +28,6 @@
 
 
 package proto
 package proto
 
 
-import (
-	"reflect"
-)
-
 func NewRequiredNotSetError(field string) *RequiredNotSetError {
 func NewRequiredNotSetError(field string) *RequiredNotSetError {
 	return &RequiredNotSetError{field}
 	return &RequiredNotSetError{field}
 }
 }
-
-type Sizer interface {
-	Size() int
-}
-
-func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil {
-		return ErrNil
-	}
-	o.buf = append(o.buf, s...)
-	return nil
-}
-
-func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
-	s := *structPointer_Bytes(base, p.field)
-	if s == nil {
-		return 0
-	}
-	n += len(s)
-	return
-}
-
-// Encode a reference to bool pointer.
-func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
-	v := *structPointer_BoolVal(base, p.field)
-	x := 0
-	if v {
-		x = 1
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_ref_bool(p *Properties, base structPointer) int {
-	return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode a reference to int32 pointer.
-func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v))
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_ref_int32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := int32(word32Val_Get(v))
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, uint64(x))
-	return nil
-}
-
-func size_ref_uint32(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word32Val(base, p.field)
-	x := word32Val_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(uint64(x))
-	return
-}
-
-// Encode a reference to an int64 pointer.
-func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	o.buf = append(o.buf, p.tagcode...)
-	p.valEnc(o, x)
-	return nil
-}
-
-func size_ref_int64(p *Properties, base structPointer) (n int) {
-	v := structPointer_Word64Val(base, p.field)
-	x := word64Val_Get(v)
-	n += len(p.tagcode)
-	n += p.valSize(x)
-	return
-}
-
-// Encode a reference to a string pointer.
-func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
-	v := *structPointer_StringVal(base, p.field)
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeStringBytes(v)
-	return nil
-}
-
-func size_ref_string(p *Properties, base structPointer) (n int) {
-	v := *structPointer_StringVal(base, p.field)
-	n += len(p.tagcode)
-	n += sizeStringBytes(v)
-	return
-}
-
-// Encode a reference to a message struct.
-func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	structp := structPointer_GetRefStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return ErrNil
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, err := m.Marshal()
-		if err != nil && !state.shouldContinue(err, nil) {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-		return nil
-	}
-
-	o.buf = append(o.buf, p.tagcode...)
-	return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-//TODO this is only copied, please fix this
-func size_ref_struct_message(p *Properties, base structPointer) int {
-	structp := structPointer_GetRefStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return 0
-	}
-
-	// Can the object marshal itself?
-	if p.isMarshaler {
-		m := structPointer_Interface(structp, p.stype).(Marshaler)
-		data, _ := m.Marshal()
-		n0 := len(p.tagcode)
-		n1 := sizeRawBytes(data)
-		return n0 + n1
-	}
-
-	n0 := len(p.tagcode)
-	n1 := size_struct(p.sprop, structp)
-	n2 := sizeVarint(uint64(n1)) // size of encoded length
-	return n0 + n1 + n2
-}
-
-// Encode a slice of references to message struct pointers ([]struct).
-func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
-	var state errorState
-	ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
-	l := ss.Len()
-	for i := 0; i < l; i++ {
-		structp := ss.Index(i)
-		if structPointer_IsNil(structp) {
-			return errRepeatedHasNil
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, err := m.Marshal()
-			if err != nil && !state.shouldContinue(err, nil) {
-				return err
-			}
-			o.buf = append(o.buf, p.tagcode...)
-			o.EncodeRawBytes(data)
-			continue
-		}
-
-		o.buf = append(o.buf, p.tagcode...)
-		err := o.enc_len_struct(p.sprop, structp, &state)
-		if err != nil && !state.shouldContinue(err, nil) {
-			if err == ErrNil {
-				return errRepeatedHasNil
-			}
-			return err
-		}
-
-	}
-	return state.err
-}
-
-//TODO this is only copied, please fix this
-func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
-	ss := structPointer_StructRefSlice(base, p.field, p.stype.Size())
-	l := ss.Len()
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		structp := ss.Index(i)
-		if structPointer_IsNil(structp) {
-			return // return the size up to this point
-		}
-
-		// Can the object marshal itself?
-		if p.isMarshaler {
-			m := structPointer_Interface(structp, p.stype).(Marshaler)
-			data, _ := m.Marshal()
-			n += len(p.tagcode)
-			n += sizeRawBytes(data)
-			continue
-		}
-
-		n0 := size_struct(p.sprop, structp)
-		n1 := sizeVarint(uint64(n0)) // size of encoded length
-		n += n0 + n1
-	}
-	return
-}
-
-func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
-	i := structPointer_InterfaceRef(base, p.field, p.ctype)
-	if i == nil {
-		return ErrNil
-	}
-	custom := i.(Marshaler)
-	data, err := custom.Marshal()
-	if err != nil {
-		return err
-	}
-	if data == nil {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(data)
-	return nil
-}
-
-func size_custom_bytes(p *Properties, base structPointer) (n int) {
-	n += len(p.tagcode)
-	i := structPointer_InterfaceRef(base, p.field, p.ctype)
-	if i == nil {
-		return 0
-	}
-	custom := i.(Marshaler)
-	data, _ := custom.Marshal()
-	n += sizeRawBytes(data)
-	return
-}
-
-func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
-	custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
-	data, err := custom.Marshal()
-	if err != nil {
-		return err
-	}
-	if data == nil {
-		return ErrNil
-	}
-	o.buf = append(o.buf, p.tagcode...)
-	o.EncodeRawBytes(data)
-	return nil
-}
-
-func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
-	n += len(p.tagcode)
-	i := structPointer_InterfaceAt(base, p.field, p.ctype)
-	if i == nil {
-		return 0
-	}
-	custom := i.(Marshaler)
-	data, _ := custom.Marshal()
-	n += sizeRawBytes(data)
-	return
-}
-
-func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
-	inter := structPointer_InterfaceRef(base, p.field, p.ctype)
-	if inter == nil {
-		return ErrNil
-	}
-	slice := reflect.ValueOf(inter)
-	l := slice.Len()
-	for i := 0; i < l; i++ {
-		v := slice.Index(i)
-		custom := v.Interface().(Marshaler)
-		data, err := custom.Marshal()
-		if err != nil {
-			return err
-		}
-		o.buf = append(o.buf, p.tagcode...)
-		o.EncodeRawBytes(data)
-	}
-	return nil
-}
-
-func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
-	inter := structPointer_InterfaceRef(base, p.field, p.ctype)
-	if inter == nil {
-		return 0
-	}
-	slice := reflect.ValueOf(inter)
-	l := slice.Len()
-	n += l * len(p.tagcode)
-	for i := 0; i < l; i++ {
-		v := slice.Index(i)
-		custom := v.Interface().(Marshaler)
-		data, _ := custom.Marshal()
-		n += sizeRawBytes(data)
-	}
-	return
-}

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác