Pārlūkot izejas kodu

bump containerd v1.3.0

full diff: https://github.com/containerd/containerd/compare/7c1e88399ec0b0b077121d9d5ad97e647b11c870...v1.3.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
Derek McGowan 5 gadi atpakaļ
vecāks
revīzija
12f9887c8e
100 mainīti faili ar 5885 papildinājumiem un 1385 dzēšanām
  1. 1 1
      vendor.conf
  2. 1 1
      vendor/github.com/containerd/containerd/README.md
  3. 227 39
      vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
  4. 3 0
      vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
  5. 252 32
      vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
  6. 7 0
      vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
  7. 1309 96
      vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
  8. 38 0
      vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto
  9. 1 1
      vendor/github.com/containerd/containerd/archive/compression/compression.go
  10. 150 107
      vendor/github.com/containerd/containerd/archive/tar.go
  11. 37 1
      vendor/github.com/containerd/containerd/archive/tar_opts.go
  12. 59 0
      vendor/github.com/containerd/containerd/archive/tar_opts_linux.go
  13. 1 17
      vendor/github.com/containerd/containerd/archive/tar_opts_windows.go
  14. 67 10
      vendor/github.com/containerd/containerd/archive/tar_unix.go
  15. 15 16
      vendor/github.com/containerd/containerd/archive/tar_windows.go
  16. 1 1
      vendor/github.com/containerd/containerd/archive/time_unix.go
  17. 22 7
      vendor/github.com/containerd/containerd/cio/io.go
  18. 12 10
      vendor/github.com/containerd/containerd/cio/io_unix.go
  19. 124 33
      vendor/github.com/containerd/containerd/client.go
  20. 17 9
      vendor/github.com/containerd/containerd/client_opts.go
  21. 32 9
      vendor/github.com/containerd/containerd/container.go
  22. 61 27
      vendor/github.com/containerd/containerd/container_opts.go
  23. 11 7
      vendor/github.com/containerd/containerd/container_opts_unix.go
  24. 1 2
      vendor/github.com/containerd/containerd/container_restore_opts.go
  25. 1 1
      vendor/github.com/containerd/containerd/containers/containers.go
  26. 30 1
      vendor/github.com/containerd/containerd/content/helpers.go
  27. 14 2
      vendor/github.com/containerd/containerd/content/local/store.go
  28. 1 1
      vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go
  29. 0 2
      vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go
  30. 5 3
      vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go
  31. 9 2
      vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go
  32. 2 2
      vendor/github.com/containerd/containerd/defaults/defaults.go
  33. 2 0
      vendor/github.com/containerd/containerd/defaults/defaults_unix.go
  34. 2 0
      vendor/github.com/containerd/containerd/defaults/defaults_windows.go
  35. 10 3
      vendor/github.com/containerd/containerd/diff.go
  36. 19 1
      vendor/github.com/containerd/containerd/diff/diff.go
  37. 187 0
      vendor/github.com/containerd/containerd/diff/stream.go
  38. 146 0
      vendor/github.com/containerd/containerd/diff/stream_unix.go
  39. 165 0
      vendor/github.com/containerd/containerd/diff/stream_windows.go
  40. 16 1
      vendor/github.com/containerd/containerd/errdefs/errors.go
  41. 9 0
      vendor/github.com/containerd/containerd/errdefs/grpc.go
  42. 1 1
      vendor/github.com/containerd/containerd/events/exchange/exchange.go
  43. 6 20
      vendor/github.com/containerd/containerd/export.go
  44. 7 0
      vendor/github.com/containerd/containerd/gc/gc.go
  45. 174 9
      vendor/github.com/containerd/containerd/image.go
  46. 6 7
      vendor/github.com/containerd/containerd/images/annotations.go
  47. 468 0
      vendor/github.com/containerd/containerd/images/archive/exporter.go
  48. 133 21
      vendor/github.com/containerd/containerd/images/archive/importer.go
  49. 28 2
      vendor/github.com/containerd/containerd/images/archive/reference.go
  50. 4 3
      vendor/github.com/containerd/containerd/images/handlers.go
  51. 19 41
      vendor/github.com/containerd/containerd/images/image.go
  52. 84 0
      vendor/github.com/containerd/containerd/images/mediatypes.go
  53. 0 241
      vendor/github.com/containerd/containerd/images/oci/exporter.go
  54. 38 13
      vendor/github.com/containerd/containerd/import.go
  55. 1 2
      vendor/github.com/containerd/containerd/install.go
  56. 10 0
      vendor/github.com/containerd/containerd/leases/lease.go
  57. 40 0
      vendor/github.com/containerd/containerd/leases/proxy/manager.go
  58. 1 1
      vendor/github.com/containerd/containerd/log/context.go
  59. 145 114
      vendor/github.com/containerd/containerd/metadata/containers.go
  60. 5 3
      vendor/github.com/containerd/containerd/metadata/content.go
  61. 18 15
      vendor/github.com/containerd/containerd/metadata/db.go
  62. 33 6
      vendor/github.com/containerd/containerd/metadata/gc.go
  63. 8 10
      vendor/github.com/containerd/containerd/metadata/images.go
  64. 242 56
      vendor/github.com/containerd/containerd/metadata/leases.go
  65. 9 1
      vendor/github.com/containerd/containerd/metadata/namespaces.go
  66. 38 5
      vendor/github.com/containerd/containerd/metadata/snapshot.go
  67. 12 4
      vendor/github.com/containerd/containerd/namespaces.go
  68. 6 8
      vendor/github.com/containerd/containerd/namespaces/context.go
  69. 10 1
      vendor/github.com/containerd/containerd/namespaces/store.go
  70. 51 0
      vendor/github.com/containerd/containerd/namespaces/ttrpc.go
  71. 1 2
      vendor/github.com/containerd/containerd/oci/spec.go
  72. 112 4
      vendor/github.com/containerd/containerd/oci/spec_opts.go
  73. 64 0
      vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
  74. 63 0
      vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
  75. 5 0
      vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
  76. 13 1
      vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
  77. 2 3
      vendor/github.com/containerd/containerd/pkg/process/deleted_state.go
  78. 18 8
      vendor/github.com/containerd/containerd/pkg/process/exec.go
  79. 1 1
      vendor/github.com/containerd/containerd/pkg/process/exec_state.go
  80. 18 22
      vendor/github.com/containerd/containerd/pkg/process/init.go
  81. 11 31
      vendor/github.com/containerd/containerd/pkg/process/init_state.go
  82. 16 12
      vendor/github.com/containerd/containerd/pkg/process/io.go
  83. 3 25
      vendor/github.com/containerd/containerd/pkg/process/process.go
  84. 1 1
      vendor/github.com/containerd/containerd/pkg/process/types.go
  85. 53 4
      vendor/github.com/containerd/containerd/pkg/process/utils.go
  86. 13 8
      vendor/github.com/containerd/containerd/pkg/stdio/platform.go
  87. 11 9
      vendor/github.com/containerd/containerd/pkg/stdio/stdio.go
  88. 37 0
      vendor/github.com/containerd/containerd/platforms/compare.go
  89. 1 1
      vendor/github.com/containerd/containerd/platforms/cpuinfo.go
  90. 3 3
      vendor/github.com/containerd/containerd/platforms/database.go
  91. 1 1
      vendor/github.com/containerd/containerd/platforms/platforms.go
  92. 7 6
      vendor/github.com/containerd/containerd/plugin/context.go
  93. 35 14
      vendor/github.com/containerd/containerd/plugin/plugin.go
  94. 2 2
      vendor/github.com/containerd/containerd/process.go
  95. 46 17
      vendor/github.com/containerd/containerd/pull.go
  96. 232 67
      vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
  97. 79 54
      vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
  98. 42 0
      vendor/github.com/containerd/containerd/remotes/docker/handler.go
  99. 129 60
      vendor/github.com/containerd/containerd/remotes/docker/pusher.go
  100. 202 0
      vendor/github.com/containerd/containerd/remotes/docker/registry.go

+ 1 - 1
vendor.conf

@@ -117,7 +117,7 @@ github.com/googleapis/gax-go                        317e0006254c44a0ac427cc52a0e
 google.golang.org/genproto                          694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd                    7c1e88399ec0b0b077121d9d5ad97e647b11c870
+github.com/containerd/containerd                    36cf5b690dcc00ff0f34ff7799209050c3d0c59a # v1.3.0
 github.com/containerd/fifo                          a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
 github.com/containerd/continuity                    aaeac12a7ffcd198ae25440a9dff125c2e2703a7
 github.com/containerd/cgroups                       4994991857f9b0ae8dc439551e8bebdbb4bf66c1

+ 1 - 1
vendor/github.com/containerd/containerd/README.md

@@ -218,7 +218,7 @@ This will be the best place to discuss design and implementation.
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 
 **Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
-[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)
+[Click here for an invite to docker community slack.](https://dockr.ly/slack)
 
 ### Security audit
 

+ 227 - 39
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go

@@ -9,6 +9,7 @@ import (
 	types "github.com/containerd/containerd/api/types"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types1 "github.com/gogo/protobuf/types"
 	grpc "google.golang.org/grpc"
 	io "io"
 	math "math"
@@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type ApplyRequest struct {
 	// Diff is the descriptor of the diff to be extracted
-	Diff                 *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
-	Mounts               []*types.Mount    `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
+	Diff                 *types.Descriptor      `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
+	Mounts               []*types.Mount         `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	Payloads             map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
 }
 
 func (m *ApplyRequest) Reset()      { *m = ApplyRequest{} }
@@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
+	proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry")
 	proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
 	proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
 	proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
@@ -216,36 +219,40 @@ func init() {
 }
 
 var fileDescriptor_3b36a99e6faaa935 = []byte{
-	// 457 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
-	0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
-	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
-	0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
-	0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
-	0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
-	0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
-	0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
-	0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
-	0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
-	0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
-	0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
-	0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
-	0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
-	0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
-	0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
-	0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
-	0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
-	0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
-	0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
-	0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
-	0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
-	0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
-	0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
-	0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
-	0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
-	0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
-	0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
-	0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
+	// 526 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c,
+	0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29,
+	0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20,
+	0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e,
+	0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9,
+	0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67,
+	0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d,
+	0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae,
+	0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca,
+	0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a,
+	0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc,
+	0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2,
+	0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63,
+	0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0,
+	0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf,
+	0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35,
+	0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73,
+	0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e,
+	0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47,
+	0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a,
+	0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13,
+	0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29,
+	0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41,
+	0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95,
+	0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec,
+	0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce,
+	0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54,
+	0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f,
+	0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef,
+	0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7,
+	0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7,
+	0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7,
+	0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
 			i += n
 		}
 	}
+	if len(m.Payloads) > 0 {
+		for k, _ := range m.Payloads {
+			dAtA[i] = 0x1a
+			i++
+			v := m.Payloads[k]
+			msgSize := 0
+			if v != nil {
+				msgSize = v.Size()
+				msgSize += 1 + sovDiff(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize
+			i = encodeVarintDiff(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			if v != nil {
+				dAtA[i] = 0x12
+				i++
+				i = encodeVarintDiff(dAtA, i, uint64(v.Size()))
+				n2, err := v.MarshalTo(dAtA[i:])
+				if err != nil {
+					return 0, err
+				}
+				i += n2
+			}
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
@@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size()))
-		n2, err := m.Applied.MarshalTo(dAtA[i:])
+		n3, err := m.Applied.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n2
+		i += n3
 	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
@@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x1a
 		i++
 		i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
-		n3, err := m.Diff.MarshalTo(dAtA[i:])
+		n4, err := m.Diff.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n3
+		i += n4
 	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
@@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) {
 			n += 1 + l + sovDiff(uint64(l))
 		}
 	}
+	if len(m.Payloads) > 0 {
+		for k, v := range m.Payloads {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = v.Size()
+				l += 1 + sovDiff(uint64(l))
+			}
+			mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l
+			n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string {
 	if this == nil {
 		return "nil"
 	}
+	keysForPayloads := make([]string, 0, len(this.Payloads))
+	for k, _ := range this.Payloads {
+		keysForPayloads = append(keysForPayloads, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads)
+	mapStringForPayloads := "map[string]*types1.Any{"
+	for _, k := range keysForPayloads {
+		mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k])
+	}
+	mapStringForPayloads += "}"
 	s := strings.Join([]string{`&ApplyRequest{`,
 		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
 		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`Payloads:` + mapStringForPayloads + `,`,
 		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
@@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Payloads == nil {
+				m.Payloads = make(map[string]*types1.Any)
+			}
+			var mapkey string
+			var mapvalue *types1.Any
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDiff
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &types1.Any{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipDiff(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Payloads[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipDiff(dAtA[iNdEx:])

+ 3 - 0
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto

@@ -3,6 +3,7 @@ syntax = "proto3";
 package containerd.services.diff.v1;
 
 import weak "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 import "github.com/containerd/containerd/api/types/descriptor.proto";
 
@@ -25,6 +26,8 @@ message ApplyRequest {
 	containerd.types.Descriptor diff = 1;
 
 	repeated containerd.types.Mount mounts = 2;
+
+	map<string, google.protobuf.Any> payloads = 3;
 }
 
 message ApplyResponse {

+ 252 - 32
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go

@@ -10,6 +10,7 @@ import (
 	rpc "github.com/gogo/googleapis/google/rpc"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types1 "github.com/gogo/protobuf/types"
 	grpc "google.golang.org/grpc"
 	io "io"
 	math "math"
@@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
 
+type ServerResponse struct {
+	UUID                 string   `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ServerResponse) Reset()      { *m = ServerResponse{} }
+func (*ServerResponse) ProtoMessage() {}
+func (*ServerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1a14fda866f10715, []int{3}
+}
+func (m *ServerResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ServerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerResponse.Merge(m, src)
+}
+func (m *ServerResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerResponse proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
 	proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
 	proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
 	proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
+	proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse")
 }
 
 func init() {
@@ -203,38 +244,42 @@ func init() {
 }
 
 var fileDescriptor_1a14fda866f10715 = []byte{
-	// 487 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
-	0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
-	0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
-	0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
-	0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
-	0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
-	0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
-	0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
-	0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
-	0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
-	0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
-	0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
-	0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
-	0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
-	0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
-	0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
-	0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
-	0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
-	0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
-	0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
-	0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
-	0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
-	0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
-	0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
-	0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
-	0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
-	0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
-	0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
-	0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
-	0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
-	0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
+	// 549 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
+	0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10,
+	0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08,
+	0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47,
+	0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79,
+	0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e,
+	0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62,
+	0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71,
+	0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b,
+	0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34,
+	0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3,
+	0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d,
+	0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4,
+	0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65,
+	0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d,
+	0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad,
+	0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5,
+	0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1,
+	0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07,
+	0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe,
+	0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d,
+	0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6,
+	0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88,
+	0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c,
+	0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a,
+	0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a,
+	0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c,
+	0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd,
+	0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac,
+	0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0,
+	0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3,
+	0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4,
+	0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d,
+	0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50,
+	0xdc, 0x89, 0x04, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -254,6 +299,8 @@ type IntrospectionClient interface {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error)
+	// Server returns information about the containerd server
+	Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error)
 }
 
 type introspectionClient struct {
@@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o
 	return out, nil
 }
 
+func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) {
+	out := new(ServerResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // IntrospectionServer is the server API for Introspection service.
 type IntrospectionServer interface {
 	// Plugins returns a list of plugins in containerd.
@@ -280,6 +336,8 @@ type IntrospectionServer interface {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
+	// Server returns information about the containerd server
+	Server(context.Context, *types1.Empty) (*ServerResponse, error)
 }
 
 func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) {
@@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu
 	return interceptor(ctx, in, info, handler)
 }
 
+func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(types1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IntrospectionServer).Server(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.introspection.v1.Introspection/Server",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _Introspection_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "containerd.services.introspection.v1.Introspection",
 	HandlerType: (*IntrospectionServer)(nil),
@@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{
 			MethodName: "Plugins",
 			Handler:    _Introspection_Plugins_Handler,
 		},
+		{
+			MethodName: "Server",
+			Handler:    _Introspection_Server_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
@@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *ServerResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.UUID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID)))
+		i += copy(dAtA[i:], m.UUID)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
 func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) {
 	return n
 }
 
+func (m *ServerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UUID)
+	if l > 0 {
+		n += 1 + l + sovIntrospection(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
 func sovIntrospection(x uint64) (n int) {
 	for {
 		n++
@@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string {
 	}, "")
 	return s
 }
+func (this *ServerResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServerResponse{`,
+		`UUID:` + fmt.Sprintf("%v", this.UUID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringIntrospection(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *ServerResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UUID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipIntrospection(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipIntrospection(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0

+ 7 - 0
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto

@@ -4,6 +4,7 @@ package containerd.services.introspection.v1;
 
 import "github.com/containerd/containerd/api/types/platform.proto";
 import "google/rpc/status.proto";
+import "google/protobuf/empty.proto";
 import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
@@ -14,6 +15,8 @@ service Introspection {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	rpc Plugins(PluginsRequest) returns (PluginsResponse);
+	// Server returns information about the containerd server
+	rpc Server(google.protobuf.Empty) returns (ServerResponse);
 }
 
 message Plugin {
@@ -79,3 +82,7 @@ message PluginsRequest {
 message PluginsResponse {
 	repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
 }
+
+message ServerResponse {
+	string uuid = 1 [(gogoproto.customname) = "UUID"];
+}

Failā izmaiņas netiks attēlotas, jo tās ir par lielu
+ 1309 - 96
vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go


+ 38 - 0
vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto

@@ -22,6 +22,15 @@ service Leases {
 	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	rpc List(ListRequest) returns (ListResponse);
+
+	// AddResource references the resource by the provided lease.
+	rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);
+
+	// DeleteResource dereferences the resource by the provided lease.
+	rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);
+
+	// ListResources lists all the resources referenced by the lease.
+	rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);
 }
 
 // Lease is an object which retains resources while it exists.
@@ -62,3 +71,32 @@ message ListRequest {
 message ListResponse {
 	repeated Lease leases = 1;
 }
+
+message Resource {
+	string id = 1;
+
+	// For snapshotter resource, there are many snapshotter types here, like
+	// overlayfs, devmapper etc. The type will be formatted with type,
+	// like "snapshotter/overlayfs".
+	string type = 2;
+}
+
+message AddResourceRequest {
+	string id = 1;
+
+	Resource resource = 2 [(gogoproto.nullable) = false];
+}
+
+message DeleteResourceRequest {
+	string id = 1;
+
+	Resource resource = 2 [(gogoproto.nullable) = false];
+}
+
+message ListResourcesRequest {
+	string id = 1;
+}
+
+message ListResourcesResponse {
+	repeated Resource resources = 1	[(gogoproto.nullable) = false];
+}

+ 1 - 1
vendor/github.com/containerd/containerd/archive/compression/compression.go

@@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
 	}
 }
 
-// CompressStream compresseses the dest with specified compression algorithm.
+// CompressStream compresses the dest with specified compression algorithm.
 func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
 	switch compression {
 	case Uncompressed:

+ 150 - 107
vendor/github.com/containerd/containerd/archive/tar.go

@@ -19,9 +19,7 @@ package archive
 import (
 	"archive/tar"
 	"context"
-	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -91,11 +89,6 @@ const (
 	// archives.
 	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
 
-	// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
-	// layers. Normally these should not go into exported archives and all changed
-	// hardlinks should be copied to the top layer.
-	whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
-
 	// whiteoutOpaqueDir file means directory has been made opaque - meaning
 	// readdir calls to this directory do not follow to lower layers.
 	whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
@@ -117,11 +110,15 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int
 	if options.Filter == nil {
 		options.Filter = all
 	}
+	if options.applyFunc == nil {
+		options.applyFunc = applyNaive
+	}
 
-	return apply(ctx, root, tar.NewReader(r), options)
+	return options.applyFunc(ctx, root, tar.NewReader(r), options)
 }
 
-// applyNaive applies a tar stream of an OCI style diff tar.
+// applyNaive applies a tar stream of an OCI style diff tar to a directory
+// applying each file as either a whole file or whiteout.
 // See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
 func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
 	var (
@@ -131,11 +128,49 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
 		// may occur out of order
 		unpackedPaths = make(map[string]struct{})
 
-		// Used for aufs plink directory
-		aufsTempdir   = ""
-		aufsHardlinks = make(map[string]*tar.Header)
+		convertWhiteout = options.ConvertWhiteout
 	)
 
+	if convertWhiteout == nil {
+		// handle whiteouts by removing the target files
+		convertWhiteout = func(hdr *tar.Header, path string) (bool, error) {
+			base := filepath.Base(path)
+			dir := filepath.Dir(path)
+			if base == whiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return false, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				return false, err
+			}
+
+			if strings.HasPrefix(base, whiteoutPrefix) {
+				originalBase := base[len(whiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+
+				return false, os.RemoveAll(originalPath)
+			}
+
+			return true, nil
+		}
+	}
+
 	// Iterate through the files in the archive.
 	for {
 		select {
@@ -193,85 +228,21 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
 			if base == "" {
 				parentPath = filepath.Dir(path)
 			}
-			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = mkdirAll(parentPath, 0755)
-				if err != nil {
-					return 0, err
-				}
+			if err := mkparent(ctx, parentPath, root, options.Parents); err != nil {
+				return 0, err
 			}
 		}
 
-		// Skip AUFS metadata dirs
-		if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
-			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
-			// We don't want this directory, but we need the files in them so that
-			// such hardlinks can be resolved.
-			if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
-				basename := filepath.Base(hdr.Name)
-				aufsHardlinks[basename] = hdr
-				if aufsTempdir == "" {
-					if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
-						return 0, err
-					}
-					defer os.RemoveAll(aufsTempdir)
-				}
-				p, err := fs.RootPath(aufsTempdir, basename)
-				if err != nil {
-					return 0, err
-				}
-				if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
-					return 0, err
-				}
-			}
-
-			if hdr.Name != whiteoutOpaqueDir {
-				continue
-			}
+		// Naive whiteout convert function which handles whiteout files by
+		// removing the target files.
+		if err := validateWhiteout(path); err != nil {
+			return 0, err
 		}
-
-		if strings.HasPrefix(base, whiteoutPrefix) {
-			dir := filepath.Dir(path)
-			if base == whiteoutOpaqueDir {
-				_, err := os.Lstat(dir)
-				if err != nil {
-					return 0, err
-				}
-				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-					if err != nil {
-						if os.IsNotExist(err) {
-							err = nil // parent was deleted
-						}
-						return err
-					}
-					if path == dir {
-						return nil
-					}
-					if _, exists := unpackedPaths[path]; !exists {
-						err := os.RemoveAll(path)
-						return err
-					}
-					return nil
-				})
-				if err != nil {
-					return 0, err
-				}
-				continue
-			}
-
-			originalBase := base[len(whiteoutPrefix):]
-			originalPath := filepath.Join(dir, originalBase)
-
-			// Ensure originalPath is under dir
-			if dir[len(dir)-1] != filepath.Separator {
-				dir += string(filepath.Separator)
-			}
-			if !strings.HasPrefix(originalPath, dir) {
-				return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
-			}
-
-			if err := os.RemoveAll(originalPath); err != nil {
-				return 0, err
-			}
+		writeFile, err := convertWhiteout(hdr, path)
+		if err != nil {
+			return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
+		}
+		if !writeFile {
 			continue
 		}
 		// If path exits we almost always just want to remove and replace it.
@@ -289,26 +260,6 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
 		srcData := io.Reader(tr)
 		srcHdr := hdr
 
-		// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
-		// we manually retarget these into the temporary files we extracted them into
-		if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
-			linkBasename := filepath.Base(hdr.Linkname)
-			srcHdr = aufsHardlinks[linkBasename]
-			if srcHdr == nil {
-				return 0, fmt.Errorf("invalid aufs hardlink")
-			}
-			p, err := fs.RootPath(aufsTempdir, linkBasename)
-			if err != nil {
-				return 0, err
-			}
-			tmpFile, err := os.Open(p)
-			if err != nil {
-				return 0, err
-			}
-			defer tmpFile.Close()
-			srcData = tmpFile
-		}
-
 		if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
 			return 0, err
 		}
@@ -428,6 +379,66 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
 	return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
 }
 
+func mkparent(ctx context.Context, path, root string, parents []string) error {
+	if dir, err := os.Lstat(path); err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkparent",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	i := len(path)
+	for i > len(root) && !os.IsPathSeparator(path[i-1]) {
+		i--
+	}
+
+	if i > len(root)+1 {
+		if err := mkparent(ctx, path[:i-1], root, parents); err != nil {
+			return err
+		}
+	}
+
+	if err := mkdir(path, 0755); err != nil {
+		// Check that still doesn't exist
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+
+	for _, p := range parents {
+		ppath, err := fs.RootPath(p, path[len(root):])
+		if err != nil {
+			return err
+		}
+
+		dir, err := os.Lstat(ppath)
+		if err == nil {
+			if !dir.IsDir() {
+				// Replaced, do not copy attributes
+				break
+			}
+			if err := copyDirInfo(dir, path); err != nil {
+				return err
+			}
+			return copyUpXAttrs(path, ppath)
+		} else if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path)
+
+	return nil
+}
+
 type changeWriter struct {
 	tw        *tar.Writer
 	source    string
@@ -493,6 +504,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 
 		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
 
+		// truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead
+		hdr.Format = tar.FormatPAX
+		hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+		hdr.AccessTime = time.Time{}
+		hdr.ChangeTime = time.Time{}
+
 		name := p
 		if strings.HasPrefix(name, string(filepath.Separator)) {
 			name, err = filepath.Rel(string(filepath.Separator), name)
@@ -598,6 +615,9 @@ func (cw *changeWriter) Close() error {
 }
 
 func (cw *changeWriter) includeParents(hdr *tar.Header) error {
+	if cw.addedDirs == nil {
+		return nil
+	}
 	name := strings.TrimRight(hdr.Name, "/")
 	fname := filepath.Join(cw.source, name)
 	parent := filepath.Dir(name)
@@ -684,3 +704,26 @@ func hardlinkRootPath(root, linkname string) (string, error) {
 	}
 	return targetPath, nil
 }
+
+func validateWhiteout(path string) error {
+	base := filepath.Base(path)
+	dir := filepath.Dir(path)
+
+	if base == whiteoutOpaqueDir {
+		return nil
+	}
+
+	if strings.HasPrefix(base, whiteoutPrefix) {
+		originalBase := base[len(whiteoutPrefix):]
+		originalPath := filepath.Join(dir, originalBase)
+
+		// Ensure originalPath is under dir
+		if dir[len(dir)-1] != filepath.Separator {
+			dir += string(filepath.Separator)
+		}
+		if !strings.HasPrefix(originalPath, dir) {
+			return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
+		}
+	}
+	return nil
+}

+ 37 - 1
vendor/github.com/containerd/containerd/archive/tar_opts.go

@@ -16,7 +16,19 @@
 
 package archive
 
-import "archive/tar"
+import (
+	"archive/tar"
+	"context"
+)
+
+// ApplyOptions provides additional options for an Apply operation
+type ApplyOptions struct {
+	Filter          Filter          // Filter tar headers
+	ConvertWhiteout ConvertWhiteout // Convert whiteout files
+	Parents         []string        // Parent directories to handle inherited attributes without CoW
+
+	applyFunc func(context.Context, string, *tar.Reader, ApplyOptions) (int64, error)
+}
 
 // ApplyOpt allows setting mutable archive apply properties on creation
 type ApplyOpt func(options *ApplyOptions) error
@@ -24,6 +36,9 @@ type ApplyOpt func(options *ApplyOptions) error
 // Filter specific files from the archive
 type Filter func(*tar.Header) (bool, error)
 
+// ConvertWhiteout converts whiteout files from the archive
+type ConvertWhiteout func(*tar.Header, string) (bool, error)
+
 // all allows all files
 func all(_ *tar.Header) (bool, error) {
 	return true, nil
@@ -36,3 +51,24 @@ func WithFilter(f Filter) ApplyOpt {
 		return nil
 	}
 }
+
+// WithConvertWhiteout uses the convert function to convert the whiteout files.
+func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt {
+	return func(options *ApplyOptions) error {
+		options.ConvertWhiteout = c
+		return nil
+	}
+}
+
+// WithParents provides parent directories for resolving inherited attributes
+// directory from the filesystem.
+// Inherited attributes are searched from first to last, making the first
+// element in the list the most immediate parent directory.
+// NOTE: When applying to a filesystem which supports CoW, file attributes
+// should be inherited by the filesystem.
+func WithParents(p []string) ApplyOpt {
+	return func(options *ApplyOptions) error {
+		options.Parents = p
+		return nil
+	}
+}

+ 59 - 0
vendor/github.com/containerd/containerd/archive/tar_opts_linux.go

@@ -0,0 +1,59 @@
+// +build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"archive/tar"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// AufsConvertWhiteout converts whiteout files for aufs.
+func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) {
+	return true, nil
+}
+
+// OverlayConvertWhiteout converts whiteout files for overlay.
+func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) {
+	base := filepath.Base(path)
+	dir := filepath.Dir(path)
+
+	// if a directory is marked as opaque, we need to translate that to overlay
+	if base == whiteoutOpaqueDir {
+		// don't write the file itself
+		return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+	}
+
+	// if a file was deleted and we are using overlay, we need to create a character device
+	if strings.HasPrefix(base, whiteoutPrefix) {
+		originalBase := base[len(whiteoutPrefix):]
+		originalPath := filepath.Join(dir, originalBase)
+
+		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+			return false, err
+		}
+		// don't write the file itself
+		return false, os.Chown(originalPath, hdr.Uid, hdr.Gid)
+	}
+
+	return true, nil
+}

+ 1 - 17
vendor/github.com/containerd/containerd/archive/tar_opts_windows.go

@@ -18,28 +18,12 @@
 
 package archive
 
-// ApplyOptions provides additional options for an Apply operation
-type ApplyOptions struct {
-	ParentLayerPaths        []string // Parent layer paths used for Windows layer apply
-	IsWindowsContainerLayer bool     // True if the tar stream to be applied is a Windows Container Layer
-	Filter                  Filter   // Filter tar headers
-}
-
-// WithParentLayers adds parent layers to the apply process this is required
-// for all Windows layers except the base layer.
-func WithParentLayers(parentPaths []string) ApplyOpt {
-	return func(options *ApplyOptions) error {
-		options.ParentLayerPaths = parentPaths
-		return nil
-	}
-}
-
 // AsWindowsContainerLayer indicates that the tar stream to apply is that of
 // a Windows Container Layer. The caller must be holding SeBackupPrivilege and
 // SeRestorePrivilege.
 func AsWindowsContainerLayer() ApplyOpt {
 	return func(options *ApplyOptions) error {
-		options.IsWindowsContainerLayer = true
+		options.applyFunc = applyWindowsLayer
 		return nil
 	}
 }

+ 67 - 10
vendor/github.com/containerd/containerd/archive/tar_unix.go

@@ -20,11 +20,12 @@ package archive
 
 import (
 	"archive/tar"
-	"context"
 	"os"
+	"strings"
 	"sync"
 	"syscall"
 
+	"github.com/containerd/continuity/fs"
 	"github.com/containerd/continuity/sysx"
 	"github.com/opencontainers/runc/libcontainer/system"
 	"github.com/pkg/errors"
@@ -74,10 +75,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
 	return f, err
 }
 
-func mkdirAll(path string, perm os.FileMode) error {
-	return os.MkdirAll(path, perm)
-}
-
 func mkdir(path string, perm os.FileMode) error {
 	if err := os.Mkdir(path, perm); err != nil {
 		return err
@@ -149,11 +146,71 @@ func getxattr(path, attr string) ([]byte, error) {
 }
 
 func setxattr(path, key, value string) error {
-	return sysx.LSetxattr(path, key, []byte(value), 0)
+	// Do not set trusted attributes
+	if strings.HasPrefix(key, "trusted.") {
+		return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
+	}
+	return unix.Lsetxattr(path, key, []byte(value), 0)
+}
+
+func copyDirInfo(fi os.FileInfo, path string) error {
+	st := fi.Sys().(*syscall.Stat_t)
+	if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil {
+		if os.IsPermission(err) {
+			// Normally if uid/gid are the same this would be a no-op, but some
+			// filesystems may still return EPERM... for instance NFS does this.
+			// In such a case, this is not an error.
+			if dstStat, err2 := os.Lstat(path); err2 == nil {
+				st2 := dstStat.Sys().(*syscall.Stat_t)
+				if st.Uid == st2.Uid && st.Gid == st2.Gid {
+					err = nil
+				}
+			}
+		}
+		if err != nil {
+			return errors.Wrapf(err, "failed to chown %s", path)
+		}
+	}
+
+	if err := os.Chmod(path, fi.Mode()); err != nil {
+		return errors.Wrapf(err, "failed to chmod %s", path)
+	}
+
+	timespec := []unix.Timespec{unix.Timespec(fs.StatAtime(st)), unix.Timespec(fs.StatMtime(st))}
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		return errors.Wrapf(err, "failed to utime %s", path)
+	}
+
+	return nil
 }
 
-// apply applies a tar stream of an OCI style diff tar.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
-func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
-	return applyNaive(ctx, root, tr, options)
+func copyUpXAttrs(dst, src string) error {
+	xattrKeys, err := sysx.LListxattr(src)
+	if err != nil {
+		if err == unix.ENOTSUP || err == sysx.ENODATA {
+			return nil
+		}
+		return errors.Wrapf(err, "failed to list xattrs on %s", src)
+	}
+	for _, xattr := range xattrKeys {
+		// Do not copy up trusted attributes
+		if strings.HasPrefix(xattr, "trusted.") {
+			continue
+		}
+		data, err := sysx.LGetxattr(src, xattr)
+		if err != nil {
+			if err == unix.ENOTSUP || err == sysx.ENODATA {
+				continue
+			}
+			return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+		}
+		if err := unix.Lsetxattr(dst, xattr, data, unix.XATTR_CREATE); err != nil {
+			if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST {
+				continue
+			}
+			return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+		}
+	}
+
+	return nil
 }

+ 15 - 16
vendor/github.com/containerd/containerd/archive/tar_windows.go

@@ -23,7 +23,6 @@ import (
 	"bufio"
 	"context"
 	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -36,6 +35,7 @@ import (
 	"github.com/Microsoft/go-winio"
 	"github.com/Microsoft/hcsshim"
 	"github.com/containerd/containerd/sys"
+	"github.com/pkg/errors"
 )
 
 const (
@@ -107,10 +107,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
 	return sys.OpenFileSequential(name, flag, perm)
 }
 
-func mkdirAll(path string, perm os.FileMode) error {
-	return sys.MkdirAll(path, perm)
-}
-
 func mkdir(path string, perm os.FileMode) error {
 	return os.Mkdir(path, perm)
 }
@@ -153,16 +149,8 @@ func setxattr(path, key, value string) error {
 	return errors.New("xattrs not supported on Windows")
 }
 
-// apply applies a tar stream of an OCI style diff tar of a Windows layer.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
-func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
-	if options.IsWindowsContainerLayer {
-		return applyWindowsLayer(ctx, root, tr, options)
-	}
-	return applyNaive(ctx, root, tr, options)
-}
-
-// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer.
+// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows
+// layer using the hcsshim layer writer and backup streams.
 // See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
 func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
 	home, id := filepath.Split(root)
@@ -170,7 +158,7 @@ func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options
 		HomeDir: home,
 	}
 
-	w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths)
+	w, err := hcsshim.NewLayerWriter(info, id, options.Parents)
 	if err != nil {
 		return 0, err
 	}
@@ -443,3 +431,14 @@ func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
 		}
 	}
 }
+
+func copyDirInfo(fi os.FileInfo, path string) error {
+	if err := os.Chmod(path, fi.Mode()); err != nil {
+		return errors.Wrapf(err, "failed to chmod %s", path)
+	}
+	return nil
+}
+
+func copyUpXAttrs(dst, src string) error {
+	return nil
+}

+ 1 - 1
vendor/github.com/containerd/containerd/archive/time_unix.go

@@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
 	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
 
 	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
-		return errors.Wrap(err, "failed call to UtimesNanoAt")
+		return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
 	}
 
 	return nil

+ 22 - 7
vendor/github.com/containerd/containerd/cio/io.go

@@ -18,10 +18,13 @@ package cio
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/url"
 	"os"
+	"path/filepath"
+	"strings"
 	"sync"
 
 	"github.com/containerd/containerd/defaults"
@@ -242,17 +245,24 @@ func LogURI(uri *url.URL) Creator {
 // BinaryIO forwards container STDOUT|STDERR directly to a logging binary
 func BinaryIO(binary string, args map[string]string) Creator {
 	return func(_ string) (IO, error) {
+		binary = filepath.Clean(binary)
+		if !strings.HasPrefix(binary, "/") {
+			return nil, errors.New("absolute path needed")
+		}
 		uri := &url.URL{
 			Scheme: "binary",
-			Host:   binary,
+			Path:   binary,
 		}
+		q := uri.Query()
 		for k, v := range args {
-			uri.Query().Set(k, v)
+			q.Set(k, v)
 		}
+		uri.RawQuery = q.Encode()
+		res := uri.String()
 		return &logURI{
 			config: Config{
-				Stdout: uri.String(),
-				Stderr: uri.String(),
+				Stdout: res,
+				Stderr: res,
 			},
 		}, nil
 	}
@@ -262,14 +272,19 @@ func BinaryIO(binary string, args map[string]string) Creator {
 // If the log file already exists, the logs will be appended to the file.
 func LogFile(path string) Creator {
 	return func(_ string) (IO, error) {
+		path = filepath.Clean(path)
+		if !strings.HasPrefix(path, "/") {
+			return nil, errors.New("absolute path needed")
+		}
 		uri := &url.URL{
 			Scheme: "file",
-			Host:   path,
+			Path:   path,
 		}
+		res := uri.String()
 		return &logURI{
 			config: Config{
-				Stdout: uri.String(),
-				Stderr: uri.String(),
+				Stdout: res,
+				Stderr: res,
 			},
 		}, nil
 	}

+ 12 - 10
vendor/github.com/containerd/containerd/cio/io_unix.go

@@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
 	}
 
 	var wg = &sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		p := bufPool.Get().(*[]byte)
-		defer bufPool.Put(p)
-
-		io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
-		pipes.Stdout.Close()
-		wg.Done()
-	}()
+	if fifos.Stdout != "" {
+		wg.Add(1)
+		go func() {
+			p := bufPool.Get().(*[]byte)
+			defer bufPool.Put(p)
+
+			io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
+			pipes.Stdout.Close()
+			wg.Done()
+		}()
+	}
 
-	if !fifos.Terminal {
+	if !fifos.Terminal && fifos.Stderr != "" {
 		wg.Add(1)
 		go func() {
 			p := bufPool.Get().(*[]byte)

+ 124 - 33
vendor/github.com/containerd/containerd/client.go

@@ -43,6 +43,7 @@ import (
 	"github.com/containerd/containerd/content"
 	contentproxy "github.com/containerd/containerd/content/proxy"
 	"github.com/containerd/containerd/defaults"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
@@ -56,6 +57,7 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	snproxy "github.com/containerd/containerd/snapshots/proxy"
 	"github.com/containerd/typeurl"
+	"github.com/gogo/protobuf/types"
 	ptypes "github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -86,13 +88,23 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 	if copts.timeout == 0 {
 		copts.timeout = 10 * time.Second
 	}
-	rt := fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS)
+
+	c := &Client{
+		defaultns: copts.defaultns,
+	}
+
 	if copts.defaultRuntime != "" {
-		rt = copts.defaultRuntime
+		c.runtime = copts.defaultRuntime
+	} else {
+		c.runtime = defaults.DefaultRuntime
 	}
-	c := &Client{
-		runtime: rt,
+
+	if copts.defaultPlatform != nil {
+		c.platform = copts.defaultPlatform
+	} else {
+		c.platform = platforms.Default()
 	}
+
 	if copts.services != nil {
 		c.services = *copts.services
 	}
@@ -102,7 +114,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 			grpc.WithInsecure(),
 			grpc.FailOnNonTempDialError(true),
 			grpc.WithBackoffMaxDelay(3 * time.Second),
-			grpc.WithDialer(dialer.Dialer),
+			grpc.WithContextDialer(dialer.ContextDialer),
 
 			// TODO(stevvooe): We may need to allow configuration of this on the client.
 			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
@@ -134,19 +146,15 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 		c.conn, c.connector = conn, connector
 	}
 	if copts.services == nil && c.conn == nil {
-		return nil, errors.New("no grpc connection or services is available")
+		return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
 	}
 
 	// check namespace labels for default runtime
-	if copts.defaultRuntime == "" && copts.defaultns != "" {
-		namespaces := c.NamespaceService()
-		ctx := context.Background()
-		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
-			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
-				c.runtime = defaultRuntime
-			}
-		} else {
+	if copts.defaultRuntime == "" && c.defaultns != "" {
+		if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
 			return nil, err
+		} else if label != "" {
+			c.runtime = label
 		}
 	}
 
@@ -163,20 +171,17 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
 		}
 	}
 	c := &Client{
-		conn:    conn,
-		runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
+		defaultns: copts.defaultns,
+		conn:      conn,
+		runtime:   fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
 	}
 
 	// check namespace labels for default runtime
-	if copts.defaultRuntime == "" && copts.defaultns != "" {
-		namespaces := c.NamespaceService()
-		ctx := context.Background()
-		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
-			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
-				c.runtime = defaultRuntime
-			}
-		} else {
+	if copts.defaultRuntime == "" && c.defaultns != "" {
+		if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
 			return nil, err
+		} else if label != "" {
+			c.runtime = label
 		}
 	}
 
@@ -193,13 +198,15 @@ type Client struct {
 	connMu    sync.Mutex
 	conn      *grpc.ClientConn
 	runtime   string
+	defaultns string
+	platform  platforms.MatchComparer
 	connector func() (*grpc.ClientConn, error)
 }
 
 // Reconnect re-establishes the GRPC connection to the containerd daemon
 func (c *Client) Reconnect() error {
 	if c.connector == nil {
-		return errors.New("unable to reconnect to containerd, no connector available")
+		return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
 	}
 	c.connMu.Lock()
 	defer c.connMu.Unlock()
@@ -222,10 +229,10 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return false, errors.New("no grpc connection available")
+		return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
 	}
 	c.connMu.Unlock()
-	r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false))
+	r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
 	if err != nil {
 		return false, err
 	}
@@ -294,10 +301,14 @@ type RemoteContext struct {
 	PlatformMatcher platforms.MatchComparer
 
 	// Unpack is done after an image is pulled to extract into a snapshotter.
+	// It is done simultaneously for schema 2 images when they are pulled.
 	// If an image is not unpacked on pull, it can be unpacked any time
 	// afterwards. Unpacking is required to run an image.
 	Unpack bool
 
+	// UnpackOpts handles options to the unpack call.
+	UnpackOpts []UnpackOpt
+
 	// Snapshotter used for unpacking
 	Snapshotter string
 
@@ -329,9 +340,8 @@ type RemoteContext struct {
 	// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
 	MaxConcurrentDownloads int
 
-	// AppendDistributionSourceLabel allows fetcher to add distribute source
-	// label for each blob content, which doesn't work for legacy schema1.
-	AppendDistributionSourceLabel bool
+	// AllMetadata downloads all manifests and known-configuration files
+	AllMetadata bool
 }
 
 func defaultRemoteContext() *RemoteContext {
@@ -339,7 +349,6 @@ func defaultRemoteContext() *RemoteContext {
 		Resolver: docker.NewResolver(docker.ResolverOptions{
 			Client: http.DefaultClient,
 		}),
-		Snapshotter: DefaultSnapshotter,
 	}
 }
 
@@ -354,7 +363,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
 	}
 
 	if fetchCtx.Unpack {
-		return images.Image{}, errors.New("unpack on fetch not supported, try pull")
+		return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
 	}
 
 	if fetchCtx.PlatformMatcher == nil {
@@ -407,6 +416,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
 		}
 	}
 
+	// Annotate ref with digest so we push only the tag for a single digest
+	if !strings.Contains(ref, "@") {
+		ref = ref + "@" + desc.Digest.String()
+	}
+
 	pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
 	if err != nil {
 		return err
@@ -490,6 +504,27 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s
 	return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels))
 }
 
+// GetLabel gets a label value from the namespace store
+// If there is no default label, an empty string is returned with a nil error
+func (c *Client) GetLabel(ctx context.Context, label string) (string, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		if c.defaultns == "" {
+			return "", err
+		}
+		ns = c.defaultns
+	}
+
+	srv := c.NamespaceService()
+	labels, err := srv.Labels(ctx, ns)
+	if err != nil {
+		return "", err
+	}
+
+	value := labels[label]
+	return value, nil
+}
+
 // Subscribe to events that match one or more of the provided filters.
 //
 // Callers should listen on both the envelope and errs channels. If the errs
@@ -543,6 +578,10 @@ func (c *Client) ContentStore() content.Store {
 
 // SnapshotService returns the underlying snapshotter for the provided snapshotter name
 func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
+	snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName)
+	if err != nil {
+		snapshotterName = DefaultSnapshotter
+	}
 	if c.snapshotters != nil {
 		return c.snapshotters[snapshotterName]
 	}
@@ -642,7 +681,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return Version{}, errors.New("no grpc connection available")
+		return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
 	}
 	c.connMu.Unlock()
 	response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
@@ -655,6 +694,58 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 	}, nil
 }
 
+type ServerInfo struct {
+	UUID string
+}
+
+func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
+	c.connMu.Lock()
+	if c.conn == nil {
+		c.connMu.Unlock()
+		return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
+	}
+	c.connMu.Unlock()
+
+	response, err := c.IntrospectionService().Server(ctx, &types.Empty{})
+	if err != nil {
+		return ServerInfo{}, err
+	}
+	return ServerInfo{
+		UUID: response.UUID,
+	}, nil
+}
+
+func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) {
+	if name == "" {
+		label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)
+		if err != nil {
+			return "", err
+		}
+
+		if label != "" {
+			name = label
+		} else {
+			name = DefaultSnapshotter
+		}
+	}
+
+	return name, nil
+}
+
+func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) {
+	name, err := c.resolveSnapshotterName(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	s := c.SnapshotService(name)
+	if s == nil {
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
+	}
+
+	return s, nil
+}
+
 // CheckRuntime returns true if the current runtime matches the expected
 // runtime. Providing various parts of the runtime schema will match those
 // parts of the expected runtime

+ 17 - 9
vendor/github.com/containerd/containerd/client_opts.go

@@ -26,11 +26,12 @@ import (
 )
 
 type clientOpts struct {
-	defaultns      string
-	defaultRuntime string
-	services       *services
-	dialOptions    []grpc.DialOption
-	timeout        time.Duration
+	defaultns       string
+	defaultRuntime  string
+	defaultPlatform platforms.MatchComparer
+	services        *services
+	dialOptions     []grpc.DialOption
+	timeout         time.Duration
 }
 
 // ClientOpt allows callers to set options on the containerd client
@@ -55,6 +56,14 @@ func WithDefaultRuntime(rt string) ClientOpt {
 	}
 }
 
+// WithDefaultPlatform sets the default platform matcher on the client
+func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt {
+	return func(c *clientOpts) error {
+		c.defaultPlatform = platform
+		return nil
+	}
+}
+
 // WithDialOpts allows grpc.DialOptions to be set on the connection
 func WithDialOpts(opts []grpc.DialOption) ClientOpt {
 	return func(c *clientOpts) error {
@@ -195,11 +204,10 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt {
 	}
 }
 
-// WithAppendDistributionSourceLabel allows fetcher to add distribute source
-// label for each blob content, which doesn't work for legacy schema1.
-func WithAppendDistributionSourceLabel() RemoteOpt {
+// WithAllMetadata downloads all manifests and known-configuration files
+func WithAllMetadata() RemoteOpt {
 	return func(_ *Client, c *RemoteContext) error {
-		c.AppendDistributionSourceLabel = true
+		c.AllMetadata = true
 		return nil
 	}
 }

+ 32 - 9
vendor/github.com/containerd/containerd/container.go

@@ -25,6 +25,7 @@ import (
 
 	"github.com/containerd/containerd/api/services/tasks/v1"
 	"github.com/containerd/containerd/api/types"
+	tasktypes "github.com/containerd/containerd/api/types/task"
 	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
@@ -49,7 +50,7 @@ type Container interface {
 	// ID identifies the container
 	ID() string
 	// Info returns the underlying container record type
-	Info(context.Context) (containers.Container, error)
+	Info(context.Context, ...InfoOpts) (containers.Container, error)
 	// Delete removes the container
 	Delete(context.Context, ...DeleteOpts) error
 	// NewTask creates a new task based on the container metadata
@@ -80,16 +81,18 @@ type Container interface {
 
 func containerFromRecord(client *Client, c containers.Container) *container {
 	return &container{
-		client: client,
-		id:     c.ID,
+		client:   client,
+		id:       c.ID,
+		metadata: c,
 	}
 }
 
 var _ = (Container)(&container{})
 
 type container struct {
-	client *Client
-	id     string
+	client   *Client
+	id       string
+	metadata containers.Container
 }
 
 // ID returns the container's unique id
@@ -97,8 +100,22 @@ func (c *container) ID() string {
 	return c.id
 }
 
-func (c *container) Info(ctx context.Context) (containers.Container, error) {
-	return c.get(ctx)
+func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Container, error) {
+	i := &InfoConfig{
+		// default to refreshing the container's local metadata
+		Refresh: true,
+	}
+	for _, o := range opts {
+		o(i)
+	}
+	if i.Refresh {
+		metadata, err := c.get(ctx)
+		if err != nil {
+			return c.metadata, err
+		}
+		c.metadata = metadata
+	}
+	return c.metadata, nil
 }
 
 func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
@@ -217,7 +234,11 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
 		}
 
 		// get the rootfs from the snapshotter and add it to the request
-		mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey)
+		s, err := c.client.getSnapshotter(ctx, r.Snapshotter)
+		if err != nil {
+			return nil, err
+		}
+		mounts, err := s.Mounts(ctx, r.SnapshotKey)
 		if err != nil {
 			return nil, err
 		}
@@ -362,7 +383,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er
 		return nil, err
 	}
 	var i cio.IO
-	if ioAttach != nil {
+	if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown {
+		// Do not attach IO for task in unknown state, because there
+		// are no fifo paths anyway.
 		if i, err = attachExistingIO(response, ioAttach); err != nil {
 			return nil, err
 		}

+ 61 - 27
vendor/github.com/containerd/containerd/container_opts.go

@@ -20,11 +20,8 @@ import (
 	"context"
 
 	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/oci"
-	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/typeurl"
 	"github.com/gogo/protobuf/types"
@@ -41,6 +38,15 @@ type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Co
 // UpdateContainerOpts allows the caller to set additional options when updating a container
 type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
 
+// InfoOpts controls how container metadata is fetched and returned
+type InfoOpts func(*InfoConfig)
+
+// InfoConfig specifies how container metadata is fetched
+type InfoConfig struct {
+	// Refresh will do a fetch of the latest container metadata
+	Refresh bool
+}
+
 // WithRuntime allows a user to specify the runtime name and additional options that should
 // be used to create tasks for the container
 func WithRuntime(name string, options interface{}) NewContainerOpts {
@@ -71,6 +77,14 @@ func WithImage(i Image) NewContainerOpts {
 	}
 }
 
+// WithImageName allows setting the image name as the base for the container
+func WithImageName(n string) NewContainerOpts {
+	return func(ctx context.Context, _ *Client, c *containers.Container) error {
+		c.Image = n
+		return nil
+	}
+}
+
 // WithContainerLabels adds the provided labels to the container
 func WithContainerLabels(labels map[string]string) NewContainerOpts {
 	return func(_ context.Context, _ *Client, c *containers.Container) error {
@@ -109,9 +123,17 @@ func WithSnapshotter(name string) NewContainerOpts {
 // WithSnapshot uses an existing root filesystem for the container
 func WithSnapshot(id string) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		setSnapshotterIfEmpty(ctx, client, c)
 		// check that the snapshot exists, if not, fail on creation
-		if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
+		var err error
+		c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		s, err := client.getSnapshotter(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		if _, err := s.Mounts(ctx, id); err != nil {
 			return err
 		}
 		c.SnapshotKey = id
@@ -123,13 +145,21 @@ func WithSnapshot(id string) NewContainerOpts {
 // root filesystem in read-write mode
 func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		diffIDs, err := i.RootFS(ctx)
 		if err != nil {
 			return err
 		}
-		setSnapshotterIfEmpty(ctx, client, c)
+
 		parent := identity.ChainID(diffIDs).String()
-		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
+		c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		s, err := client.getSnapshotter(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		if _, err := s.Prepare(ctx, id, parent, opts...); err != nil {
 			return err
 		}
 		c.SnapshotKey = id
@@ -144,7 +174,13 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
 		if c.Snapshotter == "" {
 			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
 		}
-		return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
+		s, err := client.getSnapshotter(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) {
+			return err
+		}
 	}
 	return nil
 }
@@ -153,13 +189,21 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
 // root filesystem in read-only mode
 func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
 		if err != nil {
 			return err
 		}
-		setSnapshotterIfEmpty(ctx, client, c)
+
 		parent := identity.ChainID(diffIDs).String()
-		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
+		c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		s, err := client.getSnapshotter(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		if _, err := s.View(ctx, id, parent, opts...); err != nil {
 			return err
 		}
 		c.SnapshotKey = id
@@ -168,21 +212,6 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
 	}
 }
 
-func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) {
-	if c.Snapshotter == "" {
-		defaultSnapshotter := DefaultSnapshotter
-		namespaceService := client.NamespaceService()
-		if ns, err := namespaces.NamespaceRequired(ctx); err == nil {
-			if labels, err := namespaceService.Labels(ctx, ns); err == nil {
-				if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok {
-					defaultSnapshotter = snapshotLabel
-				}
-			}
-		}
-		c.Snapshotter = defaultSnapshotter
-	}
-}
-
 // WithContainerExtension appends extension data to the container object.
 // Use this to decorate the container object with additional data for the client
 // integration.
@@ -235,3 +264,8 @@ func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts {
 		return err
 	}
 }
+
+// WithoutRefreshedMetadata will use the current metadata attached to the container object
+func WithoutRefreshedMetadata(i *InfoConfig) {
+	i.Refresh = false
+}

+ 11 - 7
vendor/github.com/containerd/containerd/container_opts_unix.go

@@ -28,7 +28,6 @@ import (
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/platforms"
 	"github.com/opencontainers/image-spec/identity"
 )
 
@@ -45,18 +44,23 @@ func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerO
 
 func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
 		if err != nil {
 			return err
 		}
 
-		setSnapshotterIfEmpty(ctx, client, c)
-
 		var (
-			snapshotter = client.SnapshotService(c.Snapshotter)
-			parent      = identity.ChainID(diffIDs).String()
-			usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
+			parent   = identity.ChainID(diffIDs).String()
+			usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
 		)
+		c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
+		snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter)
+		if err != nil {
+			return err
+		}
 		if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
 			if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
 				c.SnapshotKey = id

+ 1 - 2
vendor/github.com/containerd/containerd/container_restore_opts.go

@@ -22,7 +22,6 @@ import (
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/platforms"
 	"github.com/gogo/protobuf/proto"
 	ptypes "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
@@ -58,7 +57,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint
 			return err
 		}
 
-		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
 		if err != nil {
 			return err
 		}

+ 1 - 1
vendor/github.com/containerd/containerd/containers/containers.go

@@ -49,7 +49,7 @@ type Container struct {
 	// This property is required and immutable.
 	Runtime RuntimeInfo
 
-	// Spec should carry the the runtime specification used to implement the
+	// Spec should carry the runtime specification used to implement the
 	// container.
 	//
 	// This field is required but mutable.

+ 30 - 1
vendor/github.com/containerd/containerd/content/helpers.go

@@ -55,7 +55,14 @@ func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) (
 
 	p := make([]byte, ra.Size())
 
-	_, err = ra.ReadAt(p, 0)
+	n, err := ra.ReadAt(p, 0)
+	if err == io.EOF {
+		if int64(n) != ra.Size() {
+			err = io.ErrUnexpectedEOF
+		} else {
+			err = nil
+		}
+	}
 	return p, err
 }
 
@@ -162,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 	return err
 }
 
+// CopyReader copies to a writer from a given reader, returning
+// the number of bytes copied.
+// Note: if the writer has a non-zero offset, the total number
+// of bytes read may be greater than those copied if the reader
+// is not an io.Seeker.
+// This copy does not commit the writer.
+func CopyReader(cw Writer, r io.Reader) (int64, error) {
+	ws, err := cw.Status()
+	if err != nil {
+		return 0, errors.Wrap(err, "failed to get status")
+	}
+
+	if ws.Offset > 0 {
+		r, err = seekReader(r, ws.Offset, 0)
+		if err != nil {
+			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+		}
+	}
+
+	return copyWithBuffer(cw, r)
+}
+
 // seekReader attempts to seek the reader to the given offset, either by
 // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
 // up to the given offset.

+ 14 - 2
vendor/github.com/containerd/containerd/content/local/store.go

@@ -35,7 +35,6 @@ import (
 	"github.com/containerd/containerd/log"
 	"github.com/sirupsen/logrus"
 
-	"github.com/containerd/continuity"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -661,6 +660,19 @@ func writeTimestampFile(p string, t time.Time) error {
 	if err != nil {
 		return err
 	}
+	return atomicWrite(p, b, 0666)
+}
 
-	return continuity.AtomicWriteFile(p, b, 0666)
+func atomicWrite(path string, data []byte, mode os.FileMode) error {
+	tmp := fmt.Sprintf("%s.tmp", path)
+	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
+	if err != nil {
+		return errors.Wrap(err, "create tmp file")
+	}
+	_, err = f.Write(data)
+	f.Close()
+	if err != nil {
+		return errors.Wrap(err, "write atomic data")
+	}
+	return os.Rename(tmp, path)
 }

+ 1 - 1
vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go

@@ -52,7 +52,7 @@ const (
 	Display Capability = "display"
 )
 
-// AllCaps returns the complete list of supported Nvidia capabilties.
+// AllCaps returns the complete list of supported Nvidia capabilities.
 func AllCaps() []Capability {
 	return []Capability{
 		Compute,

+ 0 - 2
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go

@@ -1,5 +1,3 @@
-// +build linux
-
 /*
    Copyright The containerd Authors.
 

+ 5 - 3
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go

@@ -20,7 +20,8 @@ package seccomp
 
 import (
 	"runtime"
-	"syscall"
+
+	"golang.org/x/sys/unix"
 
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
@@ -311,6 +312,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				"sigaltstack",
 				"signalfd",
 				"signalfd4",
+				"sigprocmask",
 				"sigreturn",
 				"socket",
 				"socketcall",
@@ -555,7 +557,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				Args: []specs.LinuxSeccompArg{
 					{
 						Index:    1,
-						Value:    syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
+						Value:    unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP,
 						ValueTwo: 0,
 						Op:       specs.OpMaskedEqual,
 					},
@@ -570,7 +572,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				Args: []specs.LinuxSeccompArg{
 					{
 						Index:    0,
-						Value:    syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
+						Value:    unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP,
 						ValueTwo: 0,
 						Op:       specs.OpMaskedEqual,
 					},

+ 9 - 2
vendor/github.com/containerd/continuity/proto/gen.go → vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go

@@ -1,3 +1,5 @@
+// +build !linux
+
 /*
    Copyright The containerd Authors.
 
@@ -14,6 +16,11 @@
    limitations under the License.
 */
 
-package proto
+package seccomp
+
+import specs "github.com/opencontainers/runtime-spec/specs-go"
 
-//go:generate protoc --go_out=. manifest.proto
+// DefaultProfile defines the whitelist for the default seccomp profile.
+func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
+	return &specs.LinuxSeccomp{}
+}

+ 2 - 2
vendor/github.com/containerd/containerd/defaults/defaults.go

@@ -23,10 +23,10 @@ const (
 	// DefaultMaxSendMsgSize defines the default maximum message size for
 	// sending protobufs passed over the GRPC API.
 	DefaultMaxSendMsgSize = 16 << 20
-	// DefaultRuntimeNSLabel defines the namespace label to check for
+	// DefaultRuntimeNSLabel defines the namespace label to check for the
 	// default runtime
 	DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
-	// DefaultSnapshotterNSLabel defines the namespances label to check for
+	// DefaultSnapshotterNSLabel defines the namespace label to check for the
 	// default snapshotter
 	DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
 )

+ 2 - 0
vendor/github.com/containerd/containerd/defaults/defaults_unix.go

@@ -32,4 +32,6 @@ const (
 	// DefaultFIFODir is the default location used by client-side cio library
 	// to store FIFOs.
 	DefaultFIFODir = "/run/containerd/fifo"
+	// DefaultRuntime is the default linux runtime
+	DefaultRuntime = "io.containerd.runc.v2"
 )

+ 2 - 0
vendor/github.com/containerd/containerd/defaults/defaults_windows.go

@@ -40,4 +40,6 @@ const (
 	// DefaultFIFODir is the default location used by client-side cio library
 	// to store FIFOs. Unused on Windows.
 	DefaultFIFODir = ""
+	// DefaultRuntime is the default windows runtime
+	DefaultRuntime = "io.containerd.runhcs.v1"
 )

+ 10 - 3
vendor/github.com/containerd/containerd/diff.go

@@ -45,10 +45,17 @@ type diffRemote struct {
 	client diffapi.DiffClient
 }
 
-func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
+func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) {
+	var config diff.ApplyConfig
+	for _, opt := range opts {
+		if err := opt(ctx, desc, &config); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
 	req := &diffapi.ApplyRequest{
-		Diff:   fromDescriptor(diff),
-		Mounts: fromMounts(mounts),
+		Diff:     fromDescriptor(desc),
+		Mounts:   fromMounts(mounts),
+		Payloads: config.ProcessorPayloads,
 	}
 	resp, err := r.client.Apply(ctx, req)
 	if err != nil {

+ 19 - 1
vendor/github.com/containerd/containerd/diff/diff.go

@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/containerd/containerd/mount"
+	"github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
@@ -51,6 +52,15 @@ type Comparer interface {
 	Compare(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error)
 }
 
+// ApplyConfig is used to hold parameters needed for an apply operation
+type ApplyConfig struct {
+	// ProcessorPayloads specifies the payload sent to various processors
+	ProcessorPayloads map[string]*types.Any
+}
+
+// ApplyOpt is used to configure an Apply operation
+type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error
+
 // Applier allows applying diffs between mounts
 type Applier interface {
 	// Apply applies the content referred to by the given descriptor to
@@ -58,7 +68,7 @@ type Applier interface {
 	// implementation and content descriptor. For example, in the common
 	// case the descriptor is a file system difference in tar format,
 	// that tar would be applied on top of the mounts.
-	Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount) (ocispec.Descriptor, error)
+	Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error)
 }
 
 // WithMediaType sets the media type to use for creating the diff, without
@@ -87,3 +97,11 @@ func WithLabels(labels map[string]string) Opt {
 		return nil
 	}
 }
+
+// WithPayloads sets the apply processor payloads to the config
+func WithPayloads(payloads map[string]*types.Any) ApplyOpt {
+	return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error {
+		c.ProcessorPayloads = payloads
+		return nil
+	}
+}

+ 187 - 0
vendor/github.com/containerd/containerd/diff/stream.go

@@ -0,0 +1,187 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/containerd/containerd/archive/compression"
+	"github.com/containerd/containerd/images"
+	"github.com/gogo/protobuf/types"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	handlers []Handler
+
+	// ErrNoProcessor is returned when no stream processor is available for a media-type
+	ErrNoProcessor = errors.New("no processor for media-type")
+)
+
+func init() {
+	// register the default compression handler
+	RegisterProcessor(compressedHandler)
+}
+
+// RegisterProcessor registers a stream processor for media-types
+func RegisterProcessor(handler Handler) {
+	handlers = append(handlers, handler)
+}
+
+// GetProcessor returns the processor for a media-type
+func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+	// reverse this list so that user configured handlers come up first
+	for i := len(handlers) - 1; i >= 0; i-- {
+		processor, ok := handlers[i](ctx, stream.MediaType())
+		if ok {
+			return processor(ctx, stream, payloads)
+		}
+	}
+	return nil, ErrNoProcessor
+}
+
+// Handler checks a media-type and initializes the processor
+type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool)
+
+// StaticHandler returns the processor init func for a static media-type
+func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler {
+	return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
+		if mediaType == expectedMediaType {
+			return fn, true
+		}
+		return nil, false
+	}
+}
+
+// StreamProcessorInit returns the initialized stream processor
+type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error)
+
+// RawProcessor provides access to direct fd for processing
+type RawProcessor interface {
+	// File returns the fd for the read stream of the underlying processor
+	File() *os.File
+}
+
+// StreamProcessor handles processing a content stream and transforming it into a different media-type
+type StreamProcessor interface {
+	io.ReadCloser
+
+	// MediaType is the resulting media-type that the processor processes the stream into
+	MediaType() string
+}
+
+func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
+	compressed, err := images.DiffCompression(ctx, mediaType)
+	if err != nil {
+		return nil, false
+	}
+	if compressed != "" {
+		return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+			ds, err := compression.DecompressStream(stream)
+			if err != nil {
+				return nil, err
+			}
+
+			return &compressedProcessor{
+				rc: ds,
+			}, nil
+		}, true
+	}
+	return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+		return &stdProcessor{
+			rc: stream,
+		}, nil
+	}, true
+}
+
+// NewProcessorChain initializes the root StreamProcessor
+func NewProcessorChain(mt string, r io.Reader) StreamProcessor {
+	return &processorChain{
+		mt: mt,
+		rc: r,
+	}
+}
+
+type processorChain struct {
+	mt string
+	rc io.Reader
+}
+
+func (c *processorChain) MediaType() string {
+	return c.mt
+}
+
+func (c *processorChain) Read(p []byte) (int, error) {
+	return c.rc.Read(p)
+}
+
+func (c *processorChain) Close() error {
+	return nil
+}
+
+type stdProcessor struct {
+	rc StreamProcessor
+}
+
+func (c *stdProcessor) MediaType() string {
+	return ocispec.MediaTypeImageLayer
+}
+
+func (c *stdProcessor) Read(p []byte) (int, error) {
+	return c.rc.Read(p)
+}
+
+func (c *stdProcessor) Close() error {
+	return nil
+}
+
+type compressedProcessor struct {
+	rc io.ReadCloser
+}
+
+func (c *compressedProcessor) MediaType() string {
+	return ocispec.MediaTypeImageLayer
+}
+
+func (c *compressedProcessor) Read(p []byte) (int, error) {
+	return c.rc.Read(p)
+}
+
+func (c *compressedProcessor) Close() error {
+	return c.rc.Close()
+}
+
+func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler {
+	set := make(map[string]struct{}, len(mediaTypes))
+	for _, m := range mediaTypes {
+		set[m] = struct{}{}
+	}
+	return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) {
+		if _, ok := set[mediaType]; ok {
+			return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+				payload := payloads[id]
+				return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload)
+			}, true
+		}
+		return nil, false
+	}
+}
+
+const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE"

+ 146 - 0
vendor/github.com/containerd/containerd/diff/stream_unix.go

@@ -0,0 +1,146 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"sync"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+)
+
+// NewBinaryProcessor returns a binary processor for use with processing content streams
+func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
+	cmd := exec.CommandContext(ctx, name, args...)
+	cmd.Env = os.Environ()
+
+	var payloadC io.Closer
+	if payload != nil {
+		data, err := proto.Marshal(payload)
+		if err != nil {
+			return nil, err
+		}
+		r, w, err := os.Pipe()
+		if err != nil {
+			return nil, err
+		}
+		go func() {
+			io.Copy(w, bytes.NewReader(data))
+			w.Close()
+		}()
+
+		cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+		payloadC = r
+	}
+	cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
+	var (
+		stdin  io.Reader
+		closer func() error
+		err    error
+	)
+	if f, ok := stream.(RawProcessor); ok {
+		stdin = f.File()
+		closer = f.File().Close
+	} else {
+		stdin = stream
+	}
+	cmd.Stdin = stdin
+	r, w, err := os.Pipe()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Stdout = w
+
+	stderr := bytes.NewBuffer(nil)
+	cmd.Stderr = stderr
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+	p := &binaryProcessor{
+		cmd:    cmd,
+		r:      r,
+		mt:     rmt,
+		stderr: stderr,
+	}
+	go p.wait()
+
+	// close after start and dup
+	w.Close()
+	if closer != nil {
+		closer()
+	}
+	if payloadC != nil {
+		payloadC.Close()
+	}
+	return p, nil
+}
+
+type binaryProcessor struct {
+	cmd    *exec.Cmd
+	r      *os.File
+	mt     string
+	stderr *bytes.Buffer
+
+	mu  sync.Mutex
+	err error
+}
+
+func (c *binaryProcessor) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *binaryProcessor) wait() {
+	if err := c.cmd.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); ok {
+			c.mu.Lock()
+			c.err = errors.New(c.stderr.String())
+			c.mu.Unlock()
+		}
+	}
+}
+
+func (c *binaryProcessor) File() *os.File {
+	return c.r
+}
+
+func (c *binaryProcessor) MediaType() string {
+	return c.mt
+}
+
+func (c *binaryProcessor) Read(p []byte) (int, error) {
+	return c.r.Read(p)
+}
+
+func (c *binaryProcessor) Close() error {
+	err := c.r.Close()
+	if kerr := c.cmd.Process.Kill(); err == nil {
+		err = kerr
+	}
+	return err
+}

+ 165 - 0
vendor/github.com/containerd/containerd/diff/stream_windows.go

@@ -0,0 +1,165 @@
+// +build windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sync"
+
+	winio "github.com/Microsoft/go-winio"
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const processorPipe = "STREAM_PROCESSOR_PIPE"
+
+// NewBinaryProcessor returns a binary processor for use with processing content streams
+func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
+	cmd := exec.CommandContext(ctx, name, args...)
+	cmd.Env = os.Environ()
+
+	if payload != nil {
+		data, err := proto.Marshal(payload)
+		if err != nil {
+			return nil, err
+		}
+		up, err := getUiqPath()
+		if err != nil {
+			return nil, err
+		}
+		path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up)
+		l, err := winio.ListenPipe(path, nil)
+		if err != nil {
+			return nil, err
+		}
+		go func() {
+			defer l.Close()
+			conn, err := l.Accept()
+			if err != nil {
+				logrus.WithError(err).Error("accept npipe connection")
+				return
+			}
+			io.Copy(conn, bytes.NewReader(data))
+			conn.Close()
+		}()
+		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path))
+	}
+	cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
+	var (
+		stdin  io.Reader
+		closer func() error
+		err    error
+	)
+	if f, ok := stream.(RawProcessor); ok {
+		stdin = f.File()
+		closer = f.File().Close
+	} else {
+		stdin = stream
+	}
+	cmd.Stdin = stdin
+	r, w, err := os.Pipe()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Stdout = w
+	stderr := bytes.NewBuffer(nil)
+	cmd.Stderr = stderr
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+	p := &binaryProcessor{
+		cmd:    cmd,
+		r:      r,
+		mt:     rmt,
+		stderr: stderr,
+	}
+	go p.wait()
+
+	// close after start and dup
+	w.Close()
+	if closer != nil {
+		closer()
+	}
+	return p, nil
+}
+
+type binaryProcessor struct {
+	cmd    *exec.Cmd
+	r      *os.File
+	mt     string
+	stderr *bytes.Buffer
+
+	mu  sync.Mutex
+	err error
+}
+
+func (c *binaryProcessor) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *binaryProcessor) wait() {
+	if err := c.cmd.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); ok {
+			c.mu.Lock()
+			c.err = errors.New(c.stderr.String())
+			c.mu.Unlock()
+		}
+	}
+}
+
+func (c *binaryProcessor) File() *os.File {
+	return c.r
+}
+
+func (c *binaryProcessor) MediaType() string {
+	return c.mt
+}
+
+func (c *binaryProcessor) Read(p []byte) (int, error) {
+	return c.r.Read(p)
+}
+
+func (c *binaryProcessor) Close() error {
+	err := c.r.Close()
+	if kerr := c.cmd.Process.Kill(); err == nil {
+		err = kerr
+	}
+	return err
+}
+
+func getUiqPath() (string, error) {
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", err
+	}
+	os.Remove(dir)
+	return filepath.Base(dir), nil
+}

+ 16 - 1
vendor/github.com/containerd/containerd/errdefs/errors.go

@@ -26,7 +26,11 @@
 // client-side errors to the correct types.
 package errdefs
 
-import "github.com/pkg/errors"
+import (
+	"context"
+
+	"github.com/pkg/errors"
+)
 
 // Definitions of common error types used throughout containerd. All containerd
 // errors returned by most packages will map into one of these errors classes.
@@ -76,3 +80,14 @@ func IsUnavailable(err error) bool {
 func IsNotImplemented(err error) bool {
 	return errors.Cause(err) == ErrNotImplemented
 }
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+	return errors.Cause(err) == context.Canceled
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+	return errors.Cause(err) == context.DeadlineExceeded
+}

+ 9 - 0
vendor/github.com/containerd/containerd/errdefs/grpc.go

@@ -17,6 +17,7 @@
 package errdefs
 
 import (
+	"context"
 	"strings"
 
 	"github.com/pkg/errors"
@@ -55,6 +56,10 @@ func ToGRPC(err error) error {
 		return status.Errorf(codes.Unavailable, err.Error())
 	case IsNotImplemented(err):
 		return status.Errorf(codes.Unimplemented, err.Error())
+	case IsCanceled(err):
+		return status.Errorf(codes.Canceled, err.Error())
+	case IsDeadlineExceeded(err):
+		return status.Errorf(codes.DeadlineExceeded, err.Error())
 	}
 
 	return err
@@ -89,6 +94,10 @@ func FromGRPC(err error) error {
 		cls = ErrFailedPrecondition
 	case codes.Unimplemented:
 		cls = ErrNotImplemented
+	case codes.Canceled:
+		cls = context.Canceled
+	case codes.DeadlineExceeded:
+		cls = context.DeadlineExceeded
 	default:
 		cls = ErrUnknown
 	}

+ 1 - 1
vendor/github.com/containerd/containerd/events/exchange/exchange.go

@@ -50,7 +50,7 @@ var _ events.Publisher = &Exchange{}
 var _ events.Forwarder = &Exchange{}
 var _ events.Subscriber = &Exchange{}
 
-// Forward accepts an envelope to be direcly distributed on the exchange.
+// Forward accepts an envelope to be directly distributed on the exchange.
 //
 // This is useful when an event is forwarded on behalf of another namespace or
 // when the event is propagated on behalf of another publisher.

+ 6 - 20
vendor/github.com/containerd/containerd/export.go

@@ -20,26 +20,12 @@ import (
 	"context"
 	"io"
 
-	"github.com/containerd/containerd/images/oci"
-
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
+	"github.com/containerd/containerd/images/archive"
 )
 
-// Export exports an image to a Tar stream.
-// OCI format is used by default.
-// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...oci.V1ExporterOpt) (io.ReadCloser, error) {
-
-	exporter, err := oci.ResolveV1ExportOpt(opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	pr, pw := io.Pipe()
-	go func() {
-		pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed"))
-	}()
-	return pr, nil
+// Export exports images to a Tar stream.
+// The tar archive is in OCI format with a Docker compatible manifest
+// when a single target platform is given.
+func (c *Client) Export(ctx context.Context, w io.Writer, opts ...archive.ExportOpt) error {
+	return archive.Export(ctx, c.ContentStore(), w, opts...)
 }

+ 7 - 0
vendor/github.com/containerd/containerd/gc/gc.go

@@ -30,6 +30,11 @@ import (
 // ResourceType represents type of resource at a node
 type ResourceType uint8
 
+// ResourceMax represents the max resource.
+// Upper bits are stripped out during the mark phase, allowing the upper 3 bits
+// to be used by the caller reference function.
+const ResourceMax = ResourceType(0x1F)
+
 // Node presents a resource which has a type and key,
 // this node can be used to lookup other nodes.
 type Node struct {
@@ -80,6 +85,8 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc
 			}
 		}
 
+		// strip bits above max resource type
+		id.Type = id.Type & ResourceMax
 		// mark as black when done
 		reachable[id] = struct{}{}
 	}

+ 174 - 9
vendor/github.com/containerd/containerd/image.go

@@ -19,16 +19,21 @@ package containerd
 import (
 	"context"
 	"fmt"
+	"strings"
+	"sync/atomic"
 
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
-	digest "github.com/opencontainers/go-digest"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/semaphore"
 )
 
 // Image describes an image used by containers
@@ -40,11 +45,13 @@ type Image interface {
 	// Labels of the image
 	Labels() map[string]string
 	// Unpack unpacks the image's content into a snapshot
-	Unpack(context.Context, string) error
+	Unpack(context.Context, string, ...UnpackOpt) error
 	// RootFS returns the unpacked diffids that make up images rootfs.
 	RootFS(ctx context.Context) ([]digest.Digest, error)
 	// Size returns the total size of the image's packed resources.
 	Size(ctx context.Context) (int64, error)
+	// Usage returns a usage calculation for the image.
+	Usage(context.Context, ...UsageOpt) (int64, error)
 	// Config descriptor for the image.
 	Config(ctx context.Context) (ocispec.Descriptor, error)
 	// IsUnpacked returns whether or not an image is unpacked.
@@ -53,6 +60,49 @@ type Image interface {
 	ContentStore() content.Store
 }
 
+type usageOptions struct {
+	manifestLimit *int
+	manifestOnly  bool
+	snapshots     bool
+}
+
+// UsageOpt is used to configure the usage calculation
+type UsageOpt func(*usageOptions) error
+
+// WithUsageManifestLimit sets the limit to the number of manifests which will
+// be walked for usage. Setting this value to 0 will require all manifests to
+// be walked, returning ErrNotFound if manifests are missing.
+// NOTE: By default all manifests which exist will be walked
+// and any non-existent manifests and their subobjects will be ignored.
+func WithUsageManifestLimit(i int) UsageOpt {
+	// If 0 then don't filter any manifests
+	// By default limits to current platform
+	return func(o *usageOptions) error {
+		o.manifestLimit = &i
+		return nil
+	}
+}
+
+// WithSnapshotUsage will check for referenced snapshots from the image objects
+// and include the snapshot size in the total usage.
+func WithSnapshotUsage() UsageOpt {
+	return func(o *usageOptions) error {
+		o.snapshots = true
+		return nil
+	}
+}
+
+// WithManifestUsage is used to get the usage for an image based on what is
+// reported by the manifests rather than what exists in the content store.
+// NOTE: This function is best used with the manifest limit set to get a
+// consistent value, otherwise non-existent manifests will be excluded.
+func WithManifestUsage() UsageOpt {
+	return func(o *usageOptions) error {
+		o.manifestOnly = true
+		return nil
+	}
+}
+
 var _ = (Image)(&image{})
 
 // NewImage returns a client image object from the metadata image
@@ -60,7 +110,7 @@ func NewImage(client *Client, i images.Image) Image {
 	return &image{
 		client:   client,
 		i:        i,
-		platform: platforms.Default(),
+		platform: client.platform,
 	}
 }
 
@@ -98,8 +148,95 @@ func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
 }
 
 func (i *image) Size(ctx context.Context) (int64, error) {
-	provider := i.client.ContentStore()
-	return i.i.Size(ctx, provider, i.platform)
+	return i.Usage(ctx, WithUsageManifestLimit(1), WithManifestUsage())
+}
+
+func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) {
+	var config usageOptions
+	for _, opt := range opts {
+		if err := opt(&config); err != nil {
+			return 0, err
+		}
+	}
+
+	var (
+		provider  = i.client.ContentStore()
+		handler   = images.ChildrenHandler(provider)
+		size      int64
+		mustExist bool
+	)
+
+	if config.manifestLimit != nil {
+		handler = images.LimitManifests(handler, i.platform, *config.manifestLimit)
+		mustExist = true
+	}
+
+	var wh images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		var usage int64
+		children, err := handler(ctx, desc)
+		if err != nil {
+			if !errdefs.IsNotFound(err) || mustExist {
+				return nil, err
+			}
+			if !config.manifestOnly {
+				// Do not count size of non-existent objects
+				desc.Size = 0
+			}
+		} else if config.snapshots || !config.manifestOnly {
+			info, err := provider.Info(ctx, desc.Digest)
+			if err != nil {
+				if !errdefs.IsNotFound(err) {
+					return nil, err
+				}
+				if !config.manifestOnly {
+					// Do not count size of non-existent objects
+					desc.Size = 0
+				}
+			} else if info.Size > desc.Size {
+				// Count actual usage, Size may be unset or -1
+				desc.Size = info.Size
+			}
+
+			for k, v := range info.Labels {
+				const prefix = "containerd.io/gc.ref.snapshot."
+				if !strings.HasPrefix(k, prefix) {
+					continue
+				}
+
+				sn := i.client.SnapshotService(k[len(prefix):])
+				if sn == nil {
+					continue
+				}
+
+				u, err := sn.Usage(ctx, v)
+				if err != nil {
+					if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
+						return nil, err
+					}
+				} else {
+					usage += u.Size
+				}
+			}
+		}
+
+		// Ignore unknown sizes. Generally unknown sizes should
+		// never be set in manifests, however, the usage
+		// calculation does not need to enforce this.
+		if desc.Size >= 0 {
+			usage += desc.Size
+		}
+
+		atomic.AddInt64(&size, usage)
+
+		return children, nil
+	}
+
+	l := semaphore.NewWeighted(3)
+	if err := images.Dispatch(ctx, wh, l, i.i.Target); err != nil {
+		return 0, err
+	}
+
+	return size, nil
 }
 
 func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
@@ -108,7 +245,10 @@ func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
 }
 
 func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) {
-	sn := i.client.SnapshotService(snapshotterName)
+	sn, err := i.client.getSnapshotter(ctx, snapshotterName)
+	if err != nil {
+		return false, err
+	}
 	cs := i.client.ContentStore()
 
 	diffs, err := i.i.RootFS(ctx, cs, i.platform)
@@ -127,28 +267,53 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e
 	return false, nil
 }
 
-func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
+// UnpackConfig provides configuration for the unpack of an image
+type UnpackConfig struct {
+	// ApplyOpts for applying a diff to a snapshotter
+	ApplyOpts []diff.ApplyOpt
+	// SnapshotOpts for configuring a snapshotter
+	SnapshotOpts []snapshots.Opt
+}
+
+// UnpackOpt provides configuration for unpack
+type UnpackOpt func(context.Context, *UnpackConfig) error
+
+func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error {
 	ctx, done, err := i.client.WithLease(ctx)
 	if err != nil {
 		return err
 	}
 	defer done(ctx)
 
+	var config UnpackConfig
+	for _, o := range opts {
+		if err := o(ctx, &config); err != nil {
+			return err
+		}
+	}
+
 	layers, err := i.getLayers(ctx, i.platform)
 	if err != nil {
 		return err
 	}
 
 	var (
-		sn = i.client.SnapshotService(snapshotterName)
 		a  = i.client.DiffService()
 		cs = i.client.ContentStore()
 
 		chain    []digest.Digest
 		unpacked bool
 	)
+	snapshotterName, err = i.client.resolveSnapshotterName(ctx, snapshotterName)
+	if err != nil {
+		return err
+	}
+	sn, err := i.client.getSnapshotter(ctx, snapshotterName)
+	if err != nil {
+		return err
+	}
 	for _, layer := range layers {
-		unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a)
+		unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts)
 		if err != nil {
 			return err
 		}

+ 6 - 7
vendor/github.com/containerd/containerd/archive/tar_opts_unix.go → vendor/github.com/containerd/containerd/images/annotations.go

@@ -1,5 +1,3 @@
-// +build !windows
-
 /*
    Copyright The containerd Authors.
 
@@ -16,9 +14,10 @@
    limitations under the License.
 */
 
-package archive
+package images
 
-// ApplyOptions provides additional options for an Apply operation
-type ApplyOptions struct {
-	Filter Filter // Filter tar headers
-}
+const (
+	// AnnotationImageName is an annotation on a Descriptor in an index.json
+	// containing the `Name` value as used by an `Image` struct
+	AnnotationImageName = "io.containerd.image.name"
+)

+ 468 - 0
vendor/github.com/containerd/containerd/images/archive/exporter.go

@@ -0,0 +1,468 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"path"
+	"sort"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type exportOptions struct {
+	manifests          []ocispec.Descriptor
+	platform           platforms.MatchComparer
+	allPlatforms       bool
+	skipDockerManifest bool
+}
+
+// ExportOpt defines options for configuring exported descriptors
+type ExportOpt func(context.Context, *exportOptions) error
+
+// WithPlatform defines the platform to require manifest lists have
+// not exporting all platforms.
+// Additionally, platform is used to resolve image configs for
+// Docker v1.1, v1.2 format compatibility.
+func WithPlatform(p platforms.MatchComparer) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.platform = p
+		return nil
+	}
+}
+
+// WithAllPlatforms exports all manifests from a manifest list.
+// Missing content will fail the export.
+func WithAllPlatforms() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.allPlatforms = true
+		return nil
+	}
+}
+
+// WithSkipDockerManifest skips creation of the Docker compatible
+// manifest.json file.
+func WithSkipDockerManifest() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.skipDockerManifest = true
+		return nil
+	}
+}
+
+// WithImage adds the provided images to the exported archive.
+func WithImage(is images.Store, name string) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		img, err := is.Get(ctx, name)
+		if err != nil {
+			return err
+		}
+
+		img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
+		o.manifests = append(o.manifests, img.Target)
+
+		return nil
+	}
+}
+
+// WithManifest adds a manifest to the exported archive.
+// When names are given they will be set on the manifest in the
+// exported archive, creating an index record for each name.
+// When no names are provided, it is up to caller to put name annotation to
+// on the manifest descriptor if needed.
+func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		if len(names) == 0 {
+			o.manifests = append(o.manifests, manifest)
+		}
+		for _, name := range names {
+			mc := manifest
+			mc.Annotations = addNameAnnotation(name, manifest.Annotations)
+			o.manifests = append(o.manifests, mc)
+		}
+
+		return nil
+	}
+}
+
+func addNameAnnotation(name string, base map[string]string) map[string]string {
+	annotations := map[string]string{}
+	for k, v := range base {
+		annotations[k] = v
+	}
+
+	annotations[images.AnnotationImageName] = name
+	annotations[ocispec.AnnotationRefName] = ociReferenceName(name)
+
+	return annotations
+}
+
+// Export implements Exporter.
+func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
+	var eo exportOptions
+	for _, opt := range opts {
+		if err := opt(ctx, &eo); err != nil {
+			return err
+		}
+	}
+
+	records := []tarRecord{
+		ociLayoutFile(""),
+		ociIndexRecord(eo.manifests),
+	}
+
+	algorithms := map[string]struct{}{}
+	dManifests := map[digest.Digest]*exportManifest{}
+	resolvedIndex := map[digest.Digest]digest.Digest{}
+	for _, desc := range eo.manifests {
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			mt, ok := dManifests[desc.Digest]
+			if !ok {
+				// TODO(containerd): Skip if already added
+				r, err := getRecords(ctx, store, desc, algorithms)
+				if err != nil {
+					return err
+				}
+				records = append(records, r...)
+
+				mt = &exportManifest{
+					manifest: desc,
+				}
+				dManifests[desc.Digest] = mt
+			}
+
+			name := desc.Annotations[images.AnnotationImageName]
+			if name != "" && !eo.skipDockerManifest {
+				mt.names = append(mt.names, name)
+			}
+		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			d, ok := resolvedIndex[desc.Digest]
+			if !ok {
+				records = append(records, blobRecord(store, desc))
+
+				p, err := content.ReadBlob(ctx, store, desc)
+				if err != nil {
+					return err
+				}
+
+				var index ocispec.Index
+				if err := json.Unmarshal(p, &index); err != nil {
+					return err
+				}
+
+				var manifests []ocispec.Descriptor
+				for _, m := range index.Manifests {
+					if eo.platform != nil {
+						if m.Platform == nil || eo.platform.Match(*m.Platform) {
+							manifests = append(manifests, m)
+						} else if !eo.allPlatforms {
+							continue
+						}
+					}
+
+					r, err := getRecords(ctx, store, m, algorithms)
+					if err != nil {
+						return err
+					}
+
+					records = append(records, r...)
+				}
+
+				if !eo.skipDockerManifest {
+					if len(manifests) >= 1 {
+						if len(manifests) > 1 {
+							sort.SliceStable(manifests, func(i, j int) bool {
+								if manifests[i].Platform == nil {
+									return false
+								}
+								if manifests[j].Platform == nil {
+									return true
+								}
+								return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform)
+							})
+						}
+						d = manifests[0].Digest
+						dManifests[d] = &exportManifest{
+							manifest: manifests[0],
+						}
+					} else if eo.platform != nil {
+						return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform")
+					}
+				}
+				resolvedIndex[desc.Digest] = d
+			}
+			if d != "" {
+				if name := desc.Annotations[images.AnnotationImageName]; name != "" {
+					mt := dManifests[d]
+					mt.names = append(mt.names, name)
+				}
+
+			}
+		default:
+			return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported")
+		}
+	}
+
+	if len(dManifests) > 0 {
+		tr, err := manifestsRecord(ctx, store, dManifests)
+		if err != nil {
+			return errors.Wrap(err, "unable to create manifests file")
+		}
+
+		records = append(records, tr)
+	}
+
+	if len(algorithms) > 0 {
+		records = append(records, directoryRecord("blobs/", 0755))
+		for alg := range algorithms {
+			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
+		}
+	}
+
+	tw := tar.NewWriter(writer)
+	defer tw.Close()
+	return writeTar(ctx, tw, records)
+}
+
+func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) {
+	var records []tarRecord
+	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		records = append(records, blobRecord(store, desc))
+		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
+		return nil, nil
+	}
+
+	childrenHandler := images.ChildrenHandler(store)
+
+	handlers := images.Handlers(
+		childrenHandler,
+		images.HandlerFunc(exportHandler),
+	)
+
+	// Walk sequentially since the number of fetchs is likely one and doing in
+	// parallel requires locking the export handler
+	if err := images.Walk(ctx, handlers, desc); err != nil {
+		return nil, err
+	}
+
+	return records, nil
+}
+
+type tarRecord struct {
+	Header *tar.Header
+	CopyTo func(context.Context, io.Writer) (int64, error)
+}
+
+func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
+	path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     path,
+			Mode:     0444,
+			Size:     desc.Size,
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			r, err := cs.ReaderAt(ctx, desc)
+			if err != nil {
+				return 0, errors.Wrap(err, "failed to get reader")
+			}
+			defer r.Close()
+
+			// Verify digest
+			dgstr := desc.Digest.Algorithm().Digester()
+
+			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
+			if err != nil {
+				return 0, errors.Wrap(err, "failed to copy to tar")
+			}
+			if dgstr.Digest() != desc.Digest {
+				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
+			}
+			return n, nil
+		},
+	}
+}
+
+func directoryRecord(name string, mode int64) tarRecord {
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     name,
+			Mode:     mode,
+			Typeflag: tar.TypeDir,
+		},
+	}
+}
+
+func ociLayoutFile(version string) tarRecord {
+	if version == "" {
+		version = ocispec.ImageLayoutVersion
+	}
+	layout := ocispec.ImageLayout{
+		Version: version,
+	}
+
+	b, err := json.Marshal(layout)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     ocispec.ImageLayoutFile,
+			Mode:     0444,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+
+}
+
+func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {
+	index := ocispec.Index{
+		Versioned: ocispecs.Versioned{
+			SchemaVersion: 2,
+		},
+		Manifests: manifests,
+	}
+
+	b, err := json.Marshal(index)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     "index.json",
+			Mode:     0644,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+}
+
+type exportManifest struct {
+	manifest ocispec.Descriptor
+	names    []string
+}
+
+func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) {
+	mfsts := make([]struct {
+		Config   string
+		RepoTags []string
+		Layers   []string
+	}, len(manifests))
+
+	var i int
+	for _, m := range manifests {
+		p, err := content.ReadBlob(ctx, store, m.manifest)
+		if err != nil {
+			return tarRecord{}, err
+		}
+
+		var manifest ocispec.Manifest
+		if err := json.Unmarshal(p, &manifest); err != nil {
+			return tarRecord{}, err
+		}
+		if err := manifest.Config.Digest.Validate(); err != nil {
+			return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest)
+		}
+
+		dgst := manifest.Config.Digest
+		mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
+		for _, l := range manifest.Layers {
+			path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
+			mfsts[i].Layers = append(mfsts[i].Layers, path)
+		}
+
+		for _, name := range m.names {
+			nname, err := familiarizeReference(name)
+			if err != nil {
+				return tarRecord{}, err
+			}
+
+			mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname)
+		}
+
+		i++
+	}
+
+	b, err := json.Marshal(mfsts)
+	if err != nil {
+		return tarRecord{}, err
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     "manifest.json",
+			Mode:     0644,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}, nil
+}
+
+func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
+	sort.Slice(records, func(i, j int) bool {
+		return records[i].Header.Name < records[j].Header.Name
+	})
+
+	var last string
+	for _, record := range records {
+		if record.Header.Name == last {
+			continue
+		}
+		last = record.Header.Name
+		if err := tw.WriteHeader(record.Header); err != nil {
+			return err
+		}
+		if record.CopyTo != nil {
+			n, err := record.CopyTo(ctx, tw)
+			if err != nil {
+				return err
+			}
+			if n != record.Header.Size {
+				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
+			}
+		} else if record.Header.Size > 0 {
+			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
+		}
+	}
+	return nil
+}

+ 133 - 21
vendor/github.com/containerd/containerd/images/archive/importer.go

@@ -22,12 +22,14 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"path"
 
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/log"
 	digest "github.com/opencontainers/go-digest"
@@ -36,6 +38,22 @@ import (
 	"github.com/pkg/errors"
 )
 
+type importOpts struct {
+	compress bool
+}
+
+// ImportOpt is an option for importing an OCI index
+type ImportOpt func(*importOpts) error
+
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(io *importOpts) error {
+		io.compress = true
+		return nil
+	}
+}
+
 // ImportIndex imports an index from a tar archive image bundle
 // - implements Docker v1.1, v1.2 and OCI v1.
 // - prefers OCI v1 when provided
@@ -43,8 +61,7 @@ import (
 // - normalizes Docker references and adds as OCI ref name
 //      e.g. alpine:latest -> docker.io/library/alpine:latest
 // - existing OCI reference names are untouched
-// - TODO: support option to compress layers on ingest
-func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
+func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
 	var (
 		tr = tar.NewReader(reader)
 
@@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		}
 		symlinks = make(map[string]string)
 		blobs    = make(map[string]ocispec.Descriptor)
+		iopts    importOpts
 	)
+
+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
+
 	for {
 		hdr, err := tr.Next()
 		if err == io.EOF {
@@ -99,7 +124,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 	}
 
 	// If OCI layout was given, interpret the tar as an OCI layout.
-	// When not provided, the layout of the tar will be interpretted
+	// When not provided, the layout of the tar will be interpreted
 	// as Docker v1.1 or v1.2.
 	if ociLayout.Version != "" {
 		if ociLayout.Version != ocispec.ImageLayoutVersion {
@@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		if !ok {
 			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
 		}
-		config.MediaType = ocispec.MediaTypeImageConfig
+		config.MediaType = images.MediaTypeDockerSchema2Config
 
-		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs)
+		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
 		if err != nil {
 			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
 		}
 
-		manifest := ocispec.Manifest{
-			Versioned: specs.Versioned{
-				SchemaVersion: 2,
-			},
-			Config: config,
-			Layers: layers,
+		manifest := struct {
+			SchemaVersion int                  `json:"schemaVersion"`
+			MediaType     string               `json:"mediaType"`
+			Config        ocispec.Descriptor   `json:"config"`
+			Layers        []ocispec.Descriptor `json:"layers"`
+		}{
+			SchemaVersion: 2,
+			MediaType:     images.MediaTypeDockerSchema2Manifest,
+			Config:        config,
+			Layers:        layers,
 		}
 
 		desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest)
@@ -181,7 +210,8 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 				}
 
 				mfstdesc.Annotations = map[string]string{
-					ocispec.AnnotationRefName: normalized,
+					images.AnnotationImageName: normalized,
+					ocispec.AnnotationRefName:  ociReferenceName(normalized),
 				}
 
 				idx.Manifests = append(idx.Manifests, mfstdesc)
@@ -210,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size
 	return dgstr.Digest(), nil
 }
 
-func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-	var layers []ocispec.Descriptor
-	for _, f := range layerFiles {
+func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
+	layers := make([]ocispec.Descriptor, len(layerFiles))
+	descs := map[digest.Digest]*ocispec.Descriptor{}
+	filters := []string{}
+	for i, f := range layerFiles {
 		desc, ok := blobs[f]
 		if !ok {
 			return nil, errors.Errorf("layer %q not found", f)
 		}
+		layers[i] = desc
+		descs[desc.Digest] = &layers[i]
+		filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String())
+	}
 
+	err := store.Walk(ctx, func(info content.Info) error {
+		dgst, ok := info.Labels["containerd.io/uncompressed"]
+		if ok {
+			desc := descs[digest.Digest(dgst)]
+			if desc != nil {
+				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
+				desc.Digest = info.Digest
+				desc.Size = info.Size
+			}
+		}
+		return nil
+	}, filters...)
+	if err != nil {
+		return nil, errors.Wrap(err, "failure checking for compressed blobs")
+	}
+
+	for i, desc := range layers {
+		if desc.MediaType != "" {
+			continue
+		}
 		// Open blob, resolve media type
 		ra, err := store.ReaderAt(ctx, desc)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest)
+			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
 		}
 		s, err := compression.DecompressStream(content.NewReader(ra))
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to detect compression for %q", f)
+			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
 		}
 		if s.GetCompression() == compression.Uncompressed {
-			// TODO: Support compressing and writing back to content store
-			desc.MediaType = ocispec.MediaTypeImageLayer
+			if compress {
+				ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
+				labels := map[string]string{
+					"containerd.io/uncompressed": desc.Digest.String(),
+				}
+				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
+				if err != nil {
+					s.Close()
+					return nil, err
+				}
+				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
+			} else {
+				layers[i].MediaType = images.MediaTypeDockerSchema2Layer
+			}
 		} else {
-			desc.MediaType = ocispec.MediaTypeImageLayerGzip
+			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
 		}
 		s.Close()
 
-		layers = append(layers, desc)
 	}
 	return layers, nil
 }
 
+func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
+	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
+	}
+
+	defer func() {
+		w.Close()
+		if err != nil {
+			cs.Abort(ctx, ref)
+		}
+	}()
+	if err := w.Truncate(0); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
+	}
+
+	cw, err := compression.CompressStream(w, compression.Gzip)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if _, err := io.Copy(cw, r); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	if err := cw.Close(); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	cst, err := w.Status()
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
+	}
+
+	desc.Digest = w.Digest()
+	desc.Size = cst.Offset
+
+	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
+		}
+	}
+
+	return desc, nil
+}
+
 func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
 	manifestBytes, err := json.Marshal(manifest)
 	if err != nil {

+ 28 - 2
vendor/github.com/containerd/containerd/images/archive/reference.go

@@ -19,7 +19,8 @@ package archive
 import (
 	"strings"
 
-	"github.com/docker/distribution/reference"
+	"github.com/containerd/containerd/reference"
+	distref "github.com/docker/distribution/reference"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
@@ -69,7 +70,7 @@ func isImagePrefix(s, prefix string) bool {
 
 func normalizeReference(ref string) (string, error) {
 	// TODO: Replace this function to not depend on reference package
-	normalized, err := reference.ParseDockerRef(ref)
+	normalized, err := distref.ParseDockerRef(ref)
 	if err != nil {
 		return "", errors.Wrapf(err, "normalize image ref %q", ref)
 	}
@@ -77,6 +78,31 @@ func normalizeReference(ref string) (string, error) {
 	return normalized.String(), nil
 }
 
+func familiarizeReference(ref string) (string, error) {
+	named, err := distref.ParseNormalizedNamed(ref)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to parse %q", ref)
+	}
+	named = distref.TagNameOnly(named)
+
+	return distref.FamiliarString(named), nil
+}
+
+func ociReferenceName(name string) string {
+	// OCI defines the reference name as only a tag excluding the
+	// repository. The containerd annotation contains the full image name
+	// since the tag is insufficient for correctly naming and referring to an
+	// image
+	var ociRef string
+	if spec, err := reference.Parse(name); err == nil {
+		ociRef = spec.Object
+	} else {
+		ociRef = name
+	}
+
+	return ociRef
+}
+
 // DigestTranslator creates a digest reference by adding the
 // digest to an image name
 func DigestTranslator(prefix string) func(digest.Digest) string {

+ 4 - 3
vendor/github.com/containerd/containerd/images/handlers.go

@@ -117,7 +117,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
 //
 // If any handler returns an error, the dispatch session will be canceled.
 func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
-	eg, ctx := errgroup.WithContext(ctx)
+	eg, ctx2 := errgroup.WithContext(ctx)
 	for _, desc := range descs {
 		desc := desc
 
@@ -126,10 +126,11 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
 				return err
 			}
 		}
+
 		eg.Go(func() error {
 			desc := desc
 
-			children, err := handler.Handle(ctx, desc)
+			children, err := handler.Handle(ctx2, desc)
 			if limiter != nil {
 				limiter.Release(1)
 			}
@@ -141,7 +142,7 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
 			}
 
 			if len(children) > 0 {
-				return Dispatch(ctx, handler, limiter, children...)
+				return Dispatch(ctx2, handler, limiter, children...)
 			}
 
 			return nil

+ 19 - 41
vendor/github.com/containerd/containerd/images/image.go

@@ -20,7 +20,6 @@ import (
 	"context"
 	"encoding/json"
 	"sort"
-	"strings"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -119,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
 		}
 		size += desc.Size
 		return nil, nil
-	}), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target)
+	}), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
 }
 
 type platformManifest struct {
@@ -142,6 +141,7 @@ type platformManifest struct {
 // this direction because this abstraction is not needed.`
 func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
 	var (
+		limit    = 1
 		m        []platformManifest
 		wasIndex bool
 	)
@@ -210,10 +210,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 				}
 			}
 
+			sort.SliceStable(descs, func(i, j int) bool {
+				if descs[i].Platform == nil {
+					return false
+				}
+				if descs[j].Platform == nil {
+					return true
+				}
+				return platform.Less(*descs[i].Platform, *descs[j].Platform)
+			})
+
 			wasIndex = true
 
+			if len(descs) > limit {
+				return descs[:limit], nil
+			}
 			return descs, nil
-
 		}
 		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
 	}), image); err != nil {
@@ -227,17 +239,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 		}
 		return ocispec.Manifest{}, err
 	}
-
-	sort.SliceStable(m, func(i, j int) bool {
-		if m[i].p == nil {
-			return false
-		}
-		if m[j].p == nil {
-			return true
-		}
-		return platform.Less(*m[i].p, *m[j].p)
-	})
-
 	return *m[0].m, nil
 }
 
@@ -356,15 +357,11 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		}
 
 		descs = append(descs, index.Manifests...)
-	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
-		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
-		MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
-		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
-		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
-		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
-		// childless data types.
-		return nil, nil
 	default:
+		if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
+			// childless data types.
+			return nil, nil
+		}
 		log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
 	}
 
@@ -387,22 +384,3 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D
 	}
 	return config.RootFS.DiffIDs, nil
 }
-
-// IsCompressedDiff returns true if mediaType is a known compressed diff media type.
-// It returns false if the media type is a diff, but not compressed. If the media type
-// is not a known diff type, it returns errdefs.ErrNotImplemented
-func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) {
-	switch mediaType {
-	case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer:
-	case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip:
-		return true, nil
-	default:
-		// Still apply all generic media types *.tar[.+]gzip and *.tar
-		if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") {
-			return true, nil
-		} else if !strings.HasSuffix(mediaType, ".tar") {
-			return false, errdefs.ErrNotImplemented
-		}
-	}
-	return false, nil
-}

+ 84 - 0
vendor/github.com/containerd/containerd/images/mediatypes.go

@@ -16,6 +16,15 @@
 
 package images
 
+import (
+	"context"
+	"sort"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
 // mediatype definitions for image components handled in containerd.
 //
 // oci components are generally referenced directly, although we may centralize
@@ -40,3 +49,78 @@ const (
 	// Legacy Docker schema1 manifest
 	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 )
+
+// DiffCompression returns the compression as defined by the layer diff media
+// type. For Docker media types without compression, "unknown" is returned to
+// indicate that the media type may be compressed. If the media type is not
+// recognized as a layer diff, then it returns errdefs.ErrNotImplemented
+func DiffCompression(ctx context.Context, mediaType string) (string, error) {
+	base, ext := parseMediaTypes(mediaType)
+	switch base {
+	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign:
+		if len(ext) > 0 {
+			// Type is wrapped
+			return "", nil
+		}
+		// These media types may have been compressed but failed to
+		// use the correct media type. The decompression function
+		// should detect and handle this case.
+		return "unknown", nil
+	case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip:
+		if len(ext) > 0 {
+			// Type is wrapped
+			return "", nil
+		}
+		return "gzip", nil
+	case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable:
+		if len(ext) > 0 {
+			switch ext[len(ext)-1] {
+			case "gzip":
+				return "gzip", nil
+			}
+		}
+		return "", nil
+	default:
+		return "", errdefs.ErrNotImplemented
+	}
+}
+
+// parseMediaTypes splits the media type into the base type and
+// an array of sorted extensions
+func parseMediaTypes(mt string) (string, []string) {
+	if mt == "" {
+		return "", []string{}
+	}
+
+	s := strings.Split(mt, "+")
+	ext := s[1:]
+	sort.Strings(ext)
+
+	return s[0], ext
+}
+
+// IsLayerTypes returns true if the media type is a layer
+func IsLayerType(mt string) bool {
+	if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") {
+		return true
+	}
+
+	// Parse Docker media types, strip off any + suffixes first
+	base, _ := parseMediaTypes(mt)
+	switch base {
+	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
+		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
+		return true
+	}
+	return false
+}
+
+// IsKnownConfig returns true if the media type is a known config type
+func IsKnownConfig(mt string) bool {
+	switch mt {
+	case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
+		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
+		return true
+	}
+	return false
+}

+ 0 - 241
vendor/github.com/containerd/containerd/images/oci/exporter.go

@@ -1,241 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package oci
-
-import (
-	"archive/tar"
-	"context"
-	"encoding/json"
-	"io"
-	"sort"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/platforms"
-	ocispecs "github.com/opencontainers/image-spec/specs-go"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-// V1Exporter implements OCI Image Spec v1.
-// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-//
-// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types,
-//                    e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
-//                         -> application/vnd.oci.image.layer.v1.tar+gzip
-type V1Exporter struct {
-	AllPlatforms bool
-}
-
-// V1ExporterOpt allows the caller to set additional options to a new V1Exporter
-type V1ExporterOpt func(c *V1Exporter) error
-
-// DefaultV1Exporter return a default V1Exporter pointer
-func DefaultV1Exporter() *V1Exporter {
-	return &V1Exporter{
-		AllPlatforms: false,
-	}
-}
-
-// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt
-func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) {
-	exporter := DefaultV1Exporter()
-	for _, o := range opts {
-		if err := o(exporter); err != nil {
-			return exporter, err
-		}
-	}
-	return exporter, nil
-}
-
-// WithAllPlatforms set V1Exporter`s AllPlatforms option
-func WithAllPlatforms(allPlatforms bool) V1ExporterOpt {
-	return func(c *V1Exporter) error {
-		c.AllPlatforms = allPlatforms
-		return nil
-	}
-}
-
-// Export implements Exporter.
-func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error {
-	tw := tar.NewWriter(writer)
-	defer tw.Close()
-
-	records := []tarRecord{
-		ociLayoutFile(""),
-		ociIndexRecord(desc),
-	}
-
-	algorithms := map[string]struct{}{}
-	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-		records = append(records, blobRecord(store, desc))
-		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
-		return nil, nil
-	}
-
-	childrenHandler := images.ChildrenHandler(store)
-
-	if !oe.AllPlatforms {
-		// get local default platform to fetch image manifest
-		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec()))
-	}
-
-	handlers := images.Handlers(
-		childrenHandler,
-		images.HandlerFunc(exportHandler),
-	)
-
-	// Walk sequentially since the number of fetchs is likely one and doing in
-	// parallel requires locking the export handler
-	if err := images.Walk(ctx, handlers, desc); err != nil {
-		return err
-	}
-
-	if len(algorithms) > 0 {
-		records = append(records, directoryRecord("blobs/", 0755))
-		for alg := range algorithms {
-			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
-		}
-	}
-
-	return writeTar(ctx, tw, records)
-}
-
-type tarRecord struct {
-	Header *tar.Header
-	CopyTo func(context.Context, io.Writer) (int64, error)
-}
-
-func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
-	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     path,
-			Mode:     0444,
-			Size:     desc.Size,
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			r, err := cs.ReaderAt(ctx, desc)
-			if err != nil {
-				return 0, errors.Wrap(err, "failed to get reader")
-			}
-			defer r.Close()
-
-			// Verify digest
-			dgstr := desc.Digest.Algorithm().Digester()
-
-			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
-			if err != nil {
-				return 0, errors.Wrap(err, "failed to copy to tar")
-			}
-			if dgstr.Digest() != desc.Digest {
-				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
-			}
-			return n, nil
-		},
-	}
-}
-
-func directoryRecord(name string, mode int64) tarRecord {
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     name,
-			Mode:     mode,
-			Typeflag: tar.TypeDir,
-		},
-	}
-}
-
-func ociLayoutFile(version string) tarRecord {
-	if version == "" {
-		version = ocispec.ImageLayoutVersion
-	}
-	layout := ocispec.ImageLayout{
-		Version: version,
-	}
-
-	b, err := json.Marshal(layout)
-	if err != nil {
-		panic(err)
-	}
-
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     ocispec.ImageLayoutFile,
-			Mode:     0444,
-			Size:     int64(len(b)),
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			n, err := w.Write(b)
-			return int64(n), err
-		},
-	}
-
-}
-
-func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
-	index := ocispec.Index{
-		Versioned: ocispecs.Versioned{
-			SchemaVersion: 2,
-		},
-		Manifests: manifests,
-	}
-
-	b, err := json.Marshal(index)
-	if err != nil {
-		panic(err)
-	}
-
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     "index.json",
-			Mode:     0644,
-			Size:     int64(len(b)),
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			n, err := w.Write(b)
-			return int64(n), err
-		},
-	}
-}
-
-func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
-	sort.Slice(records, func(i, j int) bool {
-		return records[i].Header.Name < records[j].Header.Name
-	})
-
-	for _, record := range records {
-		if err := tw.WriteHeader(record.Header); err != nil {
-			return err
-		}
-		if record.CopyTo != nil {
-			n, err := record.CopyTo(ctx, tw)
-			if err != nil {
-				return err
-			}
-			if n != record.Header.Size {
-				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
-			}
-		} else if record.Header.Size > 0 {
-			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
-		}
-	}
-	return nil
-}

+ 38 - 13
vendor/github.com/containerd/containerd/import.go

@@ -35,6 +35,7 @@ type importOpts struct {
 	imageRefT    func(string) string
 	dgstRefT     func(digest.Digest) string
 	allPlatforms bool
+	compress     bool
 }
 
 // ImportOpt allows the caller to specify import specific options
@@ -74,9 +75,18 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
 	}
 }
 
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(c *importOpts) error {
+		c.compress = true
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
-// Note that unreferrenced blobs may be imported to the content store as well.
+// Note that unreferenced blobs may be imported to the content store as well.
 func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) {
 	var iopts importOpts
 	for _, o := range opts {
@@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 	}
 	defer done(ctx)
 
-	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader)
+	var aio []archive.ImportOpt
+	if iopts.compress {
+		aio = append(aio, archive.WithImportCompression())
+	}
+
+	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...)
 	if err != nil {
 		return nil, err
 	}
@@ -110,7 +125,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 	}
 	var platformMatcher = platforms.All
 	if !iopts.allPlatforms {
-		platformMatcher = platforms.Default()
+		platformMatcher = c.platform
 	}
 
 	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
@@ -130,16 +145,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 		}
 
 		for _, m := range idx.Manifests {
-			if ref := m.Annotations[ocispec.AnnotationRefName]; ref != "" {
-				if iopts.imageRefT != nil {
-					ref = iopts.imageRefT(ref)
-				}
-				if ref != "" {
-					imgs = append(imgs, images.Image{
-						Name:   ref,
-						Target: m,
-					})
-				}
+			name := imageName(m.Annotations, iopts.imageRefT)
+			if name != "" {
+				imgs = append(imgs, images.Image{
+					Name:   name,
+					Target: m,
+				})
 			}
 			if iopts.dgstRefT != nil {
 				ref := iopts.dgstRefT(m.Digest)
@@ -178,3 +189,17 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 
 	return imgs, nil
 }
+
+func imageName(annotations map[string]string, ociCleanup func(string) string) string {
+	name := annotations[images.AnnotationImageName]
+	if name != "" {
+		return name
+	}
+	name = annotations[ocispec.AnnotationRefName]
+	if name != "" {
+		if ociCleanup != nil {
+			name = ociCleanup(name)
+		}
+	}
+	return name
+}

+ 1 - 2
vendor/github.com/containerd/containerd/install.go

@@ -27,7 +27,6 @@ import (
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/platforms"
 	"github.com/pkg/errors"
 )
 
@@ -43,7 +42,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 	}
 	var (
 		cs       = image.ContentStore()
-		platform = platforms.Default()
+		platform = c.platform
 	)
 	manifest, err := images.Manifest(ctx, cs, image.Target(), platform)
 	if err != nil {

+ 10 - 0
vendor/github.com/containerd/containerd/leases/lease.go

@@ -32,6 +32,9 @@ type Manager interface {
 	Create(context.Context, ...Opt) (Lease, error)
 	Delete(context.Context, Lease, ...DeleteOpt) error
 	List(context.Context, ...string) ([]Lease, error)
+	AddResource(context.Context, Lease, Resource) error
+	DeleteResource(context.Context, Lease, Resource) error
+	ListResources(context.Context, Lease) ([]Resource, error)
 }
 
 // Lease retains resources to prevent cleanup before
@@ -42,6 +45,13 @@ type Lease struct {
 	Labels    map[string]string
 }
 
+// Resource represents low level resource of image, like content, ingest and
+// snapshotter.
+type Resource struct {
+	ID   string
+	Type string
+}
+
 // DeleteOptions provide options on image delete
 type DeleteOptions struct {
 	Synchronous bool

+ 40 - 0
vendor/github.com/containerd/containerd/leases/proxy/manager.go

@@ -91,3 +91,43 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L
 
 	return l, nil
 }
+
+func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error {
+	_, err := pm.client.AddResource(ctx, &leasesapi.AddResourceRequest{
+		ID: lease.ID,
+		Resource: leasesapi.Resource{
+			ID:   r.ID,
+			Type: r.Type,
+		},
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (pm *proxyManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error {
+	_, err := pm.client.DeleteResource(ctx, &leasesapi.DeleteResourceRequest{
+		ID: lease.ID,
+		Resource: leasesapi.Resource{
+			ID:   r.ID,
+			Type: r.Type,
+		},
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (pm *proxyManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) {
+	resp, err := pm.client.ListResources(ctx, &leasesapi.ListResourcesRequest{
+		ID: lease.ID,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	rs := make([]leases.Resource, 0, len(resp.Resources))
+	for _, i := range resp.Resources {
+		rs = append(rs, leases.Resource{
+			ID:   i.ID,
+			Type: i.Type,
+		})
+	}
+	return rs, nil
+}

+ 1 - 1
vendor/github.com/containerd/containerd/log/context.go

@@ -30,7 +30,7 @@ var (
 	// messages.
 	G = GetLogger
 
-	// L is an alias for the the standard logger.
+	// L is an alias for the standard logger.
 	L = logrus.NewEntry(logrus.StandardLogger())
 )
 

+ 145 - 114
vendor/github.com/containerd/containerd/metadata/containers.go

@@ -19,6 +19,7 @@ package metadata
 import (
 	"context"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/containers"
@@ -35,13 +36,13 @@ import (
 )
 
 type containerStore struct {
-	tx *bolt.Tx
+	db *DB
 }
 
 // NewContainerStore returns a Store backed by an underlying bolt DB
-func NewContainerStore(tx *bolt.Tx) containers.Store {
+func NewContainerStore(db *DB) containers.Store {
 	return &containerStore{
-		tx: tx,
+		db: db,
 	}
 }
 
@@ -51,14 +52,21 @@ func (s *containerStore) Get(ctx context.Context, id string) (containers.Contain
 		return containers.Container{}, err
 	}
 
-	bkt := getContainerBucket(s.tx, namespace, id)
-	if bkt == nil {
-		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
-	}
-
 	container := containers.Container{ID: id}
-	if err := readContainer(&container, bkt); err != nil {
-		return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id)
+
+	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getContainerBucket(tx, namespace, id)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
+		}
+
+		if err := readContainer(&container, bkt); err != nil {
+			return errors.Wrapf(err, "failed to read container %q", id)
+		}
+
+		return nil
+	}); err != nil {
+		return containers.Container{}, err
 	}
 
 	return container, nil
@@ -75,27 +83,30 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C
 		return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
 	}
 
-	bkt := getContainersBucket(s.tx, namespace)
-	if bkt == nil {
-		return nil, nil // empty store
-	}
-
 	var m []containers.Container
-	if err := bkt.ForEach(func(k, v []byte) error {
-		cbkt := bkt.Bucket(k)
-		if cbkt == nil {
-			return nil
-		}
-		container := containers.Container{ID: string(k)}
 
-		if err := readContainer(&container, cbkt); err != nil {
-			return errors.Wrapf(err, "failed to read container %q", string(k))
+	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getContainersBucket(tx, namespace)
+		if bkt == nil {
+			return nil // empty store
 		}
 
-		if filter.Match(adaptContainer(container)) {
-			m = append(m, container)
-		}
-		return nil
+		return bkt.ForEach(func(k, v []byte) error {
+			cbkt := bkt.Bucket(k)
+			if cbkt == nil {
+				return nil
+			}
+			container := containers.Container{ID: string(k)}
+
+			if err := readContainer(&container, cbkt); err != nil {
+				return errors.Wrapf(err, "failed to read container %q", string(k))
+			}
+
+			if filter.Match(adaptContainer(container)) {
+				m = append(m, container)
+			}
+			return nil
+		})
 	}); err != nil {
 		return nil, err
 	}
@@ -113,23 +124,29 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai
 		return containers.Container{}, errors.Wrap(err, "create container failed validation")
 	}
 
-	bkt, err := createContainersBucket(s.tx, namespace)
-	if err != nil {
-		return containers.Container{}, err
-	}
+	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt, err := createContainersBucket(tx, namespace)
+		if err != nil {
+			return err
+		}
 
-	cbkt, err := bkt.CreateBucket([]byte(container.ID))
-	if err != nil {
-		if err == bolt.ErrBucketExists {
-			err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID)
+		cbkt, err := bkt.CreateBucket([]byte(container.ID))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID)
+			}
+			return err
 		}
-		return containers.Container{}, err
-	}
 
-	container.CreatedAt = time.Now().UTC()
-	container.UpdatedAt = container.CreatedAt
-	if err := writeContainer(cbkt, &container); err != nil {
-		return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
+		container.CreatedAt = time.Now().UTC()
+		container.UpdatedAt = container.CreatedAt
+		if err := writeContainer(cbkt, &container); err != nil {
+			return errors.Wrapf(err, "failed to write container %q", container.ID)
+		}
+
+		return nil
+	}); err != nil {
+		return containers.Container{}, err
 	}
 
 	return container, nil
@@ -145,85 +162,91 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
 		return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id")
 	}
 
-	bkt := getContainersBucket(s.tx, namespace)
-	if bkt == nil {
-		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
-	}
-
-	cbkt := bkt.Bucket([]byte(container.ID))
-	if cbkt == nil {
-		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
-	}
-
 	var updated containers.Container
-	if err := readContainer(&updated, cbkt); err != nil {
-		return updated, errors.Wrapf(err, "failed to read container %q", container.ID)
-	}
-	createdat := updated.CreatedAt
-	updated.ID = container.ID
-
-	if len(fieldpaths) == 0 {
-		// only allow updates to these field on full replace.
-		fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"}
-
-		// Fields that are immutable must cause an error when no field paths
-		// are provided. This allows these fields to become mutable in the
-		// future.
-		if updated.Snapshotter != container.Snapshotter {
-			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable")
+	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getContainersBucket(tx, namespace)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
 		}
 
-		if updated.Runtime.Name != container.Runtime.Name {
-			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable")
+		cbkt := bkt.Bucket([]byte(container.ID))
+		if cbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
 		}
-	}
 
-	// apply the field mask. If you update this code, you better follow the
-	// field mask rules in field_mask.proto. If you don't know what this
-	// is, do not update this code.
-	for _, path := range fieldpaths {
-		if strings.HasPrefix(path, "labels.") {
-			if updated.Labels == nil {
-				updated.Labels = map[string]string{}
+		if err := readContainer(&updated, cbkt); err != nil {
+			return errors.Wrapf(err, "failed to read container %q", container.ID)
+		}
+		createdat := updated.CreatedAt
+		updated.ID = container.ID
+
+		if len(fieldpaths) == 0 {
+			// only allow updates to these field on full replace.
+			fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"}
+
+			// Fields that are immutable must cause an error when no field paths
+			// are provided. This allows these fields to become mutable in the
+			// future.
+			if updated.Snapshotter != container.Snapshotter {
+				return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable")
+			}
+
+			if updated.Runtime.Name != container.Runtime.Name {
+				return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable")
 			}
-			key := strings.TrimPrefix(path, "labels.")
-			updated.Labels[key] = container.Labels[key]
-			continue
 		}
 
-		if strings.HasPrefix(path, "extensions.") {
-			if updated.Extensions == nil {
-				updated.Extensions = map[string]types.Any{}
+		// apply the field mask. If you update this code, you better follow the
+		// field mask rules in field_mask.proto. If you don't know what this
+		// is, do not update this code.
+		for _, path := range fieldpaths {
+			if strings.HasPrefix(path, "labels.") {
+				if updated.Labels == nil {
+					updated.Labels = map[string]string{}
+				}
+				key := strings.TrimPrefix(path, "labels.")
+				updated.Labels[key] = container.Labels[key]
+				continue
+			}
+
+			if strings.HasPrefix(path, "extensions.") {
+				if updated.Extensions == nil {
+					updated.Extensions = map[string]types.Any{}
+				}
+				key := strings.TrimPrefix(path, "extensions.")
+				updated.Extensions[key] = container.Extensions[key]
+				continue
+			}
+
+			switch path {
+			case "labels":
+				updated.Labels = container.Labels
+			case "spec":
+				updated.Spec = container.Spec
+			case "extensions":
+				updated.Extensions = container.Extensions
+			case "image":
+				updated.Image = container.Image
+			case "snapshotkey":
+				updated.SnapshotKey = container.SnapshotKey
+			default:
+				return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
 			}
-			key := strings.TrimPrefix(path, "extensions.")
-			updated.Extensions[key] = container.Extensions[key]
-			continue
 		}
 
-		switch path {
-		case "labels":
-			updated.Labels = container.Labels
-		case "spec":
-			updated.Spec = container.Spec
-		case "extensions":
-			updated.Extensions = container.Extensions
-		case "image":
-			updated.Image = container.Image
-		case "snapshotkey":
-			updated.SnapshotKey = container.SnapshotKey
-		default:
-			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
+		if err := validateContainer(&updated); err != nil {
+			return errors.Wrap(err, "update failed validation")
 		}
-	}
 
-	if err := validateContainer(&updated); err != nil {
-		return containers.Container{}, errors.Wrap(err, "update failed validation")
-	}
+		updated.CreatedAt = createdat
+		updated.UpdatedAt = time.Now().UTC()
+		if err := writeContainer(cbkt, &updated); err != nil {
+			return errors.Wrapf(err, "failed to write container %q", container.ID)
+		}
 
-	updated.CreatedAt = createdat
-	updated.UpdatedAt = time.Now().UTC()
-	if err := writeContainer(cbkt, &updated); err != nil {
-		return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
+		return nil
+	}); err != nil {
+		return containers.Container{}, err
 	}
 
 	return updated, nil
@@ -235,15 +258,23 @@ func (s *containerStore) Delete(ctx context.Context, id string) error {
 		return err
 	}
 
-	bkt := getContainersBucket(s.tx, namespace)
-	if bkt == nil {
-		return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
-	}
+	return update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getContainersBucket(tx, namespace)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
+		}
 
-	if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound {
-		return errors.Wrapf(errdefs.ErrNotFound, "container %v", id)
-	}
-	return err
+		if err := bkt.DeleteBucket([]byte(id)); err != nil {
+			if err == bolt.ErrBucketNotFound {
+				err = errors.Wrapf(errdefs.ErrNotFound, "container %v", id)
+			}
+			return err
+		}
+
+		atomic.AddUint32(&s.db.dirty, 1)
+
+		return nil
+	})
 }
 
 func validateContainer(container *containers.Container) error {

+ 5 - 3
vendor/github.com/containerd/containerd/metadata/content.go

@@ -21,6 +21,7 @@ import (
 	"encoding/binary"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -221,9 +222,8 @@ func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error {
 		}
 
 		// Mark content store as dirty for triggering garbage collection
-		cs.db.dirtyL.Lock()
+		atomic.AddUint32(&cs.db.dirty, 1)
 		cs.db.dirtyCS = true
-		cs.db.dirtyL.Unlock()
 
 		return nil
 	})
@@ -567,6 +567,8 @@ func (nw *namespacedWriter) createAndCopy(ctx context.Context, desc ocispec.Desc
 }
 
 func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	ctx = namespaces.WithNamespace(ctx, nw.namespace)
+
 	nw.l.RLock()
 	defer nw.l.RUnlock()
 
@@ -635,11 +637,11 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64,
 			return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size)
 		}
 		size = status.Offset
-		actual = nw.w.Digest()
 
 		if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) {
 			return "", err
 		}
+		actual = nw.w.Digest()
 	}
 
 	bkt, err := createBlobBucket(tx, nw.namespace, actual)

+ 18 - 15
vendor/github.com/containerd/containerd/metadata/db.go

@@ -21,6 +21,7 @@ import (
 	"encoding/binary"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -75,10 +76,16 @@ type DB struct {
 	// sweep phases without preventing read transactions.
 	wlock sync.RWMutex
 
-	// dirty flags and lock keeps track of datastores which have had deletions
-	// since the last garbage collection. These datastores will will be garbage
-	// collected during the next garbage collection.
-	dirtyL  sync.Mutex
+	// dirty flag indicates that references have been removed which require
+	// a garbage collection to ensure the database is clean. This tracks
+	// the number of dirty operations. This should be updated and read
+	// atomically if outside of wlock.Lock.
+	dirty uint32
+
+	// dirtySS and dirtyCS flags keeps track of datastores which have had
+	// deletions since the last garbage collection. These datastores will
+	// be garbage collected during the next garbage collection. These
+	// should only be updated inside of a write transaction or wlock.Lock.
 	dirtySS map[string]struct{}
 	dirtyCS bool
 
@@ -162,7 +169,7 @@ func (m *DB) Init(ctx context.Context) error {
 			}
 		}
 
-		// Previous version fo database found
+		// Previous version of database found
 		if schema != "v0" {
 			updates := migrations[i:]
 
@@ -237,12 +244,10 @@ func (m *DB) Update(fn func(*bolt.Tx) error) error {
 	defer m.wlock.RUnlock()
 	err := m.db.Update(fn)
 	if err == nil {
-		m.dirtyL.Lock()
-		dirty := m.dirtyCS || len(m.dirtySS) > 0
+		dirty := atomic.LoadUint32(&m.dirty) > 0
 		for _, fn := range m.mutationCallbacks {
 			fn(dirty)
 		}
-		m.dirtyL.Unlock()
 	}
 
 	return err
@@ -254,9 +259,9 @@ func (m *DB) Update(fn func(*bolt.Tx) error) error {
 // The callback function is an argument for whether a deletion has occurred
 // since the last garbage collection.
 func (m *DB) RegisterMutationCallback(fn func(bool)) {
-	m.dirtyL.Lock()
+	m.wlock.Lock()
 	m.mutationCallbacks = append(m.mutationCallbacks, fn)
-	m.dirtyL.Unlock()
+	m.wlock.Unlock()
 }
 
 // GCStats holds the duration for the different phases of the garbage collector
@@ -282,8 +287,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) {
 		return nil, err
 	}
 
-	m.dirtyL.Lock()
-
 	if err := m.db.Update(func(tx *bolt.Tx) error {
 		ctx, cancel := context.WithCancel(ctx)
 		defer cancel()
@@ -309,7 +312,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) {
 
 		return nil
 	}); err != nil {
-		m.dirtyL.Unlock()
 		m.wlock.Unlock()
 		return nil, err
 	}
@@ -317,6 +319,9 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) {
 	var stats GCStats
 	var wg sync.WaitGroup
 
+	// reset dirty, no need for atomic inside of wlock.Lock
+	m.dirty = 0
+
 	if len(m.dirtySS) > 0 {
 		var sl sync.Mutex
 		stats.SnapshotD = map[string]time.Duration{}
@@ -349,8 +354,6 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) {
 		m.dirtyCS = false
 	}
 
-	m.dirtyL.Unlock()
-
 	stats.MetaD = time.Since(t1)
 	m.wlock.Unlock()
 

+ 33 - 6
vendor/github.com/containerd/containerd/metadata/gc.go

@@ -46,11 +46,17 @@ const (
 	ResourceIngest
 )
 
+const (
+	resourceContentFlat  = ResourceContent | 0x20
+	resourceSnapshotFlat = ResourceSnapshot | 0x20
+)
+
 var (
 	labelGCRoot       = []byte("containerd.io/gc.root")
 	labelGCSnapRef    = []byte("containerd.io/gc.ref.snapshot.")
 	labelGCContentRef = []byte("containerd.io/gc.ref.content")
 	labelGCExpire     = []byte("containerd.io/gc.expire")
+	labelGCFlat       = []byte("containerd.io/gc.flat")
 )
 
 func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
@@ -90,6 +96,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 					return nil
 				}
 				libkt := lbkt.Bucket(k)
+				var flat bool
 
 				if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil {
 					if expV := lblbkt.Get(labelGCExpire); expV != nil {
@@ -102,6 +109,10 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 							return nil
 						}
 					}
+
+					if flatV := lblbkt.Get(labelGCFlat); flatV != nil {
+						flat = true
+					}
 				}
 
 				fn(gcnode(ResourceLease, ns, string(k)))
@@ -111,16 +122,26 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 				// no need to allow the lookup to be recursive, handling here
 				// therefore reduces the number of database seeks.
 
+				ctype := ResourceContent
+				if flat {
+					ctype = resourceContentFlat
+				}
+
 				cbkt := libkt.Bucket(bucketKeyObjectContent)
 				if cbkt != nil {
 					if err := cbkt.ForEach(func(k, v []byte) error {
-						fn(gcnode(ResourceContent, ns, string(k)))
+						fn(gcnode(ctype, ns, string(k)))
 						return nil
 					}); err != nil {
 						return err
 					}
 				}
 
+				stype := ResourceSnapshot
+				if flat {
+					stype = resourceSnapshotFlat
+				}
+
 				sbkt := libkt.Bucket(bucketKeyObjectSnapshots)
 				if sbkt != nil {
 					if err := sbkt.ForEach(func(sk, sv []byte) error {
@@ -130,7 +151,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 						snbkt := sbkt.Bucket(sk)
 
 						return snbkt.ForEach(func(k, v []byte) error {
-							fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)))
+							fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k)))
 							return nil
 						})
 					}); err != nil {
@@ -257,7 +278,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 }
 
 func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error {
-	if node.Type == ResourceContent {
+	switch node.Type {
+	case ResourceContent:
 		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key))
 		if bkt == nil {
 			// Node may be created from dead edge
@@ -265,7 +287,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)
 		}
 
 		return sendLabelRefs(node.Namespace, bkt, fn)
-	} else if node.Type == ResourceSnapshot {
+	case ResourceSnapshot, resourceSnapshotFlat:
 		parts := strings.SplitN(node.Key, "/", 2)
 		if len(parts) != 2 {
 			return errors.Errorf("invalid snapshot gc key %s", node.Key)
@@ -280,11 +302,16 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)
 		}
 
 		if pv := bkt.Get(bucketKeyParent); len(pv) > 0 {
-			fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv)))
+			fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv)))
+		}
+
+		// Do not send labeled references for flat snapshot refs
+		if node.Type == resourceSnapshotFlat {
+			return nil
 		}
 
 		return sendLabelRefs(node.Namespace, bkt, fn)
-	} else if node.Type == ResourceIngest {
+	case ResourceIngest:
 		// Send expected value
 		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key))
 		if bkt == nil {

+ 8 - 10
vendor/github.com/containerd/containerd/metadata/images.go

@@ -21,6 +21,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
@@ -249,19 +250,16 @@ func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.Del
 			return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
 		}
 
-		err = bkt.DeleteBucket([]byte(name))
-		if err == bolt.ErrBucketNotFound {
-			return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
+		if err = bkt.DeleteBucket([]byte(name)); err != nil {
+			if err == bolt.ErrBucketNotFound {
+				err = errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
+			}
+			return err
 		}
 
-		// A reference to a piece of content has been removed,
-		// mark content store as dirty for triggering garbage
-		// collection
-		s.db.dirtyL.Lock()
-		s.db.dirtyCS = true
-		s.db.dirtyL.Unlock()
+		atomic.AddUint32(&s.db.dirty, 1)
 
-		return err
+		return nil
 	})
 }
 

+ 242 - 56
vendor/github.com/containerd/containerd/metadata/leases.go

@@ -18,6 +18,9 @@ package metadata
 
 import (
 	"context"
+	"fmt"
+	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
@@ -30,17 +33,17 @@ import (
 	bolt "go.etcd.io/bbolt"
 )
 
-// LeaseManager manages the create/delete lifecyle of leases
+// LeaseManager manages the create/delete lifecycle of leases
 // and also returns existing leases
 type LeaseManager struct {
-	tx *bolt.Tx
+	db *DB
 }
 
 // NewLeaseManager creates a new lease manager for managing leases using
 // the provided database transaction.
-func NewLeaseManager(tx *bolt.Tx) *LeaseManager {
+func NewLeaseManager(db *DB) *LeaseManager {
 	return &LeaseManager{
-		tx: tx,
+		db: db,
 	}
 }
 
@@ -61,56 +64,66 @@ func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases.
 		return leases.Lease{}, err
 	}
 
-	topbkt, err := createBucketIfNotExists(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
-	if err != nil {
-		return leases.Lease{}, err
-	}
+	if err := update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+		if err != nil {
+			return err
+		}
 
-	txbkt, err := topbkt.CreateBucket([]byte(l.ID))
-	if err != nil {
-		if err == bolt.ErrBucketExists {
-			err = errdefs.ErrAlreadyExists
+		txbkt, err := topbkt.CreateBucket([]byte(l.ID))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errdefs.ErrAlreadyExists
+			}
+			return errors.Wrapf(err, "lease %q", l.ID)
 		}
-		return leases.Lease{}, errors.Wrapf(err, "lease %q", l.ID)
-	}
 
-	t := time.Now().UTC()
-	createdAt, err := t.MarshalBinary()
-	if err != nil {
-		return leases.Lease{}, err
-	}
-	if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil {
-		return leases.Lease{}, err
-	}
+		t := time.Now().UTC()
+		createdAt, err := t.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil {
+			return err
+		}
 
-	if l.Labels != nil {
-		if err := boltutil.WriteLabels(txbkt, l.Labels); err != nil {
-			return leases.Lease{}, err
+		if l.Labels != nil {
+			if err := boltutil.WriteLabels(txbkt, l.Labels); err != nil {
+				return err
+			}
 		}
-	}
-	l.CreatedAt = t
+		l.CreatedAt = t
 
+		return nil
+	}); err != nil {
+		return leases.Lease{}, err
+	}
 	return l, nil
 }
 
-// Delete delets the lease with the provided lease ID
+// Delete deletes the lease with the provided lease ID
 func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...leases.DeleteOpt) error {
 	namespace, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return err
 	}
 
-	topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
-	if topbkt == nil {
-		return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
-	}
-	if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil {
-		if err == bolt.ErrBucketNotFound {
-			err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+	return update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+		if topbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
 		}
-		return err
-	}
-	return nil
+		if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil {
+			if err == bolt.ErrBucketNotFound {
+				err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+			}
+			return err
+		}
+
+		atomic.AddUint32(&lm.db.dirty, 1)
+
+		return nil
+	})
 }
 
 // List lists all active leases
@@ -127,44 +140,184 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease,
 
 	var ll []leases.Lease
 
-	topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
-	if topbkt == nil {
-		return ll, nil
-	}
+	if err := view(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+		if topbkt == nil {
+			return nil
+		}
+
+		return topbkt.ForEach(func(k, v []byte) error {
+			if v != nil {
+				return nil
+			}
+			txbkt := topbkt.Bucket(k)
+
+			l := leases.Lease{
+				ID: string(k),
+			}
+
+			if v := txbkt.Get(bucketKeyCreatedAt); v != nil {
+				t := &l.CreatedAt
+				if err := t.UnmarshalBinary(v); err != nil {
+					return err
+				}
+			}
+
+			labels, err := boltutil.ReadLabels(txbkt)
+			if err != nil {
+				return err
+			}
+			l.Labels = labels
+
+			if filter.Match(adaptLease(l)) {
+				ll = append(ll, l)
+			}
 
-	if err := topbkt.ForEach(func(k, v []byte) error {
-		if v != nil {
 			return nil
+		})
+	}); err != nil {
+		return nil, err
+	}
+
+	return ll, nil
+}
+
+// AddResource references the resource by the provided lease.
+func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID))
+		if topbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
 		}
-		txbkt := topbkt.Bucket(k)
 
-		l := leases.Lease{
-			ID: string(k),
+		keys, ref, err := parseLeaseResource(r)
+		if err != nil {
+			return err
 		}
 
-		if v := txbkt.Get(bucketKeyCreatedAt); v != nil {
-			t := &l.CreatedAt
-			if err := t.UnmarshalBinary(v); err != nil {
+		bkt := topbkt
+		for _, key := range keys {
+			bkt, err = bkt.CreateBucketIfNotExists([]byte(key))
+			if err != nil {
 				return err
 			}
 		}
+		return bkt.Put([]byte(ref), nil)
+	})
+}
 
-		labels, err := boltutil.ReadLabels(txbkt)
+// DeleteResource dereferences the resource by the provided lease.
+func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID))
+		if topbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+		}
+
+		keys, ref, err := parseLeaseResource(r)
 		if err != nil {
 			return err
 		}
-		l.Labels = labels
 
-		if filter.Match(adaptLease(l)) {
-			ll = append(ll, l)
+		bkt := topbkt
+		for _, key := range keys {
+			if bkt == nil {
+				break
+			}
+			bkt = bkt.Bucket([]byte(key))
+		}
+
+		if bkt != nil {
+			if err := bkt.Delete([]byte(ref)); err != nil {
+				return err
+			}
 		}
 
+		atomic.AddUint32(&lm.db.dirty, 1)
+
 		return nil
-	}); err != nil {
+	})
+}
+
+// ListResources lists all the resources referenced by the lease.
+func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	return ll, nil
+	var rs []leases.Resource
+
+	if err := view(ctx, lm.db, func(tx *bolt.Tx) error {
+
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID))
+		if topbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+		}
+
+		// content resources
+		if cbkt := topbkt.Bucket(bucketKeyObjectContent); cbkt != nil {
+			if err := cbkt.ForEach(func(k, _ []byte) error {
+				rs = append(rs, leases.Resource{
+					ID:   string(k),
+					Type: string(bucketKeyObjectContent),
+				})
+
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+
+		// ingest resources
+		if lbkt := topbkt.Bucket(bucketKeyObjectIngests); lbkt != nil {
+			if err := lbkt.ForEach(func(k, _ []byte) error {
+				rs = append(rs, leases.Resource{
+					ID:   string(k),
+					Type: string(bucketKeyObjectIngests),
+				})
+
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+
+		// snapshot resources
+		if sbkt := topbkt.Bucket(bucketKeyObjectSnapshots); sbkt != nil {
+			if err := sbkt.ForEach(func(sk, sv []byte) error {
+				if sv != nil {
+					return nil
+				}
+
+				snbkt := sbkt.Bucket(sk)
+				return snbkt.ForEach(func(k, _ []byte) error {
+					rs = append(rs, leases.Resource{
+						ID:   string(k),
+						Type: fmt.Sprintf("%s/%s", bucketKeyObjectSnapshots, sk),
+					})
+					return nil
+				})
+			}); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return rs, nil
 }
 
 func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error {
@@ -307,3 +460,36 @@ func removeIngestLease(ctx context.Context, tx *bolt.Tx, ref string) error {
 
 	return bkt.Delete([]byte(ref))
 }
+
+func parseLeaseResource(r leases.Resource) ([]string, string, error) {
+	var (
+		ref  = r.ID
+		typ  = r.Type
+		keys = strings.Split(typ, "/")
+	)
+
+	switch k := keys[0]; k {
+	case string(bucketKeyObjectContent),
+		string(bucketKeyObjectIngests):
+
+		if len(keys) != 1 {
+			return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid resource type %s", typ)
+		}
+
+		if k == string(bucketKeyObjectContent) {
+			dgst, err := digest.Parse(ref)
+			if err != nil {
+				return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid content resource id %s: %v", ref, err)
+			}
+			ref = dgst.String()
+		}
+	case string(bucketKeyObjectSnapshots):
+		if len(keys) != 2 {
+			return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid snapshot resource type %s", typ)
+		}
+	default:
+		return nil, "", errors.Wrapf(errdefs.ErrNotImplemented, "resource type %s not supported yet", typ)
+	}
+
+	return keys, ref, nil
+}

+ 9 - 1
vendor/github.com/containerd/containerd/metadata/namespaces.go

@@ -129,7 +129,15 @@ func (s *namespaceStore) List(ctx context.Context) ([]string, error) {
 	return namespaces, nil
 }
 
-func (s *namespaceStore) Delete(ctx context.Context, namespace string) error {
+func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error {
+	i := &namespaces.DeleteInfo{
+		Name: namespace,
+	}
+	for _, o := range opts {
+		if err := o(ctx, i); err != nil {
+			return err
+		}
+	}
 	bkt := getBucket(s.tx, bucketKeyVersion)
 	if empty, err := s.namespaceEmpty(ctx, namespace); err != nil {
 		return err

+ 38 - 5
vendor/github.com/containerd/containerd/metadata/snapshot.go

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
@@ -34,6 +35,10 @@ import (
 	bolt "go.etcd.io/bbolt"
 )
 
+const (
+	inheritedLabelsPrefix = "containerd.io/snapshot/"
+)
+
 type snapshotter struct {
 	snapshots.Snapshotter
 	name string
@@ -209,6 +214,15 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath
 		bkey = string(sbkt.Get(bucketKeyName))
 		local.Parent = string(sbkt.Get(bucketKeyParent))
 
+		inner := snapshots.Info{
+			Name:   bkey,
+			Labels: filterInheritedLabels(local.Labels),
+		}
+
+		if _, err := s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil {
+			return err
+		}
+
 		return nil
 	}); err != nil {
 		return snapshots.Info{}, err
@@ -338,12 +352,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re
 			return err
 		}
 
+		inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels))
+
 		// TODO: Consider doing this outside of transaction to lessen
 		// metadata lock time
 		if readonly {
-			m, err = s.Snapshotter.View(ctx, bkey, bparent)
+			m, err = s.Snapshotter.View(ctx, bkey, bparent, inheritedOpt)
 		} else {
-			m, err = s.Snapshotter.Prepare(ctx, bkey, bparent)
+			m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, inheritedOpt)
 		}
 		return err
 	}); err != nil {
@@ -445,9 +461,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
 			return err
 		}
 
+		inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels))
+
 		// TODO: Consider doing this outside of transaction to lessen
 		// metadata lock time
-		return s.Snapshotter.Commit(ctx, nameKey, bkey)
+		return s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt)
 	})
 
 }
@@ -500,9 +518,8 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
 		}
 
 		// Mark snapshotter as dirty for triggering garbage collection
-		s.db.dirtyL.Lock()
+		atomic.AddUint32(&s.db.dirty, 1)
 		s.db.dirtySS[s.name] = struct{}{}
-		s.db.dirtyL.Unlock()
 
 		return nil
 	})
@@ -761,3 +778,19 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error {
 func (s *snapshotter) Close() error {
 	return s.Snapshotter.Close()
 }
+
+// filterInheritedLabels filters the provided labels by removing any key which doesn't have
+// a prefix of "containerd.io/snapshot/".
+func filterInheritedLabels(labels map[string]string) map[string]string {
+	if labels == nil {
+		return nil
+	}
+
+	filtered := make(map[string]string)
+	for k, v := range labels {
+		if strings.HasPrefix(k, inheritedLabelsPrefix) {
+			filtered[k] = v
+		}
+	}
+	return filtered
+}

+ 12 - 4
vendor/github.com/containerd/containerd/namespaces.go

@@ -100,10 +100,18 @@ func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) {
 	return namespaces, nil
 }
 
-func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error {
-	var req api.DeleteNamespaceRequest
-
-	req.Name = namespace
+func (r *remoteNamespaces) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error {
+	i := namespaces.DeleteInfo{
+		Name: namespace,
+	}
+	for _, o := range opts {
+		if err := o(ctx, &i); err != nil {
+			return err
+		}
+	}
+	req := api.DeleteNamespaceRequest{
+		Name: namespace,
+	}
 	_, err := r.client.Delete(ctx, &req)
 	if err != nil {
 		return errdefs.FromGRPC(err)

+ 6 - 8
vendor/github.com/containerd/containerd/namespaces/context.go

@@ -36,10 +36,9 @@ type namespaceKey struct{}
 // WithNamespace sets a given namespace on the context
 func WithNamespace(ctx context.Context, namespace string) context.Context {
 	ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace
-
-	// also store on the grpc headers so it gets picked up by any clients that
+	// also store on the grpc and ttrpc headers so it gets picked up by any clients that
 	// are using this.
-	return withGRPCNamespaceHeader(ctx, namespace)
+	return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace)
 }
 
 // NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or
@@ -58,22 +57,21 @@ func NamespaceFromEnv(ctx context.Context) context.Context {
 func Namespace(ctx context.Context) (string, bool) {
 	namespace, ok := ctx.Value(namespaceKey{}).(string)
 	if !ok {
-		return fromGRPCHeader(ctx)
+		if namespace, ok = fromGRPCHeader(ctx); !ok {
+			return fromTTRPCHeader(ctx)
+		}
 	}
-
 	return namespace, ok
 }
 
-// NamespaceRequired returns the valid namepace from the context or an error.
+// NamespaceRequired returns the valid namespace from the context or an error.
 func NamespaceRequired(ctx context.Context) (string, error) {
 	namespace, ok := Namespace(ctx)
 	if !ok || namespace == "" {
 		return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required")
 	}
-
 	if err := Validate(namespace); err != nil {
 		return "", errors.Wrap(err, "namespace validation")
 	}
-
 	return namespace, nil
 }

+ 10 - 1
vendor/github.com/containerd/containerd/namespaces/store.go

@@ -33,5 +33,14 @@ type Store interface {
 	List(ctx context.Context) ([]string, error)
 
 	// Delete removes the namespace. The namespace must be empty to be deleted.
-	Delete(ctx context.Context, namespace string) error
+	Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error
 }
+
+// DeleteInfo specifies information for the deletion of a namespace
+type DeleteInfo struct {
+	// Name of the namespace
+	Name string
+}
+
+// DeleteOpts allows the caller to set options for namespace deletion
+type DeleteOpts func(context.Context, *DeleteInfo) error

+ 51 - 0
vendor/github.com/containerd/containerd/namespaces/ttrpc.go

@@ -0,0 +1,51 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package namespaces
+
+import (
+	"context"
+
+	"github.com/containerd/ttrpc"
+)
+
+const (
+	// TTRPCHeader defines the header name for specifying a containerd namespace
+	TTRPCHeader = "containerd-namespace-ttrpc"
+)
+
+func copyMetadata(src ttrpc.MD) ttrpc.MD {
+	md := ttrpc.MD{}
+	for k, v := range src {
+		md[k] = append(md[k], v...)
+	}
+	return md
+}
+
+func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
+	md, ok := ttrpc.GetMetadata(ctx)
+	if !ok {
+		md = ttrpc.MD{}
+	} else {
+		md = copyMetadata(md)
+	}
+	md.Set(TTRPCHeader, namespace)
+	return ttrpc.WithMetadata(ctx, md)
+}
+
+func fromTTRPCHeader(ctx context.Context) (string, bool) {
+	return ttrpc.GetMetadataValue(ctx, TTRPCHeader)
+}

+ 1 - 2
vendor/github.com/containerd/containerd/oci/spec.go

@@ -78,7 +78,7 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s
 	return err
 }
 
-// ApplyOpts applys the options to the given spec, injecting data from the
+// ApplyOpts applies the options to the given spec, injecting data from the
 // context, client and container instance.
 func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *Spec, opts ...SpecOpts) error {
 	for _, o := range opts {
@@ -141,7 +141,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error {
 			Path: defaultRootfsPath,
 		},
 		Process: &specs.Process{
-			Env:             defaultUnixEnv,
 			Cwd:             "/",
 			NoNewPrivileges: true,
 			User: specs.User{

+ 112 - 4
vendor/github.com/containerd/containerd/oci/spec_opts.go

@@ -17,6 +17,7 @@
 package oci
 
 import (
+	"bufio"
 	"context"
 	"encoding/json"
 	"fmt"
@@ -76,6 +77,20 @@ func setLinux(s *Spec) {
 	}
 }
 
+// nolint
+func setResources(s *Spec) {
+	if s.Linux != nil {
+		if s.Linux.Resources == nil {
+			s.Linux.Resources = &specs.LinuxResources{}
+		}
+	}
+	if s.Windows != nil {
+		if s.Windows.Resources == nil {
+			s.Windows.Resources = &specs.WindowsResources{}
+		}
+	}
+}
+
 // setCapabilities sets Linux Capabilities to empty if unset
 func setCapabilities(s *Spec) {
 	setProcess(s)
@@ -104,7 +119,7 @@ func WithDefaultSpecForPlatform(platform string) SpecOpts {
 	}
 }
 
-// WithSpecFromBytes loads the the spec from the provided byte slice.
+// WithSpecFromBytes loads the spec from the provided byte slice.
 func WithSpecFromBytes(p []byte) SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
 		*s = Spec{} // make sure spec is cleared.
@@ -137,6 +152,13 @@ func WithEnv(environmentVariables []string) SpecOpts {
 	}
 }
 
+// WithDefaultPathEnv sets the $PATH environment variable to the
+// default PATH defined in this package.
+func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv)
+	return nil
+}
+
 // replaceOrAppendEnvValues returns the defaults with the overrides either
 // replaced by env key or appended to the list
 func replaceOrAppendEnvValues(defaults, overrides []string) []string {
@@ -312,7 +334,11 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
 
 		setProcess(s)
 		if s.Linux != nil {
-			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
+			defaults := config.Env
+			if len(defaults) == 0 {
+				defaults = defaultUnixEnv
+			}
+			s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env)
 			cmd := config.Cmd
 			if len(args) > 0 {
 				cmd = args
@@ -334,7 +360,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
 			// even if there is no specified user in the image config
 			return WithAdditionalGIDs("root")(ctx, client, c, s)
 		} else if s.Windows != nil {
-			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
+			s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env)
 			cmd := config.Cmd
 			if len(args) > 0 {
 				cmd = args
@@ -607,7 +633,7 @@ func WithUserID(uid uint32) SpecOpts {
 }
 
 // WithUsername sets the correct UID and GID for the container
-// based on the the image's /etc/passwd contents. If /etc/passwd
+// based on the image's /etc/passwd contents. If /etc/passwd
 // does not exist, or the username is not found in /etc/passwd,
 // it returns error.
 func WithUsername(username string) SpecOpts {
@@ -1139,3 +1165,85 @@ func WithAnnotations(annotations map[string]string) SpecOpts {
 		return nil
 	}
 }
+
+// WithLinuxDevices adds the provided linux devices to the spec
+func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		s.Linux.Devices = append(s.Linux.Devices, devices...)
+		return nil
+	}
+}
+
+var ErrNotADevice = errors.New("not a device node")
+
+// WithLinuxDevice adds the device specified by path to the spec
+func WithLinuxDevice(path, permissions string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		setResources(s)
+
+		dev, err := deviceFromPath(path, permissions)
+		if err != nil {
+			return err
+		}
+
+		s.Linux.Devices = append(s.Linux.Devices, *dev)
+
+		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{
+			Type:   dev.Type,
+			Allow:  true,
+			Major:  &dev.Major,
+			Minor:  &dev.Minor,
+			Access: permissions,
+		})
+
+		return nil
+	}
+}
+
+// WithEnvFile adds environment variables from a file to the container's spec
+func WithEnvFile(path string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		var vars []string
+		f, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		sc := bufio.NewScanner(f)
+		for sc.Scan() {
+			if sc.Err() != nil {
+				return sc.Err()
+			}
+			vars = append(vars, sc.Text())
+		}
+		return WithEnv(vars)(nil, nil, nil, s)
+	}
+}
+
+// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config
+// and an Opts was trying to set a configuration value on the mount.
+var ErrNoShmMount = errors.New("no /dev/shm mount specified")
+
+// WithDevShmSize sets the size of the /dev/shm mount for the container.
+//
+// The size value is specified in kb, kilobytes.
+func WithDevShmSize(kb int64) SpecOpts {
+	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+		for _, m := range s.Mounts {
+			if m.Source == "shm" && m.Type == "tmpfs" {
+				for i, o := range m.Options {
+					if strings.HasPrefix(o, "size=") {
+						m.Options[i] = fmt.Sprintf("size=%dk", kb)
+						return nil
+					}
+				}
+				m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb))
+				return nil
+			}
+		}
+		return ErrNoShmMount
+	}
+}

+ 64 - 0
vendor/github.com/containerd/containerd/oci/spec_opts_linux.go

@@ -0,0 +1,64 @@
+// +build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package oci
+
+import (
+	"os"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/unix"
+)
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	var stat unix.Stat_t
+	if err := unix.Lstat(path, &stat); err != nil {
+		return nil, err
+	}
+
+	var (
+		// The type is 32bit on mips.
+		devNumber = uint64(stat.Rdev) // nolint: unconvert
+		major     = unix.Major(devNumber)
+		minor     = unix.Minor(devNumber)
+	)
+	if major == 0 {
+		return nil, ErrNotADevice
+	}
+
+	var (
+		devType string
+		mode    = stat.Mode
+	)
+	switch {
+	case mode&unix.S_IFBLK == unix.S_IFBLK:
+		devType = "b"
+	case mode&unix.S_IFCHR == unix.S_IFCHR:
+		devType = "c"
+	}
+	fm := os.FileMode(mode)
+	return &specs.LinuxDevice{
+		Type:     devType,
+		Path:     path,
+		Major:    int64(major),
+		Minor:    int64(minor),
+		FileMode: &fm,
+		UID:      &stat.Uid,
+		GID:      &stat.Gid,
+	}, nil
+}

+ 63 - 0
vendor/github.com/containerd/containerd/oci/spec_opts_unix.go

@@ -0,0 +1,63 @@
+// +build !linux,!windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package oci
+
+import (
+	"os"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/unix"
+)
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	var stat unix.Stat_t
+	if err := unix.Lstat(path, &stat); err != nil {
+		return nil, err
+	}
+
+	var (
+		devNumber = uint64(stat.Rdev)
+		major     = unix.Major(devNumber)
+		minor     = unix.Minor(devNumber)
+	)
+	if major == 0 {
+		return nil, ErrNotADevice
+	}
+
+	var (
+		devType string
+		mode    = stat.Mode
+	)
+	switch {
+	case mode&unix.S_IFBLK == unix.S_IFBLK:
+		devType = "b"
+	case mode&unix.S_IFCHR == unix.S_IFCHR:
+		devType = "c"
+	}
+	fm := os.FileMode(mode)
+	return &specs.LinuxDevice{
+		Type:     devType,
+		Path:     path,
+		Major:    int64(major),
+		Minor:    int64(minor),
+		FileMode: &fm,
+		UID:      &stat.Uid,
+		GID:      &stat.Gid,
+	}, nil
+}

+ 5 - 0
vendor/github.com/containerd/containerd/oci/spec_opts_windows.go

@@ -23,6 +23,7 @@ import (
 
 	"github.com/containerd/containerd/containers"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )
 
 // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the
@@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
 		return nil
 	}
 }
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	return nil, errors.New("device from path not supported on Windows")
+}

+ 13 - 1
vendor/github.com/containerd/containerd/pkg/dialer/dialer.go

@@ -17,6 +17,7 @@
 package dialer
 
 import (
+	"context"
 	"net"
 	"time"
 
@@ -28,8 +29,19 @@ type dialResult struct {
 	err error
 }
 
+// ContextDialer returns a GRPC net.Conn connected to the provided address
+func ContextDialer(ctx context.Context, address string) (net.Conn, error) {
+	if deadline, ok := ctx.Deadline(); ok {
+		return timeoutDialer(address, time.Until(deadline))
+	}
+	return timeoutDialer(address, 0)
+}
+
 // Dialer returns a GRPC net.Conn connected to the provided address
-func Dialer(address string, timeout time.Duration) (net.Conn, error) {
+// Deprecated: use ContextDialer and grpc.WithContextDialer.
+var Dialer = timeoutDialer
+
+func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
 	var (
 		stopC = make(chan struct{})
 		synC  = make(chan *dialResult)

+ 2 - 3
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go → vendor/github.com/containerd/containerd/pkg/process/deleted_state.go

@@ -16,14 +16,13 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
 
 	"github.com/containerd/console"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/runtime/proc"
 	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 )
@@ -67,6 +66,6 @@ func (s *deletedState) SetExited(status int) {
 	// no op
 }
 
-func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return nil, errors.Errorf("cannot exec in a deleted state")
 }

+ 18 - 8
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go → vendor/github.com/containerd/containerd/pkg/process/exec.go

@@ -16,7 +16,7 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
@@ -31,7 +31,8 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/containerd/console"
-	"github.com/containerd/containerd/runtime/proc"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/pkg/stdio"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -49,10 +50,10 @@ type execProcess struct {
 	io      *processIO
 	status  int
 	exited  time.Time
-	pid     *safePid
+	pid     safePid
 	closers []io.Closer
 	stdin   io.Closer
-	stdio   proc.Stdio
+	stdio   stdio.Stdio
 	path    string
 	spec    specs.Process
 
@@ -95,6 +96,7 @@ func (e *execProcess) setExited(status int) {
 	e.status = status
 	e.exited = time.Now()
 	e.parent.Platform.ShutdownConsole(context.Background(), e.console)
+	e.pid.set(StoppedPID)
 	close(e.waitBlock)
 }
 
@@ -106,7 +108,7 @@ func (e *execProcess) Delete(ctx context.Context) error {
 }
 
 func (e *execProcess) delete(ctx context.Context) error {
-	e.wg.Wait()
+	waitTimeout(ctx, &e.wg, 2*time.Second)
 	if e.io != nil {
 		for _, c := range e.closers {
 			c.Close()
@@ -142,7 +144,12 @@ func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error {
 
 func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error {
 	pid := e.pid.get()
-	if pid != 0 {
+	switch {
+	case pid == 0:
+		return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created")
+	case pid < 0:
+		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
+	default:
 		if err := unix.Kill(pid, syscall.Signal(sig)); err != nil {
 			return errors.Wrapf(checkKillError(err), "exec kill error")
 		}
@@ -154,7 +161,7 @@ func (e *execProcess) Stdin() io.Closer {
 	return e.stdin
 }
 
-func (e *execProcess) Stdio() proc.Stdio {
+func (e *execProcess) Stdio() stdio.Stdio {
 	return e.stdio
 }
 
@@ -254,10 +261,13 @@ func (e *execProcess) Status(ctx context.Context) (string, error) {
 	}
 	e.mu.Lock()
 	defer e.mu.Unlock()
-	// if we don't have a pid then the exec process has just been created
+	// if we don't have a pid(pid=0) then the exec process has just been created
 	if e.pid.get() == 0 {
 		return "created", nil
 	}
+	if e.pid.get() == StoppedPID {
+		return "stopped", nil
+	}
 	// if we have a pid and it can be signaled, the process is running
 	if err := unix.Kill(e.pid.get(), 0); err == nil {
 		return "running", nil

+ 1 - 1
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go → vendor/github.com/containerd/containerd/pkg/process/exec_state.go

@@ -16,7 +16,7 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"

+ 18 - 22
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go → vendor/github.com/containerd/containerd/pkg/process/init.go

@@ -16,7 +16,7 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
@@ -33,7 +33,7 @@ import (
 	"github.com/containerd/console"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/runtime/proc"
+	"github.com/containerd/containerd/pkg/stdio"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	google_protobuf "github.com/gogo/protobuf/types"
@@ -59,15 +59,15 @@ type Init struct {
 	id           string
 	Bundle       string
 	console      console.Console
-	Platform     proc.Platform
+	Platform     stdio.Platform
 	io           *processIO
 	runtime      *runc.Runc
 	status       int
 	exited       time.Time
-	pid          int
+	pid          safePid
 	closers      []io.Closer
 	stdin        io.Closer
-	stdio        proc.Stdio
+	stdio        stdio.Stdio
 	Rootfs       string
 	IoUID        int
 	IoGID        int
@@ -93,7 +93,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru
 }
 
 // New returns a new process
-func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init {
+func New(id string, runtime *runc.Runc, stdio stdio.Stdio) *Init {
 	p := &Init{
 		id:        id,
 		runtime:   runtime,
@@ -113,6 +113,9 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 		pio     *processIO
 		pidFile = newPidFile(p.Bundle)
 	)
+	p.pid.Lock()
+	defer p.pid.Unlock()
+
 	if r.Terminal {
 		if socket, err = runc.NewTempConsoleSocket(); err != nil {
 			return errors.Wrap(err, "failed to create OCI runtime console socket")
@@ -167,7 +170,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
 	}
-	p.pid = pid
+	p.pid.pid = pid
 	return nil
 }
 
@@ -213,7 +216,7 @@ func (p *Init) ID() string {
 
 // Pid of the process
 func (p *Init) Pid() int {
-	return p.pid
+	return p.pid.get()
 }
 
 // ExitStatus of the process
@@ -272,6 +275,7 @@ func (p *Init) setExited(status int) {
 	p.exited = time.Now()
 	p.status = status
 	p.Platform.ShutdownConsole(context.Background(), p.console)
+	p.pid.set(StoppedPID)
 	close(p.waitBlock)
 }
 
@@ -284,7 +288,7 @@ func (p *Init) Delete(ctx context.Context) error {
 }
 
 func (p *Init) delete(ctx context.Context) error {
-	p.wg.Wait()
+	waitTimeout(ctx, &p.wg, 2*time.Second)
 	err := p.runtime.Delete(ctx, p.id, nil)
 	// ignore errors if a runtime has already deleted the process
 	// but we still hold metadata and pipes
@@ -324,13 +328,6 @@ func (p *Init) Resize(ws console.WinSize) error {
 	return p.console.Resize(ws)
 }
 
-func (p *Init) resize(ws console.WinSize) error {
-	if p.console == nil {
-		return nil
-	}
-	return p.console.Resize(ws)
-}
-
 // Pause the init process and all its child processes
 func (p *Init) Pause(ctx context.Context) error {
 	p.mu.Lock()
@@ -384,7 +381,7 @@ func (p *Init) Runtime() *runc.Runc {
 }
 
 // Exec returns a new child process
-func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 
@@ -392,7 +389,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce
 }
 
 // exec returns a new exec'd process
-func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	// process exec request
 	var spec specs.Process
 	if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {
@@ -405,14 +402,13 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce
 		path:   path,
 		parent: p,
 		spec:   spec,
-		stdio: proc.Stdio{
+		stdio: stdio.Stdio{
 			Stdin:    r.Stdin,
 			Stdout:   r.Stdout,
 			Stderr:   r.Stderr,
 			Terminal: r.Terminal,
 		},
 		waitBlock: make(chan struct{}),
-		pid:       &safePid{},
 	}
 	e.execState = &execCreatedState{p: e}
 	return e, nil
@@ -472,7 +468,7 @@ func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error {
 }
 
 // Stdio of the process
-func (p *Init) Stdio() proc.Stdio {
+func (p *Init) Stdio() stdio.Stdio {
 	return p.stdio
 }
 
@@ -492,7 +488,7 @@ func (p *Init) runtimeError(rErr error, msg string) error {
 	}
 }
 
-func withConditionalIO(c proc.Stdio) runc.IOOpt {
+func withConditionalIO(c stdio.Stdio) runc.IOOpt {
 	return func(o *runc.IOOption) {
 		o.OpenStdin = c.Stdin != ""
 		o.OpenStdout = c.Stdout != ""

+ 11 - 31
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go → vendor/github.com/containerd/containerd/pkg/process/init_state.go

@@ -16,13 +16,11 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
 
-	"github.com/containerd/console"
-	"github.com/containerd/containerd/runtime/proc"
 	runc "github.com/containerd/go-runc"
 	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
@@ -30,14 +28,13 @@ import (
 )
 
 type initState interface {
-	Resize(console.WinSize) error
 	Start(context.Context) error
 	Delete(context.Context) error
 	Pause(context.Context) error
 	Resume(context.Context) error
 	Update(context.Context, *google_protobuf.Any) error
 	Checkpoint(context.Context, *CheckpointConfig) error
-	Exec(context.Context, string, *ExecConfig) (proc.Process, error)
+	Exec(context.Context, string, *ExecConfig) (Process, error)
 	Kill(context.Context, uint32, bool) error
 	SetExited(int)
 }
@@ -76,10 +73,6 @@ func (s *createdState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro
 	return errors.Errorf("cannot checkpoint a task in created state")
 }
 
-func (s *createdState) Resize(ws console.WinSize) error {
-	return s.p.resize(ws)
-}
-
 func (s *createdState) Start(ctx context.Context) error {
 	if err := s.p.start(ctx); err != nil {
 		return err
@@ -106,7 +99,7 @@ func (s *createdState) SetExited(status int) {
 	}
 }
 
-func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return s.p.exec(ctx, path, r)
 }
 
@@ -145,14 +138,13 @@ func (s *createdCheckpointState) Checkpoint(ctx context.Context, r *CheckpointCo
 	return errors.Errorf("cannot checkpoint a task in created state")
 }
 
-func (s *createdCheckpointState) Resize(ws console.WinSize) error {
-	return s.p.resize(ws)
-}
-
 func (s *createdCheckpointState) Start(ctx context.Context) error {
 	p := s.p
 	sio := p.stdio
 
+	p.pid.Lock()
+	defer p.pid.Unlock()
+
 	var (
 		err    error
 		socket *runc.Socket
@@ -192,7 +184,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
 	}
-	p.pid = pid
+	p.pid.pid = pid
 	return s.transition("running")
 }
 
@@ -215,7 +207,7 @@ func (s *createdCheckpointState) SetExited(status int) {
 	}
 }
 
-func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return nil, errors.Errorf("cannot exec in a created state")
 }
 
@@ -255,10 +247,6 @@ func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro
 	return s.p.checkpoint(ctx, r)
 }
 
-func (s *runningState) Resize(ws console.WinSize) error {
-	return s.p.resize(ws)
-}
-
 func (s *runningState) Start(ctx context.Context) error {
 	return errors.Errorf("cannot start a running process")
 }
@@ -279,7 +267,7 @@ func (s *runningState) SetExited(status int) {
 	}
 }
 
-func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return s.p.exec(ctx, path, r)
 }
 
@@ -319,10 +307,6 @@ func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error
 	return s.p.checkpoint(ctx, r)
 }
 
-func (s *pausedState) Resize(ws console.WinSize) error {
-	return s.p.resize(ws)
-}
-
 func (s *pausedState) Start(ctx context.Context) error {
 	return errors.Errorf("cannot start a paused process")
 }
@@ -347,7 +331,7 @@ func (s *pausedState) SetExited(status int) {
 	}
 }
 
-func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return nil, errors.Errorf("cannot exec in a paused state")
 }
 
@@ -381,10 +365,6 @@ func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) erro
 	return errors.Errorf("cannot checkpoint a stopped container")
 }
 
-func (s *stoppedState) Resize(ws console.WinSize) error {
-	return errors.Errorf("cannot resize a stopped container")
-}
-
 func (s *stoppedState) Start(ctx context.Context) error {
 	return errors.Errorf("cannot start a stopped process")
 }
@@ -404,6 +384,6 @@ func (s *stoppedState) SetExited(status int) {
 	// no op
 }
 
-func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return nil, errors.Errorf("cannot exec in a stopped state")
 }

+ 16 - 12
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go → vendor/github.com/containerd/containerd/pkg/process/io.go

@@ -16,7 +16,7 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
@@ -32,7 +32,7 @@ import (
 
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/namespaces"
-	"github.com/containerd/containerd/runtime/proc"
+	"github.com/containerd/containerd/pkg/stdio"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	"github.com/pkg/errors"
@@ -50,7 +50,7 @@ type processIO struct {
 
 	uri   *url.URL
 	copy  bool
-	stdio proc.Stdio
+	stdio stdio.Stdio
 }
 
 func (p *processIO) Close() error {
@@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error {
 	return nil
 }
 
-func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) {
+func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdio) (*processIO, error) {
 	pio := &processIO{
 		stdio: stdio,
 	}
@@ -101,17 +101,20 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio
 		pio.copy = true
 		pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
 	case "binary":
-		pio.io, err = newBinaryIO(ctx, id, u)
+		pio.io, err = NewBinaryIO(ctx, id, u)
 	case "file":
-		if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil {
+		filePath := u.Path
+		if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
 			return nil, err
 		}
 		var f *os.File
-		f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+		f, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 		if err != nil {
 			return nil, err
 		}
 		f.Close()
+		pio.stdio.Stdout = filePath
+		pio.stdio.Stderr = filePath
 		pio.copy = true
 		pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
 	default:
@@ -179,10 +182,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w
 		)
 		if ok {
 			if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil {
-				return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+				return errors.Wrapf(err, "containerd-shim: opening w/o fifo %q failed", i.name)
 			}
 			if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil {
-				return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+				return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name)
 			}
 		} else {
 			if sameFile != nil {
@@ -191,7 +194,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w
 				continue
 			}
 			if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil {
-				return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+				return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name)
 			}
 			if stdout == stderr {
 				sameFile = &countingWriteCloser{
@@ -251,7 +254,8 @@ func isFifo(path string) (bool, error) {
 	return false, nil
 }
 
-func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
+// NewBinaryIO runs a custom binary process for pluggable shim logging
+func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
 	ns, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return nil, err
@@ -264,7 +268,7 @@ func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error)
 		}
 	}
 	ctx, cancel := context.WithCancel(ctx)
-	cmd := exec.CommandContext(ctx, uri.Host, args...)
+	cmd := exec.CommandContext(ctx, uri.Path, args...)
 	cmd.Env = append(cmd.Env,
 		"CONTAINER_ID="+id,
 		"CONTAINER_NAMESPACE="+ns,

+ 3 - 25
vendor/github.com/containerd/containerd/runtime/proc/proc.go → vendor/github.com/containerd/containerd/pkg/process/process.go

@@ -14,30 +14,17 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	"context"
 	"io"
-	"sync"
 	"time"
 
 	"github.com/containerd/console"
+	"github.com/containerd/containerd/pkg/stdio"
 )
 
-// Stdio of a process
-type Stdio struct {
-	Stdin    string
-	Stdout   string
-	Stderr   string
-	Terminal bool
-}
-
-// IsNull returns true if the stdio is not defined
-func (s Stdio) IsNull() bool {
-	return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
-}
-
 // Process on a system
 type Process interface {
 	// ID returns the id for the process
@@ -51,7 +38,7 @@ type Process interface {
 	// Stdin returns the process STDIN
 	Stdin() io.Closer
 	// Stdio returns io information for the container
-	Stdio() Stdio
+	Stdio() stdio.Stdio
 	// Status returns the process status
 	Status(context.Context) (string, error)
 	// Wait blocks until the process has exited
@@ -67,12 +54,3 @@ type Process interface {
 	// SetExited sets the exit status for the process
 	SetExited(status int)
 }
-
-// Platform handles platform-specific behavior that may differs across
-// platform implementations
-type Platform interface {
-	CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
-		wg *sync.WaitGroup) (console.Console, error)
-	ShutdownConsole(ctx context.Context, console console.Console) error
-	Close() error
-}

+ 1 - 1
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go → vendor/github.com/containerd/containerd/pkg/process/types.go

@@ -14,7 +14,7 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
 	google_protobuf "github.com/gogo/protobuf/types"

+ 53 - 4
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go → vendor/github.com/containerd/containerd/pkg/process/utils.go

@@ -16,9 +16,10 @@
    limitations under the License.
 */
 
-package proc
+package process
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -34,6 +35,15 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+const (
+	// RuncRoot is the path to the root runc state directory
+	RuncRoot = "/run/containerd/runc"
+	// StoppedPID is the pid assigned after a container has run and stopped
+	StoppedPID = -1
+	// InitPidFile name of the file that contains the init pid
+	InitPidFile = "init.pid"
+)
+
 // safePid is a thread safe wrapper for pid.
 type safePid struct {
 	sync.Mutex
@@ -46,6 +56,12 @@ func (s *safePid) get() int {
 	return s.pid
 }
 
+func (s *safePid) set(pid int) {
+	s.Lock()
+	s.pid = pid
+	s.Unlock()
+}
+
 // TODO(mlaventure): move to runc package?
 func getLastRuntimeError(r *runc.Runc) (string, error) {
 	if r.Log == "" {
@@ -56,6 +72,7 @@ func getLastRuntimeError(r *runc.Runc) (string, error) {
 	if err != nil {
 		return "", err
 	}
+	defer f.Close()
 
 	var (
 		errMsg string
@@ -110,15 +127,13 @@ func checkKillError(err error) error {
 	}
 	if strings.Contains(err.Error(), "os: process already finished") ||
 		strings.Contains(err.Error(), "container not running") ||
+		strings.Contains(strings.ToLower(err.Error()), "no such process") ||
 		err == unix.ESRCH {
 		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
 	}
 	return errors.Wrapf(err, "unknown error after kill")
 }
 
-// InitPidFile name of the file that contains the init pid
-const InitPidFile = "init.pid"
-
 func newPidFile(bundle string) *pidFile {
 	return &pidFile{
 		path: filepath.Join(bundle, InitPidFile),
@@ -142,3 +157,37 @@ func (p *pidFile) Path() string {
 func (p *pidFile) Read() (int, error) {
 	return runc.ReadPidFile(p.path)
 }
+
+// waitTimeout handles waiting on a waitgroup with a specified timeout.
+// this is commonly used for waiting on IO to finish after a process has exited
+func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	done := make(chan struct{}, 1)
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+	select {
+	case <-done:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func stateName(v interface{}) string {
+	switch v.(type) {
+	case *runningState, *execRunningState:
+		return "running"
+	case *createdState, *execCreatedState, *createdCheckpointState:
+		return "created"
+	case *pausedState:
+		return "paused"
+	case *deletedState:
+		return "deleted"
+	case *stoppedState:
+		return "stopped"
+	}
+	panic(errors.Errorf("invalid state %v", v))
+}

+ 13 - 8
vendor/github.com/containerd/continuity/hardlinks_windows.go → vendor/github.com/containerd/containerd/pkg/stdio/platform.go

@@ -14,15 +14,20 @@
    limitations under the License.
 */
 
-package continuity
+package stdio
 
-import "os"
+import (
+	"context"
+	"sync"
 
-type hardlinkKey struct{}
+	"github.com/containerd/console"
+)
 
-func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
-	// NOTE(stevvooe): Obviously, this is not yet implemented. However, the
-	// makings of an implementation are available in src/os/types_windows.go. More
-	// investigation needs to be done to figure out exactly how to do this.
-	return hardlinkKey{}, errNotAHardLink
+// Platform handles platform-specific behavior that may differs across
+// platform implementations
+type Platform interface {
+	CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
+		wg *sync.WaitGroup) (console.Console, error)
+	ShutdownConsole(ctx context.Context, console console.Console) error
+	Close() error
 }

+ 11 - 9
vendor/github.com/containerd/continuity/resource_windows.go → vendor/github.com/containerd/containerd/pkg/stdio/stdio.go

@@ -14,15 +14,17 @@
    limitations under the License.
 */
 
-package continuity
+package stdio
 
-import "os"
+// Stdio of a process
+type Stdio struct {
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Terminal bool
+}
 
-// newBaseResource returns a *resource, populated with data from p and fi,
-// where p will be populated directly.
-func newBaseResource(p string, fi os.FileInfo) (*resource, error) {
-	return &resource{
-		paths: []string{p},
-		mode:  fi.Mode(),
-	}, nil
+// IsNull returns true if the stdio is not defined
+func (s Stdio) IsNull() bool {
+	return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
 }

+ 37 - 0
vendor/github.com/containerd/containerd/platforms/compare.go

@@ -29,11 +29,48 @@ type MatchComparer interface {
 // Only returns a match comparer for a single platform
 // using default resolution logic for the platform.
 //
+// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
 // For ARMv7, will also match ARMv6 and ARMv5
 // For ARMv6, will also match ARMv5
 func Only(platform specs.Platform) MatchComparer {
 	platform = Normalize(platform)
 	if platform.Architecture == "arm" {
+		if platform.Variant == "v8" {
+			return orderedPlatformComparer{
+				matchers: []Matcher{
+					&matcher{
+						Platform: platform,
+					},
+					&matcher{
+						Platform: specs.Platform{
+							Architecture: platform.Architecture,
+							OS:           platform.OS,
+							OSVersion:    platform.OSVersion,
+							OSFeatures:   platform.OSFeatures,
+							Variant:      "v7",
+						},
+					},
+					&matcher{
+						Platform: specs.Platform{
+							Architecture: platform.Architecture,
+							OS:           platform.OS,
+							OSVersion:    platform.OSVersion,
+							OSFeatures:   platform.OSFeatures,
+							Variant:      "v6",
+						},
+					},
+					&matcher{
+						Platform: specs.Platform{
+							Architecture: platform.Architecture,
+							OS:           platform.OS,
+							OSVersion:    platform.OSVersion,
+							OSFeatures:   platform.OSFeatures,
+							Variant:      "v5",
+						},
+					},
+				},
+			}
+		}
 		if platform.Variant == "v7" {
 			return orderedPlatformComparer{
 				matchers: []Matcher{

+ 1 - 1
vendor/github.com/containerd/containerd/platforms/cpuinfo.go

@@ -97,7 +97,7 @@ func getCPUVariant() string {
 	}
 
 	switch variant {
-	case "8":
+	case "8", "AArch64":
 		variant = "v8"
 	case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
 		variant = "v7"

+ 3 - 3
vendor/github.com/containerd/containerd/platforms/database.go

@@ -28,7 +28,7 @@ func isLinuxOS(os string) bool {
 	return os == "linux"
 }
 
-// These function are generated from from https://golang.org/src/go/build/syslist.go.
+// These function are generated from https://golang.org/src/go/build/syslist.go.
 //
 // We use switch statements because they are slightly faster than map lookups
 // and use a little less memory.
@@ -38,7 +38,7 @@ func isLinuxOS(os string) bool {
 // The OS value should be normalized before calling this function.
 func isKnownOS(os string) bool {
 	switch os {
-	case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
 		return true
 	}
 	return false
@@ -60,7 +60,7 @@ func isArmArch(arch string) bool {
 // The arch value should be normalized before being passed to this function.
 func isKnownArch(arch string) bool {
 	switch arch {
-	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64":
+	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
 		return true
 	}
 	return false

+ 1 - 1
vendor/github.com/containerd/containerd/platforms/platforms.go

@@ -130,7 +130,7 @@ type Matcher interface {
 // specification. The returned matcher only looks for equality based on os,
 // architecture and variant.
 //
-// One may implement their own matcher if this doesn't provide the the required
+// One may implement their own matcher if this doesn't provide the required
 // functionality.
 //
 // Applications should opt to use `Match` over directly parsing specifiers.

+ 7 - 6
vendor/github.com/containerd/containerd/plugin/context.go

@@ -28,12 +28,13 @@ import (
 
 // InitContext is used for plugin inititalization
 type InitContext struct {
-	Context context.Context
-	Root    string
-	State   string
-	Config  interface{}
-	Address string
-	Events  *exchange.Exchange
+	Context      context.Context
+	Root         string
+	State        string
+	Config       interface{}
+	Address      string
+	TTRPCAddress string
+	Events       *exchange.Exchange
 
 	Meta *Meta // plugins can fill in metadata at init.
 

+ 35 - 14
vendor/github.com/containerd/containerd/plugin/plugin.go

@@ -30,7 +30,8 @@ var (
 	ErrNoType = errors.New("plugin: no type")
 	// ErrNoPluginID is returned when no id is specified
 	ErrNoPluginID = errors.New("plugin: no id")
-
+	// ErrIDRegistered is returned when a duplicate id is already registered
+	ErrIDRegistered = errors.New("plugin: id already registered")
 	// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded,
 	// this allows the plugin loader differentiate between a plugin which is configured
 	// not to load and one that fails to load.
@@ -100,6 +101,8 @@ type Registration struct {
 	// context are passed in. The init function may modify the registration to
 	// add exports, capabilities and platform support declarations.
 	InitFn func(*InitContext) (interface{}, error)
+	// Disable the plugin from loading
+	Disable bool
 }
 
 // Init the registered plugin
@@ -157,12 +160,16 @@ func Load(path string) (err error) {
 func Register(r *Registration) {
 	register.Lock()
 	defer register.Unlock()
+
 	if r.Type == "" {
 		panic(ErrNoType)
 	}
 	if r.ID == "" {
 		panic(ErrNoPluginID)
 	}
+	if err := checkUnique(r); err != nil {
+		panic(err)
+	}
 
 	var last bool
 	for _, requires := range r.Requires {
@@ -177,24 +184,36 @@ func Register(r *Registration) {
 	register.r = append(register.r, r)
 }
 
+func checkUnique(r *Registration) error {
+	for _, registered := range register.r {
+		if r.URI() == registered.URI() {
+			return errors.Wrap(ErrIDRegistered, r.URI())
+		}
+	}
+	return nil
+}
+
+// DisableFilter filters out disabled plugins
+type DisableFilter func(r *Registration) bool
+
 // Graph returns an ordered list of registered plugins for initialization.
 // Plugins in disableList specified by id will be disabled.
-func Graph(disableList []string) (ordered []*Registration) {
+func Graph(filter DisableFilter) (ordered []*Registration) {
 	register.RLock()
 	defer register.RUnlock()
-	for _, d := range disableList {
-		for i, r := range register.r {
-			if r.ID == d {
-				register.r = append(register.r[:i], register.r[i+1:]...)
-				break
-			}
+
+	for _, r := range register.r {
+		if filter(r) {
+			r.Disable = true
 		}
 	}
 
 	added := map[*Registration]bool{}
 	for _, r := range register.r {
-
-		children(r.ID, r.Requires, added, &ordered)
+		if r.Disable {
+			continue
+		}
+		children(r, added, &ordered)
 		if !added[r] {
 			ordered = append(ordered, r)
 			added[r] = true
@@ -203,11 +222,13 @@ func Graph(disableList []string) (ordered []*Registration) {
 	return ordered
 }
 
-func children(id string, types []Type, added map[*Registration]bool, ordered *[]*Registration) {
-	for _, t := range types {
+func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) {
+	for _, t := range reg.Requires {
 		for _, r := range register.r {
-			if r.ID != id && (t == "*" || r.Type == t) {
-				children(r.ID, r.Requires, added, ordered)
+			if !r.Disable &&
+				r.URI() != reg.URI() &&
+				(t == "*" || r.Type == t) {
+				children(r, added, ordered)
 				if !added[r] {
 					*ordered = append(*ordered, r)
 					added[r] = true

+ 2 - 2
vendor/github.com/containerd/containerd/process.go

@@ -44,7 +44,7 @@ type Process interface {
 	Wait(context.Context) (<-chan ExitStatus, error)
 	// CloseIO allows various pipes to be closed on the process
 	CloseIO(context.Context, ...IOCloserOpts) error
-	// Resize changes the width and heigh of the process's terminal
+	// Resize changes the width and height of the process's terminal
 	Resize(ctx context.Context, w, h uint32) error
 	// IO returns the io set for the process
 	IO() cio.IO
@@ -61,7 +61,7 @@ func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus {
 	}
 }
 
-// ExitStatus encapsulates a process' exit status.
+// ExitStatus encapsulates a process's exit status.
 // It is used by `Wait()` to return either a process exit code or an error
 type ExitStatus struct {
 	code     uint32

+ 46 - 17
vendor/github.com/containerd/containerd/pull.go

@@ -32,7 +32,7 @@ import (
 
 // Pull downloads the provided content into containerd's content store
 // and returns a platform specific image object
-func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
+func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) {
 	pullCtx := defaultRemoteContext()
 	for _, o := range opts {
 		if err := o(c, pullCtx); err != nil {
@@ -44,7 +44,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
 		if len(pullCtx.Platforms) > 1 {
 			return nil, errors.New("cannot pull multiplatform image locally, try Fetch")
 		} else if len(pullCtx.Platforms) == 0 {
-			pullCtx.PlatformMatcher = platforms.Default()
+			pullCtx.PlatformMatcher = c.platform
 		} else {
 			p, err := platforms.Parse(pullCtx.Platforms[0])
 			if err != nil {
@@ -61,6 +61,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
 	}
 	defer done(ctx)
 
+	var unpacks int32
+	if pullCtx.Unpack {
+		// unpacker only supports schema 2 image, for schema 1 this is noop.
+		u, err := c.newUnpacker(ctx, pullCtx)
+		if err != nil {
+			return nil, errors.Wrap(err, "create unpacker")
+		}
+		unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks)
+		defer func() {
+			if err := eg.Wait(); err != nil {
+				if retErr == nil {
+					retErr = errors.Wrap(err, "unpack")
+				}
+			}
+		}()
+		wrapper := pullCtx.HandlerWrapper
+		pullCtx.HandlerWrapper = func(h images.Handler) images.Handler {
+			if wrapper == nil {
+				return unpackWrapper(h)
+			}
+			return wrapper(unpackWrapper(h))
+		}
+	}
+
 	img, err := c.fetch(ctx, pullCtx, ref, 1)
 	if err != nil {
 		return nil, err
@@ -69,8 +93,12 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
 	i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)
 
 	if pullCtx.Unpack {
-		if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil {
-			return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
+		if unpacks == 0 {
+			// Try to unpack is none is done previously.
+			// This is at least required for schema 1 image.
+			if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil {
+				return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
+			}
 		}
 	}
 
@@ -112,9 +140,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 		childrenHandler := images.ChildrenHandler(store)
 		// Set any children labels for that content
 		childrenHandler = images.SetChildrenLabels(store, childrenHandler)
-		// Filter manifests by platforms but allow to handle manifest
-		// and configuration for not-target platforms
-		childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
+		if rCtx.AllMetadata {
+			// Filter manifests by platforms but allow to handle manifest
+			// and configuration for not-target platforms
+			childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
+		} else {
+			// Filter children by platforms if specified.
+			childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
+		}
 		// Sort and limit manifests if a finite number is needed
 		if limit > 0 {
 			childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
@@ -131,22 +164,18 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 			},
 		)
 
+		appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
+		if err != nil {
+			return images.Image{}, err
+		}
+
 		handlers := append(rCtx.BaseHandlers,
 			remotes.FetchHandler(store, fetcher),
 			convertibleHandler,
 			childrenHandler,
+			appendDistSrcLabelHandler,
 		)
 
-		// append distribution source label to blob data
-		if rCtx.AppendDistributionSourceLabel {
-			appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
-			if err != nil {
-				return images.Image{}, err
-			}
-
-			handlers = append(handlers, appendDistSrcLabelHandler)
-		}
-
 		handler = images.Handlers(handlers...)
 
 		converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {

+ 232 - 67
vendor/github.com/containerd/containerd/remotes/docker/authorizer.go

@@ -40,126 +40,278 @@ type dockerAuthorizer struct {
 	credentials func(string) (string, string, error)
 
 	client *http.Client
+	header http.Header
 	mu     sync.Mutex
 
-	auth map[string]string
+	// indexed by host name
+	handlers map[string]*authHandler
 }
 
 // NewAuthorizer creates a Docker authorizer using the provided function to
 // get credentials for the token server or basic auth.
+// Deprecated: Use NewDockerAuthorizer
 func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer {
-	if client == nil {
-		client = http.DefaultClient
+	return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f))
+}
+
+type authorizerConfig struct {
+	credentials func(string) (string, string, error)
+	client      *http.Client
+	header      http.Header
+}
+
+// AuthorizerOpt configures an authorizer
+type AuthorizerOpt func(*authorizerConfig)
+
+// WithAuthClient provides the HTTP client for the authorizer
+func WithAuthClient(client *http.Client) AuthorizerOpt {
+	return func(opt *authorizerConfig) {
+		opt.client = client
 	}
+}
+
+// WithAuthCreds provides a credential function to the authorizer
+func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
+	return func(opt *authorizerConfig) {
+		opt.credentials = creds
+	}
+}
+
+// WithAuthHeader provides HTTP headers for authorization
+func WithAuthHeader(hdr http.Header) AuthorizerOpt {
+	return func(opt *authorizerConfig) {
+		opt.header = hdr
+	}
+}
+
+// NewDockerAuthorizer creates an authorizer using Docker's registry
+// authentication spec.
+// See https://docs.docker.com/registry/spec/auth/
+func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
+	var ao authorizerConfig
+	for _, opt := range opts {
+		opt(&ao)
+	}
+
+	if ao.client == nil {
+		ao.client = http.DefaultClient
+	}
+
 	return &dockerAuthorizer{
-		credentials: f,
-		client:      client,
-		auth:        map[string]string{},
+		credentials: ao.credentials,
+		client:      ao.client,
+		header:      ao.header,
+		handlers:    make(map[string]*authHandler),
 	}
 }
 
+// Authorize handles auth request.
 func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
-	// TODO: Lookup matching challenge and scope rather than just host
-	if auth := a.getAuth(req.URL.Host); auth != "" {
-		req.Header.Set("Authorization", auth)
+	// skip if there is no auth handler
+	ah := a.getAuthHandler(req.URL.Host)
+	if ah == nil {
+		return nil
 	}
 
+	auth, err := ah.authorize(ctx)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Authorization", auth)
 	return nil
 }
 
+func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	return a.handlers[host]
+}
+
 func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
 	last := responses[len(responses)-1]
 	host := last.Request.URL.Host
+
+	a.mu.Lock()
+	defer a.mu.Unlock()
 	for _, c := range parseAuthHeader(last.Header) {
 		if c.scheme == bearerAuth {
 			if err := invalidAuthorization(c, responses); err != nil {
-				// TODO: Clear token
-				a.setAuth(host, "")
+				delete(a.handlers, host)
 				return err
 			}
 
-			// TODO(dmcg): Store challenge, not token
-			// Move token fetching to authorize
-			return a.setTokenAuth(ctx, host, c.parameters)
+			// reuse existing handler
+			//
+			// assume that one registry will return the common
+			// challenge information, including realm and service.
+			// and the resource scope is only different part
+			// which can be provided by each request.
+			if _, ok := a.handlers[host]; ok {
+				return nil
+			}
+
+			common, err := a.generateTokenOptions(ctx, host, c)
+			if err != nil {
+				return err
+			}
+
+			a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
+			return nil
 		} else if c.scheme == basicAuth && a.credentials != nil {
-			// TODO: Resolve credentials on authorize
 			username, secret, err := a.credentials(host)
 			if err != nil {
 				return err
 			}
+
 			if username != "" && secret != "" {
-				auth := username + ":" + secret
-				a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth))))
+				common := tokenOptions{
+					username: username,
+					secret:   secret,
+				}
+
+				a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
 				return nil
 			}
 		}
 	}
-
 	return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
 }
 
-func (a *dockerAuthorizer) getAuth(host string) string {
-	a.mu.Lock()
-	defer a.mu.Unlock()
-
-	return a.auth[host]
-}
-
-func (a *dockerAuthorizer) setAuth(host string, auth string) bool {
-	a.mu.Lock()
-	defer a.mu.Unlock()
-
-	changed := a.auth[host] != auth
-	a.auth[host] = auth
-
-	return changed
-}
-
-func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error {
-	realm, ok := params["realm"]
+func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) {
+	realm, ok := c.parameters["realm"]
 	if !ok {
-		return errors.New("no realm specified for token auth challenge")
+		return tokenOptions{}, errors.New("no realm specified for token auth challenge")
 	}
 
 	realmURL, err := url.Parse(realm)
 	if err != nil {
-		return errors.Wrap(err, "invalid token auth challenge realm")
+		return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm")
 	}
 
 	to := tokenOptions{
 		realm:   realmURL.String(),
-		service: params["service"],
+		service: c.parameters["service"],
 	}
 
-	to.scopes = getTokenScopes(ctx, params)
-	if len(to.scopes) == 0 {
-		return errors.Errorf("no scope specified for token auth challenge")
+	scope, ok := c.parameters["scope"]
+	if !ok {
+		return tokenOptions{}, errors.Errorf("no scope specified for token auth challenge")
 	}
+	to.scopes = append(to.scopes, scope)
 
 	if a.credentials != nil {
 		to.username, to.secret, err = a.credentials(host)
 		if err != nil {
-			return err
+			return tokenOptions{}, err
 		}
 	}
+	return to, nil
+}
+
+// authResult is used to control limit rate.
+type authResult struct {
+	sync.WaitGroup
+	token string
+	err   error
+}
+
+// authHandler is used to handle auth request per registry server.
+type authHandler struct {
+	sync.Mutex
+
+	header http.Header
+
+	client *http.Client
+
+	// only support basic and bearer schemes
+	scheme authenticationScheme
+
+	// common contains common challenge answer
+	common tokenOptions
+
+	// scopedTokens caches token indexed by scopes, which used in
+	// bearer auth case
+	scopedTokens map[string]*authResult
+}
+
+func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler {
+	return &authHandler{
+		header:       hdr,
+		client:       client,
+		scheme:       scheme,
+		common:       opts,
+		scopedTokens: map[string]*authResult{},
+	}
+}
+
+func (ah *authHandler) authorize(ctx context.Context) (string, error) {
+	switch ah.scheme {
+	case basicAuth:
+		return ah.doBasicAuth(ctx)
+	case bearerAuth:
+		return ah.doBearerAuth(ctx)
+	default:
+		return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
+	}
+}
+
+func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) {
+	username, secret := ah.common.username, ah.common.secret
+
+	if username == "" || secret == "" {
+		return "", fmt.Errorf("failed to handle basic auth because missing username or secret")
+	}
+
+	auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
+	return fmt.Sprintf("Basic %s", auth), nil
+}
 
-	var token string
+func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) {
+	// copy common tokenOptions
+	to := ah.common
+
+	to.scopes = getTokenScopes(ctx, to.scopes)
+	if len(to.scopes) == 0 {
+		return "", errors.Errorf("no scope specified for token auth challenge")
+	}
+
+	// Docs: https://docs.docker.com/registry/spec/auth/scope
+	scoped := strings.Join(to.scopes, " ")
+
+	ah.Lock()
+	if r, exist := ah.scopedTokens[scoped]; exist {
+		ah.Unlock()
+		r.Wait()
+		return r.token, r.err
+	}
+
+	// only one fetch token job
+	r := new(authResult)
+	r.Add(1)
+	ah.scopedTokens[scoped] = r
+	ah.Unlock()
+
+	// fetch token for the resource scope
+	var (
+		token string
+		err   error
+	)
 	if to.secret != "" {
-		// Credential information is provided, use oauth POST endpoint
-		token, err = a.fetchTokenWithOAuth(ctx, to)
-		if err != nil {
-			return errors.Wrap(err, "failed to fetch oauth token")
-		}
+		// credential information is provided, use oauth POST endpoint
+		token, err = ah.fetchTokenWithOAuth(ctx, to)
+		err = errors.Wrap(err, "failed to fetch oauth token")
 	} else {
-		// Do request anonymously
-		token, err = a.fetchToken(ctx, to)
-		if err != nil {
-			return errors.Wrap(err, "failed to fetch anonymous token")
-		}
+		// do request anonymously
+		token, err = ah.fetchToken(ctx, to)
+		err = errors.Wrap(err, "failed to fetch anonymous token")
 	}
-	a.setAuth(host, fmt.Sprintf("Bearer %s", token))
+	token = fmt.Sprintf("Bearer %s", token)
 
-	return nil
+	r.token, r.err = token, err
+	r.Done()
+	return r.token, r.err
 }
 
 type tokenOptions struct {
@@ -178,7 +330,7 @@ type postTokenResponse struct {
 	Scope        string    `json:"scope"`
 }
 
-func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
+func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
 	form := url.Values{}
 	form.Set("scope", strings.Join(to.scopes, " "))
 	form.Set("service", to.service)
@@ -194,11 +346,18 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti
 		form.Set("password", to.secret)
 	}
 
-	resp, err := ctxhttp.Post(
-		ctx, a.client, to.realm,
-		"application/x-www-form-urlencoded; charset=utf-8",
-		strings.NewReader(form.Encode()),
-	)
+	req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode()))
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+	if ah.header != nil {
+		for k, v := range ah.header {
+			req.Header[k] = append(req.Header[k], v...)
+		}
+	}
+
+	resp, err := ctxhttp.Do(ctx, ah.client, req)
 	if err != nil {
 		return "", err
 	}
@@ -208,7 +367,7 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti
 	// As of September 2017, GCR is known to return 404.
 	// As of February 2018, JFrog Artifactory is known to return 401.
 	if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 {
-		return a.fetchToken(ctx, to)
+		return ah.fetchToken(ctx, to)
 	} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
 		b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
 		log.G(ctx).WithFields(logrus.Fields{
@@ -237,13 +396,19 @@ type getTokenResponse struct {
 	RefreshToken string    `json:"refresh_token"`
 }
 
-// getToken fetches a token using a GET request
-func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) {
+// fetchToken fetches a token using a GET request
+func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) {
 	req, err := http.NewRequest("GET", to.realm, nil)
 	if err != nil {
 		return "", err
 	}
 
+	if ah.header != nil {
+		for k, v := range ah.header {
+			req.Header[k] = append(req.Header[k], v...)
+		}
+	}
+
 	reqParams := req.URL.Query()
 
 	if to.service != "" {
@@ -260,7 +425,7 @@ func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (str
 
 	req.URL.RawQuery = reqParams.Encode()
 
-	resp, err := ctxhttp.Do(ctx, a.client, req)
+	resp, err := ctxhttp.Do(ctx, ah.client, req)
 	if err != nil {
 		return "", err
 	}

+ 79 - 54
vendor/github.com/containerd/containerd/remotes/docker/fetcher.go

@@ -23,7 +23,7 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"path"
+	"net/url"
 	"strings"
 
 	"github.com/containerd/containerd/errdefs"
@@ -32,7 +32,6 @@ import (
 	"github.com/docker/distribution/registry/api/errcode"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type dockerFetcher struct {
@@ -40,26 +39,46 @@ type dockerFetcher struct {
 }
 
 func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
-	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
-		logrus.Fields{
-			"base":   r.base.String(),
-			"digest": desc.Digest,
-		},
-	))
-
-	urls, err := r.getV2URLPaths(ctx, desc)
-	if err != nil {
-		return nil, err
+	ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest))
+
+	hosts := r.filterHosts(HostCapabilityPull)
+	if len(hosts) == 0 {
+		return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
 	}
 
-	ctx, err = contextWithRepositoryScope(ctx, r.refspec, false)
+	ctx, err := contextWithRepositoryScope(ctx, r.refspec, false)
 	if err != nil {
 		return nil, err
 	}
 
 	return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
-		for _, u := range urls {
-			rc, err := r.open(ctx, u, desc.MediaType, offset)
+		// firstly try fetch via external urls
+		for _, us := range desc.URLs {
+			ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us))
+
+			u, err := url.Parse(us)
+			if err != nil {
+				log.G(ctx).WithError(err).Debug("failed to parse")
+				continue
+			}
+			log.G(ctx).Debug("trying alternative url")
+
+			// Try this first, parse it
+			host := RegistryHost{
+				Client:       http.DefaultClient,
+				Host:         u.Host,
+				Scheme:       u.Scheme,
+				Path:         u.Path,
+				Capabilities: HostCapabilityPull,
+			}
+			req := r.request(host, http.MethodGet)
+			// Strip namespace from base
+			req.path = u.Path
+			if u.RawQuery != "" {
+				req.path = req.path + "?" + u.RawQuery
+			}
+
+			rc, err := r.open(ctx, req, desc.MediaType, offset)
 			if err != nil {
 				if errdefs.IsNotFound(err) {
 					continue // try one of the other urls.
@@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 			return rc, nil
 		}
 
+		// Try manifests endpoints for manifests types
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
+			images.MediaTypeDockerSchema1Manifest,
+			ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
+
+			for _, host := range r.hosts {
+				req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
+
+				rc, err := r.open(ctx, req, desc.MediaType, offset)
+				if err != nil {
+					if errdefs.IsNotFound(err) {
+						continue // try another host
+					}
+
+					return nil, err
+				}
+
+				return rc, nil
+			}
+		}
+
+		// Finally use blobs endpoints
+		for _, host := range r.hosts {
+			req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
+
+			rc, err := r.open(ctx, req, desc.MediaType, offset)
+			if err != nil {
+				if errdefs.IsNotFound(err) {
+					continue // try another host
+				}
+
+				return nil, err
+			}
+
+			return rc, nil
+		}
+
 		return nil, errors.Wrapf(errdefs.ErrNotFound,
 			"could not fetch content descriptor %v (%v) from remote",
 			desc.Digest, desc.MediaType)
@@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 	})
 }
 
-func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) {
-	req, err := http.NewRequest(http.MethodGet, u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
+func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) {
+	req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
 
 	if offset > 0 {
 		// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
 		// will return the header without supporting the range. The content
 		// range must always be checked.
-		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+		req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
 	}
 
-	resp, err := r.doRequestWithRetries(ctx, req, nil)
+	resp, err := req.doWithRetries(ctx, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
 		defer resp.Body.Close()
 
 		if resp.StatusCode == http.StatusNotFound {
-			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
+			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String())
 		}
 		var registryErr errcode.Errors
 		if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
-			return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+			return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
 		}
-		return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error())
+		return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
 	}
 	if offset > 0 {
 		cr := resp.Header.Get("content-range")
@@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
 
 	return resp.Body, nil
 }
-
-// getV2URLPaths generates the candidate urls paths for the object based on the
-// set of hints and the provided object id. URLs are returned in the order of
-// most to least likely succeed.
-func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) {
-	var urls []string
-
-	if len(desc.URLs) > 0 {
-		// handle fetch via external urls.
-		for _, u := range desc.URLs {
-			log.G(ctx).WithField("url", u).Debug("adding alternative url")
-			urls = append(urls, u)
-		}
-	}
-
-	switch desc.MediaType {
-	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
-		images.MediaTypeDockerSchema1Manifest,
-		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
-		urls = append(urls, r.url(path.Join("manifests", desc.Digest.String())))
-	}
-
-	// always fallback to attempting to get the object out of the blobs store.
-	urls = append(urls, r.url(path.Join("blobs", desc.Digest.String())))
-
-	return urls, nil
-}

+ 42 - 0
vendor/github.com/containerd/containerd/remotes/docker/handler.go

@@ -110,3 +110,45 @@ func appendDistributionSourceLabel(originLabel, repo string) string {
 func distributionSourceLabelKey(source string) string {
 	return fmt.Sprintf("%s.%s", labelDistributionSource, source)
 }
+
+// selectRepositoryMountCandidate will select the repo which has longest
+// common prefix components as the candidate.
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
+	u, err := url.Parse("dummy://" + refspec.Locator)
+	if err != nil {
+		// NOTE: basically, it won't be error here
+		return ""
+	}
+
+	source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+	repoLabel, ok := sources[distributionSourceLabelKey(source)]
+	if !ok || repoLabel == "" {
+		return ""
+	}
+
+	n, match := 0, ""
+	components := strings.Split(target, "/")
+	for _, repo := range strings.Split(repoLabel, ",") {
+		// the target repo is not a candidate
+		if repo == target {
+			continue
+		}
+
+		if l := commonPrefixComponents(components, repo); l >= n {
+			n, match = l, repo
+		}
+	}
+	return match
+}
+
+func commonPrefixComponents(components []string, target string) int {
+	targetComponents := strings.Split(target, "/")
+
+	i := 0
+	for ; i < len(components) && i < len(targetComponents); i++ {
+		if components[i] != targetComponents[i] {
+			break
+		}
+	}
+	return i
+}

+ 129 - 60
vendor/github.com/containerd/containerd/remotes/docker/pusher.go

@@ -21,7 +21,7 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"path"
+	"net/url"
 	"strings"
 	"time"
 
@@ -37,7 +37,7 @@ import (
 
 type dockerPusher struct {
 	*dockerBase
-	tag string
+	object string
 
 	// TODO: namespace tracker
 	tracker StatusTracker
@@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 		return nil, errors.Wrap(err, "failed to get status")
 	}
 
+	hosts := p.filterHosts(HostCapabilityPush)
+	if len(hosts) == 0 {
+		return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts")
+	}
+
 	var (
 		isManifest bool
-		existCheck string
+		existCheck []string
+		host       = hosts[0]
 	)
 
 	switch desc.MediaType {
 	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
 		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
 		isManifest = true
-		if p.tag == "" {
-			existCheck = path.Join("manifests", desc.Digest.String())
-		} else {
-			existCheck = path.Join("manifests", p.tag)
-		}
+		existCheck = getManifestPath(p.object, desc.Digest)
 	default:
-		existCheck = path.Join("blobs", desc.Digest.String())
+		existCheck = []string{"blobs", desc.Digest.String()}
 	}
 
-	req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil)
-	if err != nil {
-		return nil, err
-	}
+	req := p.request(host, http.MethodHead, existCheck...)
+	req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", "))
 
-	req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
-	resp, err := p.doRequestWithRetries(ctx, req, nil)
+	log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")
+
+	resp, err := req.doWithRetries(ctx, nil)
 	if err != nil {
 		if errors.Cause(err) != ErrInvalidAuthorization {
 			return nil, err
@@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 	} else {
 		if resp.StatusCode == http.StatusOK {
 			var exists bool
-			if isManifest && p.tag != "" {
+			if isManifest && existCheck[1] != desc.Digest.String() {
 				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
 				if dgstHeader == desc.Digest {
 					exists = true
@@ -116,67 +117,94 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 		}
 	}
 
-	// TODO: Lookup related objects for cross repository push
-
 	if isManifest {
-		var putPath string
-		if p.tag != "" {
-			putPath = path.Join("manifests", p.tag)
-		} else {
-			putPath = path.Join("manifests", desc.Digest.String())
-		}
-
-		req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil)
-		if err != nil {
-			return nil, err
-		}
-		req.Header.Add("Content-Type", desc.MediaType)
+		putPath := getManifestPath(p.object, desc.Digest)
+		req = p.request(host, http.MethodPut, putPath...)
+		req.header.Add("Content-Type", desc.MediaType)
 	} else {
-		// TODO: Do monolithic upload if size is small
-
 		// Start upload request
-		req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil)
-		if err != nil {
-			return nil, err
+		req = p.request(host, http.MethodPost, "blobs", "uploads/")
+
+		var resp *http.Response
+		if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
+			preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
+			pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo)
+
+			// NOTE: the fromRepo might be private repo and
+			// auth service still can grant token without error.
+			// but the post request will fail because of 401.
+			//
+			// for the private repo, we should remove mount-from
+			// query and send the request again.
+			resp, err = preq.do(pctx)
+			if err != nil {
+				return nil, err
+			}
+
+			if resp.StatusCode == http.StatusUnauthorized {
+				log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
+
+				resp.Body.Close()
+				resp = nil
+			}
 		}
 
-		resp, err := p.doRequestWithRetries(ctx, req, nil)
-		if err != nil {
-			return nil, err
+		if resp == nil {
+			resp, err = req.doWithRetries(ctx, nil)
+			if err != nil {
+				return nil, err
+			}
 		}
 
 		switch resp.StatusCode {
 		case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
+		case http.StatusCreated:
+			p.tracker.SetStatus(ref, Status{
+				Status: content.Status{
+					Ref: ref,
+				},
+			})
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
 		default:
 			// TODO: log error
 			return nil, errors.Errorf("unexpected response: %s", resp.Status)
 		}
 
-		location := resp.Header.Get("Location")
+		var (
+			location = resp.Header.Get("Location")
+			lurl     *url.URL
+			lhost    = host
+		)
 		// Support paths without host in location
 		if strings.HasPrefix(location, "/") {
-			// Support location string containing path and query
-			qmIndex := strings.Index(location, "?")
-			if qmIndex > 0 {
-				u := p.base
-				u.Path = location[:qmIndex]
-				u.RawQuery = location[qmIndex+1:]
-				location = u.String()
-			} else {
-				u := p.base
-				u.Path = location
-				location = u.String()
+			lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
+			if err != nil {
+				return nil, errors.Wrapf(err, "unable to parse location %v", location)
+			}
+		} else {
+			if !strings.Contains(location, "://") {
+				location = lhost.Scheme + "://" + location
+			}
+			lurl, err = url.Parse(location)
+			if err != nil {
+				return nil, errors.Wrapf(err, "unable to parse location %v", location)
 			}
-		}
 
-		req, err = http.NewRequest(http.MethodPut, location, nil)
-		if err != nil {
-			return nil, err
+			if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
+
+				lhost.Scheme = lurl.Scheme
+				lhost.Host = lurl.Host
+				log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination")
+
+				// Strip authorizer if change to host or scheme
+				lhost.Authorizer = nil
+			}
 		}
-		q := req.URL.Query()
+		q := lurl.Query()
 		q.Add("digest", desc.Digest.String())
-		req.URL.RawQuery = q.Encode()
 
+		req = p.request(lhost, http.MethodPut)
+		req.path = lurl.Path + "?" + q.Encode()
 	}
 	p.tracker.SetStatus(ref, Status{
 		Status: content.Status{
@@ -191,13 +219,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 
 	pr, pw := io.Pipe()
 	respC := make(chan *http.Response, 1)
+	body := ioutil.NopCloser(pr)
 
-	req.Body = ioutil.NopCloser(pr)
-	req.ContentLength = desc.Size
+	req.body = func() (io.ReadCloser, error) {
+		if body == nil {
+			return nil, errors.New("cannot reuse body, request must be retried")
+		}
+		// Only use the body once since pipe cannot be seeked
+		ob := body
+		body = nil
+		return ob, nil
+	}
+	req.size = desc.Size
 
 	go func() {
 		defer close(respC)
-		resp, err = p.doRequest(ctx, req)
+		resp, err = req.do(ctx)
 		if err != nil {
 			pr.CloseWithError(err)
 			return
@@ -223,6 +260,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 	}, nil
 }
 
+func getManifestPath(object string, dgst digest.Digest) []string {
+	if i := strings.IndexByte(object, '@'); i >= 0 {
+		if object[i+1:] != dgst.String() {
+			// use digest, not tag
+			object = ""
+		} else {
+			// strip @<digest> for registry path to make tag
+			object = object[:i]
+		}
+
+	}
+
+	if object == "" {
+		return []string{"manifests", dgst.String()}
+	}
+
+	return []string{"manifests", object}
+}
+
 type pushWriter struct {
 	base *dockerBase
 	ref  string
@@ -296,7 +352,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
 	}
 
 	if size > 0 && size != status.Offset {
-		return errors.Errorf("unxpected size %d, expected %d", status.Offset, size)
+		return errors.Errorf("unexpected size %d, expected %d", status.Offset, size)
 	}
 
 	if expected == "" {
@@ -320,3 +376,16 @@ func (pw *pushWriter) Truncate(size int64) error {
 	// TODO: always error on manifest
 	return errors.New("cannot truncate remote upload")
 }
+
+func requestWithMountFrom(req *request, mount, from string) *request {
+	creq := *req
+
+	sep := "?"
+	if strings.Contains(creq.path, sep) {
+		sep = "&"
+	}
+
+	creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
+
+	return &creq
+}

+ 202 - 0
vendor/github.com/containerd/containerd/remotes/docker/registry.go

@@ -0,0 +1,202 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"net/http"
+)
+
+// HostCapabilities represent the capabilities of the registry
+// host. This also represents the set of operations for which
+// the registry host may be trusted to perform.
+//
+// For example pushing is a capability which should only be
+// performed on an upstream source, not a mirror.
+// Resolving (the process of converting a name into a digest)
+// must be considered a trusted operation and only done by
+// a host which is trusted (or more preferably by secure process
+// which can prove the provenance of the mapping). A public
+// mirror should never be trusted to do a resolve action.
+//
+// | Registry Type    | Pull | Resolve | Push |
+// |------------------|------|---------|------|
+// | Public Registry  | yes  | yes     | yes  |
+// | Private Registry | yes  | yes     | yes  |
+// | Public Mirror    | yes  | no      | no   |
+// | Private Mirror   | yes  | yes     | no   |
+type HostCapabilities uint8
+
+const (
+	// HostCapabilityPull represents the capability to fetch manifests
+	// and blobs by digest
+	HostCapabilityPull HostCapabilities = 1 << iota
+
+	// HostCapabilityResolve represents the capability to fetch manifests
+	// by name
+	HostCapabilityResolve
+
+	// HostCapabilityPush represents the capability to push blobs and
+	// manifests
+	HostCapabilityPush
+
+	// Reserved for future capabilities (i.e. search, catalog, remove)
+)
+
+func (c HostCapabilities) Has(t HostCapabilities) bool {
+	return c&t == t
+}
+
+// RegistryHost represents a complete configuration for a registry
+// host, representing the capabilities, authorizations, connection
+// configuration, and location.
+type RegistryHost struct {
+	Client       *http.Client
+	Authorizer   Authorizer
+	Host         string
+	Scheme       string
+	Path         string
+	Capabilities HostCapabilities
+}
+
+// RegistryHosts fetches the registry hosts for a given namespace,
+// provided by the host component of an distribution image reference.
+type RegistryHosts func(string) ([]RegistryHost, error)
+
+// Registries joins multiple registry configuration functions, using the same
+// order as provided within the arguments. When an empty registry configuration
+// is returned with a nil error, the next function will be called.
+// NOTE: This function will not join configurations; as soon as a non-empty
+// configuration is returned from a configuration function, it will be returned
+// to the caller.
+func Registries(registries ...RegistryHosts) RegistryHosts {
+	return func(host string) ([]RegistryHost, error) {
+		for _, registry := range registries {
+			config, err := registry(host)
+			if err != nil {
+				return config, err
+			}
+			if len(config) > 0 {
+				return config, nil
+			}
+		}
+		return nil, nil
+	}
+}
+
+type registryOpts struct {
+	authorizer Authorizer
+	plainHTTP  func(string) (bool, error)
+	host       func(string) (string, error)
+	client     *http.Client
+}
+
+// RegistryOpt defines a registry default option
+type RegistryOpt func(*registryOpts)
+
+// WithPlainHTTP configures registries to use plaintext http scheme
+// for the provided host match function.
+func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.plainHTTP = f
+	}
+}
+
+// WithAuthorizer configures the default authorizer for a registry
+func WithAuthorizer(a Authorizer) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.authorizer = a
+	}
+}
+
+// WithHostTranslator defines the default translator to use for registry hosts
+func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.host = h
+	}
+}
+
+// WithClient configures the default http client for a registry
+func WithClient(c *http.Client) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.client = c
+	}
+}
+
+// ConfigureDefaultRegistries is used to create a default configuration for
+// registries. For more advanced configurations or per-domain setups,
+// the RegistryHosts interface should be used directly.
+// NOTE: This function will always return a non-empty value or error
+func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
+	var opts registryOpts
+	for _, opt := range ropts {
+		opt(&opts)
+	}
+
+	return func(host string) ([]RegistryHost, error) {
+		config := RegistryHost{
+			Client:       opts.client,
+			Authorizer:   opts.authorizer,
+			Host:         host,
+			Scheme:       "https",
+			Path:         "/v2",
+			Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
+		}
+
+		if config.Client == nil {
+			config.Client = http.DefaultClient
+		}
+
+		if opts.plainHTTP != nil {
+			match, err := opts.plainHTTP(host)
+			if err != nil {
+				return nil, err
+			}
+			if match {
+				config.Scheme = "http"
+			}
+		}
+
+		if opts.host != nil {
+			var err error
+			config.Host, err = opts.host(config.Host)
+			if err != nil {
+				return nil, err
+			}
+		} else if host == "docker.io" {
+			config.Host = "registry-1.docker.io"
+		}
+
+		return []RegistryHost{config}, nil
+	}
+}
+
+// MatchAllHosts is a host match function which is always true.
+func MatchAllHosts(string) (bool, error) {
+	return true, nil
+}
+
+// MatchLocalhost is a host match function which returns true for
+// localhost.
+func MatchLocalhost(host string) (bool, error) {
+	for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} {
+		if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') {
+			return true, nil
+		}
+	}
+	return host == "::1", nil
+
+}

Daži faili netika attēloti, jo izmaiņu fails ir pārāk liels